// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by aliasgen. DO NOT EDIT.

// Package automl aliases all exported identifiers in package
// "cloud.google.com/go/automl/apiv1/automlpb".
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb.
// Please read https://github.com/googleapis/google-cloud-go/blob/main/migration.md
// for more details.
package automl

import (
	src "cloud.google.com/go/automl/apiv1/automlpb"
	grpc "google.golang.org/grpc"
)

// Deprecated: Please use consts in: cloud.google.com/go/automl/apiv1/automlpb
const (
	ClassificationType_CLASSIFICATION_TYPE_UNSPECIFIED = src.ClassificationType_CLASSIFICATION_TYPE_UNSPECIFIED
	ClassificationType_MULTICLASS = src.ClassificationType_MULTICLASS
	ClassificationType_MULTILABEL = src.ClassificationType_MULTILABEL
	DocumentDimensions_CENTIMETER = src.DocumentDimensions_CENTIMETER
	DocumentDimensions_DOCUMENT_DIMENSION_UNIT_UNSPECIFIED = src.DocumentDimensions_DOCUMENT_DIMENSION_UNIT_UNSPECIFIED
	DocumentDimensions_INCH = src.DocumentDimensions_INCH
	DocumentDimensions_POINT = src.DocumentDimensions_POINT
	Document_Layout_FORM_FIELD = src.Document_Layout_FORM_FIELD
	Document_Layout_FORM_FIELD_CONTENTS = src.Document_Layout_FORM_FIELD_CONTENTS
	Document_Layout_FORM_FIELD_NAME = src.Document_Layout_FORM_FIELD_NAME
	Document_Layout_PARAGRAPH = src.Document_Layout_PARAGRAPH
	Document_Layout_TABLE = src.Document_Layout_TABLE
	Document_Layout_TABLE_CELL = src.Document_Layout_TABLE_CELL
	Document_Layout_TABLE_HEADER = src.Document_Layout_TABLE_HEADER
	Document_Layout_TABLE_ROW = src.Document_Layout_TABLE_ROW
	Document_Layout_TEXT_SEGMENT_TYPE_UNSPECIFIED = src.Document_Layout_TEXT_SEGMENT_TYPE_UNSPECIFIED
	Document_Layout_TOKEN = src.Document_Layout_TOKEN
	Model_DEPLOYED = src.Model_DEPLOYED
	Model_DEPLOYMENT_STATE_UNSPECIFIED = src.Model_DEPLOYMENT_STATE_UNSPECIFIED
	Model_UNDEPLOYED = src.Model_UNDEPLOYED
)
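
// Because the constants above are pure aliases, values read through this
// package compare equal to their automlpb counterparts. A minimal sketch
// (the metadata literal is illustrative):
//
//	meta := &src.TextClassificationDatasetMetadata{
//		ClassificationType: src.ClassificationType_MULTICLASS,
//	}
//	if meta.GetClassificationType() == ClassificationType_MULTICLASS {
//		// at most one label is allowed per example
//	}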

// Deprecated: Please use vars in: cloud.google.com/go/automl/apiv1/automlpb
var (
	ClassificationType_name = src.ClassificationType_name
	ClassificationType_value = src.ClassificationType_value
	DocumentDimensions_DocumentDimensionUnit_name = src.DocumentDimensions_DocumentDimensionUnit_name
	DocumentDimensions_DocumentDimensionUnit_value = src.DocumentDimensions_DocumentDimensionUnit_value
	Document_Layout_TextSegmentType_name = src.Document_Layout_TextSegmentType_name
	Document_Layout_TextSegmentType_value = src.Document_Layout_TextSegmentType_value
	File_google_cloud_automl_v1_annotation_payload_proto = src.File_google_cloud_automl_v1_annotation_payload_proto
	File_google_cloud_automl_v1_annotation_spec_proto = src.File_google_cloud_automl_v1_annotation_spec_proto
	File_google_cloud_automl_v1_classification_proto = src.File_google_cloud_automl_v1_classification_proto
	File_google_cloud_automl_v1_data_items_proto = src.File_google_cloud_automl_v1_data_items_proto
	File_google_cloud_automl_v1_dataset_proto = src.File_google_cloud_automl_v1_dataset_proto
	File_google_cloud_automl_v1_detection_proto = src.File_google_cloud_automl_v1_detection_proto
	File_google_cloud_automl_v1_geometry_proto = src.File_google_cloud_automl_v1_geometry_proto
	File_google_cloud_automl_v1_image_proto = src.File_google_cloud_automl_v1_image_proto
	File_google_cloud_automl_v1_io_proto = src.File_google_cloud_automl_v1_io_proto
	File_google_cloud_automl_v1_model_evaluation_proto = src.File_google_cloud_automl_v1_model_evaluation_proto
	File_google_cloud_automl_v1_model_proto = src.File_google_cloud_automl_v1_model_proto
	File_google_cloud_automl_v1_operations_proto = src.File_google_cloud_automl_v1_operations_proto
	File_google_cloud_automl_v1_prediction_service_proto = src.File_google_cloud_automl_v1_prediction_service_proto
	File_google_cloud_automl_v1_service_proto = src.File_google_cloud_automl_v1_service_proto
	File_google_cloud_automl_v1_text_extraction_proto = src.File_google_cloud_automl_v1_text_extraction_proto
	File_google_cloud_automl_v1_text_proto = src.File_google_cloud_automl_v1_text_proto
	File_google_cloud_automl_v1_text_segment_proto = src.File_google_cloud_automl_v1_text_segment_proto
	File_google_cloud_automl_v1_text_sentiment_proto = src.File_google_cloud_automl_v1_text_sentiment_proto
	File_google_cloud_automl_v1_translation_proto = src.File_google_cloud_automl_v1_translation_proto
	Model_DeploymentState_name = src.Model_DeploymentState_name
	Model_DeploymentState_value = src.Model_DeploymentState_value
)

// Contains annotation information that is relevant to AutoML.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type AnnotationPayload = src.AnnotationPayload
type AnnotationPayload_Classification = src.AnnotationPayload_Classification
type AnnotationPayload_ImageObjectDetection = src.AnnotationPayload_ImageObjectDetection
type AnnotationPayload_TextExtraction = src.AnnotationPayload_TextExtraction
type AnnotationPayload_TextSentiment = src.AnnotationPayload_TextSentiment
type AnnotationPayload_Translation = src.AnnotationPayload_Translation

// A definition of an annotation spec.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type AnnotationSpec = src.AnnotationSpec

// AutoMlClient is the client API for AutoMl service. For semantics around ctx
// use and closing/ending streaming RPCs, please refer to
// https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type AutoMlClient = src.AutoMlClient

// AutoMlServer is the server API for AutoMl service.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type AutoMlServer = src.AutoMlServer
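
// A minimal sketch of obtaining an AutoMlClient over an existing gRPC
// connection; src.NewAutoMlClient is the constructor generated alongside
// these interfaces in the automlpb package, and conn is assumed to have been
// dialed elsewhere:
//
//	func newClient(conn *grpc.ClientConn) AutoMlClient {
//		return src.NewAutoMlClient(conn)
//	}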

// Input configuration for BatchPredict Action. The format of input depends
// on the ML problem of the model used for prediction. As input source the
// [gcs_source][google.cloud.automl.v1.InputConfig.gcs_source] is expected,
// unless specified otherwise. The formats are represented in EBNF with commas
// being literal and with non-terminal symbols defined near the end of this
// comment. The formats are: <h4>AutoML Vision</h4> <div
// class="ds-selector-tabs"><section><h5>Classification</h5> One or more CSV
// files where each line is a single column: GCS_FILE_PATH The Google Cloud
// Storage location of an image of up to 30MB in size. Supported extensions:
// .JPEG, .GIF, .PNG. This path is treated as the ID in the batch predict
// output. Sample rows: gs://folder/image1.jpeg gs://folder/image2.gif
// gs://folder/image3.png </section><section><h5>Object Detection</h5> One or
// more CSV files where each line is a single column: GCS_FILE_PATH The Google
// Cloud Storage location of an image of up to 30MB in size. Supported
// extensions: .JPEG, .GIF, .PNG. This path is treated as the ID in the batch
// predict output. Sample rows: gs://folder/image1.jpeg gs://folder/image2.gif
// gs://folder/image3.png </section> </div> <h4>AutoML Video Intelligence</h4>
// <div class="ds-selector-tabs"><section><h5>Classification</h5> One or more
// CSV files where each line is a single column:
// GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END `GCS_FILE_PATH` is the
// Google Cloud Storage location of a video up to 50GB in size and up to 3h in
// duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI.
// `TIME_SEGMENT_START` and `TIME_SEGMENT_END` must be within the length of
// the video, and the end time must be after the start time. Sample rows:
// gs://folder/video1.mp4,10,40 gs://folder/video1.mp4,20,60
// gs://folder/vid2.mov,0,inf </section><section><h5>Object Tracking</h5> One
// or more CSV files where each line is a single column:
// GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END `GCS_FILE_PATH` is the
// Google Cloud Storage location of a video up to 50GB in size and up to 3h in
// duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI.
// `TIME_SEGMENT_START` and `TIME_SEGMENT_END` must be within the length of
// the video, and the end time must be after the start time. Sample rows:
// gs://folder/video1.mp4,10,40 gs://folder/video1.mp4,20,60
// gs://folder/vid2.mov,0,inf </section> </div> <h4>AutoML Natural
// Language</h4> <div class="ds-selector-tabs"><section><h5>Classification</h5>
// One or more CSV files where each line is a single column: GCS_FILE_PATH
// `GCS_FILE_PATH` is the Google Cloud Storage location of a text file.
// Supported file extensions: .TXT, .PDF, .TIF, .TIFF Text files can be no
// larger than 10MB in size. Sample rows: gs://folder/text1.txt
// gs://folder/text2.pdf gs://folder/text3.tif </section><section><h5>Sentiment
// Analysis</h5> One or more CSV files where each line is a single column:
// GCS_FILE_PATH `GCS_FILE_PATH` is the Google Cloud Storage location of a
// text file. Supported file extensions: .TXT, .PDF, .TIF, .TIFF Text files
// can be no larger than 128kB in size. Sample rows: gs://folder/text1.txt
// gs://folder/text2.pdf gs://folder/text3.tif </section><section><h5>Entity
// Extraction</h5> One or more JSONL (JSON Lines) files that either provide
// inline text or documents. You can only use one format, either inline text
// or documents, for a single call to [AutoMl.BatchPredict]. Each JSONL file
// contains, per line, a proto that wraps a temporary user-assigned
// TextSnippet ID (string up to 2000 characters long) called "id", a
// TextSnippet proto (in JSON representation) and zero or more TextFeature
// protos.
// Any given text snippet content must have 30,000 characters or less, and
// also be UTF-8 NFC encoded (ASCII already is). The IDs provided should be
// unique. Each document JSONL file contains, per line, a proto that wraps a
// Document proto with `input_config` set. Each document cannot exceed 2MB in
// size. Supported document extensions: .PDF, .TIF, .TIFF Each JSONL file must
// not exceed 100MB in size, and no more than 20 JSONL files may be passed.
// Sample inline JSONL file (Shown with artificial line breaks. Actual line
// breaks are denoted by "\n".): { "id": "my_first_id", "text_snippet": {
// "content": "dog car cat"}, "text_features": [ { "text_segment":
// {"start_offset": 4, "end_offset": 6}, "structural_type": PARAGRAPH,
// "bounding_poly": { "normalized_vertices": [ {"x": 0.1, "y": 0.1}, {"x":
// 0.1, "y": 0.3}, {"x": 0.3, "y": 0.3}, {"x": 0.3, "y": 0.1}, ] }, } ], }\n {
// "id": "2", "text_snippet": { "content": "Extended sample content",
// "mime_type": "text/plain" } } Sample document JSONL file (Shown with
// artificial line breaks. Actual line breaks are denoted by "\n".): {
// "document": { "input_config": { "gcs_source": { "input_uris": [
// "gs://folder/document1.pdf" ] } } } }\n { "document": { "input_config": {
// "gcs_source": { "input_uris": [ "gs://folder/document2.tif" ] } } } }
// </section> </div> <h4>AutoML Tables</h4><div
// class="ui-datasection-main"><section class="selected"> See [Preparing your
// training data](https://cloud.google.com/automl-tables/docs/predict-batch)
// for more information. You can use either
// [gcs_source][google.cloud.automl.v1.BatchPredictInputConfig.gcs_source] or
// [bigquery_source][BatchPredictInputConfig.bigquery_source]. **For
// gcs_source:** CSV file(s), each by itself 10GB or smaller and total size
// must be 100GB or smaller, where the first file must have a header
// containing column names. If the first row of a subsequent file is the same
// as the header, then it is also treated as a header. All other rows contain
// values for the corresponding columns. The column names must contain the
// model's
// [input_feature_column_specs'][google.cloud.automl.v1.TablesModelMetadata.input_feature_column_specs]
// [display_name-s][google.cloud.automl.v1.ColumnSpec.display_name] (order
// doesn't matter). The columns corresponding to the model's input feature
// column specs must contain values compatible with the column spec's data
// types. Prediction on all the rows, i.e. the CSV lines, will be attempted.
// Sample rows from a CSV file: <pre> "First Name","Last
// Name","Dob","Addresses"
// "John","Doe","1968-01-22","[{"status":"current","address":"123_First_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456_Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]"
// "Jane","Doe","1980-10-16","[{"status":"current","address":"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","numberOfYears":"2"},{"status":"previous","address":"321_Main_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]"
// </pre> **For bigquery_source:** The URI of a BigQuery table. The user data
// size of the BigQuery table must be 100GB or smaller.
// The column names must contain the model's
// [input_feature_column_specs'][google.cloud.automl.v1.TablesModelMetadata.input_feature_column_specs]
// [display_name-s][google.cloud.automl.v1.ColumnSpec.display_name] (order
// doesn't matter). The columns corresponding to the model's input feature
// column specs must contain values compatible with the column spec's data
// types. Prediction on all the rows of the table will be attempted.
// </section> </div> **Input field definitions:** `GCS_FILE_PATH` : The path
// to a file on Google Cloud Storage. For example, "gs://folder/video.avi".
// `TIME_SEGMENT_START` : (`TIME_OFFSET`) Expresses a beginning, inclusive, of
// a time segment within an example that has a time dimension (e.g. video).
// `TIME_SEGMENT_END` : (`TIME_OFFSET`) Expresses an end, exclusive, of a time
// segment within an example that has a time dimension (e.g. video).
// `TIME_OFFSET` : A number of seconds as measured from the start of an
// example (e.g. video). Fractions are allowed, up to a microsecond precision.
// "inf" is allowed, and it means the end of the example. **Errors:** If any
// of the provided CSV files can't be parsed or if more than a certain percent
// of CSV rows cannot be processed then the operation fails and prediction
// does not happen. Regardless of overall success or failure, the per-row
// failures, up to a certain count cap, will be listed in
// Operation.metadata.partial_failures.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type BatchPredictInputConfig = src.BatchPredictInputConfig
type BatchPredictInputConfig_GcsSource = src.BatchPredictInputConfig_GcsSource
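
// A minimal sketch of the input half of a batch prediction request, reading
// CSV input from Google Cloud Storage (the bucket path is illustrative):
//
//	inputConfig := &src.BatchPredictInputConfig{
//		Source: &src.BatchPredictInputConfig_GcsSource{
//			GcsSource: &src.GcsSource{
//				InputUris: []string{"gs://folder/batch.csv"},
//			},
//		},
//	}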

// Details of BatchPredict operation.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type BatchPredictOperationMetadata = src.BatchPredictOperationMetadata

// Further describes this batch predict's output. Supplements
// [BatchPredictOutputConfig][google.cloud.automl.v1.BatchPredictOutputConfig].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type BatchPredictOperationMetadata_BatchPredictOutputInfo = src.BatchPredictOperationMetadata_BatchPredictOutputInfo
type BatchPredictOperationMetadata_BatchPredictOutputInfo_GcsOutputDirectory = src.BatchPredictOperationMetadata_BatchPredictOutputInfo_GcsOutputDirectory

// Output configuration for BatchPredict Action. As destination the
// [gcs_destination][google.cloud.automl.v1.BatchPredictOutputConfig.gcs_destination]
// must be set unless specified otherwise for a domain. If gcs_destination is
// set then in the given directory a new directory is created. Its name will
// be "prediction-<model-display-name>-<timestamp-of-prediction-call>", where
// the timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. The contents
// of it depend on the ML problem the predictions are made for. - For Image
// Classification: In the created directory files
// `image_classification_1.jsonl`,
// `image_classification_2.jsonl`,...,`image_classification_N.jsonl` will be
// created, where N may be 1, and depends on the total number of the
// successfully predicted images and annotations. A single image will be
// listed only once with all its annotations, and its annotations will never
// be split across files. Each .JSONL file will contain, per line, a JSON
// representation of a proto that wraps the image's "ID" : "<id_value>"
// followed by a list of zero or more AnnotationPayload protos (called
// annotations), which have classification detail populated. If prediction for
// any image failed (partially or completely), then additional
// `errors_1.jsonl`, `errors_2.jsonl`,..., `errors_N.jsonl` files will be
// created (N depends on the total number of failed predictions). These files
// will have a JSON representation of a proto that wraps the same "ID" :
// "<id_value>" but here followed by exactly one
// [`google.rpc.Status`](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
// containing only `code` and `message` fields. - For Image Object Detection:
// In the created directory files `image_object_detection_1.jsonl`,
// `image_object_detection_2.jsonl`,...,`image_object_detection_N.jsonl` will
// be created, where N may be 1, and depends on the total number of the
// successfully predicted images and annotations. Each .JSONL file will
// contain, per line, a JSON representation of a proto that wraps the image's
// "ID" : "<id_value>" followed by a list of zero or more AnnotationPayload
// protos (called annotations), which have image_object_detection detail
// populated. A single image will be listed only once with all its
// annotations, and its annotations will never be split across files. If
// prediction for any image failed (partially or completely), then additional
// `errors_1.jsonl`, `errors_2.jsonl`,..., `errors_N.jsonl` files will be
// created (N depends on the total number of failed predictions). These files
// will have a JSON representation of a proto that wraps the same "ID" :
// "<id_value>" but here followed by exactly one
// [`google.rpc.Status`](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
// containing only `code` and `message` fields. - For Video Classification: In
// the created directory a video_classification.csv file, and a .JSON file for
// each video classification requested in the input (i.e. each line in the
// given CSV(s)), will be created. The format of video_classification.csv is:
// GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS
// where: GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to 1
// the prediction input lines (i.e. video_classification.csv has precisely the
// same number of lines as the prediction input had.) JSON_FILE_NAME = Name of
// .JSON file in the output directory, which contains prediction responses for
// the video time segment. STATUS = "OK" if prediction completed successfully,
// or an error code with message otherwise. If STATUS is not "OK" then the
// .JSON file for that line may not exist or be empty. Each .JSON file,
// assuming STATUS is "OK", will contain a list of AnnotationPayload protos in
// JSON format, which are the predictions for the video time segment the file
// is assigned to in the video_classification.csv. All AnnotationPayload
// protos will have video_classification field set, and will be sorted by
// video_classification.type field (note that the returned types are governed
// by the `classification_types` parameter in
// [PredictService.BatchPredictRequest.params][]).
// - For Video Object Tracking: In the created directory a
// video_object_tracking.csv file will be created, and multiple files
// video_object_tracking_1.json, video_object_tracking_2.json,...,
// video_object_tracking_N.json, where N is the number of requests in the
// input (i.e. the number of lines in the given CSV(s)). The format of
// video_object_tracking.csv is:
// GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS
// where: GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to 1
// the prediction input lines (i.e. video_object_tracking.csv has precisely
// the same number of lines as the prediction input had.) JSON_FILE_NAME =
// Name of .JSON file in the output directory, which contains prediction
// responses for the video time segment. STATUS = "OK" if prediction completed
// successfully, or an error code with message otherwise. If STATUS is not
// "OK" then the .JSON file for that line may not exist or be empty. Each
// .JSON file, assuming STATUS is "OK", will contain a list of
// AnnotationPayload protos in JSON format, which are the predictions for each
// frame of the video time segment the file is assigned to in
// video_object_tracking.csv. All AnnotationPayload protos will have
// video_object_tracking field set. - For Text Classification: In the created
// directory files `text_classification_1.jsonl`,
// `text_classification_2.jsonl`,...,`text_classification_N.jsonl` will be
// created, where N may be 1, and depends on the total number of inputs and
// annotations found. Each .JSONL file will contain, per line, a JSON
// representation of a proto that wraps the input text file (or document) in
// the text snippet (or document) proto and a list of zero or more
// AnnotationPayload protos (called annotations), which have classification
// detail populated. A single text file (or document) will be listed only once
// with all its annotations, and its annotations will never be split across
// files. If prediction for any input file (or document) failed (partially or
// completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,...,
// `errors_N.jsonl` files will be created (N depends on the total number of
// failed predictions). These files will have a JSON representation of a proto
// that wraps the input file followed by exactly one
// [`google.rpc.Status`](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
// containing only `code` and `message`. - For Text Sentiment: In the created
// directory files `text_sentiment_1.jsonl`,
// `text_sentiment_2.jsonl`,...,`text_sentiment_N.jsonl` will be created,
// where N may be 1, and depends on the total number of inputs and annotations
// found. Each .JSONL file will contain, per line, a JSON representation of a
// proto that wraps the input text file (or document) in the text snippet (or
// document) proto and a list of zero or more AnnotationPayload protos (called
// annotations), which have text_sentiment detail populated. A single text
// file (or document) will be listed only once with all its annotations, and
// its annotations will never be split across files. If prediction for any
// input file (or document) failed (partially or completely), then additional
// `errors_1.jsonl`, `errors_2.jsonl`,..., `errors_N.jsonl` files will be
// created (N depends on the total number of failed predictions).
// These files will have a JSON representation of a proto that wraps the input
// file followed by exactly one
// [`google.rpc.Status`](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
// containing only `code` and `message`. - For Text Extraction: In the created
// directory files `text_extraction_1.jsonl`,
// `text_extraction_2.jsonl`,...,`text_extraction_N.jsonl` will be created,
// where N may be 1, and depends on the total number of inputs and annotations
// found. The contents of these .JSONL file(s) depend on whether the input
// used inline text, or documents. If the input was inline, then each .JSONL
// file will contain, per line, a JSON representation of a proto that wraps
// the text snippet's "id" given in the request (if specified), followed by
// the input text snippet, and a list of zero or more AnnotationPayload protos
// (called annotations), which have text_extraction detail populated. A single
// text snippet will be listed only once with all its annotations, and its
// annotations will never be split across files. If the input used documents,
// then each .JSONL file will contain, per line, a JSON representation of a
// proto that wraps the document proto given in the request, followed by its
// OCR-ed representation in the form of a text snippet, finally followed by a
// list of zero or more AnnotationPayload protos (called annotations), which
// have text_extraction detail populated and refer, via their indices, to the
// OCR-ed text snippet. A single document (and its text snippet) will be
// listed only once with all its annotations, and its annotations will never
// be split across files. If prediction for any text snippet failed (partially
// or completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,...,
// `errors_N.jsonl` files will be created (N depends on the total number of
// failed predictions). These files will have a JSON representation of a proto
// that wraps either the "id" : "<id_value>" (in case of inline) or the
// document proto (in case of document) but here followed by exactly one
// [`google.rpc.Status`](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
// containing only `code` and `message`. - For Tables: Output depends on
// whether
// [gcs_destination][google.cloud.automl.v1p1beta.BatchPredictOutputConfig.gcs_destination]
// or
// [bigquery_destination][google.cloud.automl.v1p1beta.BatchPredictOutputConfig.bigquery_destination]
// is set (either is allowed). Google Cloud Storage case: In the created
// directory files `tables_1.csv`, `tables_2.csv`,..., `tables_N.csv` will be
// created, where N may be 1, and depends on the total number of the
// successfully predicted rows. For all CLASSIFICATION
// [prediction_type-s][google.cloud.automl.v1p1beta.TablesModelMetadata.prediction_type]:
// Each .csv file will contain a header, listing all columns'
// [display_name-s][google.cloud.automl.v1p1beta.ColumnSpec.display_name]
// given on input followed by M target column names in the format of
// "<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec]
// [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>_<target
// value>_score" where M is the number of distinct target values, i.e. the
// number of distinct values in the target column of the table used to train
// the model.
// Subsequent lines will contain the respective values of successfully
// predicted rows, with the last, i.e. the target, columns having the
// corresponding prediction
// [scores][google.cloud.automl.v1p1beta.TablesAnnotation.score]. For
// REGRESSION and FORECASTING
// [prediction_type-s][google.cloud.automl.v1p1beta.TablesModelMetadata.prediction_type]:
// Each .csv file will contain a header, listing all columns'
// [display_name-s][google.cloud.automl.v1p1beta.display_name] given on input
// followed by the predicted target column with name in the format of
// "predicted_<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec]
// [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>"
// Subsequent lines will contain the respective values of successfully
// predicted rows, with the last, i.e. the target, column having the predicted
// target value. If prediction for any rows failed, then additional
// `errors_1.csv`, `errors_2.csv`,..., `errors_N.csv` files will be created (N
// depends on the total number of failed rows). These files will have an
// analogous format to `tables_*.csv`, but always with a single target column
// having
// [`google.rpc.Status`](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
// represented as a JSON string, and containing only `code` and `message`.
// BigQuery case:
// [bigquery_destination][google.cloud.automl.v1p1beta.OutputConfig.bigquery_destination]
// pointing to a BigQuery project must be set. In the given project a new
// dataset will be created with name
// `prediction_<model-display-name>_<timestamp-of-prediction-call>` where
// <model-display-name> will be made BigQuery-dataset-name compatible (e.g.
// most special characters will become underscores), and the timestamp will be
// in YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the dataset two
// tables will be created, `predictions`, and `errors`. The `predictions`
// table's column names will be the input columns'
// [display_name-s][google.cloud.automl.v1p1beta.ColumnSpec.display_name]
// followed by the target column with name in the format of
// "predicted_<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec]
// [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>" The
// input feature columns will contain the respective values of successfully
// predicted rows, with the target column having an ARRAY of
// [AnnotationPayloads][google.cloud.automl.v1p1beta.AnnotationPayload],
// represented as STRUCT-s, containing
// [TablesAnnotation][google.cloud.automl.v1p1beta.TablesAnnotation]. The
// `errors` table contains rows for which the prediction has failed; it has
// analogous input columns while the target column name is in the format of
// "errors_<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec]
// [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>", and
// as a value has
// [`google.rpc.Status`](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
// represented as a STRUCT, and containing only `code` and `message`.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type BatchPredictOutputConfig = src.BatchPredictOutputConfig
type BatchPredictOutputConfig_GcsDestination = src.BatchPredictOutputConfig_GcsDestination
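
// A minimal sketch of the matching output half, directing results to a
// Google Cloud Storage directory (the bucket path is illustrative):
//
//	outputConfig := &src.BatchPredictOutputConfig{
//		Destination: &src.BatchPredictOutputConfig_GcsDestination{
//			GcsDestination: &src.GcsDestination{
//				OutputUriPrefix: "gs://folder/batch-output/",
//			},
//		},
//	}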

// Request message for
// [PredictionService.BatchPredict][google.cloud.automl.v1.PredictionService.BatchPredict].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type BatchPredictRequest = src.BatchPredictRequest

// Result of the Batch Predict. This message is returned in
// [response][google.longrunning.Operation.response] of the operation returned
// by the
// [PredictionService.BatchPredict][google.cloud.automl.v1.PredictionService.BatchPredict].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type BatchPredictResult = src.BatchPredictResult

// Bounding box matching model metrics for a single intersection-over-union
// threshold and multiple label match confidence thresholds.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type BoundingBoxMetricsEntry = src.BoundingBoxMetricsEntry

// Metrics for a single confidence threshold.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type BoundingBoxMetricsEntry_ConfidenceMetricsEntry = src.BoundingBoxMetricsEntry_ConfidenceMetricsEntry

// A bounding polygon of a detected object on a plane. On output both vertices
// and normalized_vertices are provided. The polygon is formed by connecting
// vertices in the order they are listed.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type BoundingPoly = src.BoundingPoly

// Contains annotation details specific to classification.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type ClassificationAnnotation = src.ClassificationAnnotation

// Model evaluation metrics for classification problems. Note: For Video
// Classification these metrics only describe the quality of the Video
// Classification predictions of "segment_classification" type.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type ClassificationEvaluationMetrics = src.ClassificationEvaluationMetrics

// Metrics for a single confidence threshold.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type ClassificationEvaluationMetrics_ConfidenceMetricsEntry = src.ClassificationEvaluationMetrics_ConfidenceMetricsEntry

// Confusion matrix of the model running the classification.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type ClassificationEvaluationMetrics_ConfusionMatrix = src.ClassificationEvaluationMetrics_ConfusionMatrix

// Output only. A row in the confusion matrix.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type ClassificationEvaluationMetrics_ConfusionMatrix_Row = src.ClassificationEvaluationMetrics_ConfusionMatrix_Row

// Type of the classification problem.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type ClassificationType = src.ClassificationType
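
// A minimal sketch of walking a confusion matrix out of evaluation metrics;
// the getters follow the usual generated-proto accessor naming, and the
// metrics value is assumed to come from a GetModelEvaluation response:
//
//	func printConfusionMatrix(m *src.ClassificationEvaluationMetrics) {
//		cm := m.GetConfusionMatrix()
//		for i, row := range cm.GetRow() {
//			// Row i counts examples whose ground truth is the i-th
//			// annotation spec, bucketed by predicted label.
//			fmt.Println(cm.GetDisplayName()[i], row.GetExampleCount())
//		}
//	}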

// Details of CreateDataset operation.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type CreateDatasetOperationMetadata = src.CreateDatasetOperationMetadata

// Request message for
// [AutoMl.CreateDataset][google.cloud.automl.v1.AutoMl.CreateDataset].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type CreateDatasetRequest = src.CreateDatasetRequest

// Details of CreateModel operation.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type CreateModelOperationMetadata = src.CreateModelOperationMetadata

// Request message for
// [AutoMl.CreateModel][google.cloud.automl.v1.AutoMl.CreateModel].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type CreateModelRequest = src.CreateModelRequest

// A workspace for solving a single, particular machine learning (ML) problem.
// A workspace contains examples that may be annotated.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type Dataset = src.Dataset
type Dataset_ImageClassificationDatasetMetadata = src.Dataset_ImageClassificationDatasetMetadata
type Dataset_ImageObjectDetectionDatasetMetadata = src.Dataset_ImageObjectDetectionDatasetMetadata
type Dataset_TextClassificationDatasetMetadata = src.Dataset_TextClassificationDatasetMetadata
type Dataset_TextExtractionDatasetMetadata = src.Dataset_TextExtractionDatasetMetadata
type Dataset_TextSentimentDatasetMetadata = src.Dataset_TextSentimentDatasetMetadata
type Dataset_TranslationDatasetMetadata = src.Dataset_TranslationDatasetMetadata
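
// A minimal sketch of a CreateDatasetRequest for a text classification
// dataset; the parent path and display name are illustrative:
//
//	req := &src.CreateDatasetRequest{
//		Parent: "projects/my-project/locations/us-central1",
//		Dataset: &src.Dataset{
//			DisplayName: "my_dataset",
//			DatasetMetadata: &src.Dataset_TextClassificationDatasetMetadata{
//				TextClassificationDatasetMetadata: &src.TextClassificationDatasetMetadata{
//					ClassificationType: src.ClassificationType_MULTICLASS,
//				},
//			},
//		},
//	}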

// Request message for
// [AutoMl.DeleteDataset][google.cloud.automl.v1.AutoMl.DeleteDataset].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type DeleteDatasetRequest = src.DeleteDatasetRequest

// Request message for
// [AutoMl.DeleteModel][google.cloud.automl.v1.AutoMl.DeleteModel].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type DeleteModelRequest = src.DeleteModelRequest

// Details of operations that perform deletes of any entities.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type DeleteOperationMetadata = src.DeleteOperationMetadata

// Details of DeployModel operation.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type DeployModelOperationMetadata = src.DeployModelOperationMetadata

// Request message for
// [AutoMl.DeployModel][google.cloud.automl.v1.AutoMl.DeployModel].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type DeployModelRequest = src.DeployModelRequest
type DeployModelRequest_ImageClassificationModelDeploymentMetadata = src.DeployModelRequest_ImageClassificationModelDeploymentMetadata
type DeployModelRequest_ImageObjectDetectionModelDeploymentMetadata = src.DeployModelRequest_ImageObjectDetectionModelDeploymentMetadata

// A structured text document, e.g. a PDF.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type Document = src.Document

// Message that describes the dimensions of a document.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type DocumentDimensions = src.DocumentDimensions

// Unit of the document dimension.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type DocumentDimensions_DocumentDimensionUnit = src.DocumentDimensions_DocumentDimensionUnit

// Input configuration of a [Document][google.cloud.automl.v1.Document].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type DocumentInputConfig = src.DocumentInputConfig

// Describes the layout information of a
// [text_segment][google.cloud.automl.v1.Document.Layout.text_segment] in the
// document.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type Document_Layout = src.Document_Layout

// The type of TextSegment in the context of the original document.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type Document_Layout_TextSegmentType = src.Document_Layout_TextSegmentType

// Example data used for training or prediction.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type ExamplePayload = src.ExamplePayload
type ExamplePayload_Document = src.ExamplePayload_Document
type ExamplePayload_Image = src.ExamplePayload_Image
type ExamplePayload_TextSnippet = src.ExamplePayload_TextSnippet

// Details of ExportData operation.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type ExportDataOperationMetadata = src.ExportDataOperationMetadata

// Further describes this export data's output. Supplements
// [OutputConfig][google.cloud.automl.v1.OutputConfig].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type ExportDataOperationMetadata_ExportDataOutputInfo = src.ExportDataOperationMetadata_ExportDataOutputInfo
type ExportDataOperationMetadata_ExportDataOutputInfo_GcsOutputDirectory = src.ExportDataOperationMetadata_ExportDataOutputInfo_GcsOutputDirectory

// Request message for
// [AutoMl.ExportData][google.cloud.automl.v1.AutoMl.ExportData].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type ExportDataRequest = src.ExportDataRequest

// Details of ExportModel operation.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type ExportModelOperationMetadata = src.ExportModelOperationMetadata

// Further describes the output of model export. Supplements
// [ModelExportOutputConfig][google.cloud.automl.v1.ModelExportOutputConfig].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type ExportModelOperationMetadata_ExportModelOutputInfo = src.ExportModelOperationMetadata_ExportModelOutputInfo

// Request message for
// [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]. Models
// need to be enabled for exporting, otherwise an error code will be returned.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type ExportModelRequest = src.ExportModelRequest
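
// A minimal sketch of exporting a dataset's examples to Google Cloud
// Storage; OutputConfig is aliased elsewhere in this file, and the resource
// name and bucket path are illustrative:
//
//	req := &src.ExportDataRequest{
//		Name: "projects/my-project/locations/us-central1/datasets/TCN123",
//		OutputConfig: &src.OutputConfig{
//			Destination: &src.OutputConfig_GcsDestination{
//				GcsDestination: &src.GcsDestination{
//					OutputUriPrefix: "gs://folder/export/",
//				},
//			},
//		},
//	}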

// The Google Cloud Storage location where the output is to be written to.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type GcsDestination = src.GcsDestination

// The Google Cloud Storage location for the input content.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type GcsSource = src.GcsSource

// Request message for
// [AutoMl.GetAnnotationSpec][google.cloud.automl.v1.AutoMl.GetAnnotationSpec].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type GetAnnotationSpecRequest = src.GetAnnotationSpecRequest

// Request message for
// [AutoMl.GetDataset][google.cloud.automl.v1.AutoMl.GetDataset].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type GetDatasetRequest = src.GetDatasetRequest

// Request message for
// [AutoMl.GetModelEvaluation][google.cloud.automl.v1.AutoMl.GetModelEvaluation].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type GetModelEvaluationRequest = src.GetModelEvaluationRequest

// Request message for
// [AutoMl.GetModel][google.cloud.automl.v1.AutoMl.GetModel].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type GetModelRequest = src.GetModelRequest

// A representation of an image. Only images up to 30MB in size are supported.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type Image = src.Image

// Dataset metadata that is specific to image classification.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type ImageClassificationDatasetMetadata = src.ImageClassificationDatasetMetadata

// Model deployment metadata specific to Image Classification.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type ImageClassificationModelDeploymentMetadata = src.ImageClassificationModelDeploymentMetadata

// Model metadata for image classification.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type ImageClassificationModelMetadata = src.ImageClassificationModelMetadata

// Annotation details for image object detection.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type ImageObjectDetectionAnnotation = src.ImageObjectDetectionAnnotation

// Dataset metadata specific to image object detection.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type ImageObjectDetectionDatasetMetadata = src.ImageObjectDetectionDatasetMetadata

// Model evaluation metrics for image object detection problems. Evaluates
// prediction quality of labeled bounding boxes.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type ImageObjectDetectionEvaluationMetrics = src.ImageObjectDetectionEvaluationMetrics

// Model deployment metadata specific to Image Object Detection.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type ImageObjectDetectionModelDeploymentMetadata = src.ImageObjectDetectionModelDeploymentMetadata
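
// A minimal sketch of deploying an image classification model onto a fixed
// number of nodes; the model resource name and node count are illustrative:
//
//	req := &src.DeployModelRequest{
//		Name: "projects/my-project/locations/us-central1/models/ICN123",
//		ModelDeploymentMetadata: &src.DeployModelRequest_ImageClassificationModelDeploymentMetadata{
//			ImageClassificationModelDeploymentMetadata: &src.ImageClassificationModelDeploymentMetadata{
//				NodeCount: 2,
//			},
//		},
//	}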

// Model metadata specific to image object detection.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type ImageObjectDetectionModelMetadata = src.ImageObjectDetectionModelMetadata
type Image_ImageBytes = src.Image_ImageBytes

// Details of ImportData operation.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type ImportDataOperationMetadata = src.ImportDataOperationMetadata

// Request message for
// [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type ImportDataRequest = src.ImportDataRequest
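
// A minimal sketch of importing CSV-described training data into an existing
// dataset, per the format description that follows; InputConfig is aliased
// elsewhere in this file, and the resource name and bucket path are
// illustrative:
//
//	req := &src.ImportDataRequest{
//		Name: "projects/my-project/locations/us-central1/datasets/ICN123",
//		InputConfig: &src.InputConfig{
//			Source: &src.InputConfig_GcsSource{
//				GcsSource: &src.GcsSource{
//					InputUris: []string{"gs://folder/train.csv"},
//				},
//			},
//		},
//	}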

// Input configuration for
// [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData] action. The
// format of the input depends on the dataset_metadata of the Dataset into
// which the import is happening. As input source the
// [gcs_source][google.cloud.automl.v1.InputConfig.gcs_source] is expected,
// unless specified otherwise. Additionally, any input .CSV file by itself
// must be 100MB or smaller, unless specified otherwise. If an "example" file
// (that is, image, video etc.) with identical content (even if it had a
// different `GCS_FILE_PATH`) is mentioned multiple times, then its label,
// bounding boxes etc. are appended. The same file should always be provided
// with the same `ML_USE` and `GCS_FILE_PATH`; if it is not, then these values
// are nondeterministically selected from the given ones. The formats are
// represented in EBNF with commas being literal and with non-terminal symbols
// defined near the end of this comment. The formats are: <h4>AutoML
// Vision</h4> <div class="ds-selector-tabs"><section><h5>Classification</h5>
// See [Preparing your training
// data](https://cloud.google.com/vision/automl/docs/prepare) for more
// information. CSV file(s) with each line in format:
// ML_USE,GCS_FILE_PATH,LABEL,LABEL,... * `ML_USE` - Identifies the data set
// that the current row (file) applies to. This value can be one of the
// following: * `TRAIN` - Rows in this file are used to train the model. *
// `TEST` - Rows in this file are used to test the model during training. *
// `UNASSIGNED` - Rows in this file are not categorized. They are
// automatically divided into train and test data: 80% for training and 20%
// for testing. - `GCS_FILE_PATH` - The Google Cloud Storage location of an
// image of up to 30MB in size. Supported extensions: .JPEG, .GIF, .PNG,
// .WEBP, .BMP, .TIFF, .ICO. * `LABEL` - A label that identifies the object in
// the image. For the `MULTICLASS` classification type, at most one `LABEL` is
// allowed per image. If an image has not yet been labeled, then it should be
// mentioned just once with no `LABEL`. Some sample rows:
// TRAIN,gs://folder/image1.jpg,daisy
// TEST,gs://folder/image2.jpg,dandelion,tulip,rose
// UNASSIGNED,gs://folder/image3.jpg,daisy UNASSIGNED,gs://folder/image4.jpg
// </section><section><h5>Object Detection</h5> See [Preparing your training
// data](https://cloud.google.com/vision/automl/object-detection/docs/prepare)
// for more information. CSV file(s) with each line in format:
// ML_USE,GCS_FILE_PATH,[LABEL],(BOUNDING_BOX | ,,,,,,,) * `ML_USE` -
// Identifies the data set that the current row (file) applies to. This value
// can be one of the following: * `TRAIN` - Rows in this file are used to
// train the model. * `TEST` - Rows in this file are used to test the model
// during training. * `UNASSIGNED` - Rows in this file are not categorized.
// They are automatically divided into train and test data: 80% for training
// and 20% for testing. - `GCS_FILE_PATH` - The Google Cloud Storage location
// of an image of up to 30MB in size. Supported extensions: .JPEG, .GIF, .PNG.
// Each image is assumed to be exhaustively labeled. - `LABEL` - A label that
// identifies the object in the image specified by the `BOUNDING_BOX`. -
// `BOUNDING_BOX` - The vertices of an object in the example image. The
// minimum allowed `BOUNDING_BOX` edge length is 0.01, and no more than 500
// `BOUNDING_BOX` instances per image are allowed (one `BOUNDING_BOX` per
// line). If an image has none of the looked-for objects then it should be
// mentioned just once with no LABEL and the ",,,,,,," in place of the
// `BOUNDING_BOX`. **Four sample rows:**
// TRAIN,gs://folder/image1.png,car,0.1,0.1,,,0.3,0.3,,
// TRAIN,gs://folder/image1.png,bike,.7,.6,,,.8,.9,,
// UNASSIGNED,gs://folder/im2.png,car,0.1,0.1,0.2,0.1,0.2,0.3,0.1,0.3
// TEST,gs://folder/im3.png,,,,,,,,, </section> </div> <h4>AutoML Video
// Intelligence</h4> <div
// class="ds-selector-tabs"><section><h5>Classification</h5> See [Preparing
// your training
// data](https://cloud.google.com/video-intelligence/automl/docs/prepare) for
// more information. CSV file(s) with each line in format: ML_USE,GCS_FILE_PATH
// For `ML_USE`, do not use `VALIDATE`. `GCS_FILE_PATH` is the path to another
// .csv file that describes training examples for a given `ML_USE`, using the
// following row format:
// GCS_FILE_PATH,(LABEL,TIME_SEGMENT_START,TIME_SEGMENT_END | ,,) Here
// `GCS_FILE_PATH` leads to a video of up to 50GB in size and up to 3h
// duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI.
// `TIME_SEGMENT_START` and `TIME_SEGMENT_END` must be within the length of
// the video, and the end time must be after the start time. Any segment of a
// video which has one or more labels on it is considered a hard negative for
// all other labels. Any segment with no labels on it is considered to be
// unknown. If a whole video is unknown, then it should be mentioned just once
// with ",," in place of `LABEL, TIME_SEGMENT_START,TIME_SEGMENT_END`. Sample
// top level CSV file: TRAIN,gs://folder/train_videos.csv
// TEST,gs://folder/test_videos.csv UNASSIGNED,gs://folder/other_videos.csv
// Sample rows of a CSV file for a particular ML_USE:
// gs://folder/video1.avi,car,120,180.000021
// gs://folder/video1.avi,bike,150,180.000021 gs://folder/vid2.avi,car,0,60.5
// gs://folder/vid3.avi,,, </section><section><h5>Object Tracking</h5> See
// [Preparing your training
// data](/video-intelligence/automl/object-tracking/docs/prepare) for more
// information. CSV file(s) with each line in format: ML_USE,GCS_FILE_PATH For
// `ML_USE`, do not use `VALIDATE`. `GCS_FILE_PATH` is the path to another
// .csv file that describes training examples for a given `ML_USE`, using the
// following row format:
// GCS_FILE_PATH,LABEL,[INSTANCE_ID],TIMESTAMP,BOUNDING_BOX or
// GCS_FILE_PATH,,,,,,,,,, Here `GCS_FILE_PATH` leads to a video of up to 50GB
// in size and up to 3h duration. Supported extensions: .MOV, .MPEG4, .MP4,
// .AVI. Providing `INSTANCE_ID`s can help to obtain a better model.
// When a specific labeled entity leaves the video frame and shows up again
// later, it is not required, albeit preferable, that the same `INSTANCE_ID`
// be given to it. `TIMESTAMP` must be within the length of the video; the
// `BOUNDING_BOX` is assumed to be drawn on the video frame closest to the
// `TIMESTAMP`. Any frame mentioned by a `TIMESTAMP` is expected to be
// exhaustively labeled and no more than 500 `BOUNDING_BOX`-es per frame are
// allowed. If a whole video is unknown, then it should be mentioned just once
// with ",,,,,,,,,," in place of `LABEL,
// [INSTANCE_ID],TIMESTAMP,BOUNDING_BOX`. Sample top level CSV file:
// TRAIN,gs://folder/train_videos.csv TEST,gs://folder/test_videos.csv
// UNASSIGNED,gs://folder/other_videos.csv Seven sample rows of a CSV file for
// a particular ML_USE:
// gs://folder/video1.avi,car,1,12.10,0.8,0.8,0.9,0.8,0.9,0.9,0.8,0.9
// gs://folder/video1.avi,car,1,12.90,0.4,0.8,0.5,0.8,0.5,0.9,0.4,0.9
// gs://folder/video1.avi,car,2,12.10,.4,.2,.5,.2,.5,.3,.4,.3
// gs://folder/video1.avi,car,2,12.90,.8,.2,,,.9,.3,,
// gs://folder/video1.avi,bike,,12.50,.45,.45,,,.55,.55,,
// gs://folder/video2.avi,car,1,0,.1,.9,,,.9,.1,,
// gs://folder/video2.avi,,,,,,,,,,, </section> </div> <h4>AutoML Natural
// Language</h4> <div class="ds-selector-tabs"><section><h5>Entity
// Extraction</h5> See [Preparing your training
// data](/natural-language/automl/entity-analysis/docs/prepare) for more
// information. One or more CSV file(s) with each line in the following
// format: ML_USE,GCS_FILE_PATH * `ML_USE` - Identifies the data set that the
// current row (file) applies to. This value can be one of the following: *
// `TRAIN` - Rows in this file are used to train the model. * `TEST` - Rows in
// this file are used to test the model during training. * `UNASSIGNED` - Rows
// in this file are not categorized. They are automatically divided into train
// and test data: 80% for training and 20% for testing. - `GCS_FILE_PATH` -
// Identifies a JSON Lines (.JSONL) file stored in Google Cloud Storage that
// contains in-line text or documents for model training. After the training
// data set has been determined from the `TRAIN` and `UNASSIGNED` CSV files,
// the training data is divided into train and validation data sets: 70% for
// training and 30% for validation. For example:
// TRAIN,gs://folder/file1.jsonl VALIDATE,gs://folder/file2.jsonl
// TEST,gs://folder/file3.jsonl **In-line JSONL files** In-line .JSONL files
// contain, per line, a JSON document that wraps a
// [`text_snippet`][google.cloud.automl.v1.TextSnippet] field followed by one
// or more [`annotations`][google.cloud.automl.v1.AnnotationPayload] fields,
// which have `display_name` and `text_extraction` fields to describe the
// entity from the text snippet. Multiple JSON documents can be separated
// using line breaks (\n). The supplied text must be annotated exhaustively.
// For example, if you include the text "horse", but do not label it as
// "animal", then "horse" is assumed to not be an "animal". Any given text
// snippet content must have 30,000 characters or less, and also be UTF-8 NFC
// encoded. ASCII is accepted as it is UTF-8 NFC encoded.
// For example: { "text_snippet": { "content": "dog car cat" }, "annotations":
// [ { "display_name": "animal", "text_extraction": { "text_segment":
// {"start_offset": 0, "end_offset": 2} } }, { "display_name": "vehicle",
// "text_extraction": { "text_segment": {"start_offset": 4, "end_offset": 6} }
// }, { "display_name": "animal", "text_extraction": { "text_segment":
// {"start_offset": 8, "end_offset": 10} } } ] }\n { "text_snippet": {
// "content": "This dog is good." }, "annotations": [ { "display_name":
// "animal", "text_extraction": { "text_segment": {"start_offset": 5,
// "end_offset": 7} } } ] } **JSONL files that reference documents** .JSONL
// files contain, per line, a JSON document that wraps an `input_config` that
// contains the path to a source document. Multiple JSON documents can be
// separated using line breaks (\n). Supported document extensions: .PDF,
// .TIF, .TIFF For example: { "document": { "input_config": { "gcs_source": {
// "input_uris": [ "gs://folder/document1.pdf" ] } } } }\n { "document": {
// "input_config": { "gcs_source": { "input_uris": [
// "gs://folder/document2.tif" ] } } } } **In-line JSONL files with document
// layout information** **Note:** You can only annotate documents using the
// UI. The format described below applies to annotated documents exported
// using the UI or `exportData`. In-line .JSONL files for documents contain,
// per line, a JSON document that wraps a `document` field that provides the
// textual content of the document and the layout information. For example: {
// "document": { "document_text": { "content": "dog car cat" } "layout": [ { "text_segment": { "start_offset": 0, "end_offset": 11, }, "page_number": 1,
// "bounding_poly": { "normalized_vertices": [ {"x": 0.1, "y": 0.1}, {"x":
// 0.1, "y": 0.3}, {"x": 0.3, "y": 0.3}, {"x": 0.3, "y": 0.1}, ], },
// "text_segment_type": TOKEN, } ], "document_dimensions": { "width": 8.27,
// "height": 11.69, "unit": INCH, } "page_count": 3, }, "annotations": [ {
// "display_name": "animal", "text_extraction": { "text_segment":
// {"start_offset": 0, "end_offset": 3} } }, { "display_name": "vehicle",
// "text_extraction": { "text_segment": {"start_offset": 4, "end_offset": 7} }
// }, { "display_name": "animal", "text_extraction": { "text_segment":
// {"start_offset": 8, "end_offset": 11} } }, ],
// </section><section><h5>Classification</h5> See [Preparing your training
// data](https://cloud.google.com/natural-language/automl/docs/prepare) for
// more information. One or more CSV file(s) with each line in the following
// format: ML_USE,(TEXT_SNIPPET | GCS_FILE_PATH),LABEL,LABEL,... * `ML_USE` -
// Identifies the data set that the current row (file) applies to. This value
// can be one of the following: * `TRAIN` - Rows in this file are used to
// train the model. * `TEST` - Rows in this file are used to test the model
// during training. * `UNASSIGNED` - Rows in this file are not categorized.
// They are automatically divided into train and test data: 80% for training
// and 20% for testing. - `TEXT_SNIPPET` and `GCS_FILE_PATH` are distinguished
// by a pattern. If the column content is a valid Google Cloud Storage file
// path, that is, prefixed by "gs://", it is treated as a `GCS_FILE_PATH`.
// Otherwise, if the content is enclosed in double quotes (""), it is treated
// as a `TEXT_SNIPPET`.
// </section><section><h5>Classification</h5>
// See [Preparing your training
// data](https://cloud.google.com/natural-language/automl/docs/prepare) for
// more information. One or more CSV file(s) with each line in the following
// format: ML_USE,(TEXT_SNIPPET | GCS_FILE_PATH),LABEL,LABEL,...
//
//   - `ML_USE` - Identifies the data set that the current row (file) applies
//     to. This value can be one of the following:
//     - `TRAIN` - Rows in this file are used to train the model.
//     - `TEST` - Rows in this file are used to test the model during
//       training.
//     - `UNASSIGNED` - Rows in this file are not categorized. They are
//       automatically divided into train and test data: 80% for training and
//       20% for testing.
//   - `TEXT_SNIPPET` and `GCS_FILE_PATH` are distinguished by a pattern. If
//     the column content is a valid Google Cloud Storage file path, that is,
//     prefixed by "gs://", it is treated as a `GCS_FILE_PATH`. Otherwise, if
//     the content is enclosed in double quotes (""), it is treated as a
//     `TEXT_SNIPPET` (a sketch of this rule follows this section).
//
// For `GCS_FILE_PATH`, the path must lead to a file with a supported
// extension and UTF-8 encoding, for example, "gs://folder/content.txt";
// AutoML imports the file content as a text snippet. For `TEXT_SNIPPET`,
// AutoML imports the column content excluding quotes. In both cases, the
// content must be 10MB or less in size. For zip files, each file inside the
// zip must be 10MB or less in size. For the `MULTICLASS` classification
// type, at most one `LABEL` is allowed. The `ML_USE` and `LABEL` columns are
// optional. Supported file extensions: .TXT, .PDF, .TIF, .TIFF, .ZIP. A
// maximum of 100 unique labels are allowed per CSV row. Sample rows:
//
//	TRAIN,"They have bad food and very rude",RudeService,BadFood
//	gs://folder/content.txt,SlowService
//	TEST,gs://folder/document.pdf
//	VALIDATE,gs://folder/text_files.zip,BadFood
//
// </section>
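//
// The `TEXT_SNIPPET` / `GCS_FILE_PATH` disambiguation rule above can be
// restated as a short standalone sketch (the function is hypothetical, not
// an exported API of this package):
//
//	package main
//
//	import (
//		"fmt"
//		"strings"
//	)
//
//	// columnKind applies the documented pattern: "gs://"-prefixed values
//	// are GCS_FILE_PATHs, double-quoted values are TEXT_SNIPPETs.
//	func columnKind(col string) string {
//		switch {
//		case strings.HasPrefix(col, "gs://"):
//			return "GCS_FILE_PATH"
//		case len(col) >= 2 && strings.HasPrefix(col, `"`) && strings.HasSuffix(col, `"`):
//			return "TEXT_SNIPPET"
//		default:
//			return "unrecognized"
//		}
//	}
//
//	func main() {
//		fmt.Println(columnKind("gs://folder/content.txt")) // GCS_FILE_PATH
//		fmt.Println(columnKind(`"They have bad food"`))    // TEXT_SNIPPET
//	}
//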
// <section><h5>Sentiment Analysis</h5>
// See [Preparing your training
// data](https://cloud.google.com/natural-language/automl/docs/prepare) for
// more information. CSV file(s) with each line in the following format:
// ML_USE,(TEXT_SNIPPET | GCS_FILE_PATH),SENTIMENT
//
//   - `ML_USE` - Identifies the data set that the current row (file) applies
//     to. This value can be one of the following:
//     - `TRAIN` - Rows in this file are used to train the model.
//     - `TEST` - Rows in this file are used to test the model during
//       training.
//     - `UNASSIGNED` - Rows in this file are not categorized. They are
//       automatically divided into train and test data: 80% for training and
//       20% for testing.
//   - `TEXT_SNIPPET` and `GCS_FILE_PATH` are distinguished by a pattern. If
//     the column content is a valid Google Cloud Storage file path, that is,
//     prefixed by "gs://", it is treated as a `GCS_FILE_PATH`. Otherwise, if
//     the content is enclosed in double quotes (""), it is treated as a
//     `TEXT_SNIPPET`. For `GCS_FILE_PATH`, the path must lead to a file with
//     a supported extension and UTF-8 encoding, for example,
//     "gs://folder/content.txt"; AutoML imports the file content as a text
//     snippet. For `TEXT_SNIPPET`, AutoML imports the column content
//     excluding quotes. In both cases, the content must be 128kB or less in
//     size. For zip files, each file inside the zip must be 128kB or less in
//     size. The `ML_USE` and `SENTIMENT` columns are optional. Supported
//     file extensions: .TXT, .PDF, .TIF, .TIFF, .ZIP
//   - `SENTIMENT` - An integer between 0 and
//     Dataset.text_sentiment_dataset_metadata.sentiment_max (inclusive).
//     Describes the ordinal of the sentiment - a higher value means a more
//     positive sentiment. All the values are completely relative, i.e.
//     neither 0 needs to mean a negative or neutral sentiment nor
//     sentiment_max needs to mean a positive one - it is just required that
//     0 is the least positive sentiment in the data, and sentiment_max is
//     the most positive one. The SENTIMENT shouldn't be confused with
//     "score" or "magnitude" from the previous Natural Language Sentiment
//     Analysis API. All SENTIMENT values between 0 and sentiment_max must be
//     represented in the imported data (a validation sketch follows this
//     section). On prediction the same 0 to sentiment_max range will be
//     used. The difference between neighboring sentiment values need not be
//     uniform, e.g. 1 and 2 may be similar whereas the difference between 2
//     and 3 may be large.
//
// Sample rows:
//
//	TRAIN,"@freewrytin this is way too good for your product",2
//	gs://folder/content.txt,3
//	TEST,gs://folder/document.pdf
//	VALIDATE,gs://folder/text_files.zip,2
//
// </section> </div>
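//
// The `SENTIMENT` constraints above - integer values in [0, sentiment_max],
// with every value in the range represented - can be sketched as a small
// standalone check (hypothetical helper, not part of this package):
//
//	package main
//
//	import "fmt"
//
//	func validateSentiments(values []int, sentimentMax int) error {
//		seen := make([]bool, sentimentMax+1)
//		for _, v := range values {
//			if v < 0 || v > sentimentMax {
//				return fmt.Errorf("sentiment %d outside [0, %d]", v, sentimentMax)
//			}
//			seen[v] = true
//		}
//		for s, ok := range seen {
//			if !ok {
//				return fmt.Errorf("sentiment %d never appears in the data", s)
//			}
//		}
//		return nil
//	}
//
//	func main() {
//		fmt.Println(validateSentiments([]int{0, 1, 2, 2}, 2)) // <nil>
//		fmt.Println(validateSentiments([]int{0, 2}, 2))       // sentiment 1 never appears
//	}
//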
// <h4>AutoML Tables</h4><div class="ui-datasection-main"><section
// class="selected">
// See [Preparing your training
// data](https://cloud.google.com/automl-tables/docs/prepare) for more
// information. You can use either
// [gcs_source][google.cloud.automl.v1.InputConfig.gcs_source] or
// [bigquery_source][google.cloud.automl.v1.InputConfig.bigquery_source]. All
// input is concatenated into a single
// [primary_table_spec_id][google.cloud.automl.v1.TablesDatasetMetadata.primary_table_spec_id].
//
// **For gcs_source:** CSV file(s), where the first row of the first file is
// the header, containing unique column names. If the first row of a
// subsequent file is the same as the header, then it is also treated as a
// header. All other rows contain values for the corresponding columns. Each
// .CSV file by itself must be 10GB or smaller, and their total size must be
// 100GB or smaller. First three sample rows of a CSV file: <pre>
// "Id","First Name","Last Name","Dob","Addresses"
// "1","John","Doe","1968-01-22","[{"status":"current","address":"123_First_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456_Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]"
// "2","Jane","Doe","1980-10-16","[{"status":"current","address":"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","numberOfYears":"2"},{"status":"previous","address":"321_Main_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]"
// </pre>
// **For bigquery_source:** A URI of a BigQuery table. The user data size of
// the BigQuery table must be 100GB or smaller. An imported table must have
// between 2 and 1,000 columns, inclusive, and between 1,000 and 100,000,000
// rows, inclusive. At most 5 import data operations can run in parallel.
// </section> </div>
//
// **Input field definitions:**
//
// `ML_USE` : ("TRAIN" | "VALIDATE" | "TEST" | "UNASSIGNED") Describes how the
// given example (file) should be used for model training. "UNASSIGNED" can be
// used when the user has no preference.
//
// `GCS_FILE_PATH` : The path to a file on Google Cloud Storage. For example,
// "gs://folder/image1.png".
//
// `LABEL` : A display name of an object on an image, video etc., e.g. "dog".
// Must be up to 32 characters long and can consist only of ASCII Latin
// letters A-Z and a-z, underscores (_), and ASCII digits 0-9. For each label
// an AnnotationSpec is created whose display_name becomes the label;
// AnnotationSpecs are given back in predictions.
//
// `INSTANCE_ID` : A positive integer that identifies a specific instance of a
// labeled entity on an example. Used, e.g., to track two cars in a video
// while being able to tell them apart.
//
// `BOUNDING_BOX` : (`VERTEX,VERTEX,VERTEX,VERTEX` | `VERTEX,,,VERTEX,,`) A
// rectangle parallel to the frame of the example (image, video). If 4
// vertices are given, they are connected by edges in the order provided; if 2
// are given, they are recognized as diagonally opposite vertices of the
// rectangle.
//
// `VERTEX` : (`COORDINATE,COORDINATE`) First coordinate is horizontal (x),
// the second is vertical (y).
//
// `COORDINATE` : A float in the 0 to 1 range, relative to the total length of
// the image or video in the given dimension. For fractions the leading
// non-decimal 0 can be omitted (i.e. 0.3 = .3). Point 0,0 is in the top left.
//
// `TIME_SEGMENT_START` : (`TIME_OFFSET`) Expresses a beginning, inclusive, of
// a time segment within an example that has a time dimension (e.g. video).
//
// `TIME_SEGMENT_END` : (`TIME_OFFSET`) Expresses an end, exclusive, of a time
// segment within an example that has a time dimension (e.g. video).
//
// `TIME_OFFSET` : A number of seconds as measured from the start of an
// example (e.g. video). Fractions are allowed, up to a microsecond precision.
// "inf" is allowed, and it means the end of the example.
//
// `TEXT_SNIPPET` : The content of a text snippet, UTF-8 encoded, enclosed
// within double quotes ("").
//
// `DOCUMENT` : A field that provides the textual content of the document and
// the layout information.
//
// **Errors:** If any of the provided CSV files can't be parsed, or if more
// than a certain percentage of CSV rows cannot be processed, then the
// operation fails and nothing is imported. Regardless of overall success or
// failure, the per-row failures, up to a certain count cap, are listed in
// Operation.metadata.partial_failures.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type InputConfig = src.InputConfig
type InputConfig_GcsSource = src.InputConfig_GcsSource
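
// A hedged sketch of populating the InputConfig alias above for a CSV import
// from Google Cloud Storage. It assumes the usual generated oneof wrapper
// shape; the dataset name and URI are placeholders, and the function itself
// is hypothetical:
//
//	func exampleImportDataRequest() *ImportDataRequest {
//		return &ImportDataRequest{
//			Name: "projects/my-project/locations/us-central1/datasets/TBL123",
//			InputConfig: &InputConfig{
//				Source: &InputConfig_GcsSource{
//					GcsSource: &GcsSource{
//						InputUris: []string{"gs://folder/train.csv"},
//					},
//				},
//			},
//		}
//	}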

// Request message for
// [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type ListDatasetsRequest = src.ListDatasetsRequest

// Response message for
// [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type ListDatasetsResponse = src.ListDatasetsResponse

// Request message for
// [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type ListModelEvaluationsRequest = src.ListModelEvaluationsRequest

// Response message for
// [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type ListModelEvaluationsResponse = src.ListModelEvaluationsResponse

// Request message for
// [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type ListModelsRequest = src.ListModelsRequest

// Response message for
// [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type ListModelsResponse = src.ListModelsResponse

// API proto representing a trained machine learning model.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type Model = src.Model

// Evaluation results of a model.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type ModelEvaluation = src.ModelEvaluation
type ModelEvaluation_ClassificationEvaluationMetrics = src.ModelEvaluation_ClassificationEvaluationMetrics
type ModelEvaluation_ImageObjectDetectionEvaluationMetrics = src.ModelEvaluation_ImageObjectDetectionEvaluationMetrics
type ModelEvaluation_TextExtractionEvaluationMetrics = src.ModelEvaluation_TextExtractionEvaluationMetrics
type ModelEvaluation_TextSentimentEvaluationMetrics = src.ModelEvaluation_TextSentimentEvaluationMetrics
type ModelEvaluation_TranslationEvaluationMetrics = src.ModelEvaluation_TranslationEvaluationMetrics

// Output configuration for ModelExport Action.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type ModelExportOutputConfig = src.ModelExportOutputConfig
type ModelExportOutputConfig_GcsDestination = src.ModelExportOutputConfig_GcsDestination

// Deployment state of the model.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type Model_DeploymentState = src.Model_DeploymentState
type Model_ImageClassificationModelMetadata = src.Model_ImageClassificationModelMetadata
type Model_ImageObjectDetectionModelMetadata = src.Model_ImageObjectDetectionModelMetadata
type Model_TextClassificationModelMetadata = src.Model_TextClassificationModelMetadata
type Model_TextExtractionModelMetadata = src.Model_TextExtractionModelMetadata
type Model_TextSentimentModelMetadata = src.Model_TextSentimentModelMetadata
type Model_TranslationModelMetadata = src.Model_TranslationModelMetadata

// A vertex represents a 2D point in the image. The normalized vertex
// coordinates are between 0 and 1, as fractions relative to the original
// plane (image, video). E.g. if the plane (e.g. the whole image) has size
// 10 x 20, then a point with normalized coordinates (0.1, 0.3) is at
// position (1, 6) on that plane.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type NormalizedVertex = src.NormalizedVertex
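
// The arithmetic in the NormalizedVertex comment can be checked with a tiny
// standalone sketch (the helper name is hypothetical):
//
//	package main
//
//	import "fmt"
//
//	// denormalize maps 0..1 coordinates onto a width x height plane.
//	func denormalize(x, y, width, height float64) (float64, float64) {
//		return x * width, y * height
//	}
//
//	func main() {
//		px, py := denormalize(0.1, 0.3, 10, 20)
//		fmt.Println(px, py) // 1 6
//	}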

// Metadata used across all long running operations returned by the AutoML
// API.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type OperationMetadata = src.OperationMetadata
type OperationMetadata_BatchPredictDetails = src.OperationMetadata_BatchPredictDetails
type OperationMetadata_CreateDatasetDetails = src.OperationMetadata_CreateDatasetDetails
type OperationMetadata_CreateModelDetails = src.OperationMetadata_CreateModelDetails
type OperationMetadata_DeleteDetails = src.OperationMetadata_DeleteDetails
type OperationMetadata_DeployModelDetails = src.OperationMetadata_DeployModelDetails
type OperationMetadata_ExportDataDetails = src.OperationMetadata_ExportDataDetails
type OperationMetadata_ExportModelDetails = src.OperationMetadata_ExportModelDetails
type OperationMetadata_ImportDataDetails = src.OperationMetadata_ImportDataDetails
type OperationMetadata_UndeployModelDetails = src.OperationMetadata_UndeployModelDetails

//   - For Translation: CSV file `translation.csv`, with each line in the
//     format: ML_USE,GCS_FILE_PATH. GCS_FILE_PATH leads to a .TSV file which
//     describes examples that have the given ML_USE, using the following row
//     format per line: TEXT_SNIPPET (in source language) \t TEXT_SNIPPET (in
//     target language)
//   - For Tables: Output depends on whether the dataset was imported from
//     Google Cloud Storage or BigQuery. Google Cloud Storage case:
//     [gcs_destination][google.cloud.automl.v1p1beta.OutputConfig.gcs_destination]
//     must be set. Exported are CSV file(s) `tables_1.csv`,
//     `tables_2.csv`,...,`tables_N.csv`, each having the table's column names
//     as a header line, with all other lines containing values for the header
//     columns. BigQuery case:
//     [bigquery_destination][google.cloud.automl.v1p1beta.OutputConfig.bigquery_destination]
//     pointing to a BigQuery project must be set. In the given project a new
//     dataset will be created with the name
//     `export_data_<automl-dataset-display-name>_<timestamp-of-export-call>`,
//     where <automl-dataset-display-name> will be made BigQuery-dataset-name
//     compatible (e.g. most special characters will become underscores), and
//     the timestamp will be in YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601"
//     format. In that dataset a new table called `primary_table` will be
//     created and filled with precisely the same data as was obtained on
//     import.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type OutputConfig = src.OutputConfig
type OutputConfig_GcsDestination = src.OutputConfig_GcsDestination

// Request message for
// [PredictionService.Predict][google.cloud.automl.v1.PredictionService.Predict].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type PredictRequest = src.PredictRequest

// Response message for
// [PredictionService.Predict][google.cloud.automl.v1.PredictionService.Predict].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type PredictResponse = src.PredictResponse
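
// A hedged sketch, mirroring the import example earlier, of populating the
// OutputConfig alias above for an export to Google Cloud Storage (the oneof
// wrapper shape is assumed; the names and the function are placeholders):
//
//	func exampleExportDataRequest() *ExportDataRequest {
//		return &ExportDataRequest{
//			Name: "projects/my-project/locations/us-central1/datasets/TBL123",
//			OutputConfig: &OutputConfig{
//				Destination: &OutputConfig_GcsDestination{
//					GcsDestination: &GcsDestination{
//						OutputUriPrefix: "gs://folder/export/",
//					},
//				},
//			},
//		}
//	}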

// PredictionServiceClient is the client API for PredictionService service.
// For semantics around ctx use and closing/ending streaming RPCs, please
// refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type PredictionServiceClient = src.PredictionServiceClient

// PredictionServiceServer is the server API for PredictionService service.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type PredictionServiceServer = src.PredictionServiceServer

// Dataset metadata for classification.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type TextClassificationDatasetMetadata = src.TextClassificationDatasetMetadata

// Model metadata that is specific to text classification.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type TextClassificationModelMetadata = src.TextClassificationModelMetadata

// Annotation for identifying spans of text.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type TextExtractionAnnotation = src.TextExtractionAnnotation
type TextExtractionAnnotation_TextSegment = src.TextExtractionAnnotation_TextSegment

// Dataset metadata that is specific to text extraction.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type TextExtractionDatasetMetadata = src.TextExtractionDatasetMetadata

// Model evaluation metrics for text extraction problems.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type TextExtractionEvaluationMetrics = src.TextExtractionEvaluationMetrics

// Metrics for a single confidence threshold.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type TextExtractionEvaluationMetrics_ConfidenceMetricsEntry = src.TextExtractionEvaluationMetrics_ConfidenceMetricsEntry

// Model metadata that is specific to text extraction.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type TextExtractionModelMetadata = src.TextExtractionModelMetadata

// A contiguous part of a text (string), assuming it has a UTF-8 NFC
// encoding.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type TextSegment = src.TextSegment

// Contains annotation details specific to text sentiment.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type TextSentimentAnnotation = src.TextSentimentAnnotation

// Dataset metadata for text sentiment.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type TextSentimentDatasetMetadata = src.TextSentimentDatasetMetadata

// Model evaluation metrics for text sentiment problems.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type TextSentimentEvaluationMetrics = src.TextSentimentEvaluationMetrics

// Model metadata that is specific to text sentiment.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type TextSentimentModelMetadata = src.TextSentimentModelMetadata

// A representation of a text snippet.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type TextSnippet = src.TextSnippet

// Annotation details specific to translation.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type TranslationAnnotation = src.TranslationAnnotation

// Dataset metadata that is specific to translation.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type TranslationDatasetMetadata = src.TranslationDatasetMetadata

// Evaluation metrics for the dataset.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type TranslationEvaluationMetrics = src.TranslationEvaluationMetrics

// Model metadata that is specific to translation.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type TranslationModelMetadata = src.TranslationModelMetadata

// Details of UndeployModel operation.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type UndeployModelOperationMetadata = src.UndeployModelOperationMetadata

// Request message for
// [AutoMl.UndeployModel][google.cloud.automl.v1.AutoMl.UndeployModel].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type UndeployModelRequest = src.UndeployModelRequest

// UnimplementedAutoMlServer can be embedded to have forward compatible
// implementations.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type UnimplementedAutoMlServer = src.UnimplementedAutoMlServer

// UnimplementedPredictionServiceServer can be embedded to have forward
// compatible implementations.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type UnimplementedPredictionServiceServer = src.UnimplementedPredictionServiceServer

// Request message for
// [AutoMl.UpdateDataset][google.cloud.automl.v1.AutoMl.UpdateDataset].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type UpdateDatasetRequest = src.UpdateDatasetRequest

// Request message for
// [AutoMl.UpdateModel][google.cloud.automl.v1.AutoMl.UpdateModel].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1/automlpb
type UpdateModelRequest = src.UpdateModelRequest

// Deprecated: Please use funcs in: cloud.google.com/go/automl/apiv1/automlpb
func NewAutoMlClient(cc grpc.ClientConnInterface) AutoMlClient { return src.NewAutoMlClient(cc) }

// Deprecated: Please use funcs in: cloud.google.com/go/automl/apiv1/automlpb
func NewPredictionServiceClient(cc grpc.ClientConnInterface) PredictionServiceClient {
	return src.NewPredictionServiceClient(cc)
}

// Deprecated: Please use funcs in: cloud.google.com/go/automl/apiv1/automlpb
func RegisterAutoMlServer(s *grpc.Server, srv AutoMlServer) { src.RegisterAutoMlServer(s, srv) }

// Deprecated: Please use funcs in: cloud.google.com/go/automl/apiv1/automlpb
func RegisterPredictionServiceServer(s *grpc.Server, srv PredictionServiceServer) {
	src.RegisterPredictionServiceServer(s, srv)
}
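
// A final, hedged end-to-end sketch of the deprecated gRPC surface aliased
// above (the model name is a placeholder; OAuth credentials are elided, and
// new code should prefer the cloud.google.com/go/automl/apiv1 client, which
// handles both auth and transport):
//
//	package main
//
//	import (
//		"context"
//		"fmt"
//		"log"
//
//		automlpb "cloud.google.com/go/automl/apiv1/automlpb"
//		"google.golang.org/grpc"
//		"google.golang.org/grpc/credentials"
//	)
//
//	func main() {
//		// TLS only; real calls also need OAuth per-RPC credentials.
//		conn, err := grpc.Dial("automl.googleapis.com:443",
//			grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")))
//		if err != nil {
//			log.Fatal(err)
//		}
//		defer conn.Close()
//
//		client := automlpb.NewPredictionServiceClient(conn)
//		resp, err := client.Predict(context.Background(), &automlpb.PredictRequest{
//			Name: "projects/my-project/locations/us-central1/models/TCN123",
//			Payload: &automlpb.ExamplePayload{
//				Payload: &automlpb.ExamplePayload_TextSnippet{
//					TextSnippet: &automlpb.TextSnippet{
//						Content:  "enjoy your vacation!",
//						MimeType: "text/plain",
//					},
//				},
//			},
//		})
//		if err != nil {
//			log.Fatal(err)
//		}
//		fmt.Println(resp)
//	}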