...

Text file src/google.golang.org/api/speech/v2beta/speech-api.json

Documentation: google.golang.org/api/speech/v2beta

     1{
     2  "auth": {
     3    "oauth2": {
     4      "scopes": {
     5        "https://www.googleapis.com/auth/cloud-platform": {
     6          "description": "View and manage your data across Google Cloud Platform services"
     7        }
     8      }
     9    }
    10  },
    11  "basePath": "",
    12  "baseUrl": "https://speech.googleapis.com/",
    13  "batchPath": "batch",
    14  "canonicalName": "Speech",
    15  "description": "Converts audio to text by applying powerful neural network models.",
    16  "discoveryVersion": "v1",
    17  "documentationLink": "https://cloud.google.com/speech-to-text/docs/quickstart-protocol",
    18  "fullyEncodeReservedExpansion": true,
    19  "icons": {
    20    "x16": "http://www.google.com/images/icons/product/search-16.gif",
    21    "x32": "http://www.google.com/images/icons/product/search-32.gif"
    22  },
    23  "id": "speech:v2beta",
    24  "kind": "discovery#restDescription",
    25  "name": "speech",
    26  "ownerDomain": "google.com",
    27  "ownerName": "Google",
    28  "parameters": {
    29    "$.xgafv": {
    30      "description": "V1 error format.",
    31      "enum": [
    32        "1",
    33        "2"
    34      ],
    35      "enumDescriptions": [
    36        "v1 error format",
    37        "v2 error format"
    38      ],
    39      "location": "query",
    40      "type": "string"
    41    },
    42    "access_token": {
    43      "description": "OAuth access token.",
    44      "location": "query",
    45      "type": "string"
    46    },
    47    "alt": {
    48      "default": "json",
    49      "description": "Data format for response.",
    50      "enum": [
    51        "json",
    52        "media",
    53        "proto"
    54      ],
    55      "enumDescriptions": [
    56        "Responses with Content-Type of application/json",
    57        "Media download with context-dependent Content-Type",
    58        "Responses with Content-Type of application/x-protobuf"
    59      ],
    60      "location": "query",
    61      "type": "string"
    62    },
    63    "callback": {
    64      "description": "JSONP",
    65      "location": "query",
    66      "type": "string"
    67    },
    68    "fields": {
    69      "description": "Selector specifying which fields to include in a partial response.",
    70      "location": "query",
    71      "type": "string"
    72    },
    73    "key": {
    74      "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.",
    75      "location": "query",
    76      "type": "string"
    77    },
    78    "oauth_token": {
    79      "description": "OAuth 2.0 token for the current user.",
    80      "location": "query",
    81      "type": "string"
    82    },
    83    "prettyPrint": {
    84      "default": "true",
    85      "description": "Returns response with indentations and line breaks.",
    86      "location": "query",
    87      "type": "boolean"
    88    },
    89    "quotaUser": {
    90      "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.",
    91      "location": "query",
    92      "type": "string"
    93    },
    94    "uploadType": {
    95      "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").",
    96      "location": "query",
    97      "type": "string"
    98    },
    99    "upload_protocol": {
   100      "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").",
   101      "location": "query",
   102      "type": "string"
   103    }
   104  },
   105  "protocol": "rest",
   106  "resources": {
   107    "projects": {
   108      "resources": {
   109        "locations": {
   110          "resources": {
   111            "operations": {
   112              "methods": {
   113                "get": {
   114                  "description": "Gets the latest state of a long-running operation.  Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.",
   115                  "flatPath": "v2beta/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}",
   116                  "httpMethod": "GET",
   117                  "id": "speech.projects.locations.operations.get",
   118                  "parameterOrder": [
   119                    "name"
   120                  ],
   121                  "parameters": {
   122                    "name": {
   123                      "description": "The name of the operation resource.",
   124                      "location": "path",
   125                      "pattern": "^projects/[^/]+/locations/[^/]+/operations/[^/]+$",
   126                      "required": true,
   127                      "type": "string"
   128                    }
   129                  },
   130                  "path": "v2beta/{+name}",
   131                  "response": {
   132                    "$ref": "Operation"
   133                  },
   134                  "scopes": [
   135                    "https://www.googleapis.com/auth/cloud-platform"
   136                  ]
   137                },
   138                "list": {
   139                  "description": "Lists operations that match the specified filter in the request. If the\nserver doesn't support this method, it returns `UNIMPLEMENTED`.\n\nNOTE: the `name` binding allows API services to override the binding\nto use different resource name schemes, such as `users/*/operations`. To\noverride the binding, API services can add a binding such as\n`\"/v1/{name=users/*}/operations\"` to their service configuration.\nFor backwards compatibility, the default name includes the operations\ncollection id, however overriding users must ensure the name binding\nis the parent resource, without the operations collection id.",
   140                  "flatPath": "v2beta/projects/{projectsId}/locations/{locationsId}/operations",
   141                  "httpMethod": "GET",
   142                  "id": "speech.projects.locations.operations.list",
   143                  "parameterOrder": [
   144                    "name"
   145                  ],
   146                  "parameters": {
   147                    "filter": {
   148                      "description": "The standard list filter.",
   149                      "location": "query",
   150                      "type": "string"
   151                    },
   152                    "name": {
   153                      "description": "The name of the operation's parent resource.",
   154                      "location": "path",
   155                      "pattern": "^projects/[^/]+/locations/[^/]+$",
   156                      "required": true,
   157                      "type": "string"
   158                    },
   159                    "pageSize": {
   160                      "description": "The standard list page size.",
   161                      "format": "int32",
   162                      "location": "query",
   163                      "type": "integer"
   164                    },
   165                    "pageToken": {
   166                      "description": "The standard list page token.",
   167                      "location": "query",
   168                      "type": "string"
   169                    }
   170                  },
   171                  "path": "v2beta/{+name}/operations",
   172                  "response": {
   173                    "$ref": "ListOperationsResponse"
   174                  },
   175                  "scopes": [
   176                    "https://www.googleapis.com/auth/cloud-platform"
   177                  ]
   178                }
   179              }
   180            }
   181          }
   182        }
   183      }
   184    }
   185  },
   186  "revision": "20190918",
   187  "rootUrl": "https://speech.googleapis.com/",
   188  "schemas": {
   189    "ListOperationsResponse": {
   190      "description": "The response message for Operations.ListOperations.",
   191      "id": "ListOperationsResponse",
   192      "properties": {
   193        "nextPageToken": {
   194          "description": "The standard List next-page token.",
   195          "type": "string"
   196        },
   197        "operations": {
   198          "description": "A list of operations that matches the specified filter in the request.",
   199          "items": {
   200            "$ref": "Operation"
   201          },
   202          "type": "array"
   203        }
   204      },
   205      "type": "object"
   206    },
   207    "LongRunningRecognizeMetadata": {
   208      "description": "Describes the progress of a long-running `LongRunningRecognize` call. It is\nincluded in the `metadata` field of the `Operation` returned by the\n`GetOperation` call of the `google::longrunning::Operations` service.",
   209      "id": "LongRunningRecognizeMetadata",
   210      "properties": {
   211        "lastUpdateTime": {
   212          "description": "Output only. Time of the most recent processing update.",
   213          "format": "google-datetime",
   214          "type": "string"
   215        },
   216        "progressPercent": {
   217          "description": "Output only. Approximate percentage of audio processed thus far. Guaranteed to be 100\nwhen the audio is fully processed and the results are available.",
   218          "format": "int32",
   219          "type": "integer"
   220        },
   221        "startTime": {
   222          "description": "Output only. Time when the request was received.",
   223          "format": "google-datetime",
   224          "type": "string"
   225        }
   226      },
   227      "type": "object"
   228    },
   229    "LongRunningRecognizeResponse": {
   230      "description": "The only message returned to the client by the `LongRunningRecognize` method.\nIt contains the result as zero or more sequential SpeechRecognitionResult\nmessages. It is included in the `result.response` field of the `Operation`\nreturned by the `GetOperation` call of the `google::longrunning::Operations`\nservice.",
   231      "id": "LongRunningRecognizeResponse",
   232      "properties": {
   233        "results": {
   234          "description": "Output only. Sequential list of transcription results corresponding to\nsequential portions of audio.",
   235          "items": {
   236            "$ref": "SpeechRecognitionResult"
   237          },
   238          "type": "array"
   239        }
   240      },
   241      "type": "object"
   242    },
   243    "Operation": {
   244      "description": "This resource represents a long-running operation that is the result of a\nnetwork API call.",
   245      "id": "Operation",
   246      "properties": {
   247        "done": {
   248          "description": "If the value is `false`, it means the operation is still in progress.\nIf `true`, the operation is completed, and either `error` or `response` is\navailable.",
   249          "type": "boolean"
   250        },
   251        "error": {
   252          "$ref": "Status",
   253          "description": "The error result of the operation in case of failure or cancellation."
   254        },
   255        "metadata": {
   256          "additionalProperties": {
   257            "description": "Properties of the object. Contains field @type with type URL.",
   258            "type": "any"
   259          },
   260          "description": "Service-specific metadata associated with the operation.  It typically\ncontains progress information and common metadata such as create time.\nSome services might not provide such metadata.  Any method that returns a\nlong-running operation should document the metadata type, if any.",
   261          "type": "object"
   262        },
   263        "name": {
   264          "description": "The server-assigned name, which is only unique within the same service that\noriginally returns it. If you use the default HTTP mapping, the\n`name` should be a resource name ending with `operations/{unique_id}`.",
   265          "type": "string"
   266        },
   267        "response": {
   268          "additionalProperties": {
   269            "description": "Properties of the object. Contains field @type with type URL.",
   270            "type": "any"
   271          },
   272          "description": "The normal response of the operation in case of success.  If the original\nmethod returns no data on success, such as `Delete`, the response is\n`google.protobuf.Empty`.  If the original method is standard\n`Get`/`Create`/`Update`, the response should be the resource.  For other\nmethods, the response should have the type `XxxResponse`, where `Xxx`\nis the original method name.  For example, if the original method name\nis `TakeSnapshot()`, the inferred response type is\n`TakeSnapshotResponse`.",
   273          "type": "object"
   274        }
   275      },
   276      "type": "object"
   277    },
   278    "SpeechRecognitionAlternative": {
   279      "description": "Alternative hypotheses (a.k.a. n-best list).",
   280      "id": "SpeechRecognitionAlternative",
   281      "properties": {
   282        "confidence": {
          "description": "Output only. The confidence estimate between 0.0 and 1.0. A higher number\nindicates an estimated greater likelihood that the recognized words are\ncorrect. This field is set only for the top alternative of a non-streaming\nresult, or of a streaming result where `is_final=true`.\nThis field is not guaranteed to be accurate and users should not rely on it\nto be always provided.\nThe default of 0.0 is a sentinel value indicating `confidence` was not set.",
   284          "format": "float",
   285          "type": "number"
   286        },
   287        "transcript": {
   288          "description": "Output only. Transcript text representing the words that the user spoke.",
   289          "type": "string"
   290        },
   291        "words": {
   292          "description": "Output only. A list of word-specific information for each recognized word.\nNote: When `enable_speaker_diarization` is true, you will see all the words\nfrom the beginning of the audio.",
   293          "items": {
   294            "$ref": "WordInfo"
   295          },
   296          "type": "array"
   297        }
   298      },
   299      "type": "object"
   300    },
   301    "SpeechRecognitionResult": {
   302      "description": "A speech recognition result corresponding to a portion of the audio.",
   303      "id": "SpeechRecognitionResult",
   304      "properties": {
   305        "alternatives": {
   306          "description": "Output only. May contain one or more recognition hypotheses (up to the\nmaximum specified in `max_alternatives`).\nThese alternatives are ordered in terms of accuracy, with the top (first)\nalternative being the most probable, as ranked by the recognizer.",
   307          "items": {
   308            "$ref": "SpeechRecognitionAlternative"
   309          },
   310          "type": "array"
   311        },
   312        "channelTag": {
   313          "description": "Output only. For multi-channel audio, this is the channel number corresponding to the\nrecognized result for the audio from that channel.\nFor `audio_channel_count` = N, its output values can range from `1` to `N`.",
   314          "format": "int32",
   315          "type": "integer"
   316        },
   317        "languageCode": {
   318          "description": "Output only. The\n[BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of the\nlanguage in this result. This language code was detected to have the most\nlikelihood of being spoken in the audio.",
   319          "type": "string"
   320        }
   321      },
   322      "type": "object"
   323    },
   324    "Status": {
   325      "description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). Each `Status` message contains\nthree pieces of data: error code, error message, and error details.\n\nYou can find out more about this error model and how to work with it in the\n[API Design Guide](https://cloud.google.com/apis/design/errors).",
   326      "id": "Status",
   327      "properties": {
   328        "code": {
   329          "description": "The status code, which should be an enum value of google.rpc.Code.",
   330          "format": "int32",
   331          "type": "integer"
   332        },
   333        "details": {
   334          "description": "A list of messages that carry the error details.  There is a common set of\nmessage types for APIs to use.",
   335          "items": {
   336            "additionalProperties": {
   337              "description": "Properties of the object. Contains field @type with type URL.",
   338              "type": "any"
   339            },
   340            "type": "object"
   341          },
   342          "type": "array"
   343        },
   344        "message": {
   345          "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\ngoogle.rpc.Status.details field, or localized by the client.",
   346          "type": "string"
   347        }
   348      },
   349      "type": "object"
   350    },
   351    "WordInfo": {
   352      "description": "Word-specific information for recognized words.",
   353      "id": "WordInfo",
   354      "properties": {
   355        "confidence": {
          "description": "Output only. The confidence estimate between 0.0 and 1.0. A higher number\nindicates an estimated greater likelihood that the recognized words are\ncorrect. This field is set only for the top alternative of a non-streaming\nresult, or of a streaming result where `is_final=true`.\nThis field is not guaranteed to be accurate and users should not rely on it\nto be always provided.\nThe default of 0.0 is a sentinel value indicating `confidence` was not set.",
   357          "format": "float",
   358          "type": "number"
   359        },
   360        "endOffset": {
   361          "description": "Output only. Time offset relative to the beginning of the audio,\nand corresponding to the end of the spoken word.\nThis field is only set if `enable_word_time_offsets=true` and only\nin the top hypothesis.\nThis is an experimental feature and the accuracy of the time offset can\nvary.",
   362          "format": "google-duration",
   363          "type": "string"
   364        },
   365        "speakerTag": {
   366          "description": "Output only. A distinct integer value is assigned for every speaker within\nthe audio. This field specifies which one of those speakers was detected to\nhave spoken this word. Value ranges from `1` to\n`diarization_speaker_count`. speaker_tag is set if\n`enable_speaker_diarization` = `true` and only in the top alternative.",
   367          "format": "int32",
   368          "type": "integer"
   369        },
   370        "startOffset": {
   371          "description": "Output only. Time offset relative to the beginning of the audio,\nand corresponding to the start of the spoken word.\nThis field is only set if `enable_word_time_offsets=true` and only\nin the top hypothesis.\nThis is an experimental feature and the accuracy of the time offset can\nvary.",
   372          "format": "google-duration",
   373          "type": "string"
   374        },
   375        "word": {
   376          "description": "Output only. The word corresponding to this set of information.",
   377          "type": "string"
   378        }
   379      },
   380      "type": "object"
   381    }
   382  },
   383  "servicePath": "",
   384  "title": "Cloud Speech-to-Text API",
   385  "version": "v2beta",
   386  "version_module": true
   387}

View as plain text