// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by aliasgen. DO NOT EDIT.

// Package speech aliases all exported identifiers in package
// "cloud.google.com/go/speech/apiv1/speechpb".
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1/speechpb.
// Please read https://github.com/googleapis/google-cloud-go/blob/main/migration.md
// for more details.
package speech

import (
	src "cloud.google.com/go/speech/apiv1/speechpb"
	grpc "google.golang.org/grpc"
)

// Deprecated: Please use consts in: cloud.google.com/go/speech/apiv1/speechpb
const (
	RecognitionConfig_AMR = src.RecognitionConfig_AMR
	RecognitionConfig_AMR_WB = src.RecognitionConfig_AMR_WB
	RecognitionConfig_ENCODING_UNSPECIFIED = src.RecognitionConfig_ENCODING_UNSPECIFIED
	RecognitionConfig_FLAC = src.RecognitionConfig_FLAC
	RecognitionConfig_LINEAR16 = src.RecognitionConfig_LINEAR16
	RecognitionConfig_MULAW = src.RecognitionConfig_MULAW
	RecognitionConfig_OGG_OPUS = src.RecognitionConfig_OGG_OPUS
	RecognitionConfig_SPEEX_WITH_HEADER_BYTE = src.RecognitionConfig_SPEEX_WITH_HEADER_BYTE
	RecognitionConfig_WEBM_OPUS = src.RecognitionConfig_WEBM_OPUS
	RecognitionMetadata_AUDIO = src.RecognitionMetadata_AUDIO
	RecognitionMetadata_DICTATION = src.RecognitionMetadata_DICTATION
	RecognitionMetadata_DISCUSSION = src.RecognitionMetadata_DISCUSSION
	RecognitionMetadata_FARFIELD = src.RecognitionMetadata_FARFIELD
	RecognitionMetadata_INTERACTION_TYPE_UNSPECIFIED = src.RecognitionMetadata_INTERACTION_TYPE_UNSPECIFIED
	RecognitionMetadata_MICROPHONE_DISTANCE_UNSPECIFIED = src.RecognitionMetadata_MICROPHONE_DISTANCE_UNSPECIFIED
	RecognitionMetadata_MIDFIELD = src.RecognitionMetadata_MIDFIELD
	RecognitionMetadata_NEARFIELD = src.RecognitionMetadata_NEARFIELD
	RecognitionMetadata_ORIGINAL_MEDIA_TYPE_UNSPECIFIED = src.RecognitionMetadata_ORIGINAL_MEDIA_TYPE_UNSPECIFIED
	RecognitionMetadata_OTHER_INDOOR_DEVICE = src.RecognitionMetadata_OTHER_INDOOR_DEVICE
	RecognitionMetadata_OTHER_OUTDOOR_DEVICE = src.RecognitionMetadata_OTHER_OUTDOOR_DEVICE
	RecognitionMetadata_PC = src.RecognitionMetadata_PC
	RecognitionMetadata_PHONE_CALL = src.RecognitionMetadata_PHONE_CALL
	RecognitionMetadata_PHONE_LINE = src.RecognitionMetadata_PHONE_LINE
	RecognitionMetadata_PRESENTATION = src.RecognitionMetadata_PRESENTATION
	RecognitionMetadata_PROFESSIONALLY_PRODUCED = src.RecognitionMetadata_PROFESSIONALLY_PRODUCED
	RecognitionMetadata_RECORDING_DEVICE_TYPE_UNSPECIFIED = src.RecognitionMetadata_RECORDING_DEVICE_TYPE_UNSPECIFIED
	RecognitionMetadata_SMARTPHONE = src.RecognitionMetadata_SMARTPHONE
	RecognitionMetadata_VEHICLE = src.RecognitionMetadata_VEHICLE
	RecognitionMetadata_VIDEO = src.RecognitionMetadata_VIDEO
	RecognitionMetadata_VOICEMAIL = src.RecognitionMetadata_VOICEMAIL
	RecognitionMetadata_VOICE_COMMAND = src.RecognitionMetadata_VOICE_COMMAND
	RecognitionMetadata_VOICE_SEARCH = src.RecognitionMetadata_VOICE_SEARCH
	StreamingRecognizeResponse_END_OF_SINGLE_UTTERANCE = src.StreamingRecognizeResponse_END_OF_SINGLE_UTTERANCE
	StreamingRecognizeResponse_SPEECH_EVENT_UNSPECIFIED = src.StreamingRecognizeResponse_SPEECH_EVENT_UNSPECIFIED
)
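// The identifiers above are aliases of the speechpb constants, so values
// from this package and from speechpb are interchangeable; migrating only
// requires switching the import path. A minimal illustrative assertion
// (a sketch, not part of the generated surface):
var _ src.RecognitionConfig_AudioEncoding = RecognitionConfig_LINEAR16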
// Deprecated: Please use vars in: cloud.google.com/go/speech/apiv1/speechpb
var (
	File_google_cloud_speech_v1_cloud_speech_adaptation_proto = src.File_google_cloud_speech_v1_cloud_speech_adaptation_proto
	File_google_cloud_speech_v1_cloud_speech_proto = src.File_google_cloud_speech_v1_cloud_speech_proto
	File_google_cloud_speech_v1_resource_proto = src.File_google_cloud_speech_v1_resource_proto
	RecognitionConfig_AudioEncoding_name = src.RecognitionConfig_AudioEncoding_name
	RecognitionConfig_AudioEncoding_value = src.RecognitionConfig_AudioEncoding_value
	RecognitionMetadata_InteractionType_name = src.RecognitionMetadata_InteractionType_name
	RecognitionMetadata_InteractionType_value = src.RecognitionMetadata_InteractionType_value
	RecognitionMetadata_MicrophoneDistance_name = src.RecognitionMetadata_MicrophoneDistance_name
	RecognitionMetadata_MicrophoneDistance_value = src.RecognitionMetadata_MicrophoneDistance_value
	RecognitionMetadata_OriginalMediaType_name = src.RecognitionMetadata_OriginalMediaType_name
	RecognitionMetadata_OriginalMediaType_value = src.RecognitionMetadata_OriginalMediaType_value
	RecognitionMetadata_RecordingDeviceType_name = src.RecognitionMetadata_RecordingDeviceType_name
	RecognitionMetadata_RecordingDeviceType_value = src.RecognitionMetadata_RecordingDeviceType_value
	StreamingRecognizeResponse_SpeechEventType_name = src.StreamingRecognizeResponse_SpeechEventType_name
	StreamingRecognizeResponse_SpeechEventType_value = src.StreamingRecognizeResponse_SpeechEventType_value
)

// AdaptationClient is the client API for Adaptation service. For semantics
// around ctx use and closing/ending streaming RPCs, please refer to
// https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1/speechpb
type AdaptationClient = src.AdaptationClient

// AdaptationServer is the server API for Adaptation service.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1/speechpb
type AdaptationServer = src.AdaptationServer

// Message sent by the client for the `CreateCustomClass` method.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1/speechpb
type CreateCustomClassRequest = src.CreateCustomClassRequest

// Message sent by the client for the `CreatePhraseSet` method.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1/speechpb
type CreatePhraseSetRequest = src.CreatePhraseSetRequest

// A set of words or phrases that represents a common concept likely to
// appear in your audio, for example a list of passenger ship names.
// CustomClass items can be substituted into placeholders that you set in
// PhraseSet phrases.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1/speechpb
type CustomClass = src.CustomClass

// An item of the class.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1/speechpb
type CustomClass_ClassItem = src.CustomClass_ClassItem

// Message sent by the client for the `DeleteCustomClass` method.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1/speechpb
type DeleteCustomClassRequest = src.DeleteCustomClassRequest

// Message sent by the client for the `DeletePhraseSet` method.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1/speechpb
type DeletePhraseSetRequest = src.DeletePhraseSetRequest
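// Illustrative sketch of a CustomClass whose items can be referenced from
// PhraseSet phrases, as described above. The field names (CustomClassId,
// Items, Value) are the real generated fields; the function name and the
// values are placeholders, not part of the generated API.
func customClassSketch() *CustomClass {
	return &CustomClass{
		CustomClassId: "ship-names",
		Items: []*CustomClass_ClassItem{
			{Value: "Queen Mary"},
			{Value: "Queen Elizabeth"},
		},
	}
}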
// Message sent by the client for the `GetCustomClass` method.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1/speechpb
type GetCustomClassRequest = src.GetCustomClassRequest

// Message sent by the client for the `GetPhraseSet` method.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1/speechpb
type GetPhraseSetRequest = src.GetPhraseSetRequest

// Message sent by the client for the `ListCustomClasses` method.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1/speechpb
type ListCustomClassesRequest = src.ListCustomClassesRequest

// Message returned to the client by the `ListCustomClasses` method.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1/speechpb
type ListCustomClassesResponse = src.ListCustomClassesResponse

// Message sent by the client for the `ListPhraseSet` method.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1/speechpb
type ListPhraseSetRequest = src.ListPhraseSetRequest

// Message returned to the client by the `ListPhraseSet` method.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1/speechpb
type ListPhraseSetResponse = src.ListPhraseSetResponse

// Describes the progress of a long-running `LongRunningRecognize` call. It
// is included in the `metadata` field of the `Operation` returned by the
// `GetOperation` call of the `google::longrunning::Operations` service.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1/speechpb
type LongRunningRecognizeMetadata = src.LongRunningRecognizeMetadata

// The top-level message sent by the client for the `LongRunningRecognize`
// method.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1/speechpb
type LongRunningRecognizeRequest = src.LongRunningRecognizeRequest

// The only message returned to the client by the `LongRunningRecognize`
// method. It contains the result as zero or more sequential
// `SpeechRecognitionResult` messages. It is included in the
// `result.response` field of the `Operation` returned by the `GetOperation`
// call of the `google::longrunning::Operations` service.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1/speechpb
type LongRunningRecognizeResponse = src.LongRunningRecognizeResponse

// Provides "hints" to the speech recognizer to favor specific words and
// phrases in the results.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1/speechpb
type PhraseSet = src.PhraseSet
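// Illustrative sketch of a PhraseSet carrying phrase "hints", using the
// PhraseSet_Phrase item type documented below. The phrase values and the
// boost are placeholders; the function name is not part of the generated
// API.
func phraseSetSketch() *PhraseSet {
	return &PhraseSet{
		Phrases: []*PhraseSet_Phrase{
			// A pre-built class reference, as described in the
			// PhraseSet_Phrase documentation below.
			{Value: "i was born in $MONTH", Boost: 10},
			{Value: "how is the weather"},
		},
	}
}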
// A phrase containing words and phrase "hints" so that the speech
// recognition is more likely to recognize them. This can be used to improve
// the accuracy for specific words and phrases, for example, if specific
// commands are typically spoken by the user. This can also be used to add
// additional words to the vocabulary of the recognizer. See [usage
// limits](https://cloud.google.com/speech-to-text/quotas#content). List
// items can also include pre-built or custom classes containing groups of
// words that represent common concepts that occur in natural language. For
// example, rather than providing a phrase hint for every month of the year
// (e.g. "i was born in january", "i was born in february", ...), using the
// pre-built `$MONTH` class improves the likelihood of correctly
// transcribing audio that includes months (e.g. "i was born in $month").
// To refer to pre-built classes, use the class' symbol prepended with `$`,
// e.g. `$MONTH`. To refer to custom classes that were defined inline in the
// request, set the class's `custom_class_id` to a string unique to all
// class resources and inline classes. Then use the class' id wrapped in
// `${...}`, e.g. "${my-months}". To refer to custom class resources, use
// the class' id wrapped in `${}` (e.g. `${my-months}`). Speech-to-Text
// supports three locations: `global`, `us` (US North America), and `eu`
// (Europe). If you are calling the `speech.googleapis.com` endpoint, use
// the `global` location. To specify a region, use a [regional
// endpoint](/speech-to-text/docs/endpoints) with matching `us` or `eu`
// location value.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1/speechpb
type PhraseSet_Phrase = src.PhraseSet_Phrase

// Contains audio data in the encoding specified in the `RecognitionConfig`.
// Either `content` or `uri` must be supplied. Supplying both or neither
// returns
// [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. See
// [content limits](https://cloud.google.com/speech-to-text/quotas#content).
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1/speechpb
type RecognitionAudio = src.RecognitionAudio

type RecognitionAudio_Content = src.RecognitionAudio_Content

type RecognitionAudio_Uri = src.RecognitionAudio_Uri

// Provides information to the recognizer that specifies how to process the
// request.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1/speechpb
type RecognitionConfig = src.RecognitionConfig

// The encoding of the audio data sent in the request. All encodings support
// only 1 channel (mono) audio, unless the `audio_channel_count` and
// `enable_separate_recognition_per_channel` fields are set. For best
// results, the audio source should be captured and transmitted using a
// lossless encoding (`FLAC` or `LINEAR16`). The accuracy of the speech
// recognition can be reduced if lossy codecs are used to capture or
// transmit audio, particularly if background noise is present. Lossy codecs
// include `MULAW`, `AMR`, `AMR_WB`, `OGG_OPUS`, `SPEEX_WITH_HEADER_BYTE`,
// `MP3`, and `WEBM_OPUS`. The `FLAC` and `WAV` audio file formats include a
// header that describes the included audio content. You can request
// recognition for `WAV` files that contain either `LINEAR16` or `MULAW`
// encoded audio. If you send `FLAC` or `WAV` audio file format in your
// request, you do not need to specify an `AudioEncoding`; the audio
// encoding format is determined from the file header. If you specify an
// `AudioEncoding` when you send `FLAC` or `WAV` audio, the encoding
// configuration must match the encoding described in the audio header;
// otherwise the request returns a
// [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]
// error code.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1/speechpb
type RecognitionConfig_AudioEncoding = src.RecognitionConfig_AudioEncoding

// Description of audio data to be recognized. Deprecated: Do not use.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1/speechpb
type RecognitionMetadata = src.RecognitionMetadata

// Use case categories that the audio recognition request can be described
// by.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1/speechpb
type RecognitionMetadata_InteractionType = src.RecognitionMetadata_InteractionType
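// Illustrative sketch of the content-or-uri choice described for
// RecognitionAudio above: exactly one of the oneof wrappers is set on the
// AudioSource field. The wrapper types and field names are the real
// generated ones; the function names are placeholders.
func inlineAudioSketch(data []byte) *RecognitionAudio {
	// Audio bytes supplied inline with the request.
	return &RecognitionAudio{
		AudioSource: &RecognitionAudio_Content{Content: data},
	}
}

func storedAudioSketch(gcsURI string) *RecognitionAudio {
	// Audio referenced by URI instead of inline content.
	return &RecognitionAudio{
		AudioSource: &RecognitionAudio_Uri{Uri: gcsURI},
	}
}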
// Enumerates the types of capture settings describing an audio file.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1/speechpb
type RecognitionMetadata_MicrophoneDistance = src.RecognitionMetadata_MicrophoneDistance

// The original media the speech was recorded on.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1/speechpb
type RecognitionMetadata_OriginalMediaType = src.RecognitionMetadata_OriginalMediaType

// The type of device the speech was recorded with.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1/speechpb
type RecognitionMetadata_RecordingDeviceType = src.RecognitionMetadata_RecordingDeviceType

// The top-level message sent by the client for the `Recognize` method.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1/speechpb
type RecognizeRequest = src.RecognizeRequest

// The only message returned to the client by the `Recognize` method. It
// contains the result as zero or more sequential `SpeechRecognitionResult`
// messages.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1/speechpb
type RecognizeResponse = src.RecognizeResponse

// Config to enable speaker diarization.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1/speechpb
type SpeakerDiarizationConfig = src.SpeakerDiarizationConfig

// Speech adaptation configuration.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1/speechpb
type SpeechAdaptation = src.SpeechAdaptation

// SpeechClient is the client API for Speech service. For semantics around
// ctx use and closing/ending streaming RPCs, please refer to
// https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1/speechpb
type SpeechClient = src.SpeechClient

// Provides "hints" to the speech recognizer to favor specific words and
// phrases in the results.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1/speechpb
type SpeechContext = src.SpeechContext

// Alternative hypotheses (a.k.a. n-best list).
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1/speechpb
type SpeechRecognitionAlternative = src.SpeechRecognitionAlternative

// A speech recognition result corresponding to a portion of the audio.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1/speechpb
type SpeechRecognitionResult = src.SpeechRecognitionResult

// SpeechServer is the server API for Speech service.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1/speechpb
type SpeechServer = src.SpeechServer

type Speech_StreamingRecognizeClient = src.Speech_StreamingRecognizeClient

type Speech_StreamingRecognizeServer = src.Speech_StreamingRecognizeServer

// Provides information to the recognizer that specifies how to process the
// request.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1/speechpb
type StreamingRecognitionConfig = src.StreamingRecognitionConfig

// A streaming speech recognition result corresponding to a portion of the
// audio that is currently being processed.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1/speechpb
type StreamingRecognitionResult = src.StreamingRecognitionResult
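// Illustrative sketch of a complete synchronous request: a RecognitionConfig
// using the lossless LINEAR16 encoding recommended above, plus SpeechContext
// hints, combined into a RecognizeRequest. The sample rate, language code,
// and phrases are placeholder values; the field names are the real
// generated ones.
func recognizeRequestSketch(audio *RecognitionAudio) *RecognizeRequest {
	return &RecognizeRequest{
		Config: &RecognitionConfig{
			Encoding:        RecognitionConfig_LINEAR16,
			SampleRateHertz: 16000,
			LanguageCode:    "en-US",
			SpeechContexts: []*SpeechContext{
				{Phrases: []string{"weather forecast", "$MONTH"}},
			},
		},
		Audio: audio,
	}
}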
// The top-level message sent by the client for the `StreamingRecognize`
// method. Multiple `StreamingRecognizeRequest` messages are sent. The first
// message must contain a `streaming_config` message and must not contain
// `audio_content`. All subsequent messages must contain `audio_content` and
// must not contain a `streaming_config` message.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1/speechpb
type StreamingRecognizeRequest = src.StreamingRecognizeRequest

type StreamingRecognizeRequest_AudioContent = src.StreamingRecognizeRequest_AudioContent

type StreamingRecognizeRequest_StreamingConfig = src.StreamingRecognizeRequest_StreamingConfig

// `StreamingRecognizeResponse` is the only message returned to the client
// by `StreamingRecognize`. A series of zero or more
// `StreamingRecognizeResponse` messages are streamed back to the client. If
// there is no recognizable audio, and `single_utterance` is set to false,
// then no messages are streamed back to the client. Here's an example of a
// series of `StreamingRecognizeResponse`s that might be returned while
// processing audio:
//
//  1. results { alternatives { transcript: "tube" } stability: 0.01 }
//
//  2. results { alternatives { transcript: "to be a" } stability: 0.01 }
//
//  3. results { alternatives { transcript: "to be" } stability: 0.9 }
//     results { alternatives { transcript: " or not to be" } stability: 0.01 }
//
//  4. results { alternatives { transcript: "to be or not to be" confidence: 0.92 }
//     alternatives { transcript: "to bee or not to bee" } is_final: true }
//
//  5. results { alternatives { transcript: " that's" } stability: 0.01 }
//
//  6. results { alternatives { transcript: " that is" } stability: 0.9 }
//     results { alternatives { transcript: " the question" } stability: 0.01 }
//
//  7. results { alternatives { transcript: " that is the question" confidence: 0.98 }
//     alternatives { transcript: " that was the question" } is_final: true }
//
// Notes:
//
//   - Only two of the above responses, #4 and #7, contain final results;
//     they are indicated by `is_final: true`. Concatenating these together
//     generates the full transcript: "to be or not to be that is the
//     question".
//
//   - The others contain interim `results`. #3 and #6 contain two interim
//     `results`: the first portion has a high stability and is less likely
//     to change; the second portion has a low stability and is very likely
//     to change. A UI designer might choose to show only high stability
//     `results`.
//
//   - The specific `stability` and `confidence` values shown above are only
//     for illustrative purposes. Actual values may vary.
//
//   - In each response, only one of these fields will be set: `error`,
//     `speech_event_type`, or one or more (repeated) `results`.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1/speechpb
type StreamingRecognizeResponse = src.StreamingRecognizeResponse

// Indicates the type of speech event.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1/speechpb
type StreamingRecognizeResponse_SpeechEventType = src.StreamingRecognizeResponse_SpeechEventType

// Specifies an optional destination for the recognition results.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1/speechpb
type TranscriptOutputConfig = src.TranscriptOutputConfig

type TranscriptOutputConfig_GcsUri = src.TranscriptOutputConfig_GcsUri

// UnimplementedAdaptationServer can be embedded to have forward compatible
// implementations.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1/speechpb
type UnimplementedAdaptationServer = src.UnimplementedAdaptationServer
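// Two illustrative sketches for the streaming types documented above; the
// function names and the chunking are placeholders, not part of the
// generated API. The first follows the request protocol: the opening
// message carries only the streaming configuration, every later message
// carries only audio content. The second reflects the note on final
// results: concatenating the top alternative of each `is_final` result
// yields the full transcript.
func streamingRequestsSketch(stream Speech_StreamingRecognizeClient, cfg *StreamingRecognitionConfig, chunks [][]byte) error {
	// First message: streaming_config only, no audio_content.
	if err := stream.Send(&StreamingRecognizeRequest{
		StreamingRequest: &StreamingRecognizeRequest_StreamingConfig{StreamingConfig: cfg},
	}); err != nil {
		return err
	}
	// Subsequent messages: audio_content only, no streaming_config.
	for _, chunk := range chunks {
		if err := stream.Send(&StreamingRecognizeRequest{
			StreamingRequest: &StreamingRecognizeRequest_AudioContent{AudioContent: chunk},
		}); err != nil {
			return err
		}
	}
	return stream.CloseSend()
}

func finalTranscriptSketch(responses []*StreamingRecognizeResponse) string {
	var transcript string
	for _, resp := range responses {
		for _, result := range resp.Results {
			// Interim results (is_final == false) are skipped; only final
			// results contribute to the assembled transcript.
			if result.IsFinal && len(result.Alternatives) > 0 {
				transcript += result.Alternatives[0].Transcript
			}
		}
	}
	return transcript
}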
// UnimplementedSpeechServer can be embedded to have forward compatible
// implementations.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1/speechpb
type UnimplementedSpeechServer = src.UnimplementedSpeechServer

// Message sent by the client for the `UpdateCustomClass` method.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1/speechpb
type UpdateCustomClassRequest = src.UpdateCustomClassRequest

// Message sent by the client for the `UpdatePhraseSet` method.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1/speechpb
type UpdatePhraseSetRequest = src.UpdatePhraseSetRequest

// Word-specific information for recognized words.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1/speechpb
type WordInfo = src.WordInfo

// Deprecated: Please use funcs in: cloud.google.com/go/speech/apiv1/speechpb
func NewAdaptationClient(cc grpc.ClientConnInterface) AdaptationClient {
	return src.NewAdaptationClient(cc)
}

// Deprecated: Please use funcs in: cloud.google.com/go/speech/apiv1/speechpb
func NewSpeechClient(cc grpc.ClientConnInterface) SpeechClient {
	return src.NewSpeechClient(cc)
}

// Deprecated: Please use funcs in: cloud.google.com/go/speech/apiv1/speechpb
func RegisterAdaptationServer(s *grpc.Server, srv AdaptationServer) {
	src.RegisterAdaptationServer(s, srv)
}

// Deprecated: Please use funcs in: cloud.google.com/go/speech/apiv1/speechpb
func RegisterSpeechServer(s *grpc.Server, srv SpeechServer) {
	src.RegisterSpeechServer(s, srv)
}
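// Illustrative sketch of the forward-compatibility pattern mentioned for
// UnimplementedSpeechServer: a handler embeds it so that methods added to
// the Speech service later do not break the build, and is then attached to
// a *grpc.Server through the deprecated RegisterSpeechServer alias (new
// code should register against speechpb directly). The type and function
// names below are placeholders.
type sketchSpeechServer struct {
	UnimplementedSpeechServer
}

func registerSketchServer(s *grpc.Server) {
	RegisterSpeechServer(s, &sketchSpeechServer{})
}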