...

Source file src/google.golang.org/genproto/googleapis/cloud/speech/v1p1beta1/alias.go

Documentation: google.golang.org/genproto/googleapis/cloud/speech/v1p1beta1

     1  // Copyright 2022 Google LLC
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  // Code generated by aliasgen. DO NOT EDIT.
    16  
    17  // Package speech aliases all exported identifiers in package
    18  // "cloud.google.com/go/speech/apiv1p1beta1/speechpb".
    19  //
    20  // Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb.
    21  // Please read https://github.com/googleapis/google-cloud-go/blob/main/migration.md
    22  // for more details.
    23  package speech
    24  
    25  import (
    26  	src "cloud.google.com/go/speech/apiv1p1beta1/speechpb"
    27  	grpc "google.golang.org/grpc"
    28  )
    29  
    30  // Deprecated: Please use consts in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
    31  const (
    32  	RecognitionConfig_AMR                                 = src.RecognitionConfig_AMR
    33  	RecognitionConfig_AMR_WB                              = src.RecognitionConfig_AMR_WB
    34  	RecognitionConfig_ENCODING_UNSPECIFIED                = src.RecognitionConfig_ENCODING_UNSPECIFIED
    35  	RecognitionConfig_FLAC                                = src.RecognitionConfig_FLAC
    36  	RecognitionConfig_LINEAR16                            = src.RecognitionConfig_LINEAR16
    37  	RecognitionConfig_MP3                                 = src.RecognitionConfig_MP3
    38  	RecognitionConfig_MULAW                               = src.RecognitionConfig_MULAW
    39  	RecognitionConfig_OGG_OPUS                            = src.RecognitionConfig_OGG_OPUS
    40  	RecognitionConfig_SPEEX_WITH_HEADER_BYTE              = src.RecognitionConfig_SPEEX_WITH_HEADER_BYTE
    41  	RecognitionConfig_WEBM_OPUS                           = src.RecognitionConfig_WEBM_OPUS
    42  	RecognitionMetadata_AUDIO                             = src.RecognitionMetadata_AUDIO
    43  	RecognitionMetadata_DICTATION                         = src.RecognitionMetadata_DICTATION
    44  	RecognitionMetadata_DISCUSSION                        = src.RecognitionMetadata_DISCUSSION
    45  	RecognitionMetadata_FARFIELD                          = src.RecognitionMetadata_FARFIELD
    46  	RecognitionMetadata_INTERACTION_TYPE_UNSPECIFIED      = src.RecognitionMetadata_INTERACTION_TYPE_UNSPECIFIED
    47  	RecognitionMetadata_MICROPHONE_DISTANCE_UNSPECIFIED   = src.RecognitionMetadata_MICROPHONE_DISTANCE_UNSPECIFIED
    48  	RecognitionMetadata_MIDFIELD                          = src.RecognitionMetadata_MIDFIELD
    49  	RecognitionMetadata_NEARFIELD                         = src.RecognitionMetadata_NEARFIELD
    50  	RecognitionMetadata_ORIGINAL_MEDIA_TYPE_UNSPECIFIED   = src.RecognitionMetadata_ORIGINAL_MEDIA_TYPE_UNSPECIFIED
    51  	RecognitionMetadata_OTHER_INDOOR_DEVICE               = src.RecognitionMetadata_OTHER_INDOOR_DEVICE
    52  	RecognitionMetadata_OTHER_OUTDOOR_DEVICE              = src.RecognitionMetadata_OTHER_OUTDOOR_DEVICE
    53  	RecognitionMetadata_PC                                = src.RecognitionMetadata_PC
    54  	RecognitionMetadata_PHONE_CALL                        = src.RecognitionMetadata_PHONE_CALL
    55  	RecognitionMetadata_PHONE_LINE                        = src.RecognitionMetadata_PHONE_LINE
    56  	RecognitionMetadata_PRESENTATION                      = src.RecognitionMetadata_PRESENTATION
    57  	RecognitionMetadata_PROFESSIONALLY_PRODUCED           = src.RecognitionMetadata_PROFESSIONALLY_PRODUCED
    58  	RecognitionMetadata_RECORDING_DEVICE_TYPE_UNSPECIFIED = src.RecognitionMetadata_RECORDING_DEVICE_TYPE_UNSPECIFIED
    59  	RecognitionMetadata_SMARTPHONE                        = src.RecognitionMetadata_SMARTPHONE
    60  	RecognitionMetadata_VEHICLE                           = src.RecognitionMetadata_VEHICLE
    61  	RecognitionMetadata_VIDEO                             = src.RecognitionMetadata_VIDEO
    62  	RecognitionMetadata_VOICEMAIL                         = src.RecognitionMetadata_VOICEMAIL
    63  	RecognitionMetadata_VOICE_COMMAND                     = src.RecognitionMetadata_VOICE_COMMAND
    64  	RecognitionMetadata_VOICE_SEARCH                      = src.RecognitionMetadata_VOICE_SEARCH
    65  	StreamingRecognizeResponse_END_OF_SINGLE_UTTERANCE    = src.StreamingRecognizeResponse_END_OF_SINGLE_UTTERANCE
    66  	StreamingRecognizeResponse_SPEECH_EVENT_UNSPECIFIED   = src.StreamingRecognizeResponse_SPEECH_EVENT_UNSPECIFIED
    67  )
    68  
// Aliases for the generated proto file descriptors and the enum name/value
// lookup maps, re-exported from their new home.
//
// Deprecated: Please use vars in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
var (
	File_google_cloud_speech_v1p1beta1_cloud_speech_adaptation_proto = src.File_google_cloud_speech_v1p1beta1_cloud_speech_adaptation_proto
	File_google_cloud_speech_v1p1beta1_cloud_speech_proto            = src.File_google_cloud_speech_v1p1beta1_cloud_speech_proto
	File_google_cloud_speech_v1p1beta1_resource_proto                = src.File_google_cloud_speech_v1p1beta1_resource_proto
	RecognitionConfig_AudioEncoding_name                             = src.RecognitionConfig_AudioEncoding_name
	RecognitionConfig_AudioEncoding_value                            = src.RecognitionConfig_AudioEncoding_value
	RecognitionMetadata_InteractionType_name                         = src.RecognitionMetadata_InteractionType_name
	RecognitionMetadata_InteractionType_value                        = src.RecognitionMetadata_InteractionType_value
	RecognitionMetadata_MicrophoneDistance_name                      = src.RecognitionMetadata_MicrophoneDistance_name
	RecognitionMetadata_MicrophoneDistance_value                     = src.RecognitionMetadata_MicrophoneDistance_value
	RecognitionMetadata_OriginalMediaType_name                       = src.RecognitionMetadata_OriginalMediaType_name
	RecognitionMetadata_OriginalMediaType_value                      = src.RecognitionMetadata_OriginalMediaType_value
	RecognitionMetadata_RecordingDeviceType_name                     = src.RecognitionMetadata_RecordingDeviceType_name
	RecognitionMetadata_RecordingDeviceType_value                    = src.RecognitionMetadata_RecordingDeviceType_value
	StreamingRecognizeResponse_SpeechEventType_name                  = src.StreamingRecognizeResponse_SpeechEventType_name
	StreamingRecognizeResponse_SpeechEventType_value                 = src.StreamingRecognizeResponse_SpeechEventType_value
)
    87  
// AdaptationClient is the client API for Adaptation service. For semantics
// around ctx use and closing/ending streaming RPCs, please refer to
// https://pkg.go.dev/google.golang.org/grpc#ClientConn.NewStream.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type AdaptationClient = src.AdaptationClient

// AdaptationServer is the server API for Adaptation service.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type AdaptationServer = src.AdaptationServer

// Message sent by the client for the `CreateCustomClass` method.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type CreateCustomClassRequest = src.CreateCustomClassRequest

// Message sent by the client for the `CreatePhraseSet` method.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type CreatePhraseSetRequest = src.CreatePhraseSetRequest

// A set of words or phrases that represents a common concept likely to appear
// in your audio, for example a list of passenger ship names. CustomClass items
// can be substituted into placeholders that you set in PhraseSet phrases.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type CustomClass = src.CustomClass

// An item of the class.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type CustomClass_ClassItem = src.CustomClass_ClassItem

// Message sent by the client for the `DeleteCustomClass` method.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type DeleteCustomClassRequest = src.DeleteCustomClassRequest

// Message sent by the client for the `DeletePhraseSet` method.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type DeletePhraseSetRequest = src.DeletePhraseSetRequest

// Message sent by the client for the `GetCustomClass` method.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type GetCustomClassRequest = src.GetCustomClassRequest

// Message sent by the client for the `GetPhraseSet` method.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type GetPhraseSetRequest = src.GetPhraseSetRequest

// Message sent by the client for the `ListCustomClasses` method.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type ListCustomClassesRequest = src.ListCustomClassesRequest

// Message returned to the client by the `ListCustomClasses` method.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type ListCustomClassesResponse = src.ListCustomClassesResponse

// Message sent by the client for the `ListPhraseSet` method.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type ListPhraseSetRequest = src.ListPhraseSetRequest

// Message returned to the client by the `ListPhraseSet` method.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type ListPhraseSetResponse = src.ListPhraseSetResponse
   161  
// Describes the progress of a long-running `LongRunningRecognize` call. It is
// included in the `metadata` field of the `Operation` returned by the
// `GetOperation` call of the `google::longrunning::Operations` service.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type LongRunningRecognizeMetadata = src.LongRunningRecognizeMetadata

// The top-level message sent by the client for the `LongRunningRecognize`
// method.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type LongRunningRecognizeRequest = src.LongRunningRecognizeRequest

// The only message returned to the client by the `LongRunningRecognize`
// method. It contains the result as zero or more sequential
// `SpeechRecognitionResult` messages. It is included in the `result.response`
// field of the `Operation` returned by the `GetOperation` call of the
// `google::longrunning::Operations` service.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type LongRunningRecognizeResponse = src.LongRunningRecognizeResponse

// Provides "hints" to the speech recognizer to favor specific words and
// phrases in the results.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type PhraseSet = src.PhraseSet

// A phrase containing words and phrase "hints" so that the speech
// recognition is more likely to recognize them. This can be used to improve
// the accuracy for specific words and phrases, for example, if specific
// commands are typically spoken by the user. This can also be used to add
// additional words to the vocabulary of the recognizer. See [usage
// limits](https://cloud.google.com/speech-to-text/quotas#content). List items
// can also include pre-built or custom classes containing groups of words that
// represent common concepts that occur in natural language. For example,
// rather than providing a phrase hint for every month of the year (e.g. "i was
// born in january", "i was born in february", ...), use the pre-built `$MONTH`
// class improves the likelihood of correctly transcribing audio that includes
// months (e.g. "i was born in $month"). To refer to pre-built classes, use the
// class' symbol prepended with `$` e.g. `$MONTH`. To refer to custom classes
// that were defined inline in the request, set the class's `custom_class_id`
// to a string unique to all class resources and inline classes. Then use the
// class' id wrapped in `${...}` e.g. "${my-months}". To refer to custom
// classes resources, use the class' id wrapped in `${}` (e.g. `${my-months}`).
// Speech-to-Text supports three locations: `global`, `us` (US North America),
// and `eu` (Europe). If you are calling the `speech.googleapis.com` endpoint,
// use the `global` location. To specify a region, use a [regional
// endpoint](/speech-to-text/docs/endpoints) with matching `us` or `eu`
// location value.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type PhraseSet_Phrase = src.PhraseSet_Phrase

// Contains audio data in the encoding specified in the `RecognitionConfig`.
// Either `content` or `uri` must be supplied. Supplying both or neither
// returns
// [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. See
// [content limits](https://cloud.google.com/speech-to-text/quotas#content).
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type RecognitionAudio = src.RecognitionAudio

// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type RecognitionAudio_Content = src.RecognitionAudio_Content

// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type RecognitionAudio_Uri = src.RecognitionAudio_Uri
   226  
// Provides information to the recognizer that specifies how to process the
// request.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type RecognitionConfig = src.RecognitionConfig

// The encoding of the audio data sent in the request. All encodings support
// only 1 channel (mono) audio, unless the `audio_channel_count` and
// `enable_separate_recognition_per_channel` fields are set. For best results,
// the audio source should be captured and transmitted using a lossless
// encoding (`FLAC` or `LINEAR16`). The accuracy of the speech recognition can
// be reduced if lossy codecs are used to capture or transmit audio,
// particularly if background noise is present. Lossy codecs include `MULAW`,
// `AMR`, `AMR_WB`, `OGG_OPUS`, `SPEEX_WITH_HEADER_BYTE`, `MP3`, and
// `WEBM_OPUS`. The `FLAC` and `WAV` audio file formats include a header that
// describes the included audio content. You can request recognition for `WAV`
// files that contain either `LINEAR16` or `MULAW` encoded audio. If you send
// `FLAC` or `WAV` audio file format in your request, you do not need to
// specify an `AudioEncoding`; the audio encoding format is determined from the
// file header. If you specify an `AudioEncoding` when you send `FLAC` or
// `WAV` audio, the encoding configuration must match the encoding described in
// the audio header; otherwise the request returns an
// [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT] error
// code.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type RecognitionConfig_AudioEncoding = src.RecognitionConfig_AudioEncoding

// Description of audio data to be recognized. Deprecated: Do not use.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type RecognitionMetadata = src.RecognitionMetadata

// Use case categories that the audio recognition request can be described by.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type RecognitionMetadata_InteractionType = src.RecognitionMetadata_InteractionType

// Enumerates the types of capture settings describing an audio file.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type RecognitionMetadata_MicrophoneDistance = src.RecognitionMetadata_MicrophoneDistance

// The original media the speech was recorded on.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type RecognitionMetadata_OriginalMediaType = src.RecognitionMetadata_OriginalMediaType

// The type of device the speech was recorded with.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type RecognitionMetadata_RecordingDeviceType = src.RecognitionMetadata_RecordingDeviceType

// The top-level message sent by the client for the `Recognize` method.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type RecognizeRequest = src.RecognizeRequest

// The only message returned to the client by the `Recognize` method. It
// contains the result as zero or more sequential `SpeechRecognitionResult`
// messages.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type RecognizeResponse = src.RecognizeResponse

// Config to enable speaker diarization.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type SpeakerDiarizationConfig = src.SpeakerDiarizationConfig

// Speech adaptation configuration.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type SpeechAdaptation = src.SpeechAdaptation
   301  
// SpeechClient is the client API for Speech service. For semantics around ctx
// use and closing/ending streaming RPCs, please refer to
// https://pkg.go.dev/google.golang.org/grpc#ClientConn.NewStream.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type SpeechClient = src.SpeechClient

// Provides "hints" to the speech recognizer to favor specific words and
// phrases in the results.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type SpeechContext = src.SpeechContext

// Alternative hypotheses (a.k.a. n-best list).
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type SpeechRecognitionAlternative = src.SpeechRecognitionAlternative

// A speech recognition result corresponding to a portion of the audio.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type SpeechRecognitionResult = src.SpeechRecognitionResult

// SpeechServer is the server API for Speech service.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type SpeechServer = src.SpeechServer

// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type Speech_StreamingRecognizeClient = src.Speech_StreamingRecognizeClient

// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type Speech_StreamingRecognizeServer = src.Speech_StreamingRecognizeServer

// Provides information to the recognizer that specifies how to process the
// request.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type StreamingRecognitionConfig = src.StreamingRecognitionConfig

// A streaming speech recognition result corresponding to a portion of the
// audio that is currently being processed.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type StreamingRecognitionResult = src.StreamingRecognitionResult

// The top-level message sent by the client for the `StreamingRecognize`
// method. Multiple `StreamingRecognizeRequest` messages are sent. The first
// message must contain a `streaming_config` message and must not contain
// `audio_content`. All subsequent messages must contain `audio_content` and
// must not contain a `streaming_config` message.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type StreamingRecognizeRequest = src.StreamingRecognizeRequest

// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type StreamingRecognizeRequest_AudioContent = src.StreamingRecognizeRequest_AudioContent

// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type StreamingRecognizeRequest_StreamingConfig = src.StreamingRecognizeRequest_StreamingConfig
   354  
// `StreamingRecognizeResponse` is the only message returned to the client by
// `StreamingRecognize`. A series of zero or more `StreamingRecognizeResponse`
// messages are streamed back to the client. If there is no recognizable audio,
// and `single_utterance` is set to false, then no messages are streamed back
// to the client. Here's an example of a series of
// `StreamingRecognizeResponse`s that might be returned while processing audio:
// 1. results { alternatives { transcript: "tube" } stability: 0.01 } 2.
// results { alternatives { transcript: "to be a" } stability: 0.01 } 3.
// results { alternatives { transcript: "to be" } stability: 0.9 } results {
// alternatives { transcript: " or not to be" } stability: 0.01 } 4. results {
// alternatives { transcript: "to be or not to be" confidence: 0.92 }
// alternatives { transcript: "to bee or not to bee" } is_final: true } 5.
// results { alternatives { transcript: " that's" } stability: 0.01 } 6.
// results { alternatives { transcript: " that is" } stability: 0.9 } results {
// alternatives { transcript: " the question" } stability: 0.01 } 7. results {
// alternatives { transcript: " that is the question" confidence: 0.98 }
// alternatives { transcript: " that was the question" } is_final: true }
// Notes: - Only two of the above responses #4 and #7 contain final results;
// they are indicated by `is_final: true`. Concatenating these together
// generates the full transcript: "to be or not to be that is the question". -
// The others contain interim `results`. #3 and #6 contain two interim
// `results`: the first portion has a high stability and is less likely to
// change; the second portion has a low stability and is very likely to change.
// A UI designer might choose to show only high stability `results`. - The
// specific `stability` and `confidence` values shown above are only for
// illustrative purposes. Actual values may vary. - In each response, only one
// of these fields will be set: `error`, `speech_event_type`, or one or more
// (repeated) `results`.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type StreamingRecognizeResponse = src.StreamingRecognizeResponse

// Indicates the type of speech event.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type StreamingRecognizeResponse_SpeechEventType = src.StreamingRecognizeResponse_SpeechEventType

// Transcription normalization configuration. Use transcription normalization
// to automatically replace parts of the transcript with phrases of your
// choosing. For StreamingRecognize, this normalization only applies to stable
// partial transcripts (stability > 0.8) and final transcripts.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type TranscriptNormalization = src.TranscriptNormalization

// A single replacement configuration.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type TranscriptNormalization_Entry = src.TranscriptNormalization_Entry

// Specifies an optional destination for the recognition results.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type TranscriptOutputConfig = src.TranscriptOutputConfig

// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type TranscriptOutputConfig_GcsUri = src.TranscriptOutputConfig_GcsUri

// UnimplementedAdaptationServer can be embedded to have forward compatible
// implementations.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type UnimplementedAdaptationServer = src.UnimplementedAdaptationServer

// UnimplementedSpeechServer can be embedded to have forward compatible
// implementations.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type UnimplementedSpeechServer = src.UnimplementedSpeechServer

// Message sent by the client for the `UpdateCustomClass` method.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type UpdateCustomClassRequest = src.UpdateCustomClassRequest

// Message sent by the client for the `UpdatePhraseSet` method.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type UpdatePhraseSetRequest = src.UpdatePhraseSetRequest

// Word-specific information for recognized words.
//
// Deprecated: Please use types in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
type WordInfo = src.WordInfo
   437  
   438  // Deprecated: Please use funcs in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
   439  func NewAdaptationClient(cc grpc.ClientConnInterface) AdaptationClient {
   440  	return src.NewAdaptationClient(cc)
   441  }
   442  
   443  // Deprecated: Please use funcs in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
   444  func NewSpeechClient(cc grpc.ClientConnInterface) SpeechClient { return src.NewSpeechClient(cc) }
   445  
// RegisterAdaptationServer registers srv, an Adaptation service
// implementation, with the gRPC server s by delegating to the registration
// helper in the new package.
//
// Deprecated: Please use funcs in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
func RegisterAdaptationServer(s *grpc.Server, srv AdaptationServer) {
	src.RegisterAdaptationServer(s, srv)
}
   450  
   451  // Deprecated: Please use funcs in: cloud.google.com/go/speech/apiv1p1beta1/speechpb
   452  func RegisterSpeechServer(s *grpc.Server, srv SpeechServer) { src.RegisterSpeechServer(s, srv) }
   453  

View as plain text