Package videointelligence

import "google.golang.org/genproto/googleapis/cloud/videointelligence/v1p1beta1"

Index

Variables
func RegisterVideoIntelligenceServiceServer(s *grpc.Server, srv VideoIntelligenceServiceServer)
type AnnotateVideoProgress
    func (*AnnotateVideoProgress) Descriptor() ([]byte, []int)
    func (x *AnnotateVideoProgress) GetAnnotationProgress() []*VideoAnnotationProgress
    func (*AnnotateVideoProgress) ProtoMessage()
    func (x *AnnotateVideoProgress) ProtoReflect() protoreflect.Message
    func (x *AnnotateVideoProgress) Reset()
    func (x *AnnotateVideoProgress) String() string
type AnnotateVideoRequest
    func (*AnnotateVideoRequest) Descriptor() ([]byte, []int)
    func (x *AnnotateVideoRequest) GetFeatures() []Feature
    func (x *AnnotateVideoRequest) GetInputContent() []byte
    func (x *AnnotateVideoRequest) GetInputUri() string
    func (x *AnnotateVideoRequest) GetLocationId() string
    func (x *AnnotateVideoRequest) GetOutputUri() string
    func (x *AnnotateVideoRequest) GetVideoContext() *VideoContext
    func (*AnnotateVideoRequest) ProtoMessage()
    func (x *AnnotateVideoRequest) ProtoReflect() protoreflect.Message
    func (x *AnnotateVideoRequest) Reset()
    func (x *AnnotateVideoRequest) String() string
type AnnotateVideoResponse
    func (*AnnotateVideoResponse) Descriptor() ([]byte, []int)
    func (x *AnnotateVideoResponse) GetAnnotationResults() []*VideoAnnotationResults
    func (*AnnotateVideoResponse) ProtoMessage()
    func (x *AnnotateVideoResponse) ProtoReflect() protoreflect.Message
    func (x *AnnotateVideoResponse) Reset()
    func (x *AnnotateVideoResponse) String() string
type Entity
    func (*Entity) Descriptor() ([]byte, []int)
    func (x *Entity) GetDescription() string
    func (x *Entity) GetEntityId() string
    func (x *Entity) GetLanguageCode() string
    func (*Entity) ProtoMessage()
    func (x *Entity) ProtoReflect() protoreflect.Message
    func (x *Entity) Reset()
    func (x *Entity) String() string
type ExplicitContentAnnotation
    func (*ExplicitContentAnnotation) Descriptor() ([]byte, []int)
    func (x *ExplicitContentAnnotation) GetFrames() []*ExplicitContentFrame
    func (*ExplicitContentAnnotation) ProtoMessage()
    func (x *ExplicitContentAnnotation) ProtoReflect() protoreflect.Message
    func (x *ExplicitContentAnnotation) Reset()
    func (x *ExplicitContentAnnotation) String() string
type ExplicitContentDetectionConfig
    func (*ExplicitContentDetectionConfig) Descriptor() ([]byte, []int)
    func (x *ExplicitContentDetectionConfig) GetModel() string
    func (*ExplicitContentDetectionConfig) ProtoMessage()
    func (x *ExplicitContentDetectionConfig) ProtoReflect() protoreflect.Message
    func (x *ExplicitContentDetectionConfig) Reset()
    func (x *ExplicitContentDetectionConfig) String() string
type ExplicitContentFrame
    func (*ExplicitContentFrame) Descriptor() ([]byte, []int)
    func (x *ExplicitContentFrame) GetPornographyLikelihood() Likelihood
    func (x *ExplicitContentFrame) GetTimeOffset() *durationpb.Duration
    func (*ExplicitContentFrame) ProtoMessage()
    func (x *ExplicitContentFrame) ProtoReflect() protoreflect.Message
    func (x *ExplicitContentFrame) Reset()
    func (x *ExplicitContentFrame) String() string
type Feature
    func (Feature) Descriptor() protoreflect.EnumDescriptor
    func (x Feature) Enum() *Feature
    func (Feature) EnumDescriptor() ([]byte, []int)
    func (x Feature) Number() protoreflect.EnumNumber
    func (x Feature) String() string
    func (Feature) Type() protoreflect.EnumType
type LabelAnnotation
    func (*LabelAnnotation) Descriptor() ([]byte, []int)
    func (x *LabelAnnotation) GetCategoryEntities() []*Entity
    func (x *LabelAnnotation) GetEntity() *Entity
    func (x *LabelAnnotation) GetFrames() []*LabelFrame
    func (x *LabelAnnotation) GetSegments() []*LabelSegment
    func (*LabelAnnotation) ProtoMessage()
    func (x *LabelAnnotation) ProtoReflect() protoreflect.Message
    func (x *LabelAnnotation) Reset()
    func (x *LabelAnnotation) String() string
type LabelDetectionConfig
    func (*LabelDetectionConfig) Descriptor() ([]byte, []int)
    func (x *LabelDetectionConfig) GetLabelDetectionMode() LabelDetectionMode
    func (x *LabelDetectionConfig) GetModel() string
    func (x *LabelDetectionConfig) GetStationaryCamera() bool
    func (*LabelDetectionConfig) ProtoMessage()
    func (x *LabelDetectionConfig) ProtoReflect() protoreflect.Message
    func (x *LabelDetectionConfig) Reset()
    func (x *LabelDetectionConfig) String() string
type LabelDetectionMode
    func (LabelDetectionMode) Descriptor() protoreflect.EnumDescriptor
    func (x LabelDetectionMode) Enum() *LabelDetectionMode
    func (LabelDetectionMode) EnumDescriptor() ([]byte, []int)
    func (x LabelDetectionMode) Number() protoreflect.EnumNumber
    func (x LabelDetectionMode) String() string
    func (LabelDetectionMode) Type() protoreflect.EnumType
type LabelFrame
    func (*LabelFrame) Descriptor() ([]byte, []int)
    func (x *LabelFrame) GetConfidence() float32
    func (x *LabelFrame) GetTimeOffset() *durationpb.Duration
    func (*LabelFrame) ProtoMessage()
    func (x *LabelFrame) ProtoReflect() protoreflect.Message
    func (x *LabelFrame) Reset()
    func (x *LabelFrame) String() string
type LabelSegment
    func (*LabelSegment) Descriptor() ([]byte, []int)
    func (x *LabelSegment) GetConfidence() float32
    func (x *LabelSegment) GetSegment() *VideoSegment
    func (*LabelSegment) ProtoMessage()
    func (x *LabelSegment) ProtoReflect() protoreflect.Message
    func (x *LabelSegment) Reset()
    func (x *LabelSegment) String() string
type Likelihood
    func (Likelihood) Descriptor() protoreflect.EnumDescriptor
    func (x Likelihood) Enum() *Likelihood
    func (Likelihood) EnumDescriptor() ([]byte, []int)
    func (x Likelihood) Number() protoreflect.EnumNumber
    func (x Likelihood) String() string
    func (Likelihood) Type() protoreflect.EnumType
type ShotChangeDetectionConfig
    func (*ShotChangeDetectionConfig) Descriptor() ([]byte, []int)
    func (x *ShotChangeDetectionConfig) GetModel() string
    func (*ShotChangeDetectionConfig) ProtoMessage()
    func (x *ShotChangeDetectionConfig) ProtoReflect() protoreflect.Message
    func (x *ShotChangeDetectionConfig) Reset()
    func (x *ShotChangeDetectionConfig) String() string
type SpeechContext
    func (*SpeechContext) Descriptor() ([]byte, []int)
    func (x *SpeechContext) GetPhrases() []string
    func (*SpeechContext) ProtoMessage()
    func (x *SpeechContext) ProtoReflect() protoreflect.Message
    func (x *SpeechContext) Reset()
    func (x *SpeechContext) String() string
type SpeechRecognitionAlternative
    func (*SpeechRecognitionAlternative) Descriptor() ([]byte, []int)
    func (x *SpeechRecognitionAlternative) GetConfidence() float32
    func (x *SpeechRecognitionAlternative) GetTranscript() string
    func (x *SpeechRecognitionAlternative) GetWords() []*WordInfo
    func (*SpeechRecognitionAlternative) ProtoMessage()
    func (x *SpeechRecognitionAlternative) ProtoReflect() protoreflect.Message
    func (x *SpeechRecognitionAlternative) Reset()
    func (x *SpeechRecognitionAlternative) String() string
type SpeechTranscription
    func (*SpeechTranscription) Descriptor() ([]byte, []int)
    func (x *SpeechTranscription) GetAlternatives() []*SpeechRecognitionAlternative
    func (*SpeechTranscription) ProtoMessage()
    func (x *SpeechTranscription) ProtoReflect() protoreflect.Message
    func (x *SpeechTranscription) Reset()
    func (x *SpeechTranscription) String() string
type SpeechTranscriptionConfig
    func (*SpeechTranscriptionConfig) Descriptor() ([]byte, []int)
    func (x *SpeechTranscriptionConfig) GetAudioTracks() []int32
    func (x *SpeechTranscriptionConfig) GetEnableAutomaticPunctuation() bool
    func (x *SpeechTranscriptionConfig) GetFilterProfanity() bool
    func (x *SpeechTranscriptionConfig) GetLanguageCode() string
    func (x *SpeechTranscriptionConfig) GetMaxAlternatives() int32
    func (x *SpeechTranscriptionConfig) GetSpeechContexts() []*SpeechContext
    func (*SpeechTranscriptionConfig) ProtoMessage()
    func (x *SpeechTranscriptionConfig) ProtoReflect() protoreflect.Message
    func (x *SpeechTranscriptionConfig) Reset()
    func (x *SpeechTranscriptionConfig) String() string
type UnimplementedVideoIntelligenceServiceServer
    func (*UnimplementedVideoIntelligenceServiceServer) AnnotateVideo(context.Context, *AnnotateVideoRequest) (*longrunning.Operation, error)
type VideoAnnotationProgress
    func (*VideoAnnotationProgress) Descriptor() ([]byte, []int)
    func (x *VideoAnnotationProgress) GetInputUri() string
    func (x *VideoAnnotationProgress) GetProgressPercent() int32
    func (x *VideoAnnotationProgress) GetStartTime() *timestamppb.Timestamp
    func (x *VideoAnnotationProgress) GetUpdateTime() *timestamppb.Timestamp
    func (*VideoAnnotationProgress) ProtoMessage()
    func (x *VideoAnnotationProgress) ProtoReflect() protoreflect.Message
    func (x *VideoAnnotationProgress) Reset()
    func (x *VideoAnnotationProgress) String() string
type VideoAnnotationResults
    func (*VideoAnnotationResults) Descriptor() ([]byte, []int)
    func (x *VideoAnnotationResults) GetError() *status.Status
    func (x *VideoAnnotationResults) GetExplicitAnnotation() *ExplicitContentAnnotation
    func (x *VideoAnnotationResults) GetFrameLabelAnnotations() []*LabelAnnotation
    func (x *VideoAnnotationResults) GetInputUri() string
    func (x *VideoAnnotationResults) GetSegmentLabelAnnotations() []*LabelAnnotation
    func (x *VideoAnnotationResults) GetShotAnnotations() []*VideoSegment
    func (x *VideoAnnotationResults) GetShotLabelAnnotations() []*LabelAnnotation
    func (x *VideoAnnotationResults) GetSpeechTranscriptions() []*SpeechTranscription
    func (*VideoAnnotationResults) ProtoMessage()
    func (x *VideoAnnotationResults) ProtoReflect() protoreflect.Message
    func (x *VideoAnnotationResults) Reset()
    func (x *VideoAnnotationResults) String() string
type VideoContext
    func (*VideoContext) Descriptor() ([]byte, []int)
    func (x *VideoContext) GetExplicitContentDetectionConfig() *ExplicitContentDetectionConfig
    func (x *VideoContext) GetLabelDetectionConfig() *LabelDetectionConfig
    func (x *VideoContext) GetSegments() []*VideoSegment
    func (x *VideoContext) GetShotChangeDetectionConfig() *ShotChangeDetectionConfig
    func (x *VideoContext) GetSpeechTranscriptionConfig() *SpeechTranscriptionConfig
    func (*VideoContext) ProtoMessage()
    func (x *VideoContext) ProtoReflect() protoreflect.Message
    func (x *VideoContext) Reset()
    func (x *VideoContext) String() string
type VideoIntelligenceServiceClient
    func NewVideoIntelligenceServiceClient(cc grpc.ClientConnInterface) VideoIntelligenceServiceClient
type VideoIntelligenceServiceServer
type VideoSegment
    func (*VideoSegment) Descriptor() ([]byte, []int)
    func (x *VideoSegment) GetEndTimeOffset() *durationpb.Duration
    func (x *VideoSegment) GetStartTimeOffset() *durationpb.Duration
    func (*VideoSegment) ProtoMessage()
    func (x *VideoSegment) ProtoReflect() protoreflect.Message
    func (x *VideoSegment) Reset()
    func (x *VideoSegment) String() string
type WordInfo
    func (*WordInfo) Descriptor() ([]byte, []int)
    func (x *WordInfo) GetEndTime() *durationpb.Duration
    func (x *WordInfo) GetStartTime() *durationpb.Duration
    func (x *WordInfo) GetWord() string
    func (*WordInfo) ProtoMessage()
    func (x *WordInfo) ProtoReflect() protoreflect.Message
    func (x *WordInfo) Reset()
    func (x *WordInfo) String() string

Package files

video_intelligence.pb.go

Variables

Enum value maps for Feature.

var (
    Feature_name = map[int32]string{
        0: "FEATURE_UNSPECIFIED",
        1: "LABEL_DETECTION",
        2: "SHOT_CHANGE_DETECTION",
        3: "EXPLICIT_CONTENT_DETECTION",
        6: "SPEECH_TRANSCRIPTION",
    }
    Feature_value = map[string]int32{
        "FEATURE_UNSPECIFIED":        0,
        "LABEL_DETECTION":            1,
        "SHOT_CHANGE_DETECTION":      2,
        "EXPLICIT_CONTENT_DETECTION": 3,
        "SPEECH_TRANSCRIPTION":       6,
    }
)

Enum value maps for LabelDetectionMode.

var (
    LabelDetectionMode_name = map[int32]string{
        0: "LABEL_DETECTION_MODE_UNSPECIFIED",
        1: "SHOT_MODE",
        2: "FRAME_MODE",
        3: "SHOT_AND_FRAME_MODE",
    }
    LabelDetectionMode_value = map[string]int32{
        "LABEL_DETECTION_MODE_UNSPECIFIED": 0,
        "SHOT_MODE":                        1,
        "FRAME_MODE":                       2,
        "SHOT_AND_FRAME_MODE":              3,
    }
)

Enum value maps for Likelihood.

var (
    Likelihood_name = map[int32]string{
        0: "LIKELIHOOD_UNSPECIFIED",
        1: "VERY_UNLIKELY",
        2: "UNLIKELY",
        3: "POSSIBLE",
        4: "LIKELY",
        5: "VERY_LIKELY",
    }
    Likelihood_value = map[string]int32{
        "LIKELIHOOD_UNSPECIFIED": 0,
        "VERY_UNLIKELY":          1,
        "UNLIKELY":               2,
        "POSSIBLE":               3,
        "LIKELY":                 4,
        "VERY_LIKELY":            5,
    }
)
var File_google_cloud_videointelligence_v1p1beta1_video_intelligence_proto protoreflect.FileDescriptor

func RegisterVideoIntelligenceServiceServer

func RegisterVideoIntelligenceServiceServer(s *grpc.Server, srv VideoIntelligenceServiceServer)

type AnnotateVideoProgress

Video annotation progress. Included in the `metadata` field of the `Operation` returned by the `GetOperation` call of the `google::longrunning::Operations` service.

type AnnotateVideoProgress struct {

    // Progress metadata for all videos specified in `AnnotateVideoRequest`.
    AnnotationProgress []*VideoAnnotationProgress `protobuf:"bytes,1,rep,name=annotation_progress,json=annotationProgress,proto3" json:"annotation_progress,omitempty"`
    // contains filtered or unexported fields
}
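
The progress message arrives as the `metadata` payload of the long-running Operation returned by AnnotateVideo. A minimal sketch of unpacking it; the helper name and the `videopb` import alias are illustrative:

import (
    videopb "google.golang.org/genproto/googleapis/cloud/videointelligence/v1p1beta1"
    "google.golang.org/genproto/googleapis/longrunning"
)

// annotationProgress unpacks AnnotateVideoProgress from the metadata of a
// long-running Operation and reports the per-video progress percentages.
func annotationProgress(op *longrunning.Operation) ([]int32, error) {
    var md videopb.AnnotateVideoProgress
    if err := op.GetMetadata().UnmarshalTo(&md); err != nil {
        return nil, err
    }
    var percents []int32
    for _, p := range md.GetAnnotationProgress() {
        percents = append(percents, p.GetProgressPercent())
    }
    return percents, nil
}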

func (*AnnotateVideoProgress) Descriptor

func (*AnnotateVideoProgress) Descriptor() ([]byte, []int)

Deprecated: Use AnnotateVideoProgress.ProtoReflect.Descriptor instead.

func (*AnnotateVideoProgress) GetAnnotationProgress

func (x *AnnotateVideoProgress) GetAnnotationProgress() []*VideoAnnotationProgress

func (*AnnotateVideoProgress) ProtoMessage

func (*AnnotateVideoProgress) ProtoMessage()

func (*AnnotateVideoProgress) ProtoReflect

func (x *AnnotateVideoProgress) ProtoReflect() protoreflect.Message

func (*AnnotateVideoProgress) Reset

func (x *AnnotateVideoProgress) Reset()

func (*AnnotateVideoProgress) String

func (x *AnnotateVideoProgress) String() string

type AnnotateVideoRequest

Video annotation request.

type AnnotateVideoRequest struct {

    // Input video location. Currently, only
    // [Google Cloud Storage](https://cloud.google.com/storage/) URIs are
    // supported, which must be specified in the following format:
    // `gs://bucket-id/object-id` (other URI formats return
    // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
    // more information, see [Request
    // URIs](https://cloud.google.com/storage/docs/request-endpoints). A video URI
    // may include wildcards in `object-id`, and thus identify multiple videos.
    // Supported wildcards: '*' to match 0 or more characters;
    // '?' to match 1 character. If unset, the input video should be embedded
    // in the request as `input_content`. If set, `input_content` should be unset.
    InputUri string `protobuf:"bytes,1,opt,name=input_uri,json=inputUri,proto3" json:"input_uri,omitempty"`
    // The video data bytes.
    // If unset, the input video(s) should be specified via `input_uri`.
    // If set, `input_uri` should be unset.
    InputContent []byte `protobuf:"bytes,6,opt,name=input_content,json=inputContent,proto3" json:"input_content,omitempty"`
    // Required. Requested video annotation features.
    Features []Feature `protobuf:"varint,2,rep,packed,name=features,proto3,enum=google.cloud.videointelligence.v1p1beta1.Feature" json:"features,omitempty"`
    // Additional video context and/or feature-specific parameters.
    VideoContext *VideoContext `protobuf:"bytes,3,opt,name=video_context,json=videoContext,proto3" json:"video_context,omitempty"`
    // Optional. Location where the output (in JSON format) should be stored.
    // Currently, only [Google Cloud Storage](https://cloud.google.com/storage/)
    // URIs are supported, which must be specified in the following format:
    // `gs://bucket-id/object-id` (other URI formats return
    // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
    // more information, see [Request
    // URIs](https://cloud.google.com/storage/docs/request-endpoints).
    OutputUri string `protobuf:"bytes,4,opt,name=output_uri,json=outputUri,proto3" json:"output_uri,omitempty"`
    // Optional. Cloud region where annotation should take place. Supported cloud
    // regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region
    // is specified, a region will be determined based on video file location.
    LocationId string `protobuf:"bytes,5,opt,name=location_id,json=locationId,proto3" json:"location_id,omitempty"`
    // contains filtered or unexported fields
}
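
As a sketch, a request for label detection and speech transcription on a Cloud Storage object could be assembled like this; the `gs://` URI, the helper name, and the `videopb` alias are placeholders:

import (
    videopb "google.golang.org/genproto/googleapis/cloud/videointelligence/v1p1beta1"
)

// newRequest builds an AnnotateVideoRequest for a video stored in Cloud
// Storage, asking for label detection and speech transcription.
// The gs:// URI is a placeholder.
func newRequest() *videopb.AnnotateVideoRequest {
    return &videopb.AnnotateVideoRequest{
        InputUri: "gs://example-bucket/example-video.mp4",
        Features: []videopb.Feature{
            videopb.Feature_LABEL_DETECTION,
            videopb.Feature_SPEECH_TRANSCRIPTION,
        },
        VideoContext: &videopb.VideoContext{
            SpeechTranscriptionConfig: &videopb.SpeechTranscriptionConfig{
                LanguageCode: "en-US",
            },
        },
    }
}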

func (*AnnotateVideoRequest) Descriptor

func (*AnnotateVideoRequest) Descriptor() ([]byte, []int)

Deprecated: Use AnnotateVideoRequest.ProtoReflect.Descriptor instead.

func (*AnnotateVideoRequest) GetFeatures

func (x *AnnotateVideoRequest) GetFeatures() []Feature

func (*AnnotateVideoRequest) GetInputContent

func (x *AnnotateVideoRequest) GetInputContent() []byte

func (*AnnotateVideoRequest) GetInputUri

func (x *AnnotateVideoRequest) GetInputUri() string

func (*AnnotateVideoRequest) GetLocationId

func (x *AnnotateVideoRequest) GetLocationId() string

func (*AnnotateVideoRequest) GetOutputUri

func (x *AnnotateVideoRequest) GetOutputUri() string

func (*AnnotateVideoRequest) GetVideoContext

func (x *AnnotateVideoRequest) GetVideoContext() *VideoContext

func (*AnnotateVideoRequest) ProtoMessage

func (*AnnotateVideoRequest) ProtoMessage()

func (*AnnotateVideoRequest) ProtoReflect

func (x *AnnotateVideoRequest) ProtoReflect() protoreflect.Message

func (*AnnotateVideoRequest) Reset

func (x *AnnotateVideoRequest) Reset()

func (*AnnotateVideoRequest) String

func (x *AnnotateVideoRequest) String() string

type AnnotateVideoResponse

Video annotation response. Included in the `response` field of the `Operation` returned by the `GetOperation` call of the `google::longrunning::Operations` service.

type AnnotateVideoResponse struct {

    // Annotation results for all videos specified in `AnnotateVideoRequest`.
    AnnotationResults []*VideoAnnotationResults `protobuf:"bytes,1,rep,name=annotation_results,json=annotationResults,proto3" json:"annotation_results,omitempty"`
    // contains filtered or unexported fields
}
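
Once the Operation completes, the results can be unpacked from its `response` payload. A minimal sketch (helper name illustrative):

import (
    videopb "google.golang.org/genproto/googleapis/cloud/videointelligence/v1p1beta1"
    "google.golang.org/genproto/googleapis/longrunning"
)

// annotationResults unpacks AnnotateVideoResponse from a completed
// long-running Operation.
func annotationResults(op *longrunning.Operation) ([]*videopb.VideoAnnotationResults, error) {
    var resp videopb.AnnotateVideoResponse
    if err := op.GetResponse().UnmarshalTo(&resp); err != nil {
        return nil, err
    }
    return resp.GetAnnotationResults(), nil
}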

func (*AnnotateVideoResponse) Descriptor

func (*AnnotateVideoResponse) Descriptor() ([]byte, []int)

Deprecated: Use AnnotateVideoResponse.ProtoReflect.Descriptor instead.

func (*AnnotateVideoResponse) GetAnnotationResults

func (x *AnnotateVideoResponse) GetAnnotationResults() []*VideoAnnotationResults

func (*AnnotateVideoResponse) ProtoMessage

func (*AnnotateVideoResponse) ProtoMessage()

func (*AnnotateVideoResponse) ProtoReflect

func (x *AnnotateVideoResponse) ProtoReflect() protoreflect.Message

func (*AnnotateVideoResponse) Reset

func (x *AnnotateVideoResponse) Reset()

func (*AnnotateVideoResponse) String

func (x *AnnotateVideoResponse) String() string

type Entity

Detected entity from video analysis.

type Entity struct {

    // Opaque entity ID. Some IDs may be available in
    // [Google Knowledge Graph Search
    // API](https://developers.google.com/knowledge-graph/).
    EntityId string `protobuf:"bytes,1,opt,name=entity_id,json=entityId,proto3" json:"entity_id,omitempty"`
    // Textual description, e.g. `Fixed-gear bicycle`.
    Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
    // Language code for `description` in BCP-47 format.
    LanguageCode string `protobuf:"bytes,3,opt,name=language_code,json=languageCode,proto3" json:"language_code,omitempty"`
    // contains filtered or unexported fields
}

func (*Entity) Descriptor

func (*Entity) Descriptor() ([]byte, []int)

Deprecated: Use Entity.ProtoReflect.Descriptor instead.

func (*Entity) GetDescription

func (x *Entity) GetDescription() string

func (*Entity) GetEntityId

func (x *Entity) GetEntityId() string

func (*Entity) GetLanguageCode

func (x *Entity) GetLanguageCode() string

func (*Entity) ProtoMessage

func (*Entity) ProtoMessage()

func (*Entity) ProtoReflect

func (x *Entity) ProtoReflect() protoreflect.Message

func (*Entity) Reset

func (x *Entity) Reset()

func (*Entity) String

func (x *Entity) String() string

type ExplicitContentAnnotation

Explicit content annotation (based on per-frame visual signals only). If no explicit content has been detected in a frame, no annotations are present for that frame.

type ExplicitContentAnnotation struct {

    // All video frames where explicit content was detected.
    Frames []*ExplicitContentFrame `protobuf:"bytes,1,rep,name=frames,proto3" json:"frames,omitempty"`
    // contains filtered or unexported fields
}
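
A sketch of scanning the per-frame results for frames at or above a chosen likelihood; the comparison relies on the numeric ordering of the Likelihood enum values, and the helper name is illustrative:

import (
    videopb "google.golang.org/genproto/googleapis/cloud/videointelligence/v1p1beta1"
    "google.golang.org/protobuf/types/known/durationpb"
)

// flaggedOffsets returns the time offsets of frames whose pornography
// likelihood is at least the given threshold, e.g. Likelihood_LIKELY.
func flaggedOffsets(a *videopb.ExplicitContentAnnotation, min videopb.Likelihood) []*durationpb.Duration {
    var offsets []*durationpb.Duration
    for _, f := range a.GetFrames() {
        if f.GetPornographyLikelihood() >= min {
            offsets = append(offsets, f.GetTimeOffset())
        }
    }
    return offsets
}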

func (*ExplicitContentAnnotation) Descriptor

func (*ExplicitContentAnnotation) Descriptor() ([]byte, []int)

Deprecated: Use ExplicitContentAnnotation.ProtoReflect.Descriptor instead.

func (*ExplicitContentAnnotation) GetFrames

func (x *ExplicitContentAnnotation) GetFrames() []*ExplicitContentFrame

func (*ExplicitContentAnnotation) ProtoMessage

func (*ExplicitContentAnnotation) ProtoMessage()

func (*ExplicitContentAnnotation) ProtoReflect

func (x *ExplicitContentAnnotation) ProtoReflect() protoreflect.Message

func (*ExplicitContentAnnotation) Reset

func (x *ExplicitContentAnnotation) Reset()

func (*ExplicitContentAnnotation) String

func (x *ExplicitContentAnnotation) String() string

type ExplicitContentDetectionConfig

Config for EXPLICIT_CONTENT_DETECTION.

type ExplicitContentDetectionConfig struct {

    // Model to use for explicit content detection.
    // Supported values: "builtin/stable" (the default if unset) and
    // "builtin/latest".
    Model string `protobuf:"bytes,1,opt,name=model,proto3" json:"model,omitempty"`
    // contains filtered or unexported fields
}

func (*ExplicitContentDetectionConfig) Descriptor

func (*ExplicitContentDetectionConfig) Descriptor() ([]byte, []int)

Deprecated: Use ExplicitContentDetectionConfig.ProtoReflect.Descriptor instead.

func (*ExplicitContentDetectionConfig) GetModel

func (x *ExplicitContentDetectionConfig) GetModel() string

func (*ExplicitContentDetectionConfig) ProtoMessage

func (*ExplicitContentDetectionConfig) ProtoMessage()

func (*ExplicitContentDetectionConfig) ProtoReflect

func (x *ExplicitContentDetectionConfig) ProtoReflect() protoreflect.Message

func (*ExplicitContentDetectionConfig) Reset

func (x *ExplicitContentDetectionConfig) Reset()

func (*ExplicitContentDetectionConfig) String

func (x *ExplicitContentDetectionConfig) String() string

type ExplicitContentFrame

Video frame level annotation results for explicit content.

type ExplicitContentFrame struct {

    // Time-offset, relative to the beginning of the video, corresponding to the
    // video frame for this location.
    TimeOffset *durationpb.Duration `protobuf:"bytes,1,opt,name=time_offset,json=timeOffset,proto3" json:"time_offset,omitempty"`
    // Likelihood of pornographic content.
    PornographyLikelihood Likelihood `protobuf:"varint,2,opt,name=pornography_likelihood,json=pornographyLikelihood,proto3,enum=google.cloud.videointelligence.v1p1beta1.Likelihood" json:"pornography_likelihood,omitempty"`
    // contains filtered or unexported fields
}

func (*ExplicitContentFrame) Descriptor

func (*ExplicitContentFrame) Descriptor() ([]byte, []int)

Deprecated: Use ExplicitContentFrame.ProtoReflect.Descriptor instead.

func (*ExplicitContentFrame) GetPornographyLikelihood

func (x *ExplicitContentFrame) GetPornographyLikelihood() Likelihood

func (*ExplicitContentFrame) GetTimeOffset

func (x *ExplicitContentFrame) GetTimeOffset() *durationpb.Duration

func (*ExplicitContentFrame) ProtoMessage

func (*ExplicitContentFrame) ProtoMessage()

func (*ExplicitContentFrame) ProtoReflect

func (x *ExplicitContentFrame) ProtoReflect() protoreflect.Message

func (*ExplicitContentFrame) Reset

func (x *ExplicitContentFrame) Reset()

func (*ExplicitContentFrame) String

func (x *ExplicitContentFrame) String() string

type Feature

Video annotation feature.

type Feature int32
const (
    // Unspecified.
    Feature_FEATURE_UNSPECIFIED Feature = 0
    // Label detection. Detect objects, such as dog or flower.
    Feature_LABEL_DETECTION Feature = 1
    // Shot change detection.
    Feature_SHOT_CHANGE_DETECTION Feature = 2
    // Explicit content detection.
    Feature_EXPLICIT_CONTENT_DETECTION Feature = 3
    // Speech transcription.
    Feature_SPEECH_TRANSCRIPTION Feature = 6
)
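
The Feature_value map listed under Variables can translate textual feature names (for example, from a flag or configuration file) into enum values. A minimal sketch (helper name illustrative):

import (
    "fmt"

    videopb "google.golang.org/genproto/googleapis/cloud/videointelligence/v1p1beta1"
)

// parseFeatures converts feature names such as "LABEL_DETECTION" into
// Feature enum values using the generated Feature_value map.
func parseFeatures(names []string) ([]videopb.Feature, error) {
    features := make([]videopb.Feature, 0, len(names))
    for _, n := range names {
        v, ok := videopb.Feature_value[n]
        if !ok {
            return nil, fmt.Errorf("unknown feature %q", n)
        }
        features = append(features, videopb.Feature(v))
    }
    return features, nil
}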

func (Feature) Descriptor

func (Feature) Descriptor() protoreflect.EnumDescriptor

func (Feature) Enum

func (x Feature) Enum() *Feature

func (Feature) EnumDescriptor

func (Feature) EnumDescriptor() ([]byte, []int)

Deprecated: Use Feature.Descriptor instead.

func (Feature) Number

func (x Feature) Number() protoreflect.EnumNumber

func (Feature) String

func (x Feature) String() string

func (Feature) Type

func (Feature) Type() protoreflect.EnumType

type LabelAnnotation

Label annotation.

type LabelAnnotation struct {

    // Detected entity.
    Entity *Entity `protobuf:"bytes,1,opt,name=entity,proto3" json:"entity,omitempty"`
    // Common categories for the detected entity.
    // E.g. when the label is `Terrier`, the category is likely `dog`. In some
    // cases there may be more than one category, e.g. `Terrier` could also be
    // a `pet`.
    CategoryEntities []*Entity `protobuf:"bytes,2,rep,name=category_entities,json=categoryEntities,proto3" json:"category_entities,omitempty"`
    // All video segments where a label was detected.
    Segments []*LabelSegment `protobuf:"bytes,3,rep,name=segments,proto3" json:"segments,omitempty"`
    // All video frames where a label was detected.
    Frames []*LabelFrame `protobuf:"bytes,4,rep,name=frames,proto3" json:"frames,omitempty"`
    // contains filtered or unexported fields
}
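
A sketch of reading a label annotation: report the detected entity and the highest confidence observed across its segments (helper name illustrative):

import (
    "fmt"

    videopb "google.golang.org/genproto/googleapis/cloud/videointelligence/v1p1beta1"
)

// describeLabel prints the label's entity description and the best
// confidence observed across its segments.
func describeLabel(la *videopb.LabelAnnotation) {
    best := float32(0)
    for _, s := range la.GetSegments() {
        if s.GetConfidence() > best {
            best = s.GetConfidence()
        }
    }
    fmt.Printf("%s (best segment confidence %.2f)\n", la.GetEntity().GetDescription(), best)
}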

func (*LabelAnnotation) Descriptor

func (*LabelAnnotation) Descriptor() ([]byte, []int)

Deprecated: Use LabelAnnotation.ProtoReflect.Descriptor instead.

func (*LabelAnnotation) GetCategoryEntities

func (x *LabelAnnotation) GetCategoryEntities() []*Entity

func (*LabelAnnotation) GetEntity

func (x *LabelAnnotation) GetEntity() *Entity

func (*LabelAnnotation) GetFrames

func (x *LabelAnnotation) GetFrames() []*LabelFrame

func (*LabelAnnotation) GetSegments

func (x *LabelAnnotation) GetSegments() []*LabelSegment

func (*LabelAnnotation) ProtoMessage

func (*LabelAnnotation) ProtoMessage()

func (*LabelAnnotation) ProtoReflect

func (x *LabelAnnotation) ProtoReflect() protoreflect.Message

func (*LabelAnnotation) Reset

func (x *LabelAnnotation) Reset()

func (*LabelAnnotation) String

func (x *LabelAnnotation) String() string

type LabelDetectionConfig

Config for LABEL_DETECTION.

type LabelDetectionConfig struct {

    // What labels should be detected with LABEL_DETECTION, in addition to
    // video-level labels or segment-level labels.
    // If unspecified, defaults to `SHOT_MODE`.
    LabelDetectionMode LabelDetectionMode `protobuf:"varint,1,opt,name=label_detection_mode,json=labelDetectionMode,proto3,enum=google.cloud.videointelligence.v1p1beta1.LabelDetectionMode" json:"label_detection_mode,omitempty"`
    // Whether the video has been shot from a stationary (i.e. non-moving) camera.
    // When set to true, might improve detection accuracy for moving objects.
    // Should be used with `SHOT_AND_FRAME_MODE` enabled.
    StationaryCamera bool `protobuf:"varint,2,opt,name=stationary_camera,json=stationaryCamera,proto3" json:"stationary_camera,omitempty"`
    // Model to use for label detection.
    // Supported values: "builtin/stable" (the default if unset) and
    // "builtin/latest".
    Model string `protobuf:"bytes,3,opt,name=model,proto3" json:"model,omitempty"`
    // contains filtered or unexported fields
}

func (*LabelDetectionConfig) Descriptor

func (*LabelDetectionConfig) Descriptor() ([]byte, []int)

Deprecated: Use LabelDetectionConfig.ProtoReflect.Descriptor instead.

func (*LabelDetectionConfig) GetLabelDetectionMode

func (x *LabelDetectionConfig) GetLabelDetectionMode() LabelDetectionMode

func (*LabelDetectionConfig) GetModel

func (x *LabelDetectionConfig) GetModel() string

func (*LabelDetectionConfig) GetStationaryCamera

func (x *LabelDetectionConfig) GetStationaryCamera() bool

func (*LabelDetectionConfig) ProtoMessage

func (*LabelDetectionConfig) ProtoMessage()

func (*LabelDetectionConfig) ProtoReflect

func (x *LabelDetectionConfig) ProtoReflect() protoreflect.Message

func (*LabelDetectionConfig) Reset

func (x *LabelDetectionConfig) Reset()

func (*LabelDetectionConfig) String

func (x *LabelDetectionConfig) String() string

type LabelDetectionMode

Label detection mode.

type LabelDetectionMode int32
const (
    // Unspecified.
    LabelDetectionMode_LABEL_DETECTION_MODE_UNSPECIFIED LabelDetectionMode = 0
    // Detect shot-level labels.
    LabelDetectionMode_SHOT_MODE LabelDetectionMode = 1
    // Detect frame-level labels.
    LabelDetectionMode_FRAME_MODE LabelDetectionMode = 2
    // Detect both shot-level and frame-level labels.
    LabelDetectionMode_SHOT_AND_FRAME_MODE LabelDetectionMode = 3
)

func (LabelDetectionMode) Descriptor

func (LabelDetectionMode) Descriptor() protoreflect.EnumDescriptor

func (LabelDetectionMode) Enum

func (x LabelDetectionMode) Enum() *LabelDetectionMode

func (LabelDetectionMode) EnumDescriptor

func (LabelDetectionMode) EnumDescriptor() ([]byte, []int)

Deprecated: Use LabelDetectionMode.Descriptor instead.

func (LabelDetectionMode) Number

func (x LabelDetectionMode) Number() protoreflect.EnumNumber

func (LabelDetectionMode) String

func (x LabelDetectionMode) String() string

func (LabelDetectionMode) Type

func (LabelDetectionMode) Type() protoreflect.EnumType

type LabelFrame

Video frame level annotation results for label detection.

type LabelFrame struct {

    // Time-offset, relative to the beginning of the video, corresponding to the
    // video frame for this location.
    TimeOffset *durationpb.Duration `protobuf:"bytes,1,opt,name=time_offset,json=timeOffset,proto3" json:"time_offset,omitempty"`
    // Confidence that the label is accurate. Range: [0, 1].
    Confidence float32 `protobuf:"fixed32,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
    // contains filtered or unexported fields
}

func (*LabelFrame) Descriptor

func (*LabelFrame) Descriptor() ([]byte, []int)

Deprecated: Use LabelFrame.ProtoReflect.Descriptor instead.

func (*LabelFrame) GetConfidence

func (x *LabelFrame) GetConfidence() float32

func (*LabelFrame) GetTimeOffset

func (x *LabelFrame) GetTimeOffset() *durationpb.Duration

func (*LabelFrame) ProtoMessage

func (*LabelFrame) ProtoMessage()

func (*LabelFrame) ProtoReflect

func (x *LabelFrame) ProtoReflect() protoreflect.Message

func (*LabelFrame) Reset

func (x *LabelFrame) Reset()

func (*LabelFrame) String

func (x *LabelFrame) String() string

type LabelSegment

Video segment level annotation results for label detection.

type LabelSegment struct {

    // Video segment where a label was detected.
    Segment *VideoSegment `protobuf:"bytes,1,opt,name=segment,proto3" json:"segment,omitempty"`
    // Confidence that the label is accurate. Range: [0, 1].
    Confidence float32 `protobuf:"fixed32,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
    // contains filtered or unexported fields
}

func (*LabelSegment) Descriptor

func (*LabelSegment) Descriptor() ([]byte, []int)

Deprecated: Use LabelSegment.ProtoReflect.Descriptor instead.

func (*LabelSegment) GetConfidence

func (x *LabelSegment) GetConfidence() float32

func (*LabelSegment) GetSegment

func (x *LabelSegment) GetSegment() *VideoSegment

func (*LabelSegment) ProtoMessage

func (*LabelSegment) ProtoMessage()

func (*LabelSegment) ProtoReflect

func (x *LabelSegment) ProtoReflect() protoreflect.Message

func (*LabelSegment) Reset

func (x *LabelSegment) Reset()

func (*LabelSegment) String

func (x *LabelSegment) String() string

type Likelihood

Bucketized representation of likelihood.

type Likelihood int32
const (
    // Unspecified likelihood.
    Likelihood_LIKELIHOOD_UNSPECIFIED Likelihood = 0
    // Very unlikely.
    Likelihood_VERY_UNLIKELY Likelihood = 1
    // Unlikely.
    Likelihood_UNLIKELY Likelihood = 2
    // Possible.
    Likelihood_POSSIBLE Likelihood = 3
    // Likely.
    Likelihood_LIKELY Likelihood = 4
    // Very likely.
    Likelihood_VERY_LIKELY Likelihood = 5
)

func (Likelihood) Descriptor

func (Likelihood) Descriptor() protoreflect.EnumDescriptor

func (Likelihood) Enum

func (x Likelihood) Enum() *Likelihood

func (Likelihood) EnumDescriptor

func (Likelihood) EnumDescriptor() ([]byte, []int)

Deprecated: Use Likelihood.Descriptor instead.

func (Likelihood) Number

func (x Likelihood) Number() protoreflect.EnumNumber

func (Likelihood) String

func (x Likelihood) String() string

func (Likelihood) Type

func (Likelihood) Type() protoreflect.EnumType

type ShotChangeDetectionConfig

Config for SHOT_CHANGE_DETECTION.

type ShotChangeDetectionConfig struct {

    // Model to use for shot change detection.
    // Supported values: "builtin/stable" (the default if unset) and
    // "builtin/latest".
    Model string `protobuf:"bytes,1,opt,name=model,proto3" json:"model,omitempty"`
    // contains filtered or unexported fields
}

func (*ShotChangeDetectionConfig) Descriptor

func (*ShotChangeDetectionConfig) Descriptor() ([]byte, []int)

Deprecated: Use ShotChangeDetectionConfig.ProtoReflect.Descriptor instead.

func (*ShotChangeDetectionConfig) GetModel

func (x *ShotChangeDetectionConfig) GetModel() string

func (*ShotChangeDetectionConfig) ProtoMessage

func (*ShotChangeDetectionConfig) ProtoMessage()

func (*ShotChangeDetectionConfig) ProtoReflect

func (x *ShotChangeDetectionConfig) ProtoReflect() protoreflect.Message

func (*ShotChangeDetectionConfig) Reset

func (x *ShotChangeDetectionConfig) Reset()

func (*ShotChangeDetectionConfig) String

func (x *ShotChangeDetectionConfig) String() string

type SpeechContext

Provides "hints" to the speech recognizer to favor specific words and phrases in the results.

type SpeechContext struct {

    // Optional. A list of strings containing word and phrase "hints" so that
    // the speech recognizer is more likely to recognize them. This can be used
    // to improve the accuracy for specific words and phrases, for example, if
    // specific commands are typically spoken by the user. This can also be used
    // to add additional words to the vocabulary of the recognizer. See
    // [usage limits](https://cloud.google.com/speech/limits#content).
    Phrases []string `protobuf:"bytes,1,rep,name=phrases,proto3" json:"phrases,omitempty"`
    // contains filtered or unexported fields
}

func (*SpeechContext) Descriptor

func (*SpeechContext) Descriptor() ([]byte, []int)

Deprecated: Use SpeechContext.ProtoReflect.Descriptor instead.

func (*SpeechContext) GetPhrases

func (x *SpeechContext) GetPhrases() []string

func (*SpeechContext) ProtoMessage

func (*SpeechContext) ProtoMessage()

func (*SpeechContext) ProtoReflect

func (x *SpeechContext) ProtoReflect() protoreflect.Message

func (*SpeechContext) Reset

func (x *SpeechContext) Reset()

func (*SpeechContext) String

func (x *SpeechContext) String() string

type SpeechRecognitionAlternative

Alternative hypotheses (a.k.a. n-best list).

type SpeechRecognitionAlternative struct {

    // Output only. Transcript text representing the words that the user spoke.
    Transcript string `protobuf:"bytes,1,opt,name=transcript,proto3" json:"transcript,omitempty"`
    // Output only. The confidence estimate between 0.0 and 1.0. A higher number
    // indicates an estimated greater likelihood that the recognized words are
    // correct. This field is set only for the top alternative.
    // This field is not guaranteed to be accurate and users should not rely on it
    // to be always provided.
    // The default of 0.0 is a sentinel value indicating `confidence` was not set.
    Confidence float32 `protobuf:"fixed32,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
    // Output only. A list of word-specific information for each recognized word.
    Words []*WordInfo `protobuf:"bytes,3,rep,name=words,proto3" json:"words,omitempty"`
    // contains filtered or unexported fields
}

func (*SpeechRecognitionAlternative) Descriptor

func (*SpeechRecognitionAlternative) Descriptor() ([]byte, []int)

Deprecated: Use SpeechRecognitionAlternative.ProtoReflect.Descriptor instead.

func (*SpeechRecognitionAlternative) GetConfidence

func (x *SpeechRecognitionAlternative) GetConfidence() float32

func (*SpeechRecognitionAlternative) GetTranscript

func (x *SpeechRecognitionAlternative) GetTranscript() string

func (*SpeechRecognitionAlternative) GetWords

func (x *SpeechRecognitionAlternative) GetWords() []*WordInfo

func (*SpeechRecognitionAlternative) ProtoMessage

func (*SpeechRecognitionAlternative) ProtoMessage()

func (*SpeechRecognitionAlternative) ProtoReflect

func (x *SpeechRecognitionAlternative) ProtoReflect() protoreflect.Message

func (*SpeechRecognitionAlternative) Reset

func (x *SpeechRecognitionAlternative) Reset()

func (*SpeechRecognitionAlternative) String

func (x *SpeechRecognitionAlternative) String() string

type SpeechTranscription

A speech recognition result corresponding to a portion of the audio.

type SpeechTranscription struct {

    // May contain one or more recognition hypotheses (up to the maximum specified
    // in `max_alternatives`).  These alternatives are ordered in terms of
    // accuracy, with the top (first) alternative being the most probable, as
    // ranked by the recognizer.
    Alternatives []*SpeechRecognitionAlternative `protobuf:"bytes,1,rep,name=alternatives,proto3" json:"alternatives,omitempty"`
    // contains filtered or unexported fields
}
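
Because alternatives are ordered with the most probable first, the top hypothesis is simply the first element. A minimal sketch (helper name illustrative):

import (
    videopb "google.golang.org/genproto/googleapis/cloud/videointelligence/v1p1beta1"
)

// topTranscript returns the most probable transcript of a transcription,
// or "" if no alternatives were returned.
func topTranscript(t *videopb.SpeechTranscription) string {
    alts := t.GetAlternatives()
    if len(alts) == 0 {
        return ""
    }
    return alts[0].GetTranscript()
}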

func (*SpeechTranscription) Descriptor

func (*SpeechTranscription) Descriptor() ([]byte, []int)

Deprecated: Use SpeechTranscription.ProtoReflect.Descriptor instead.

func (*SpeechTranscription) GetAlternatives

func (x *SpeechTranscription) GetAlternatives() []*SpeechRecognitionAlternative

func (*SpeechTranscription) ProtoMessage

func (*SpeechTranscription) ProtoMessage()

func (*SpeechTranscription) ProtoReflect

func (x *SpeechTranscription) ProtoReflect() protoreflect.Message

func (*SpeechTranscription) Reset

func (x *SpeechTranscription) Reset()

func (*SpeechTranscription) String

func (x *SpeechTranscription) String() string

type SpeechTranscriptionConfig

Config for SPEECH_TRANSCRIPTION.

type SpeechTranscriptionConfig struct {

    // Required. The language of the supplied audio as a
    // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
    // Example: "en-US".
    // See [Language Support](https://cloud.google.com/speech/docs/languages)
    // for a list of the currently supported language codes.
    LanguageCode string `protobuf:"bytes,1,opt,name=language_code,json=languageCode,proto3" json:"language_code,omitempty"`
    // Optional. Maximum number of recognition hypotheses to be returned.
    // Specifically, the maximum number of `SpeechRecognitionAlternative` messages
    // within each `SpeechTranscription`. The server may return fewer than
    // `max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will
    // return a maximum of one. If omitted, a maximum of one is returned.
    MaxAlternatives int32 `protobuf:"varint,2,opt,name=max_alternatives,json=maxAlternatives,proto3" json:"max_alternatives,omitempty"`
    // Optional. If set to `true`, the server will attempt to filter out
    // profanities, replacing all but the initial character in each filtered word
    // with asterisks, e.g. "f***". If set to `false` or omitted, profanities
    // won't be filtered out.
    FilterProfanity bool `protobuf:"varint,3,opt,name=filter_profanity,json=filterProfanity,proto3" json:"filter_profanity,omitempty"`
    // Optional. A means to provide context to assist the speech recognition.
    SpeechContexts []*SpeechContext `protobuf:"bytes,4,rep,name=speech_contexts,json=speechContexts,proto3" json:"speech_contexts,omitempty"`
    // Optional. If 'true', adds punctuation to recognition result hypotheses.
    // This feature is only available in select languages. Setting this for
    // requests in other languages has no effect at all. The default 'false' value
    // does not add punctuation to result hypotheses. NOTE: "This is currently
    // offered as an experimental service, complimentary to all users. In the
    // future this may be exclusively available as a premium feature."
    EnableAutomaticPunctuation bool `protobuf:"varint,5,opt,name=enable_automatic_punctuation,json=enableAutomaticPunctuation,proto3" json:"enable_automatic_punctuation,omitempty"`
    // Optional. For file formats, such as MXF or MKV, supporting multiple audio
    // tracks, specify up to two tracks. Default: track 0.
    AudioTracks []int32 `protobuf:"varint,6,rep,packed,name=audio_tracks,json=audioTracks,proto3" json:"audio_tracks,omitempty"`
    // contains filtered or unexported fields
}
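
A sketch of a SPEECH_TRANSCRIPTION config using some of the optional fields above; the phrase hints and helper name are placeholders:

import (
    videopb "google.golang.org/genproto/googleapis/cloud/videointelligence/v1p1beta1"
)

// newTranscriptionConfig returns a SPEECH_TRANSCRIPTION config with
// automatic punctuation enabled and a couple of placeholder phrase hints.
func newTranscriptionConfig() *videopb.SpeechTranscriptionConfig {
    return &videopb.SpeechTranscriptionConfig{
        LanguageCode:               "en-US",
        EnableAutomaticPunctuation: true,
        SpeechContexts: []*videopb.SpeechContext{
            {Phrases: []string{"Cloud Video Intelligence", "v1p1beta1"}},
        },
    }
}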

func (*SpeechTranscriptionConfig) Descriptor

func (*SpeechTranscriptionConfig) Descriptor() ([]byte, []int)

Deprecated: Use SpeechTranscriptionConfig.ProtoReflect.Descriptor instead.

func (*SpeechTranscriptionConfig) GetAudioTracks

func (x *SpeechTranscriptionConfig) GetAudioTracks() []int32

func (*SpeechTranscriptionConfig) GetEnableAutomaticPunctuation

func (x *SpeechTranscriptionConfig) GetEnableAutomaticPunctuation() bool

func (*SpeechTranscriptionConfig) GetFilterProfanity

func (x *SpeechTranscriptionConfig) GetFilterProfanity() bool

func (*SpeechTranscriptionConfig) GetLanguageCode

func (x *SpeechTranscriptionConfig) GetLanguageCode() string

func (*SpeechTranscriptionConfig) GetMaxAlternatives

func (x *SpeechTranscriptionConfig) GetMaxAlternatives() int32

func (*SpeechTranscriptionConfig) GetSpeechContexts

func (x *SpeechTranscriptionConfig) GetSpeechContexts() []*SpeechContext

func (*SpeechTranscriptionConfig) ProtoMessage

func (*SpeechTranscriptionConfig) ProtoMessage()

func (*SpeechTranscriptionConfig) ProtoReflect

func (x *SpeechTranscriptionConfig) ProtoReflect() protoreflect.Message

func (*SpeechTranscriptionConfig) Reset

func (x *SpeechTranscriptionConfig) Reset()

func (*SpeechTranscriptionConfig) String

func (x *SpeechTranscriptionConfig) String() string

type UnimplementedVideoIntelligenceServiceServer

UnimplementedVideoIntelligenceServiceServer can be embedded to have forward compatible implementations.

type UnimplementedVideoIntelligenceServiceServer struct {
}

func (*UnimplementedVideoIntelligenceServiceServer) AnnotateVideo

func (*UnimplementedVideoIntelligenceServiceServer) AnnotateVideo(context.Context, *AnnotateVideoRequest) (*longrunning.Operation, error)

type VideoAnnotationProgress

Annotation progress for a single video.

type VideoAnnotationProgress struct {

    // Output only. Video file location in
    // [Google Cloud Storage](https://cloud.google.com/storage/).
    InputUri string `protobuf:"bytes,1,opt,name=input_uri,json=inputUri,proto3" json:"input_uri,omitempty"`
    // Output only. Approximate percentage processed thus far. Guaranteed to be
    // 100 when fully processed.
    ProgressPercent int32 `protobuf:"varint,2,opt,name=progress_percent,json=progressPercent,proto3" json:"progress_percent,omitempty"`
    // Output only. Time when the request was received.
    StartTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
    // Output only. Time of the most recent update.
    UpdateTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"`
    // contains filtered or unexported fields
}

func (*VideoAnnotationProgress) Descriptor

func (*VideoAnnotationProgress) Descriptor() ([]byte, []int)

Deprecated: Use VideoAnnotationProgress.ProtoReflect.Descriptor instead.

func (*VideoAnnotationProgress) GetInputUri

func (x *VideoAnnotationProgress) GetInputUri() string

func (*VideoAnnotationProgress) GetProgressPercent

func (x *VideoAnnotationProgress) GetProgressPercent() int32

func (*VideoAnnotationProgress) GetStartTime

func (x *VideoAnnotationProgress) GetStartTime() *timestamppb.Timestamp

func (*VideoAnnotationProgress) GetUpdateTime

func (x *VideoAnnotationProgress) GetUpdateTime() *timestamppb.Timestamp

func (*VideoAnnotationProgress) ProtoMessage

func (*VideoAnnotationProgress) ProtoMessage()

func (*VideoAnnotationProgress) ProtoReflect

func (x *VideoAnnotationProgress) ProtoReflect() protoreflect.Message

func (*VideoAnnotationProgress) Reset

func (x *VideoAnnotationProgress) Reset()

func (*VideoAnnotationProgress) String

func (x *VideoAnnotationProgress) String() string

type VideoAnnotationResults

Annotation results for a single video.

type VideoAnnotationResults struct {

    // Output only. Video file location in
    // [Google Cloud Storage](https://cloud.google.com/storage/).
    InputUri string `protobuf:"bytes,1,opt,name=input_uri,json=inputUri,proto3" json:"input_uri,omitempty"`
    // Label annotations on video level or user specified segment level.
    // There is exactly one element for each unique label.
    SegmentLabelAnnotations []*LabelAnnotation `protobuf:"bytes,2,rep,name=segment_label_annotations,json=segmentLabelAnnotations,proto3" json:"segment_label_annotations,omitempty"`
    // Label annotations on shot level.
    // There is exactly one element for each unique label.
    ShotLabelAnnotations []*LabelAnnotation `protobuf:"bytes,3,rep,name=shot_label_annotations,json=shotLabelAnnotations,proto3" json:"shot_label_annotations,omitempty"`
    // Label annotations on frame level.
    // There is exactly one element for each unique label.
    FrameLabelAnnotations []*LabelAnnotation `protobuf:"bytes,4,rep,name=frame_label_annotations,json=frameLabelAnnotations,proto3" json:"frame_label_annotations,omitempty"`
    // Shot annotations. Each shot is represented as a video segment.
    ShotAnnotations []*VideoSegment `protobuf:"bytes,6,rep,name=shot_annotations,json=shotAnnotations,proto3" json:"shot_annotations,omitempty"`
    // Explicit content annotation.
    ExplicitAnnotation *ExplicitContentAnnotation `protobuf:"bytes,7,opt,name=explicit_annotation,json=explicitAnnotation,proto3" json:"explicit_annotation,omitempty"`
    // Speech transcription.
    SpeechTranscriptions []*SpeechTranscription `protobuf:"bytes,11,rep,name=speech_transcriptions,json=speechTranscriptions,proto3" json:"speech_transcriptions,omitempty"`
    // Output only. If set, indicates an error. Note that for a single
    // `AnnotateVideoRequest` some videos may succeed and some may fail.
    Error *status.Status `protobuf:"bytes,9,opt,name=error,proto3" json:"error,omitempty"`
    // contains filtered or unexported fields
}
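
Since some videos in a batch may succeed while others fail, each per-video result should be checked for an error before reading its annotations. A minimal sketch (helper name illustrative):

import (
    "fmt"

    videopb "google.golang.org/genproto/googleapis/cloud/videointelligence/v1p1beta1"
)

// summarizeResult prints either the per-video error or a count of the
// shot and segment-level label annotations.
func summarizeResult(r *videopb.VideoAnnotationResults) {
    if err := r.GetError(); err != nil {
        fmt.Printf("%s failed: %s\n", r.GetInputUri(), err.GetMessage())
        return
    }
    fmt.Printf("%s: %d shots, %d segment labels\n",
        r.GetInputUri(), len(r.GetShotAnnotations()), len(r.GetSegmentLabelAnnotations()))
}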

func (*VideoAnnotationResults) Descriptor

func (*VideoAnnotationResults) Descriptor() ([]byte, []int)

Deprecated: Use VideoAnnotationResults.ProtoReflect.Descriptor instead.

func (*VideoAnnotationResults) GetError

func (x *VideoAnnotationResults) GetError() *status.Status

func (*VideoAnnotationResults) GetExplicitAnnotation

func (x *VideoAnnotationResults) GetExplicitAnnotation() *ExplicitContentAnnotation

func (*VideoAnnotationResults) GetFrameLabelAnnotations

func (x *VideoAnnotationResults) GetFrameLabelAnnotations() []*LabelAnnotation

func (*VideoAnnotationResults) GetInputUri

func (x *VideoAnnotationResults) GetInputUri() string

func (*VideoAnnotationResults) GetSegmentLabelAnnotations

func (x *VideoAnnotationResults) GetSegmentLabelAnnotations() []*LabelAnnotation

func (*VideoAnnotationResults) GetShotAnnotations

func (x *VideoAnnotationResults) GetShotAnnotations() []*VideoSegment

func (*VideoAnnotationResults) GetShotLabelAnnotations

func (x *VideoAnnotationResults) GetShotLabelAnnotations() []*LabelAnnotation

func (*VideoAnnotationResults) GetSpeechTranscriptions

func (x *VideoAnnotationResults) GetSpeechTranscriptions() []*SpeechTranscription

func (*VideoAnnotationResults) ProtoMessage

func (*VideoAnnotationResults) ProtoMessage()

func (*VideoAnnotationResults) ProtoReflect

func (x *VideoAnnotationResults) ProtoReflect() protoreflect.Message

func (*VideoAnnotationResults) Reset

func (x *VideoAnnotationResults) Reset()

func (*VideoAnnotationResults) String

func (x *VideoAnnotationResults) String() string

type VideoContext

Video context and/or feature-specific parameters.

type VideoContext struct {

    // Video segments to annotate. The segments may overlap and are not required
    // to be contiguous or span the whole video. If unspecified, each video is
    // treated as a single segment.
    Segments []*VideoSegment `protobuf:"bytes,1,rep,name=segments,proto3" json:"segments,omitempty"`
    // Config for LABEL_DETECTION.
    LabelDetectionConfig *LabelDetectionConfig `protobuf:"bytes,2,opt,name=label_detection_config,json=labelDetectionConfig,proto3" json:"label_detection_config,omitempty"`
    // Config for SHOT_CHANGE_DETECTION.
    ShotChangeDetectionConfig *ShotChangeDetectionConfig `protobuf:"bytes,3,opt,name=shot_change_detection_config,json=shotChangeDetectionConfig,proto3" json:"shot_change_detection_config,omitempty"`
    // Config for EXPLICIT_CONTENT_DETECTION.
    ExplicitContentDetectionConfig *ExplicitContentDetectionConfig `protobuf:"bytes,4,opt,name=explicit_content_detection_config,json=explicitContentDetectionConfig,proto3" json:"explicit_content_detection_config,omitempty"`
    // Config for SPEECH_TRANSCRIPTION.
    SpeechTranscriptionConfig *SpeechTranscriptionConfig `protobuf:"bytes,6,opt,name=speech_transcription_config,json=speechTranscriptionConfig,proto3" json:"speech_transcription_config,omitempty"`
    // contains filtered or unexported fields
}

func (*VideoContext) Descriptor

func (*VideoContext) Descriptor() ([]byte, []int)

Deprecated: Use VideoContext.ProtoReflect.Descriptor instead.

func (*VideoContext) GetExplicitContentDetectionConfig

func (x *VideoContext) GetExplicitContentDetectionConfig() *ExplicitContentDetectionConfig

func (*VideoContext) GetLabelDetectionConfig

func (x *VideoContext) GetLabelDetectionConfig() *LabelDetectionConfig

func (*VideoContext) GetSegments

func (x *VideoContext) GetSegments() []*VideoSegment

func (*VideoContext) GetShotChangeDetectionConfig

func (x *VideoContext) GetShotChangeDetectionConfig() *ShotChangeDetectionConfig

func (*VideoContext) GetSpeechTranscriptionConfig

func (x *VideoContext) GetSpeechTranscriptionConfig() *SpeechTranscriptionConfig

func (*VideoContext) ProtoMessage

func (*VideoContext) ProtoMessage()

func (*VideoContext) ProtoReflect

func (x *VideoContext) ProtoReflect() protoreflect.Message

func (*VideoContext) Reset

func (x *VideoContext) Reset()

func (*VideoContext) String

func (x *VideoContext) String() string

type VideoIntelligenceServiceClient

VideoIntelligenceServiceClient is the client API for VideoIntelligenceService service.

For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.

type VideoIntelligenceServiceClient interface {
    // Performs asynchronous video annotation. Progress and results can be
    // retrieved through the `google.longrunning.Operations` interface.
    // `Operation.metadata` contains `AnnotateVideoProgress` (progress).
    // `Operation.response` contains `AnnotateVideoResponse` (results).
    AnnotateVideo(ctx context.Context, in *AnnotateVideoRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
}

func NewVideoIntelligenceServiceClient

func NewVideoIntelligenceServiceClient(cc grpc.ClientConnInterface) VideoIntelligenceServiceClient
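
A sketch of issuing the call on an already-established, authenticated gRPC connection; setting up transport security and credentials for that connection is outside the scope of this package and omitted here (helper name illustrative):

import (
    "context"

    videopb "google.golang.org/genproto/googleapis/cloud/videointelligence/v1p1beta1"
    "google.golang.org/grpc"
)

// startAnnotation issues an AnnotateVideo call over an authenticated gRPC
// connection and returns the name of the resulting long-running Operation.
func startAnnotation(ctx context.Context, conn grpc.ClientConnInterface, req *videopb.AnnotateVideoRequest) (string, error) {
    client := videopb.NewVideoIntelligenceServiceClient(conn)
    op, err := client.AnnotateVideo(ctx, req)
    if err != nil {
        return "", err
    }
    return op.GetName(), nil
}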

type VideoIntelligenceServiceServer

VideoIntelligenceServiceServer is the server API for VideoIntelligenceService service.

type VideoIntelligenceServiceServer interface {
    // Performs asynchronous video annotation. Progress and results can be
    // retrieved through the `google.longrunning.Operations` interface.
    // `Operation.metadata` contains `AnnotateVideoProgress` (progress).
    // `Operation.response` contains `AnnotateVideoResponse` (results).
    AnnotateVideo(context.Context, *AnnotateVideoRequest) (*longrunning.Operation, error)
}
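
A sketch of a server that embeds UnimplementedVideoIntelligenceServiceServer for forward compatibility and is registered with a gRPC server; the stub simply returns Unimplemented, and the type and helper names are illustrative:

import (
    "context"

    videopb "google.golang.org/genproto/googleapis/cloud/videointelligence/v1p1beta1"
    "google.golang.org/genproto/googleapis/longrunning"
    "google.golang.org/grpc"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
)

// stubServer embeds the Unimplemented server so that methods added to the
// service in the future do not break compilation.
type stubServer struct {
    videopb.UnimplementedVideoIntelligenceServiceServer
}

func (s *stubServer) AnnotateVideo(ctx context.Context, req *videopb.AnnotateVideoRequest) (*longrunning.Operation, error) {
    // A real implementation would start asynchronous processing here.
    return nil, status.Error(codes.Unimplemented, "AnnotateVideo not implemented in this stub")
}

func register(g *grpc.Server) {
    videopb.RegisterVideoIntelligenceServiceServer(g, &stubServer{})
}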

type VideoSegment

Video segment.

type VideoSegment struct {

    // Time-offset, relative to the beginning of the video,
    // corresponding to the start of the segment (inclusive).
    StartTimeOffset *durationpb.Duration `protobuf:"bytes,1,opt,name=start_time_offset,json=startTimeOffset,proto3" json:"start_time_offset,omitempty"`
    // Time-offset, relative to the beginning of the video,
    // corresponding to the end of the segment (inclusive).
    EndTimeOffset *durationpb.Duration `protobuf:"bytes,2,opt,name=end_time_offset,json=endTimeOffset,proto3" json:"end_time_offset,omitempty"`
    // contains filtered or unexported fields
}
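
The offsets are protobuf Durations, so AsDuration converts them to time.Duration for arithmetic. A minimal sketch computing a segment's length (helper name illustrative):

import (
    "time"

    videopb "google.golang.org/genproto/googleapis/cloud/videointelligence/v1p1beta1"
)

// segmentLength returns the duration covered by a video segment.
func segmentLength(s *videopb.VideoSegment) time.Duration {
    return s.GetEndTimeOffset().AsDuration() - s.GetStartTimeOffset().AsDuration()
}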

func (*VideoSegment) Descriptor

func (*VideoSegment) Descriptor() ([]byte, []int)

Deprecated: Use VideoSegment.ProtoReflect.Descriptor instead.

func (*VideoSegment) GetEndTimeOffset

func (x *VideoSegment) GetEndTimeOffset() *durationpb.Duration

func (*VideoSegment) GetStartTimeOffset

func (x *VideoSegment) GetStartTimeOffset() *durationpb.Duration

func (*VideoSegment) ProtoMessage

func (*VideoSegment) ProtoMessage()

func (*VideoSegment) ProtoReflect

func (x *VideoSegment) ProtoReflect() protoreflect.Message

func (*VideoSegment) Reset

func (x *VideoSegment) Reset()

func (*VideoSegment) String

func (x *VideoSegment) String() string

type WordInfo

Word-specific information for recognized words. Word information is only included in the response when certain request parameters are set, such as `enable_word_time_offsets`.

type WordInfo struct {

    // Output only. Time offset relative to the beginning of the audio, and
    // corresponding to the start of the spoken word. This field is only set if
    // `enable_word_time_offsets=true` and only in the top hypothesis. This is an
    // experimental feature and the accuracy of the time offset can vary.
    StartTime *durationpb.Duration `protobuf:"bytes,1,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
    // Output only. Time offset relative to the beginning of the audio, and
    // corresponding to the end of the spoken word. This field is only set if
    // `enable_word_time_offsets=true` and only in the top hypothesis. This is an
    // experimental feature and the accuracy of the time offset can vary.
    EndTime *durationpb.Duration `protobuf:"bytes,2,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
    // Output only. The word corresponding to this set of information.
    Word string `protobuf:"bytes,3,opt,name=word,proto3" json:"word,omitempty"`
    // contains filtered or unexported fields
}
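
A sketch of listing word timings from a recognition alternative; timings are only populated when word time offsets were requested (helper name illustrative):

import (
    "fmt"

    videopb "google.golang.org/genproto/googleapis/cloud/videointelligence/v1p1beta1"
)

// printWordTimings lists each recognized word with its start and end offsets.
func printWordTimings(alt *videopb.SpeechRecognitionAlternative) {
    for _, w := range alt.GetWords() {
        fmt.Printf("%s: %v - %v\n",
            w.GetWord(), w.GetStartTime().AsDuration(), w.GetEndTime().AsDuration())
    }
}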

func (*WordInfo) Descriptor

func (*WordInfo) Descriptor() ([]byte, []int)

Deprecated: Use WordInfo.ProtoReflect.Descriptor instead.

func (*WordInfo) GetEndTime

func (x *WordInfo) GetEndTime() *durationpb.Duration

func (*WordInfo) GetStartTime

func (x *WordInfo) GetStartTime() *durationpb.Duration

func (*WordInfo) GetWord

func (x *WordInfo) GetWord() string

func (*WordInfo) ProtoMessage

func (*WordInfo) ProtoMessage()

func (*WordInfo) ProtoReflect

func (x *WordInfo) ProtoReflect() protoreflect.Message

func (*WordInfo) Reset

func (x *WordInfo) Reset()

func (*WordInfo) String

func (x *WordInfo) String() string