// Mirror of https://github.com/genuinetools/reg.git
// Synced 2024-09-19 00:31:02 -04:00
// File: 1484 lines, 64 KiB, Go
|
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||
|
// source: google/cloud/speech/v1beta1/cloud_speech.proto
|
||
|
|
||
|
package speech // import "google.golang.org/genproto/googleapis/cloud/speech/v1beta1"
|
||
|
|
||
|
import proto "github.com/golang/protobuf/proto"
|
||
|
import fmt "fmt"
|
||
|
import math "math"
|
||
|
import _ "github.com/golang/protobuf/ptypes/duration"
|
||
|
import timestamp "github.com/golang/protobuf/ptypes/timestamp"
|
||
|
import _ "google.golang.org/genproto/googleapis/api/annotations"
|
||
|
import longrunning "google.golang.org/genproto/googleapis/longrunning"
|
||
|
import status "google.golang.org/genproto/googleapis/rpc/status"
|
||
|
|
||
|
import (
|
||
|
context "golang.org/x/net/context"
|
||
|
grpc "google.golang.org/grpc"
|
||
|
)
|
||
|
|
||
|
// Reference imports to suppress errors if they are not otherwise used.
|
||
|
var _ = proto.Marshal
|
||
|
var _ = fmt.Errorf
|
||
|
var _ = math.Inf
|
||
|
|
||
|
// This is a compile-time assertion to ensure that this generated file
|
||
|
// is compatible with the proto package it is being compiled against.
|
||
|
// A compilation error at this line likely means your copy of the
|
||
|
// proto package needs to be updated.
|
||
|
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||
|
|
||
|
// Audio encoding of the data sent in the audio message. All encodings support
// only 1 channel (mono) audio. Only `FLAC` includes a header that describes
// the bytes of audio that follow the header. The other encodings are raw
// audio bytes with no header.
//
// For best results, the audio source should be captured and transmitted using
// a lossless encoding (`FLAC` or `LINEAR16`). Recognition accuracy may be
// reduced if lossy codecs (such as AMR, AMR_WB and MULAW) are used to capture
// or transmit the audio, particularly if background noise is present.
type RecognitionConfig_AudioEncoding int32

const (
	// Not specified. Will return result [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT].
	RecognitionConfig_ENCODING_UNSPECIFIED RecognitionConfig_AudioEncoding = 0
	// Uncompressed 16-bit signed little-endian samples (Linear PCM).
	// This is the only encoding that may be used by `AsyncRecognize`.
	RecognitionConfig_LINEAR16 RecognitionConfig_AudioEncoding = 1
	// This is the recommended encoding for `SyncRecognize` and
	// `StreamingRecognize` because it uses lossless compression; therefore
	// recognition accuracy is not compromised by a lossy codec.
	//
	// The stream FLAC (Free Lossless Audio Codec) encoding is specified at:
	// http://flac.sourceforge.net/documentation.html.
	// 16-bit and 24-bit samples are supported.
	// Not all fields in STREAMINFO are supported.
	RecognitionConfig_FLAC RecognitionConfig_AudioEncoding = 2
	// 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.
	RecognitionConfig_MULAW RecognitionConfig_AudioEncoding = 3
	// Adaptive Multi-Rate Narrowband codec. `sample_rate` must be 8000 Hz.
	RecognitionConfig_AMR RecognitionConfig_AudioEncoding = 4
	// Adaptive Multi-Rate Wideband codec. `sample_rate` must be 16000 Hz.
	RecognitionConfig_AMR_WB RecognitionConfig_AudioEncoding = 5
)

// Maps enum numbers to their proto names; used by String and proto text output.
var RecognitionConfig_AudioEncoding_name = map[int32]string{
	0: "ENCODING_UNSPECIFIED",
	1: "LINEAR16",
	2: "FLAC",
	3: "MULAW",
	4: "AMR",
	5: "AMR_WB",
}

// Maps proto names back to their enum numbers; the inverse of the _name map.
var RecognitionConfig_AudioEncoding_value = map[string]int32{
	"ENCODING_UNSPECIFIED": 0,
	"LINEAR16":             1,
	"FLAC":                 2,
	"MULAW":                3,
	"AMR":                  4,
	"AMR_WB":               5,
}

// String returns the proto name for the enum value (e.g. "FLAC").
func (x RecognitionConfig_AudioEncoding) String() string {
	return proto.EnumName(RecognitionConfig_AudioEncoding_name, int32(x))
}

// EnumDescriptor returns the file descriptor bytes and the index path of this
// enum within it (message 4, nested enum 0).
func (RecognitionConfig_AudioEncoding) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_8234301300f3be52, []int{4, 0}
}
|
||
|
|
||
|
// Indicates the type of endpointer event.
type StreamingRecognizeResponse_EndpointerType int32

const (
	// No endpointer event specified.
	StreamingRecognizeResponse_ENDPOINTER_EVENT_UNSPECIFIED StreamingRecognizeResponse_EndpointerType = 0
	// Speech has been detected in the audio stream, and the service is
	// beginning to process it.
	StreamingRecognizeResponse_START_OF_SPEECH StreamingRecognizeResponse_EndpointerType = 1
	// Speech has ceased to be detected in the audio stream. (For example, the
	// user may have paused after speaking.) If `single_utterance` is `false`,
	// the service will continue to process audio, and if subsequent speech is
	// detected, will send another START_OF_SPEECH event.
	StreamingRecognizeResponse_END_OF_SPEECH StreamingRecognizeResponse_EndpointerType = 2
	// This event is sent after the client has half-closed the input stream gRPC
	// connection and the server has received all of the audio. (The server may
	// still be processing the audio and may subsequently return additional
	// results.)
	StreamingRecognizeResponse_END_OF_AUDIO StreamingRecognizeResponse_EndpointerType = 3
	// This event is only sent when `single_utterance` is `true`. It indicates
	// that the server has detected the end of the user's speech utterance and
	// expects no additional speech. Therefore, the server will not process
	// additional audio (although it may subsequently return additional
	// results). The client should stop sending additional audio data,
	// half-close the gRPC connection, and wait for any additional results
	// until the server closes the gRPC connection.
	StreamingRecognizeResponse_END_OF_UTTERANCE StreamingRecognizeResponse_EndpointerType = 4
)

// Maps enum numbers to their proto names; used by String and proto text output.
var StreamingRecognizeResponse_EndpointerType_name = map[int32]string{
	0: "ENDPOINTER_EVENT_UNSPECIFIED",
	1: "START_OF_SPEECH",
	2: "END_OF_SPEECH",
	3: "END_OF_AUDIO",
	4: "END_OF_UTTERANCE",
}

// Maps proto names back to their enum numbers; the inverse of the _name map.
var StreamingRecognizeResponse_EndpointerType_value = map[string]int32{
	"ENDPOINTER_EVENT_UNSPECIFIED": 0,
	"START_OF_SPEECH":              1,
	"END_OF_SPEECH":                2,
	"END_OF_AUDIO":                 3,
	"END_OF_UTTERANCE":             4,
}

// String returns the proto name for the enum value (e.g. "END_OF_AUDIO").
func (x StreamingRecognizeResponse_EndpointerType) String() string {
	return proto.EnumName(StreamingRecognizeResponse_EndpointerType_name, int32(x))
}

// EnumDescriptor returns the file descriptor bytes and the index path of this
// enum within it (message 10, nested enum 0).
func (StreamingRecognizeResponse_EndpointerType) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_8234301300f3be52, []int{10, 0}
}
|
||
|
|
||
|
// The top-level message sent by the client for the `SyncRecognize` method.
type SyncRecognizeRequest struct {
	// *Required* Provides information to the recognizer that specifies how to
	// process the request.
	Config *RecognitionConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"`
	// *Required* The audio data to be recognized.
	Audio *RecognitionAudio `protobuf:"bytes,2,opt,name=audio,proto3" json:"audio,omitempty"`
	// Internal bookkeeping fields used by the proto runtime; excluded from JSON.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// proto.Message boilerplate.
func (m *SyncRecognizeRequest) Reset()         { *m = SyncRecognizeRequest{} }
func (m *SyncRecognizeRequest) String() string { return proto.CompactTextString(m) }
func (*SyncRecognizeRequest) ProtoMessage()    {}

// Descriptor returns the file descriptor bytes and this message's index (0).
func (*SyncRecognizeRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_8234301300f3be52, []int{0}
}

// XXX_* methods delegate to the table-driven codec in xxx_messageInfo.
func (m *SyncRecognizeRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_SyncRecognizeRequest.Unmarshal(m, b)
}
func (m *SyncRecognizeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_SyncRecognizeRequest.Marshal(b, m, deterministic)
}
func (dst *SyncRecognizeRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SyncRecognizeRequest.Merge(dst, src)
}
func (m *SyncRecognizeRequest) XXX_Size() int {
	return xxx_messageInfo_SyncRecognizeRequest.Size(m)
}
func (m *SyncRecognizeRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_SyncRecognizeRequest.DiscardUnknown(m)
}

var xxx_messageInfo_SyncRecognizeRequest proto.InternalMessageInfo

// GetConfig returns the recognition config, or nil for a nil receiver.
func (m *SyncRecognizeRequest) GetConfig() *RecognitionConfig {
	if m != nil {
		return m.Config
	}
	return nil
}

// GetAudio returns the audio payload, or nil for a nil receiver.
func (m *SyncRecognizeRequest) GetAudio() *RecognitionAudio {
	if m != nil {
		return m.Audio
	}
	return nil
}
|
||
|
|
||
|
// The top-level message sent by the client for the `AsyncRecognize` method.
type AsyncRecognizeRequest struct {
	// *Required* Provides information to the recognizer that specifies how to
	// process the request.
	Config *RecognitionConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"`
	// *Required* The audio data to be recognized.
	Audio *RecognitionAudio `protobuf:"bytes,2,opt,name=audio,proto3" json:"audio,omitempty"`
	// Internal bookkeeping fields used by the proto runtime; excluded from JSON.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// proto.Message boilerplate.
func (m *AsyncRecognizeRequest) Reset()         { *m = AsyncRecognizeRequest{} }
func (m *AsyncRecognizeRequest) String() string { return proto.CompactTextString(m) }
func (*AsyncRecognizeRequest) ProtoMessage()    {}

// Descriptor returns the file descriptor bytes and this message's index (1).
func (*AsyncRecognizeRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_8234301300f3be52, []int{1}
}

// XXX_* methods delegate to the table-driven codec in xxx_messageInfo.
func (m *AsyncRecognizeRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_AsyncRecognizeRequest.Unmarshal(m, b)
}
func (m *AsyncRecognizeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_AsyncRecognizeRequest.Marshal(b, m, deterministic)
}
func (dst *AsyncRecognizeRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_AsyncRecognizeRequest.Merge(dst, src)
}
func (m *AsyncRecognizeRequest) XXX_Size() int {
	return xxx_messageInfo_AsyncRecognizeRequest.Size(m)
}
func (m *AsyncRecognizeRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_AsyncRecognizeRequest.DiscardUnknown(m)
}

var xxx_messageInfo_AsyncRecognizeRequest proto.InternalMessageInfo

// GetConfig returns the recognition config, or nil for a nil receiver.
func (m *AsyncRecognizeRequest) GetConfig() *RecognitionConfig {
	if m != nil {
		return m.Config
	}
	return nil
}

// GetAudio returns the audio payload, or nil for a nil receiver.
func (m *AsyncRecognizeRequest) GetAudio() *RecognitionAudio {
	if m != nil {
		return m.Audio
	}
	return nil
}
|
||
|
|
||
|
// The top-level message sent by the client for the `StreamingRecognize` method.
// Multiple `StreamingRecognizeRequest` messages are sent. The first message
// must contain a `streaming_config` message and must not contain `audio` data.
// All subsequent messages must contain `audio` data and must not contain a
// `streaming_config` message.
type StreamingRecognizeRequest struct {
	// The streaming request, which is either a streaming config or audio content.
	//
	// Types that are valid to be assigned to StreamingRequest:
	//	*StreamingRecognizeRequest_StreamingConfig
	//	*StreamingRecognizeRequest_AudioContent
	StreamingRequest isStreamingRecognizeRequest_StreamingRequest `protobuf_oneof:"streaming_request"`
	// Internal bookkeeping fields used by the proto runtime; excluded from JSON.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// proto.Message boilerplate.
func (m *StreamingRecognizeRequest) Reset()         { *m = StreamingRecognizeRequest{} }
func (m *StreamingRecognizeRequest) String() string { return proto.CompactTextString(m) }
func (*StreamingRecognizeRequest) ProtoMessage()    {}

// Descriptor returns the file descriptor bytes and this message's index (2).
func (*StreamingRecognizeRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_8234301300f3be52, []int{2}
}

// XXX_* methods delegate to the table-driven codec in xxx_messageInfo.
func (m *StreamingRecognizeRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_StreamingRecognizeRequest.Unmarshal(m, b)
}
func (m *StreamingRecognizeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_StreamingRecognizeRequest.Marshal(b, m, deterministic)
}
func (dst *StreamingRecognizeRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_StreamingRecognizeRequest.Merge(dst, src)
}
func (m *StreamingRecognizeRequest) XXX_Size() int {
	return xxx_messageInfo_StreamingRecognizeRequest.Size(m)
}
func (m *StreamingRecognizeRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_StreamingRecognizeRequest.DiscardUnknown(m)
}

var xxx_messageInfo_StreamingRecognizeRequest proto.InternalMessageInfo

// isStreamingRecognizeRequest_StreamingRequest is the sealed interface for the
// streaming_request oneof; only the wrapper types below implement it.
type isStreamingRecognizeRequest_StreamingRequest interface {
	isStreamingRecognizeRequest_StreamingRequest()
}

// Oneof wrapper types, one per streaming_request case.
type StreamingRecognizeRequest_StreamingConfig struct {
	StreamingConfig *StreamingRecognitionConfig `protobuf:"bytes,1,opt,name=streaming_config,json=streamingConfig,proto3,oneof"`
}
type StreamingRecognizeRequest_AudioContent struct {
	AudioContent []byte `protobuf:"bytes,2,opt,name=audio_content,json=audioContent,proto3,oneof"`
}

func (*StreamingRecognizeRequest_StreamingConfig) isStreamingRecognizeRequest_StreamingRequest() {}
func (*StreamingRecognizeRequest_AudioContent) isStreamingRecognizeRequest_StreamingRequest()   {}

// GetStreamingRequest returns the raw oneof value, or nil for a nil receiver.
func (m *StreamingRecognizeRequest) GetStreamingRequest() isStreamingRecognizeRequest_StreamingRequest {
	if m != nil {
		return m.StreamingRequest
	}
	return nil
}

// GetStreamingConfig returns the config if that oneof case is set, else nil.
func (m *StreamingRecognizeRequest) GetStreamingConfig() *StreamingRecognitionConfig {
	if x, ok := m.GetStreamingRequest().(*StreamingRecognizeRequest_StreamingConfig); ok {
		return x.StreamingConfig
	}
	return nil
}

// GetAudioContent returns the audio bytes if that oneof case is set, else nil.
func (m *StreamingRecognizeRequest) GetAudioContent() []byte {
	if x, ok := m.GetStreamingRequest().(*StreamingRecognizeRequest_AudioContent); ok {
		return x.AudioContent
	}
	return nil
}
|
||
|
|
||
|
// XXX_OneofFuncs is for the internal use of the proto package.
// It wires the oneof marshal/unmarshal/size helpers below into the runtime
// and registers the wrapper types for the streaming_request oneof.
func (*StreamingRecognizeRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
	return _StreamingRecognizeRequest_OneofMarshaler, _StreamingRecognizeRequest_OneofUnmarshaler, _StreamingRecognizeRequest_OneofSizer, []interface{}{
		(*StreamingRecognizeRequest_StreamingConfig)(nil),
		(*StreamingRecognizeRequest_AudioContent)(nil),
	}
}
|
||
|
|
||
|
// _StreamingRecognizeRequest_OneofMarshaler encodes whichever streaming_request
// case is set: field 1 as an embedded message, field 2 as raw bytes. A nil
// oneof encodes nothing; any other dynamic type is an error.
func _StreamingRecognizeRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
	m := msg.(*StreamingRecognizeRequest)
	// streaming_request
	switch x := m.StreamingRequest.(type) {
	case *StreamingRecognizeRequest_StreamingConfig:
		// Tag for field 1, wire type bytes.
		b.EncodeVarint(1<<3 | proto.WireBytes)
		if err := b.EncodeMessage(x.StreamingConfig); err != nil {
			return err
		}
	case *StreamingRecognizeRequest_AudioContent:
		// Tag for field 2, wire type bytes.
		b.EncodeVarint(2<<3 | proto.WireBytes)
		b.EncodeRawBytes(x.AudioContent)
	case nil:
	default:
		return fmt.Errorf("StreamingRecognizeRequest.StreamingRequest has unexpected type %T", x)
	}
	return nil
}
|
||
|
|
||
|
// _StreamingRecognizeRequest_OneofUnmarshaler decodes a streaming_request oneof
// field by tag. It returns (true, err) when the tag belongs to this oneof
// (claiming the field) and (false, nil) for unknown tags.
func _StreamingRecognizeRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
	m := msg.(*StreamingRecognizeRequest)
	switch tag {
	case 1: // streaming_request.streaming_config
		if wire != proto.WireBytes {
			return true, proto.ErrInternalBadWireType
		}
		msg := new(StreamingRecognitionConfig)
		err := b.DecodeMessage(msg)
		m.StreamingRequest = &StreamingRecognizeRequest_StreamingConfig{msg}
		return true, err
	case 2: // streaming_request.audio_content
		if wire != proto.WireBytes {
			return true, proto.ErrInternalBadWireType
		}
		x, err := b.DecodeRawBytes(true)
		m.StreamingRequest = &StreamingRecognizeRequest_AudioContent{x}
		return true, err
	default:
		return false, nil
	}
}
|
||
|
|
||
|
// _StreamingRecognizeRequest_OneofSizer computes the encoded size of whichever
// streaming_request case is set, mirroring the marshaler above: one byte of
// tag+wire, a varint length, then the payload. A nil oneof contributes 0.
func _StreamingRecognizeRequest_OneofSizer(msg proto.Message) (n int) {
	m := msg.(*StreamingRecognizeRequest)
	// streaming_request
	switch x := m.StreamingRequest.(type) {
	case *StreamingRecognizeRequest_StreamingConfig:
		s := proto.Size(x.StreamingConfig)
		n += 1 // tag and wire
		n += proto.SizeVarint(uint64(s))
		n += s
	case *StreamingRecognizeRequest_AudioContent:
		n += 1 // tag and wire
		n += proto.SizeVarint(uint64(len(x.AudioContent)))
		n += len(x.AudioContent)
	case nil:
	default:
		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
	}
	return n
}
|
||
|
|
||
|
// Provides information to the recognizer that specifies how to process the
// request.
type StreamingRecognitionConfig struct {
	// *Required* Provides information to the recognizer that specifies how to
	// process the request.
	Config *RecognitionConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"`
	// *Optional* If `false` or omitted, the recognizer will perform continuous
	// recognition (continuing to wait for and process audio even if the user
	// pauses speaking) until the client closes the input stream (gRPC API) or
	// until the maximum time limit has been reached. May return multiple
	// `StreamingRecognitionResult`s with the `is_final` flag set to `true`.
	//
	// If `true`, the recognizer will detect a single spoken utterance. When it
	// detects that the user has paused or stopped speaking, it will return an
	// `END_OF_UTTERANCE` event and cease recognition. It will return no more than
	// one `StreamingRecognitionResult` with the `is_final` flag set to `true`.
	SingleUtterance bool `protobuf:"varint,2,opt,name=single_utterance,json=singleUtterance,proto3" json:"single_utterance,omitempty"`
	// *Optional* If `true`, interim results (tentative hypotheses) may be
	// returned as they become available (these interim results are indicated with
	// the `is_final=false` flag).
	// If `false` or omitted, only `is_final=true` result(s) are returned.
	InterimResults bool `protobuf:"varint,3,opt,name=interim_results,json=interimResults,proto3" json:"interim_results,omitempty"`
	// Internal bookkeeping fields used by the proto runtime; excluded from JSON.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// proto.Message boilerplate.
func (m *StreamingRecognitionConfig) Reset()         { *m = StreamingRecognitionConfig{} }
func (m *StreamingRecognitionConfig) String() string { return proto.CompactTextString(m) }
func (*StreamingRecognitionConfig) ProtoMessage()    {}

// Descriptor returns the file descriptor bytes and this message's index (3).
func (*StreamingRecognitionConfig) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_8234301300f3be52, []int{3}
}

// XXX_* methods delegate to the table-driven codec in xxx_messageInfo.
func (m *StreamingRecognitionConfig) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_StreamingRecognitionConfig.Unmarshal(m, b)
}
func (m *StreamingRecognitionConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_StreamingRecognitionConfig.Marshal(b, m, deterministic)
}
func (dst *StreamingRecognitionConfig) XXX_Merge(src proto.Message) {
	xxx_messageInfo_StreamingRecognitionConfig.Merge(dst, src)
}
func (m *StreamingRecognitionConfig) XXX_Size() int {
	return xxx_messageInfo_StreamingRecognitionConfig.Size(m)
}
func (m *StreamingRecognitionConfig) XXX_DiscardUnknown() {
	xxx_messageInfo_StreamingRecognitionConfig.DiscardUnknown(m)
}

var xxx_messageInfo_StreamingRecognitionConfig proto.InternalMessageInfo

// GetConfig returns the recognition config, or nil for a nil receiver.
func (m *StreamingRecognitionConfig) GetConfig() *RecognitionConfig {
	if m != nil {
		return m.Config
	}
	return nil
}

// GetSingleUtterance returns the flag, or false for a nil receiver.
func (m *StreamingRecognitionConfig) GetSingleUtterance() bool {
	if m != nil {
		return m.SingleUtterance
	}
	return false
}

// GetInterimResults returns the flag, or false for a nil receiver.
func (m *StreamingRecognitionConfig) GetInterimResults() bool {
	if m != nil {
		return m.InterimResults
	}
	return false
}
|
||
|
|
||
|
// Provides information to the recognizer that specifies how to process the
// request.
type RecognitionConfig struct {
	// *Required* Encoding of audio data sent in all `RecognitionAudio` messages.
	Encoding RecognitionConfig_AudioEncoding `protobuf:"varint,1,opt,name=encoding,proto3,enum=google.cloud.speech.v1beta1.RecognitionConfig_AudioEncoding" json:"encoding,omitempty"`
	// *Required* Sample rate in Hertz of the audio data sent in all
	// `RecognitionAudio` messages. Valid values are: 8000-48000.
	// 16000 is optimal. For best results, set the sampling rate of the audio
	// source to 16000 Hz. If that's not possible, use the native sample rate of
	// the audio source (instead of re-sampling).
	SampleRate int32 `protobuf:"varint,2,opt,name=sample_rate,json=sampleRate,proto3" json:"sample_rate,omitempty"`
	// *Optional* The language of the supplied audio as a BCP-47 language tag.
	// Example: "en-GB" https://www.rfc-editor.org/rfc/bcp/bcp47.txt
	// If omitted, defaults to "en-US". See
	// [Language Support](https://cloud.google.com/speech/docs/languages)
	// for a list of the currently supported language codes.
	LanguageCode string `protobuf:"bytes,3,opt,name=language_code,json=languageCode,proto3" json:"language_code,omitempty"`
	// *Optional* Maximum number of recognition hypotheses to be returned.
	// Specifically, the maximum number of `SpeechRecognitionAlternative` messages
	// within each `SpeechRecognitionResult`.
	// The server may return fewer than `max_alternatives`.
	// Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
	// one. If omitted, will return a maximum of one.
	MaxAlternatives int32 `protobuf:"varint,4,opt,name=max_alternatives,json=maxAlternatives,proto3" json:"max_alternatives,omitempty"`
	// *Optional* If set to `true`, the server will attempt to filter out
	// profanities, replacing all but the initial character in each filtered word
	// with asterisks, e.g. "f***". If set to `false` or omitted, profanities
	// won't be filtered out.
	ProfanityFilter bool `protobuf:"varint,5,opt,name=profanity_filter,json=profanityFilter,proto3" json:"profanity_filter,omitempty"`
	// *Optional* A means to provide context to assist the speech recognition.
	SpeechContext *SpeechContext `protobuf:"bytes,6,opt,name=speech_context,json=speechContext,proto3" json:"speech_context,omitempty"`
	// Internal bookkeeping fields used by the proto runtime; excluded from JSON.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// proto.Message boilerplate.
func (m *RecognitionConfig) Reset()         { *m = RecognitionConfig{} }
func (m *RecognitionConfig) String() string { return proto.CompactTextString(m) }
func (*RecognitionConfig) ProtoMessage()    {}

// Descriptor returns the file descriptor bytes and this message's index (4).
func (*RecognitionConfig) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_8234301300f3be52, []int{4}
}

// XXX_* methods delegate to the table-driven codec in xxx_messageInfo.
func (m *RecognitionConfig) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_RecognitionConfig.Unmarshal(m, b)
}
func (m *RecognitionConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_RecognitionConfig.Marshal(b, m, deterministic)
}
func (dst *RecognitionConfig) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RecognitionConfig.Merge(dst, src)
}
func (m *RecognitionConfig) XXX_Size() int {
	return xxx_messageInfo_RecognitionConfig.Size(m)
}
func (m *RecognitionConfig) XXX_DiscardUnknown() {
	xxx_messageInfo_RecognitionConfig.DiscardUnknown(m)
}

var xxx_messageInfo_RecognitionConfig proto.InternalMessageInfo

// Getters return the field value, or the zero value for a nil receiver.
func (m *RecognitionConfig) GetEncoding() RecognitionConfig_AudioEncoding {
	if m != nil {
		return m.Encoding
	}
	return RecognitionConfig_ENCODING_UNSPECIFIED
}

func (m *RecognitionConfig) GetSampleRate() int32 {
	if m != nil {
		return m.SampleRate
	}
	return 0
}

func (m *RecognitionConfig) GetLanguageCode() string {
	if m != nil {
		return m.LanguageCode
	}
	return ""
}

func (m *RecognitionConfig) GetMaxAlternatives() int32 {
	if m != nil {
		return m.MaxAlternatives
	}
	return 0
}

func (m *RecognitionConfig) GetProfanityFilter() bool {
	if m != nil {
		return m.ProfanityFilter
	}
	return false
}

func (m *RecognitionConfig) GetSpeechContext() *SpeechContext {
	if m != nil {
		return m.SpeechContext
	}
	return nil
}
|
||
|
|
||
|
// Provides "hints" to the speech recognizer to favor specific words and phrases
// in the results.
type SpeechContext struct {
	// *Optional* A list of strings containing words and phrases "hints" so that
	// the speech recognition is more likely to recognize them. This can be used
	// to improve the accuracy for specific words and phrases, for example, if
	// specific commands are typically spoken by the user. This can also be used
	// to add additional words to the vocabulary of the recognizer. See
	// [usage limits](https://cloud.google.com/speech/limits#content).
	Phrases []string `protobuf:"bytes,1,rep,name=phrases,proto3" json:"phrases,omitempty"`
	// Internal bookkeeping fields used by the proto runtime; excluded from JSON.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// proto.Message boilerplate.
func (m *SpeechContext) Reset()         { *m = SpeechContext{} }
func (m *SpeechContext) String() string { return proto.CompactTextString(m) }
func (*SpeechContext) ProtoMessage()    {}

// Descriptor returns the file descriptor bytes and this message's index (5).
func (*SpeechContext) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_8234301300f3be52, []int{5}
}

// XXX_* methods delegate to the table-driven codec in xxx_messageInfo.
func (m *SpeechContext) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_SpeechContext.Unmarshal(m, b)
}
func (m *SpeechContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_SpeechContext.Marshal(b, m, deterministic)
}
func (dst *SpeechContext) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SpeechContext.Merge(dst, src)
}
func (m *SpeechContext) XXX_Size() int {
	return xxx_messageInfo_SpeechContext.Size(m)
}
func (m *SpeechContext) XXX_DiscardUnknown() {
	xxx_messageInfo_SpeechContext.DiscardUnknown(m)
}

var xxx_messageInfo_SpeechContext proto.InternalMessageInfo

// GetPhrases returns the phrase hints, or nil for a nil receiver.
func (m *SpeechContext) GetPhrases() []string {
	if m != nil {
		return m.Phrases
	}
	return nil
}
|
||
|
|
||
|
// Contains audio data in the encoding specified in the `RecognitionConfig`.
// Either `content` or `uri` must be supplied. Supplying both or neither
// returns [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. See
// [audio limits](https://cloud.google.com/speech/limits#content).
type RecognitionAudio struct {
	// The audio source, which is either inline content or a GCS uri.
	//
	// Types that are valid to be assigned to AudioSource:
	//	*RecognitionAudio_Content
	//	*RecognitionAudio_Uri
	AudioSource isRecognitionAudio_AudioSource `protobuf_oneof:"audio_source"`
	// Internal bookkeeping fields used by the proto runtime; excluded from JSON.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// proto.Message boilerplate.
func (m *RecognitionAudio) Reset()         { *m = RecognitionAudio{} }
func (m *RecognitionAudio) String() string { return proto.CompactTextString(m) }
func (*RecognitionAudio) ProtoMessage()    {}

// Descriptor returns the file descriptor bytes and this message's index (6).
func (*RecognitionAudio) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_8234301300f3be52, []int{6}
}

// XXX_* methods delegate to the table-driven codec in xxx_messageInfo.
func (m *RecognitionAudio) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_RecognitionAudio.Unmarshal(m, b)
}
func (m *RecognitionAudio) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_RecognitionAudio.Marshal(b, m, deterministic)
}
func (dst *RecognitionAudio) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RecognitionAudio.Merge(dst, src)
}
func (m *RecognitionAudio) XXX_Size() int {
	return xxx_messageInfo_RecognitionAudio.Size(m)
}
func (m *RecognitionAudio) XXX_DiscardUnknown() {
	xxx_messageInfo_RecognitionAudio.DiscardUnknown(m)
}

var xxx_messageInfo_RecognitionAudio proto.InternalMessageInfo

// isRecognitionAudio_AudioSource is the sealed interface for the audio_source
// oneof; only the wrapper types below implement it.
type isRecognitionAudio_AudioSource interface {
	isRecognitionAudio_AudioSource()
}

// Oneof wrapper types, one per audio_source case.
type RecognitionAudio_Content struct {
	Content []byte `protobuf:"bytes,1,opt,name=content,proto3,oneof"`
}
type RecognitionAudio_Uri struct {
	Uri string `protobuf:"bytes,2,opt,name=uri,proto3,oneof"`
}

func (*RecognitionAudio_Content) isRecognitionAudio_AudioSource() {}
func (*RecognitionAudio_Uri) isRecognitionAudio_AudioSource()     {}

// GetAudioSource returns the raw oneof value, or nil for a nil receiver.
func (m *RecognitionAudio) GetAudioSource() isRecognitionAudio_AudioSource {
	if m != nil {
		return m.AudioSource
	}
	return nil
}

// GetContent returns the inline audio bytes if that oneof case is set, else nil.
func (m *RecognitionAudio) GetContent() []byte {
	if x, ok := m.GetAudioSource().(*RecognitionAudio_Content); ok {
		return x.Content
	}
	return nil
}

// GetUri returns the audio URI if that oneof case is set, else "".
func (m *RecognitionAudio) GetUri() string {
	if x, ok := m.GetAudioSource().(*RecognitionAudio_Uri); ok {
		return x.Uri
	}
	return ""
}
|
||
|
|
||
|
// XXX_OneofFuncs is for the internal use of the proto package.
// It wires the oneof marshal/unmarshal/size helpers below into the runtime
// and registers the wrapper types for the audio_source oneof.
func (*RecognitionAudio) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
	return _RecognitionAudio_OneofMarshaler, _RecognitionAudio_OneofUnmarshaler, _RecognitionAudio_OneofSizer, []interface{}{
		(*RecognitionAudio_Content)(nil),
		(*RecognitionAudio_Uri)(nil),
	}
}
|
||
|
|
||
|
// _RecognitionAudio_OneofMarshaler encodes whichever audio_source case is set:
// field 1 as raw bytes, field 2 as a length-delimited string. A nil oneof
// encodes nothing; any other dynamic type is an error.
func _RecognitionAudio_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
	m := msg.(*RecognitionAudio)
	// audio_source
	switch x := m.AudioSource.(type) {
	case *RecognitionAudio_Content:
		// Tag for field 1, wire type bytes.
		b.EncodeVarint(1<<3 | proto.WireBytes)
		b.EncodeRawBytes(x.Content)
	case *RecognitionAudio_Uri:
		// Tag for field 2, wire type bytes.
		b.EncodeVarint(2<<3 | proto.WireBytes)
		b.EncodeStringBytes(x.Uri)
	case nil:
	default:
		return fmt.Errorf("RecognitionAudio.AudioSource has unexpected type %T", x)
	}
	return nil
}
|
||
|
|
||
|
// _RecognitionAudio_OneofUnmarshaler decodes a single audio_source field from
// b into msg. It returns (true, err) when the tag belongs to this oneof
// (err reports any decode/wire-type problem), and (false, nil) when the tag
// is not part of the oneof so the caller should handle it.
func _RecognitionAudio_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
	m := msg.(*RecognitionAudio)
	switch tag {
	case 1: // audio_source.content
		if wire != proto.WireBytes {
			return true, proto.ErrInternalBadWireType
		}
		x, err := b.DecodeRawBytes(true)
		m.AudioSource = &RecognitionAudio_Content{x}
		return true, err
	case 2: // audio_source.uri
		if wire != proto.WireBytes {
			return true, proto.ErrInternalBadWireType
		}
		x, err := b.DecodeStringBytes()
		m.AudioSource = &RecognitionAudio_Uri{x}
		return true, err
	default:
		return false, nil
	}
}
// _RecognitionAudio_OneofSizer returns the number of bytes the set
// audio_source variant will occupy on the wire (tag + length varint +
// payload), or 0 when the oneof is unset.
func _RecognitionAudio_OneofSizer(msg proto.Message) (n int) {
	m := msg.(*RecognitionAudio)
	// audio_source
	switch x := m.AudioSource.(type) {
	case *RecognitionAudio_Content:
		n += 1 // tag and wire
		n += proto.SizeVarint(uint64(len(x.Content)))
		n += len(x.Content)
	case *RecognitionAudio_Uri:
		n += 1 // tag and wire
		n += proto.SizeVarint(uint64(len(x.Uri)))
		n += len(x.Uri)
	case nil:
	default:
		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
	}
	return n
}
// The only message returned to the client by the `SyncRecognize` method. It
// contains the result as zero or more sequential `SpeechRecognitionResult`
// messages.
type SyncRecognizeResponse struct {
	// *Output-only* Sequential list of transcription results corresponding to
	// sequential portions of audio.
	Results              []*SpeechRecognitionResult `protobuf:"bytes,2,rep,name=results,proto3" json:"results,omitempty"`
	XXX_NoUnkeyedLiteral struct{}                   `json:"-"`
	XXX_unrecognized     []byte                     `json:"-"`
	XXX_sizecache        int32                      `json:"-"`
}

// Standard generated proto.Message boilerplate.
func (m *SyncRecognizeResponse) Reset()         { *m = SyncRecognizeResponse{} }
func (m *SyncRecognizeResponse) String() string { return proto.CompactTextString(m) }
func (*SyncRecognizeResponse) ProtoMessage()    {}
func (*SyncRecognizeResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_8234301300f3be52, []int{7}
}
func (m *SyncRecognizeResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_SyncRecognizeResponse.Unmarshal(m, b)
}
func (m *SyncRecognizeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_SyncRecognizeResponse.Marshal(b, m, deterministic)
}
func (dst *SyncRecognizeResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SyncRecognizeResponse.Merge(dst, src)
}
func (m *SyncRecognizeResponse) XXX_Size() int {
	return xxx_messageInfo_SyncRecognizeResponse.Size(m)
}
func (m *SyncRecognizeResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_SyncRecognizeResponse.DiscardUnknown(m)
}

var xxx_messageInfo_SyncRecognizeResponse proto.InternalMessageInfo

// GetResults returns the transcription results, or nil for a nil receiver.
func (m *SyncRecognizeResponse) GetResults() []*SpeechRecognitionResult {
	if m != nil {
		return m.Results
	}
	return nil
}
// The only message returned to the client by `AsyncRecognize`. It contains the
// result as zero or more sequential `SpeechRecognitionResult` messages. It is
// included in the `result.response` field of the `Operation` returned by the
// `GetOperation` call of the `google::longrunning::Operations` service.
type AsyncRecognizeResponse struct {
	// *Output-only* Sequential list of transcription results corresponding to
	// sequential portions of audio.
	Results              []*SpeechRecognitionResult `protobuf:"bytes,2,rep,name=results,proto3" json:"results,omitempty"`
	XXX_NoUnkeyedLiteral struct{}                   `json:"-"`
	XXX_unrecognized     []byte                     `json:"-"`
	XXX_sizecache        int32                      `json:"-"`
}

// Standard generated proto.Message boilerplate.
func (m *AsyncRecognizeResponse) Reset()         { *m = AsyncRecognizeResponse{} }
func (m *AsyncRecognizeResponse) String() string { return proto.CompactTextString(m) }
func (*AsyncRecognizeResponse) ProtoMessage()    {}
func (*AsyncRecognizeResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_8234301300f3be52, []int{8}
}
func (m *AsyncRecognizeResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_AsyncRecognizeResponse.Unmarshal(m, b)
}
func (m *AsyncRecognizeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_AsyncRecognizeResponse.Marshal(b, m, deterministic)
}
func (dst *AsyncRecognizeResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_AsyncRecognizeResponse.Merge(dst, src)
}
func (m *AsyncRecognizeResponse) XXX_Size() int {
	return xxx_messageInfo_AsyncRecognizeResponse.Size(m)
}
func (m *AsyncRecognizeResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_AsyncRecognizeResponse.DiscardUnknown(m)
}

var xxx_messageInfo_AsyncRecognizeResponse proto.InternalMessageInfo

// GetResults returns the transcription results, or nil for a nil receiver.
func (m *AsyncRecognizeResponse) GetResults() []*SpeechRecognitionResult {
	if m != nil {
		return m.Results
	}
	return nil
}
// Describes the progress of a long-running `AsyncRecognize` call. It is
// included in the `metadata` field of the `Operation` returned by the
// `GetOperation` call of the `google::longrunning::Operations` service.
type AsyncRecognizeMetadata struct {
	// Approximate percentage of audio processed thus far. Guaranteed to be 100
	// when the audio is fully processed and the results are available.
	ProgressPercent int32 `protobuf:"varint,1,opt,name=progress_percent,json=progressPercent,proto3" json:"progress_percent,omitempty"`
	// Time when the request was received.
	StartTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
	// Time of the most recent processing update.
	LastUpdateTime       *timestamp.Timestamp `protobuf:"bytes,3,opt,name=last_update_time,json=lastUpdateTime,proto3" json:"last_update_time,omitempty"`
	XXX_NoUnkeyedLiteral struct{}             `json:"-"`
	XXX_unrecognized     []byte               `json:"-"`
	XXX_sizecache        int32                `json:"-"`
}

// Standard generated proto.Message boilerplate.
func (m *AsyncRecognizeMetadata) Reset()         { *m = AsyncRecognizeMetadata{} }
func (m *AsyncRecognizeMetadata) String() string { return proto.CompactTextString(m) }
func (*AsyncRecognizeMetadata) ProtoMessage()    {}
func (*AsyncRecognizeMetadata) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_8234301300f3be52, []int{9}
}
func (m *AsyncRecognizeMetadata) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_AsyncRecognizeMetadata.Unmarshal(m, b)
}
func (m *AsyncRecognizeMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_AsyncRecognizeMetadata.Marshal(b, m, deterministic)
}
func (dst *AsyncRecognizeMetadata) XXX_Merge(src proto.Message) {
	xxx_messageInfo_AsyncRecognizeMetadata.Merge(dst, src)
}
func (m *AsyncRecognizeMetadata) XXX_Size() int {
	return xxx_messageInfo_AsyncRecognizeMetadata.Size(m)
}
func (m *AsyncRecognizeMetadata) XXX_DiscardUnknown() {
	xxx_messageInfo_AsyncRecognizeMetadata.DiscardUnknown(m)
}

var xxx_messageInfo_AsyncRecognizeMetadata proto.InternalMessageInfo

// GetProgressPercent returns the progress percentage, or 0 for a nil receiver.
func (m *AsyncRecognizeMetadata) GetProgressPercent() int32 {
	if m != nil {
		return m.ProgressPercent
	}
	return 0
}

// GetStartTime returns the request start time, or nil for a nil receiver.
func (m *AsyncRecognizeMetadata) GetStartTime() *timestamp.Timestamp {
	if m != nil {
		return m.StartTime
	}
	return nil
}

// GetLastUpdateTime returns the most recent update time, or nil for a nil
// receiver.
func (m *AsyncRecognizeMetadata) GetLastUpdateTime() *timestamp.Timestamp {
	if m != nil {
		return m.LastUpdateTime
	}
	return nil
}
// `StreamingRecognizeResponse` is the only message returned to the client by
// `StreamingRecognize`. A series of one or more `StreamingRecognizeResponse`
// messages are streamed back to the client.
//
// Here's an example of a series of ten `StreamingRecognizeResponse`s that might
// be returned while processing audio:
//
// 1. endpointer_type: START_OF_SPEECH
//
// 2. results { alternatives { transcript: "tube" } stability: 0.01 }
//    result_index: 0
//
// 3. results { alternatives { transcript: "to be a" } stability: 0.01 }
//    result_index: 0
//
// 4. results { alternatives { transcript: "to be" } stability: 0.9 }
//    results { alternatives { transcript: " or not to be" } stability: 0.01 }
//    result_index: 0
//
// 5. results { alternatives { transcript: "to be or not to be"
//    confidence: 0.92 }
//    alternatives { transcript: "to bee or not to bee" }
//    is_final: true }
//    result_index: 0
//
// 6. results { alternatives { transcript: " that's" } stability: 0.01 }
//    result_index: 1
//
// 7. results { alternatives { transcript: " that is" } stability: 0.9 }
//    results { alternatives { transcript: " the question" } stability: 0.01 }
//    result_index: 1
//
// 8. endpointer_type: END_OF_SPEECH
//
// 9. results { alternatives { transcript: " that is the question"
//    confidence: 0.98 }
//    alternatives { transcript: " that was the question" }
//    is_final: true }
//    result_index: 1
//
// 10. endpointer_type: END_OF_AUDIO
//
// Notes:
//
// - Only two of the above responses #5 and #9 contain final results, they are
//   indicated by `is_final: true`. Concatenating these together generates the
//   full transcript: "to be or not to be that is the question".
//
// - The others contain interim `results`. #4 and #7 contain two interim
//   `results`, the first portion has a high stability and is less likely to
//   change, the second portion has a low stability and is very likely to
//   change. A UI designer might choose to show only high stability `results`.
//
// - The specific `stability` and `confidence` values shown above are only for
//   illustrative purposes. Actual values may vary.
//
// - The `result_index` indicates the portion of audio that has had final
//   results returned, and is no longer being processed. For example, the
//   `results` in #6 and later correspond to the portion of audio after
//   "to be or not to be".
type StreamingRecognizeResponse struct {
	// *Output-only* If set, returns a [google.rpc.Status][google.rpc.Status] message that
	// specifies the error for the operation.
	Error *status.Status `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
	// *Output-only* This repeated list contains zero or more results that
	// correspond to consecutive portions of the audio currently being processed.
	// It contains zero or one `is_final=true` result (the newly settled portion),
	// followed by zero or more `is_final=false` results.
	Results []*StreamingRecognitionResult `protobuf:"bytes,2,rep,name=results,proto3" json:"results,omitempty"`
	// *Output-only* Indicates the lowest index in the `results` array that has
	// changed. The repeated `StreamingRecognitionResult` results overwrite past
	// results at this index and higher.
	ResultIndex int32 `protobuf:"varint,3,opt,name=result_index,json=resultIndex,proto3" json:"result_index,omitempty"`
	// *Output-only* Indicates the type of endpointer event.
	EndpointerType       StreamingRecognizeResponse_EndpointerType `protobuf:"varint,4,opt,name=endpointer_type,json=endpointerType,proto3,enum=google.cloud.speech.v1beta1.StreamingRecognizeResponse_EndpointerType" json:"endpointer_type,omitempty"`
	XXX_NoUnkeyedLiteral struct{}                                  `json:"-"`
	XXX_unrecognized     []byte                                    `json:"-"`
	XXX_sizecache        int32                                     `json:"-"`
}

// Standard generated proto.Message boilerplate.
func (m *StreamingRecognizeResponse) Reset()         { *m = StreamingRecognizeResponse{} }
func (m *StreamingRecognizeResponse) String() string { return proto.CompactTextString(m) }
func (*StreamingRecognizeResponse) ProtoMessage()    {}
func (*StreamingRecognizeResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_8234301300f3be52, []int{10}
}
func (m *StreamingRecognizeResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_StreamingRecognizeResponse.Unmarshal(m, b)
}
func (m *StreamingRecognizeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_StreamingRecognizeResponse.Marshal(b, m, deterministic)
}
func (dst *StreamingRecognizeResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_StreamingRecognizeResponse.Merge(dst, src)
}
func (m *StreamingRecognizeResponse) XXX_Size() int {
	return xxx_messageInfo_StreamingRecognizeResponse.Size(m)
}
func (m *StreamingRecognizeResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_StreamingRecognizeResponse.DiscardUnknown(m)
}

var xxx_messageInfo_StreamingRecognizeResponse proto.InternalMessageInfo

// GetError returns the operation error, or nil for a nil receiver.
func (m *StreamingRecognizeResponse) GetError() *status.Status {
	if m != nil {
		return m.Error
	}
	return nil
}

// GetResults returns the streaming results, or nil for a nil receiver.
func (m *StreamingRecognizeResponse) GetResults() []*StreamingRecognitionResult {
	if m != nil {
		return m.Results
	}
	return nil
}

// GetResultIndex returns the lowest changed result index, or 0 for a nil
// receiver.
func (m *StreamingRecognizeResponse) GetResultIndex() int32 {
	if m != nil {
		return m.ResultIndex
	}
	return 0
}

// GetEndpointerType returns the endpointer event type, or the unspecified
// enum value for a nil receiver.
func (m *StreamingRecognizeResponse) GetEndpointerType() StreamingRecognizeResponse_EndpointerType {
	if m != nil {
		return m.EndpointerType
	}
	return StreamingRecognizeResponse_ENDPOINTER_EVENT_UNSPECIFIED
}
// A streaming speech recognition result corresponding to a portion of the audio
// that is currently being processed.
type StreamingRecognitionResult struct {
	// *Output-only* May contain one or more recognition hypotheses (up to the
	// maximum specified in `max_alternatives`).
	Alternatives []*SpeechRecognitionAlternative `protobuf:"bytes,1,rep,name=alternatives,proto3" json:"alternatives,omitempty"`
	// *Output-only* If `false`, this `StreamingRecognitionResult` represents an
	// interim result that may change. If `true`, this is the final time the
	// speech service will return this particular `StreamingRecognitionResult`,
	// the recognizer will not return any further hypotheses for this portion of
	// the transcript and corresponding audio.
	IsFinal bool `protobuf:"varint,2,opt,name=is_final,json=isFinal,proto3" json:"is_final,omitempty"`
	// *Output-only* An estimate of the likelihood that the recognizer will not
	// change its guess about this interim result. Values range from 0.0
	// (completely unstable) to 1.0 (completely stable).
	// This field is only provided for interim results (`is_final=false`).
	// The default of 0.0 is a sentinel value indicating `stability` was not set.
	Stability            float32  `protobuf:"fixed32,3,opt,name=stability,proto3" json:"stability,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard generated proto.Message boilerplate.
func (m *StreamingRecognitionResult) Reset()         { *m = StreamingRecognitionResult{} }
func (m *StreamingRecognitionResult) String() string { return proto.CompactTextString(m) }
func (*StreamingRecognitionResult) ProtoMessage()    {}
func (*StreamingRecognitionResult) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_8234301300f3be52, []int{11}
}
func (m *StreamingRecognitionResult) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_StreamingRecognitionResult.Unmarshal(m, b)
}
func (m *StreamingRecognitionResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_StreamingRecognitionResult.Marshal(b, m, deterministic)
}
func (dst *StreamingRecognitionResult) XXX_Merge(src proto.Message) {
	xxx_messageInfo_StreamingRecognitionResult.Merge(dst, src)
}
func (m *StreamingRecognitionResult) XXX_Size() int {
	return xxx_messageInfo_StreamingRecognitionResult.Size(m)
}
func (m *StreamingRecognitionResult) XXX_DiscardUnknown() {
	xxx_messageInfo_StreamingRecognitionResult.DiscardUnknown(m)
}

var xxx_messageInfo_StreamingRecognitionResult proto.InternalMessageInfo

// GetAlternatives returns the recognition hypotheses, or nil for a nil
// receiver.
func (m *StreamingRecognitionResult) GetAlternatives() []*SpeechRecognitionAlternative {
	if m != nil {
		return m.Alternatives
	}
	return nil
}

// GetIsFinal reports whether this result is final, or false for a nil
// receiver.
func (m *StreamingRecognitionResult) GetIsFinal() bool {
	if m != nil {
		return m.IsFinal
	}
	return false
}

// GetStability returns the stability estimate, or 0 for a nil receiver.
func (m *StreamingRecognitionResult) GetStability() float32 {
	if m != nil {
		return m.Stability
	}
	return 0
}
// A speech recognition result corresponding to a portion of the audio.
type SpeechRecognitionResult struct {
	// *Output-only* May contain one or more recognition hypotheses (up to the
	// maximum specified in `max_alternatives`).
	Alternatives         []*SpeechRecognitionAlternative `protobuf:"bytes,1,rep,name=alternatives,proto3" json:"alternatives,omitempty"`
	XXX_NoUnkeyedLiteral struct{}                        `json:"-"`
	XXX_unrecognized     []byte                          `json:"-"`
	XXX_sizecache        int32                           `json:"-"`
}

// Standard generated proto.Message boilerplate.
func (m *SpeechRecognitionResult) Reset()         { *m = SpeechRecognitionResult{} }
func (m *SpeechRecognitionResult) String() string { return proto.CompactTextString(m) }
func (*SpeechRecognitionResult) ProtoMessage()    {}
func (*SpeechRecognitionResult) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_8234301300f3be52, []int{12}
}
func (m *SpeechRecognitionResult) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_SpeechRecognitionResult.Unmarshal(m, b)
}
func (m *SpeechRecognitionResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_SpeechRecognitionResult.Marshal(b, m, deterministic)
}
func (dst *SpeechRecognitionResult) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SpeechRecognitionResult.Merge(dst, src)
}
func (m *SpeechRecognitionResult) XXX_Size() int {
	return xxx_messageInfo_SpeechRecognitionResult.Size(m)
}
func (m *SpeechRecognitionResult) XXX_DiscardUnknown() {
	xxx_messageInfo_SpeechRecognitionResult.DiscardUnknown(m)
}

var xxx_messageInfo_SpeechRecognitionResult proto.InternalMessageInfo

// GetAlternatives returns the recognition hypotheses, or nil for a nil
// receiver.
func (m *SpeechRecognitionResult) GetAlternatives() []*SpeechRecognitionAlternative {
	if m != nil {
		return m.Alternatives
	}
	return nil
}
// Alternative hypotheses (a.k.a. n-best list).
type SpeechRecognitionAlternative struct {
	// *Output-only* Transcript text representing the words that the user spoke.
	Transcript string `protobuf:"bytes,1,opt,name=transcript,proto3" json:"transcript,omitempty"`
	// *Output-only* The confidence estimate between 0.0 and 1.0. A higher number
	// indicates an estimated greater likelihood that the recognized words are
	// correct. This field is typically provided only for the top hypothesis, and
	// only for `is_final=true` results. Clients should not rely on the
	// `confidence` field as it is not guaranteed to be accurate, or even set, in
	// any of the results.
	// The default of 0.0 is a sentinel value indicating `confidence` was not set.
	Confidence           float32  `protobuf:"fixed32,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard generated proto.Message boilerplate.
func (m *SpeechRecognitionAlternative) Reset()         { *m = SpeechRecognitionAlternative{} }
func (m *SpeechRecognitionAlternative) String() string { return proto.CompactTextString(m) }
func (*SpeechRecognitionAlternative) ProtoMessage()    {}
func (*SpeechRecognitionAlternative) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_8234301300f3be52, []int{13}
}
func (m *SpeechRecognitionAlternative) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_SpeechRecognitionAlternative.Unmarshal(m, b)
}
func (m *SpeechRecognitionAlternative) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_SpeechRecognitionAlternative.Marshal(b, m, deterministic)
}
func (dst *SpeechRecognitionAlternative) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SpeechRecognitionAlternative.Merge(dst, src)
}
func (m *SpeechRecognitionAlternative) XXX_Size() int {
	return xxx_messageInfo_SpeechRecognitionAlternative.Size(m)
}
func (m *SpeechRecognitionAlternative) XXX_DiscardUnknown() {
	xxx_messageInfo_SpeechRecognitionAlternative.DiscardUnknown(m)
}

var xxx_messageInfo_SpeechRecognitionAlternative proto.InternalMessageInfo

// GetTranscript returns the transcript text, or "" for a nil receiver.
func (m *SpeechRecognitionAlternative) GetTranscript() string {
	if m != nil {
		return m.Transcript
	}
	return ""
}

// GetConfidence returns the confidence estimate, or 0 for a nil receiver.
func (m *SpeechRecognitionAlternative) GetConfidence() float32 {
	if m != nil {
		return m.Confidence
	}
	return 0
}
// init registers every message type and enum of this package with the proto
// runtime so they can be resolved by their fully-qualified names.
func init() {
	proto.RegisterType((*SyncRecognizeRequest)(nil), "google.cloud.speech.v1beta1.SyncRecognizeRequest")
	proto.RegisterType((*AsyncRecognizeRequest)(nil), "google.cloud.speech.v1beta1.AsyncRecognizeRequest")
	proto.RegisterType((*StreamingRecognizeRequest)(nil), "google.cloud.speech.v1beta1.StreamingRecognizeRequest")
	proto.RegisterType((*StreamingRecognitionConfig)(nil), "google.cloud.speech.v1beta1.StreamingRecognitionConfig")
	proto.RegisterType((*RecognitionConfig)(nil), "google.cloud.speech.v1beta1.RecognitionConfig")
	proto.RegisterType((*SpeechContext)(nil), "google.cloud.speech.v1beta1.SpeechContext")
	proto.RegisterType((*RecognitionAudio)(nil), "google.cloud.speech.v1beta1.RecognitionAudio")
	proto.RegisterType((*SyncRecognizeResponse)(nil), "google.cloud.speech.v1beta1.SyncRecognizeResponse")
	proto.RegisterType((*AsyncRecognizeResponse)(nil), "google.cloud.speech.v1beta1.AsyncRecognizeResponse")
	proto.RegisterType((*AsyncRecognizeMetadata)(nil), "google.cloud.speech.v1beta1.AsyncRecognizeMetadata")
	proto.RegisterType((*StreamingRecognizeResponse)(nil), "google.cloud.speech.v1beta1.StreamingRecognizeResponse")
	proto.RegisterType((*StreamingRecognitionResult)(nil), "google.cloud.speech.v1beta1.StreamingRecognitionResult")
	proto.RegisterType((*SpeechRecognitionResult)(nil), "google.cloud.speech.v1beta1.SpeechRecognitionResult")
	proto.RegisterType((*SpeechRecognitionAlternative)(nil), "google.cloud.speech.v1beta1.SpeechRecognitionAlternative")
	proto.RegisterEnum("google.cloud.speech.v1beta1.RecognitionConfig_AudioEncoding", RecognitionConfig_AudioEncoding_name, RecognitionConfig_AudioEncoding_value)
	proto.RegisterEnum("google.cloud.speech.v1beta1.StreamingRecognizeResponse_EndpointerType", StreamingRecognizeResponse_EndpointerType_name, StreamingRecognizeResponse_EndpointerType_value)
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn

// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// SpeechClient is the client API for Speech service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type SpeechClient interface {
	// Performs synchronous speech recognition: receive results after all audio
	// has been sent and processed.
	SyncRecognize(ctx context.Context, in *SyncRecognizeRequest, opts ...grpc.CallOption) (*SyncRecognizeResponse, error)
	// Performs asynchronous speech recognition: receive results via the
	// [google.longrunning.Operations]
	// (/speech/reference/rest/v1beta1/operations#Operation)
	// interface. Returns either an
	// `Operation.error` or an `Operation.response` which contains
	// an `AsyncRecognizeResponse` message.
	AsyncRecognize(ctx context.Context, in *AsyncRecognizeRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
	// Performs bidirectional streaming speech recognition: receive results while
	// sending audio. This method is only available via the gRPC API (not REST).
	StreamingRecognize(ctx context.Context, opts ...grpc.CallOption) (Speech_StreamingRecognizeClient, error)
}

// speechClient is the concrete SpeechClient backed by a gRPC connection.
type speechClient struct {
	cc *grpc.ClientConn
}

// NewSpeechClient wraps an existing client connection in a SpeechClient.
func NewSpeechClient(cc *grpc.ClientConn) SpeechClient {
	return &speechClient{cc}
}
// SyncRecognize invokes the unary SyncRecognize RPC over the client's
// connection.
func (c *speechClient) SyncRecognize(ctx context.Context, in *SyncRecognizeRequest, opts ...grpc.CallOption) (*SyncRecognizeResponse, error) {
	out := new(SyncRecognizeResponse)
	err := c.cc.Invoke(ctx, "/google.cloud.speech.v1beta1.Speech/SyncRecognize", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

// AsyncRecognize invokes the unary AsyncRecognize RPC, returning the
// long-running operation handle.
func (c *speechClient) AsyncRecognize(ctx context.Context, in *AsyncRecognizeRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) {
	out := new(longrunning.Operation)
	err := c.cc.Invoke(ctx, "/google.cloud.speech.v1beta1.Speech/AsyncRecognize", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

// StreamingRecognize opens the bidirectional StreamingRecognize stream and
// returns a typed wrapper around it.
func (c *speechClient) StreamingRecognize(ctx context.Context, opts ...grpc.CallOption) (Speech_StreamingRecognizeClient, error) {
	stream, err := c.cc.NewStream(ctx, &_Speech_serviceDesc.Streams[0], "/google.cloud.speech.v1beta1.Speech/StreamingRecognize", opts...)
	if err != nil {
		return nil, err
	}
	x := &speechStreamingRecognizeClient{stream}
	return x, nil
}
// Speech_StreamingRecognizeClient is the client-side view of the
// bidirectional StreamingRecognize stream.
type Speech_StreamingRecognizeClient interface {
	Send(*StreamingRecognizeRequest) error
	Recv() (*StreamingRecognizeResponse, error)
	grpc.ClientStream
}

// speechStreamingRecognizeClient adds typed Send/Recv on top of the raw
// grpc.ClientStream.
type speechStreamingRecognizeClient struct {
	grpc.ClientStream
}

// Send writes one request message onto the stream.
func (x *speechStreamingRecognizeClient) Send(m *StreamingRecognizeRequest) error {
	return x.ClientStream.SendMsg(m)
}

// Recv reads the next response message from the stream.
func (x *speechStreamingRecognizeClient) Recv() (*StreamingRecognizeResponse, error) {
	m := new(StreamingRecognizeResponse)
	if err := x.ClientStream.RecvMsg(m); err != nil {
		return nil, err
	}
	return m, nil
}
// SpeechServer is the server API for Speech service.
type SpeechServer interface {
	// Performs synchronous speech recognition: receive results after all audio
	// has been sent and processed.
	SyncRecognize(context.Context, *SyncRecognizeRequest) (*SyncRecognizeResponse, error)
	// Performs asynchronous speech recognition: receive results via the
	// [google.longrunning.Operations]
	// (/speech/reference/rest/v1beta1/operations#Operation)
	// interface. Returns either an
	// `Operation.error` or an `Operation.response` which contains
	// an `AsyncRecognizeResponse` message.
	AsyncRecognize(context.Context, *AsyncRecognizeRequest) (*longrunning.Operation, error)
	// Performs bidirectional streaming speech recognition: receive results while
	// sending audio. This method is only available via the gRPC API (not REST).
	StreamingRecognize(Speech_StreamingRecognizeServer) error
}

// RegisterSpeechServer attaches a SpeechServer implementation to the given
// gRPC server under the Speech service descriptor.
func RegisterSpeechServer(s *grpc.Server, srv SpeechServer) {
	s.RegisterService(&_Speech_serviceDesc, srv)
}
// _Speech_SyncRecognize_Handler decodes a SyncRecognizeRequest and dispatches
// it to the registered SpeechServer, routing through the interceptor if one
// is installed.
func _Speech_SyncRecognize_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(SyncRecognizeRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(SpeechServer).SyncRecognize(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/google.cloud.speech.v1beta1.Speech/SyncRecognize",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(SpeechServer).SyncRecognize(ctx, req.(*SyncRecognizeRequest))
	}
	return interceptor(ctx, in, info, handler)
}

// _Speech_AsyncRecognize_Handler decodes an AsyncRecognizeRequest and
// dispatches it to the registered SpeechServer, routing through the
// interceptor if one is installed.
func _Speech_AsyncRecognize_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(AsyncRecognizeRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(SpeechServer).AsyncRecognize(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/google.cloud.speech.v1beta1.Speech/AsyncRecognize",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(SpeechServer).AsyncRecognize(ctx, req.(*AsyncRecognizeRequest))
	}
	return interceptor(ctx, in, info, handler)
}
// _Speech_StreamingRecognize_Handler hands the raw server stream to the
// registered SpeechServer, wrapped in the typed server-stream adapter.
func _Speech_StreamingRecognize_Handler(srv interface{}, stream grpc.ServerStream) error {
	return srv.(SpeechServer).StreamingRecognize(&speechStreamingRecognizeServer{stream})
}

// Speech_StreamingRecognizeServer is the server-side view of the
// bidirectional StreamingRecognize stream.
type Speech_StreamingRecognizeServer interface {
	Send(*StreamingRecognizeResponse) error
	Recv() (*StreamingRecognizeRequest, error)
	grpc.ServerStream
}

// speechStreamingRecognizeServer adds typed Send/Recv on top of the raw
// grpc.ServerStream.
type speechStreamingRecognizeServer struct {
	grpc.ServerStream
}

// Send writes one response message onto the stream.
func (x *speechStreamingRecognizeServer) Send(m *StreamingRecognizeResponse) error {
	return x.ServerStream.SendMsg(m)
}

// Recv reads the next request message from the stream.
func (x *speechStreamingRecognizeServer) Recv() (*StreamingRecognizeRequest, error) {
	m := new(StreamingRecognizeRequest)
	if err := x.ServerStream.RecvMsg(m); err != nil {
		return nil, err
	}
	return m, nil
}
// _Speech_serviceDesc describes the Speech service for grpc.RegisterService:
// it maps each RPC name to its generated handler, declares the handler
// interface type, and records the source .proto path as metadata.
var _Speech_serviceDesc = grpc.ServiceDesc{
	ServiceName: "google.cloud.speech.v1beta1.Speech",
	HandlerType: (*SpeechServer)(nil),
	// Unary RPCs.
	Methods: []grpc.MethodDesc{
		{
			MethodName: "SyncRecognize",
			Handler:    _Speech_SyncRecognize_Handler,
		},
		{
			MethodName: "AsyncRecognize",
			Handler:    _Speech_AsyncRecognize_Handler,
		},
	},
	// Streaming RPCs.
	Streams: []grpc.StreamDesc{
		{
			StreamName: "StreamingRecognize",
			Handler:    _Speech_StreamingRecognize_Handler,
			// Bidirectional: both sides stream messages.
			ServerStreams: true,
			ClientStreams: true,
		},
	},
	Metadata: "google/cloud/speech/v1beta1/cloud_speech.proto",
}
|
||
|
|
||
|
// init registers this file's gzipped FileDescriptorProto with the proto
// runtime so the descriptor can be looked up by its .proto import path.
func init() {
	proto.RegisterFile("google/cloud/speech/v1beta1/cloud_speech.proto", fileDescriptor_cloud_speech_8234301300f3be52)
}
|
||
|
|
||
|
// fileDescriptor_cloud_speech_8234301300f3be52 holds the gzip-compressed
// FileDescriptorProto for cloud_speech.proto, consumed by
// proto.RegisterFile in init(). Opaque generated data — do not modify.
var fileDescriptor_cloud_speech_8234301300f3be52 = []byte{
	// 1214 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x57, 0x4d, 0x6f, 0x1b, 0xc5,
	0x1b, 0xcf, 0xda, 0x71, 0x5e, 0x9e, 0xd8, 0xce, 0x76, 0xda, 0xfe, 0xeb, 0xfa, 0x9f, 0xb6, 0x61,
	0x2b, 0x44, 0x5a, 0x89, 0x35, 0x09, 0xa8, 0x55, 0x0b, 0x17, 0xc7, 0x59, 0x13, 0x4b, 0x8d, 0x93,
	0x4e, 0x1c, 0x8a, 0x90, 0x60, 0x35, 0x59, 0x4f, 0xb6, 0x2b, 0xd9, 0x33, 0xcb, 0xcc, 0x6c, 0x95,
	0x70, 0xec, 0x8d, 0x0b, 0x17, 0xbe, 0x00, 0x12, 0x7c, 0x02, 0xc4, 0x81, 0x0b, 0xe2, 0xc2, 0x81,
	0x3b, 0x5f, 0x81, 0x0f, 0x82, 0x76, 0x66, 0x37, 0xb1, 0x9d, 0xc6, 0x34, 0x88, 0x4a, 0xdc, 0xfc,
	0xfc, 0x9e, 0x97, 0xf9, 0xed, 0x33, 0xcf, 0xcb, 0x18, 0xdc, 0x90, 0xf3, 0x70, 0x40, 0x1b, 0xc1,
	0x80, 0x27, 0xfd, 0x86, 0x8c, 0x29, 0x0d, 0x9e, 0x37, 0x5e, 0xac, 0x1f, 0x52, 0x45, 0xd6, 0x0d,
	0xe8, 0x1b, 0xd0, 0x8d, 0x05, 0x57, 0x1c, 0xfd, 0xdf, 0xd8, 0xbb, 0x5a, 0xe5, 0x66, 0xaa, 0xcc,
	0xbe, 0xbe, 0x92, 0x05, 0x23, 0x71, 0xd4, 0x20, 0x8c, 0x71, 0x45, 0x54, 0xc4, 0x99, 0x34, 0xae,
	0xf5, 0xbb, 0x99, 0x76, 0xc0, 0x59, 0x28, 0x12, 0xc6, 0x22, 0x16, 0x36, 0x78, 0x4c, 0xc5, 0x98,
	0xd1, 0xed, 0xcc, 0x48, 0x4b, 0x87, 0xc9, 0x51, 0xa3, 0x9f, 0x18, 0x83, 0x4c, 0x7f, 0x67, 0x52,
	0xaf, 0xa2, 0x21, 0x95, 0x8a, 0x0c, 0xe3, 0xcc, 0xe0, 0x46, 0x66, 0x20, 0xe2, 0xa0, 0x21, 0x15,
	0x51, 0x49, 0x16, 0xd9, 0xf9, 0xde, 0x82, 0x6b, 0xfb, 0x27, 0x2c, 0xc0, 0x34, 0xe0, 0x21, 0x8b,
	0xbe, 0xa2, 0x98, 0x7e, 0x99, 0x50, 0xa9, 0x50, 0x1b, 0xe6, 0x02, 0xce, 0x8e, 0xa2, 0xb0, 0x66,
	0xad, 0x5a, 0x6b, 0x4b, 0x1b, 0xae, 0x3b, 0xe5, 0x1b, 0xdd, 0xcc, 0x3d, 0xa5, 0xd4, 0xd2, 0x5e,
	0x38, 0xf3, 0x46, 0x2d, 0x28, 0x91, 0xa4, 0x1f, 0xf1, 0x5a, 0x41, 0x87, 0x79, 0xf7, 0x75, 0xc3,
	0x34, 0x53, 0x27, 0x6c, 0x7c, 0x9d, 0x1f, 0x2c, 0xb8, 0xde, 0x94, 0xff, 0x79, 0x9a, 0xbf, 0x58,
	0x70, 0x73, 0x5f, 0x09, 0x4a, 0x86, 0x11, 0x0b, 0xcf, 0x51, 0xed, 0x83, 0x2d, 0x73, 0xa5, 0x3f,
	0x46, 0xfa, 0xe1, 0xd4, 0xd3, 0x26, 0x23, 0x9e, 0xb1, 0xdf, 0x9e, 0xc1, 0xcb, 0xa7, 0x21, 0x0d,
	0x84, 0xde, 0x86, 0x8a, 0x26, 0x93, 0x9e, 0xa0, 0x28, 0x53, 0xfa, 0x83, 0xca, 0xdb, 0x33, 0xb8,
	0xac, 0xe1, 0x96, 0x41, 0x37, 0xaf, 0xc2, 0x95, 0x33, 0x32, 0xc2, 0x30, 0x74, 0x7e, 0xb6, 0xa0,
	0x7e, 0xf1, 0x69, 0xff, 0x5a, 0xae, 0xef, 0x81, 0x2d, 0x23, 0x16, 0x0e, 0xa8, 0x9f, 0x28, 0x45,
	0x05, 0x61, 0x01, 0xd5, 0x2c, 0x17, 0xf0, 0xb2, 0xc1, 0x0f, 0x72, 0x18, 0xbd, 0x03, 0xcb, 0x11,
	0x53, 0x54, 0x44, 0x43, 0x5f, 0x50, 0x99, 0x0c, 0x94, 0xac, 0x15, 0xb5, 0x65, 0x35, 0x83, 0xb1,
	0x41, 0x9d, 0x5f, 0x8b, 0x70, 0xe5, 0x3c, 0xe3, 0x4f, 0x61, 0x81, 0xb2, 0x80, 0xf7, 0x23, 0x66,
	0x38, 0x57, 0x37, 0x3e, 0xba, 0x1c, 0x67, 0x57, 0xdf, 0xaf, 0x97, 0xc5, 0xc0, 0xa7, 0xd1, 0xd0,
	0x1d, 0x58, 0x92, 0x64, 0x18, 0x0f, 0xa8, 0x2f, 0x88, 0x32, 0xf4, 0x4b, 0x18, 0x0c, 0x84, 0x89,
	0xa2, 0xe8, 0x2e, 0x54, 0x06, 0x84, 0x85, 0x09, 0x09, 0xa9, 0x1f, 0xf0, 0x3e, 0xd5, 0xbc, 0x17,
	0x71, 0x39, 0x07, 0x5b, 0xbc, 0x4f, 0xd3, 0x4c, 0x0c, 0xc9, 0xb1, 0x4f, 0x06, 0x8a, 0x0a, 0x46,
	0x54, 0xf4, 0x82, 0xca, 0xda, 0xac, 0x0e, 0xb5, 0x3c, 0x24, 0xc7, 0xcd, 0x11, 0x38, 0x35, 0x8d,
	0x05, 0x3f, 0x22, 0x2c, 0x52, 0x27, 0xfe, 0x51, 0x94, 0xaa, 0x6a, 0x25, 0x93, 0xb4, 0x53, 0xbc,
	0xad, 0x61, 0xf4, 0x14, 0xaa, 0xe6, 0xbb, 0x4c, 0x0d, 0x1c, 0xab, 0xda, 0x9c, 0xbe, 0xaf, 0xfb,
	0xd3, 0xcb, 0x4c, 0x8b, 0x2d, 0xe3, 0x81, 0x2b, 0x72, 0x54, 0x74, 0x08, 0x54, 0xc6, 0x32, 0x81,
	0x6a, 0x70, 0xcd, 0xeb, 0xb6, 0x76, 0xb7, 0x3a, 0xdd, 0x8f, 0xfd, 0x83, 0xee, 0xfe, 0x9e, 0xd7,
	0xea, 0xb4, 0x3b, 0xde, 0x96, 0x3d, 0x83, 0xca, 0xb0, 0xf0, 0xa4, 0xd3, 0xf5, 0x9a, 0x78, 0xfd,
	0x81, 0x6d, 0xa1, 0x05, 0x98, 0x6d, 0x3f, 0x69, 0xb6, 0xec, 0x02, 0x5a, 0x84, 0xd2, 0xce, 0xc1,
	0x93, 0xe6, 0x33, 0xbb, 0x88, 0xe6, 0xa1, 0xd8, 0xdc, 0xc1, 0xf6, 0x2c, 0x02, 0x98, 0x6b, 0xee,
	0x60, 0xff, 0xd9, 0xa6, 0x5d, 0x72, 0xee, 0x41, 0x65, 0x8c, 0x02, 0xaa, 0xc1, 0x7c, 0xfc, 0x5c,
	0x10, 0x49, 0x65, 0xcd, 0x5a, 0x2d, 0xae, 0x2d, 0xe2, 0x5c, 0x74, 0x30, 0xd8, 0x93, 0x2d, 0x88,
	0xea, 0x30, 0x9f, 0x57, 0xbc, 0x95, 0x55, 0x7c, 0x0e, 0x20, 0x04, 0xc5, 0x44, 0x44, 0xfa, 0x92,
	0x16, 0xb7, 0x67, 0x70, 0x2a, 0x6c, 0x56, 0xc1, 0x34, 0x84, 0x2f, 0x79, 0x22, 0x02, 0xea, 0x84,
	0x70, 0x7d, 0x62, 0x0e, 0xca, 0x98, 0x33, 0x49, 0x51, 0x17, 0xe6, 0xf3, 0xd2, 0x2b, 0xac, 0x16,
	0xd7, 0x96, 0x36, 0x3e, 0x78, 0x8d, 0x34, 0x8e, 0xd0, 0x33, 0x15, 0x8a, 0xf3, 0x20, 0xce, 0x73,
	0xf8, 0xdf, 0xe4, 0x28, 0x7b, 0x43, 0x27, 0xfd, 0x66, 0x4d, 0x1e, 0xb5, 0x43, 0x15, 0xe9, 0x13,
	0x45, 0xb2, 0x6a, 0x0a, 0x05, 0x95, 0xd2, 0x8f, 0xa9, 0x08, 0xf2, 0xb4, 0x95, 0x74, 0x35, 0x69,
	0x7c, 0xcf, 0xc0, 0xe8, 0x11, 0x80, 0x54, 0x44, 0x28, 0x3f, 0xdd, 0x29, 0xd9, 0x78, 0xac, 0xe7,
	0xc4, 0xf2, 0x85, 0xe3, 0xf6, 0xf2, 0x85, 0x83, 0x17, 0xb5, 0x75, 0x2a, 0xa3, 0x2d, 0xb0, 0x07,
	0x44, 0x2a, 0x3f, 0x89, 0xfb, 0x44, 0x51, 0x13, 0xa0, 0xf8, 0xb7, 0x01, 0xaa, 0xa9, 0xcf, 0x81,
	0x76, 0x49, 0x41, 0xe7, 0xc7, 0xe2, 0xf9, 0xa9, 0x34, 0x92, 0xb5, 0x35, 0x28, 0x51, 0x21, 0xb8,
	0xc8, 0x86, 0x12, 0xca, 0x23, 0x8b, 0x38, 0x70, 0xf7, 0xf5, 0xaa, 0xc3, 0xc6, 0x00, 0x3d, 0x9d,
	0xcc, 0xef, 0xe5, 0xe7, 0xee, 0x44, 0x8a, 0xd1, 0x5b, 0x50, 0x36, 0x3f, 0xfd, 0x88, 0xf5, 0xe9,
	0xb1, 0xfe, 0xba, 0x12, 0x5e, 0x32, 0x58, 0x27, 0x85, 0x10, 0x87, 0x65, 0xca, 0xfa, 0x31, 0xd7,
	0x03, 0xcb, 0x57, 0x27, 0x31, 0xd5, 0x2d, 0x5e, 0xdd, 0x68, 0x5f, 0xea, 0xf4, 0xb3, 0x2f, 0x76,
	0xbd, 0xd3, 0x70, 0xbd, 0x93, 0x98, 0xe2, 0x2a, 0x1d, 0x93, 0x9d, 0x97, 0x16, 0x54, 0xc7, 0x4d,
	0xd0, 0x2a, 0xac, 0x78, 0xdd, 0xad, 0xbd, 0xdd, 0x4e, 0xb7, 0xe7, 0x61, 0xdf, 0xfb, 0xc4, 0xeb,
	0xf6, 0x26, 0xba, 0xf6, 0x2a, 0x2c, 0xef, 0xf7, 0x9a, 0xb8, 0xe7, 0xef, 0xb6, 0xfd, 0xfd, 0x3d,
	0xcf, 0x6b, 0x6d, 0xdb, 0x16, 0xba, 0x02, 0x15, 0xaf, 0xbb, 0x35, 0x02, 0x15, 0x90, 0x0d, 0xe5,
	0x0c, 0x6a, 0x1e, 0x6c, 0x75, 0x76, 0xed, 0x22, 0xba, 0x06, 0x76, 0x86, 0x1c, 0xf4, 0x7a, 0x1e,
	0x6e, 0x76, 0x5b, 0x9e, 0x3d, 0xeb, 0xfc, 0x74, 0xc1, 0x2a, 0x31, 0x09, 0x44, 0x9f, 0x43, 0x79,
	0x6c, 0xe8, 0x59, 0xfa, 0x3e, 0x1e, 0x5d, 0xae, 0xde, 0x47, 0xe6, 0x23, 0x1e, 0x0b, 0x87, 0x6e,
	0xc2, 0x42, 0x24, 0xfd, 0xa3, 0x88, 0x91, 0x41, 0xb6, 0x59, 0xe6, 0x23, 0xd9, 0x4e, 0x45, 0xb4,
	0x02, 0x69, 0x81, 0x1e, 0x46, 0x83, 0x48, 0x9d, 0xe8, 0xeb, 0x2a, 0xe0, 0x33, 0xc0, 0x39, 0x86,
	0x1b, 0x17, 0xb4, 0xd5, 0x1b, 0xa6, 0xec, 0x7c, 0x01, 0x2b, 0xd3, 0xac, 0xd1, 0x6d, 0x00, 0x25,
	0x08, 0x93, 0x81, 0x88, 0x62, 0xd3, 0xab, 0x8b, 0x78, 0x04, 0x49, 0xf5, 0x7a, 0xbd, 0xf6, 0x69,
	0xbe, 0x4e, 0x0b, 0x78, 0x04, 0xd9, 0xf8, 0xbd, 0x08, 0x73, 0xe6, 0x00, 0xf4, 0x9d, 0x05, 0x95,
	0xb1, 0x59, 0x87, 0xd6, 0xa7, 0x7f, 0xc5, 0x2b, 0x1e, 0x5e, 0xf5, 0x8d, 0xcb, 0xb8, 0x98, 0xc2,
	0x75, 0xd6, 0x5e, 0xfe, 0xf1, 0xe7, 0xb7, 0x05, 0xc7, 0xb9, 0x75, 0xfa, 0x96, 0x36, 0x6e, 0x8f,
	0xd3, 0x21, 0x25, 0x72, 0xf3, 0xc7, 0xd6, 0x7d, 0xf4, 0x8d, 0x05, 0xd5, 0xf1, 0xd1, 0x85, 0xa6,
	0x1f, 0xf8, 0xca, 0xd7, 0x61, 0xfd, 0x56, 0xee, 0x33, 0xf2, 0xba, 0x76, 0x77, 0xf3, 0xd7, 0xb5,
	0x73, 0x4f, 0xf3, 0xb9, 0xeb, 0xdc, 0x9e, 0xe4, 0x43, 0xce, 0x11, 0xfa, 0xda, 0x02, 0x74, 0xbe,
	0x25, 0xd1, 0x83, 0x4b, 0xf7, 0xb0, 0x21, 0xf6, 0xf0, 0x1f, 0xf6, 0xfe, 0x9a, 0xf5, 0x9e, 0xb5,
	0x29, 0xe1, 0x4e, 0xc0, 0x87, 0xd3, 0x22, 0x6c, 0x2e, 0x99, 0xab, 0xde, 0x4b, 0xa7, 0xeb, 0x9e,
	0xf5, 0x59, 0x33, 0xb3, 0x0d, 0x79, 0xfa, 0xfc, 0x70, 0xb9, 0x08, 0x1b, 0x21, 0x65, 0x7a, 0xf6,
	0x36, 0x8c, 0x8a, 0xc4, 0x91, 0x7c, 0xe5, 0xdf, 0x9d, 0x0f, 0x8d, 0x78, 0x38, 0xa7, 0xad, 0xdf,
	0xff, 0x2b, 0x00, 0x00, 0xff, 0xff, 0x25, 0x0d, 0x06, 0x48, 0x1b, 0x0d, 0x00, 0x00,
}