- // Copyright 2022 Google LLC
- //
- // Licensed under the Apache License, Version 2.0 (the "License");
- // you may not use this file except in compliance with the License.
- // You may obtain a copy of the License at
- //
- // http://www.apache.org/licenses/LICENSE-2.0
- //
- // Unless required by applicable law or agreed to in writing, software
- // distributed under the License is distributed on an "AS IS" BASIS,
- // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- // See the License for the specific language governing permissions and
- // limitations under the License.
- syntax = "proto3";
- package google.cloud.dialogflow.v2;
- import "google/api/field_behavior.proto";
- import "google/api/resource.proto";
- import "google/protobuf/duration.proto";
- option cc_enable_arenas = true;
- option csharp_namespace = "Google.Cloud.Dialogflow.V2";
- option go_package = "google.golang.org/genproto/googleapis/cloud/dialogflow/v2;dialogflow";
- option java_multiple_files = true;
- option java_outer_classname = "AudioConfigProto";
- option java_package = "com.google.cloud.dialogflow.v2";
- option objc_class_prefix = "DF";
- option (google.api.resource_definition) = {
- type: "automl.googleapis.com/Model"
- pattern: "projects/{project}/locations/{location}/models/{model}"
- };
- option (google.api.resource_definition) = {
- type: "speech.googleapis.com/PhraseSet"
- pattern: "projects/{project}/locations/{location}/phraseSets/{phrase_set}"
- };
- // Audio encoding of the audio content sent in the conversational query request.
- // Refer to the
- // [Cloud Speech API
- // documentation](https://cloud.google.com/speech-to-text/docs/basics) for more
- // details.
- enum AudioEncoding {
- // Not specified.
- AUDIO_ENCODING_UNSPECIFIED = 0;
- // Uncompressed 16-bit signed little-endian samples (Linear PCM).
- AUDIO_ENCODING_LINEAR_16 = 1;
- // [`FLAC`](https://xiph.org/flac/documentation.html) (Free Lossless Audio
- // Codec) is the recommended encoding because it is lossless (therefore
- // recognition is not compromised) and requires only about half the
- // bandwidth of `LINEAR16`. `FLAC` stream encoding supports 16-bit and
- // 24-bit samples, however, not all fields in `STREAMINFO` are supported.
- AUDIO_ENCODING_FLAC = 2;
- // 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.
- AUDIO_ENCODING_MULAW = 3;
- // Adaptive Multi-Rate Narrowband codec. `sample_rate_hertz` must be 8000.
- AUDIO_ENCODING_AMR = 4;
- // Adaptive Multi-Rate Wideband codec. `sample_rate_hertz` must be 16000.
- AUDIO_ENCODING_AMR_WB = 5;
- // Opus encoded audio frames in Ogg container
- // ([OggOpus](https://wiki.xiph.org/OggOpus)).
- // `sample_rate_hertz` must be 16000.
- AUDIO_ENCODING_OGG_OPUS = 6;
- // Although the use of lossy encodings is not recommended, if a very low
- // bitrate encoding is required, `OGG_OPUS` is highly preferred over
- // Speex encoding. The [Speex](https://speex.org/) encoding supported by
- // Dialogflow API has a header byte in each block, as in MIME type
- // `audio/x-speex-with-header-byte`.
- // It is a variant of the RTP Speex encoding defined in
- // [RFC 5574](https://tools.ietf.org/html/rfc5574).
- // The stream is a sequence of blocks, one block per RTP packet. Each block
- // starts with a byte containing the length of the block, in bytes, followed
- // by one or more frames of Speex data, padded to an integral number of
- // bytes (octets) as specified in RFC 5574. In other words, each RTP header
- // is replaced with a single byte containing the block length. Only Speex
- // wideband is supported. `sample_rate_hertz` must be 16000.
- AUDIO_ENCODING_SPEEX_WITH_HEADER_BYTE = 7;
- }
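- // For illustration only (a hedged sketch based on the constraints noted in the
- // enum comments above): an Ogg Opus input expressed in protobuf text format
- // inside an `InputAudioConfig`, where `sample_rate_hertz` must be 16000:
- //
- //   audio_encoding: AUDIO_ENCODING_OGG_OPUS
- //   sample_rate_hertz: 16000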
- // Hints for the speech recognizer to help with recognition in a specific
- // conversation state.
- message SpeechContext {
- // Optional. A list of strings containing words and phrases that the speech
- // recognizer should recognize with higher likelihood.
- //
- // This list can be used to:
- //
- // * improve accuracy for words and phrases you expect the user to say,
- // e.g. typical commands for your Dialogflow agent
- // * add additional words to the speech recognizer vocabulary
- // * ...
- //
- // See the [Cloud Speech
- // documentation](https://cloud.google.com/speech-to-text/quotas) for usage
- // limits.
- repeated string phrases = 1;
- // Optional. Boost for this context compared to other contexts:
- //
- // * If the boost is positive, Dialogflow will increase the probability that
- // the phrases in this context are recognized over similar sounding phrases.
- // * If the boost is unspecified or non-positive, Dialogflow will not apply
- // any boost.
- //
- // Dialogflow recommends that you use boosts in the range (0, 20] and that you
- // find a value that fits your use case with binary search.
- float boost = 2;
- }
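- // A minimal `SpeechContext` sketch in protobuf text format; the phrases and
- // the boost value below are hypothetical, chosen only to illustrate the
- // recommended (0, 20] boost range:
- //
- //   phrases: "check my order status"
- //   phrases: "talk to an agent"
- //   boost: 10.0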
- // Variant of the specified [Speech model][google.cloud.dialogflow.v2.InputAudioConfig.model] to use.
- //
- // See the [Cloud Speech
- // documentation](https://cloud.google.com/speech-to-text/docs/enhanced-models)
- // for which models have different variants. For example, the "phone_call" model
- // has both a standard and an enhanced variant. When you use an enhanced model,
- // you will generally receive higher quality results than for a standard model.
- enum SpeechModelVariant {
- // No model variant specified. In this case Dialogflow defaults to
- // USE_BEST_AVAILABLE.
- SPEECH_MODEL_VARIANT_UNSPECIFIED = 0;
- // Use the best available variant of the [Speech
- // model][InputAudioConfig.model] that the caller is eligible for.
- //
- // Please see the [Dialogflow
- // docs](https://cloud.google.com/dialogflow/docs/data-logging) for
- // how to make your project eligible for enhanced models.
- USE_BEST_AVAILABLE = 1;
- // Use standard model variant even if an enhanced model is available. See the
- // [Cloud Speech
- // documentation](https://cloud.google.com/speech-to-text/docs/enhanced-models)
- // for details about enhanced models.
- USE_STANDARD = 2;
- // Use an enhanced model variant:
- //
- // * If an enhanced variant does not exist for the given
- // [model][google.cloud.dialogflow.v2.InputAudioConfig.model] and request language, Dialogflow falls
- // back to the standard variant.
- //
- // The [Cloud Speech
- // documentation](https://cloud.google.com/speech-to-text/docs/enhanced-models)
- // describes which models have enhanced variants.
- //
- // * If the API caller isn't eligible for enhanced models, Dialogflow returns
- // an error. Please see the [Dialogflow
- // docs](https://cloud.google.com/dialogflow/docs/data-logging)
- // for how to make your project eligible.
- USE_ENHANCED = 3;
- }
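- // A hedged sketch of how a variant pairs with a model inside an
- // `InputAudioConfig` (protobuf text format); the "phone_call" model name is
- // taken from the Cloud Speech model list referenced above:
- //
- //   model: "phone_call"
- //   model_variant: USE_ENHANCED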
- // Information for a word recognized by the speech recognizer.
- message SpeechWordInfo {
- // The word this info is for.
- string word = 3;
- // Time offset relative to the beginning of the audio that corresponds to the
- // start of the spoken word. This is an experimental feature and the accuracy
- // of the time offset can vary.
- google.protobuf.Duration start_offset = 1;
- // Time offset relative to the beginning of the audio that corresponds to the
- // end of the spoken word. This is an experimental feature and the accuracy of
- // the time offset can vary.
- google.protobuf.Duration end_offset = 2;
- // The Speech confidence between 0.0 and 1.0 for this word. A higher number
- // indicates an estimated greater likelihood that the recognized word is
- // correct. The default of 0.0 is a sentinel value indicating that confidence
- // was not set.
- //
- // This field is not guaranteed to be fully stable over time for the same
- // audio input. Users should also not rely on it to always be provided.
- float confidence = 4;
- }
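- // An illustrative `SpeechWordInfo` in protobuf text format; the offsets use
- // the standard `google.protobuf.Duration` fields and all values below are
- // made up:
- //
- //   word: "hello"
- //   start_offset { seconds: 1 nanos: 200000000 }
- //   end_offset { seconds: 1 nanos: 700000000 }
- //   confidence: 0.92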
- // Instructs the speech recognizer how to process the audio content.
- message InputAudioConfig {
- // Required. Audio encoding of the audio content to process.
- AudioEncoding audio_encoding = 1;
- // Required. Sample rate (in Hertz) of the audio content sent in the query.
- // Refer to
- // [Cloud Speech API
- // documentation](https://cloud.google.com/speech-to-text/docs/basics) for
- // more details.
- int32 sample_rate_hertz = 2;
- // Required. The language of the supplied audio. Dialogflow does not do
- // translations. See [Language
- // Support](https://cloud.google.com/dialogflow/docs/reference/language)
- // for a list of the currently supported language codes. Note that queries in
- // the same session do not necessarily need to specify the same language.
- string language_code = 3;
- // If `true`, Dialogflow returns [SpeechWordInfo][google.cloud.dialogflow.v2.SpeechWordInfo] in
- // [StreamingRecognitionResult][google.cloud.dialogflow.v2.StreamingRecognitionResult] with information about the recognized speech
- // words, e.g. start and end time offsets. If false or unspecified, Speech
- // doesn't return any word-level information.
- bool enable_word_info = 13;
- // A list of strings containing words and phrases that the speech
- // recognizer should recognize with higher likelihood.
- //
- // See [the Cloud Speech
- // documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
- // for more details.
- //
- // This field is deprecated. Please use [speech_contexts]() instead. If you
- // specify both [phrase_hints]() and [speech_contexts](), Dialogflow will
- // treat the [phrase_hints]() as a single additional [SpeechContext]().
- repeated string phrase_hints = 4 [deprecated = true];
- // Context information to assist speech recognition.
- //
- // See [the Cloud Speech
- // documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
- // for more details.
- repeated SpeechContext speech_contexts = 11;
- // Which Speech model to select for the given request. Select the
- // model best suited to your domain to get the best results. If a model is not
- // explicitly specified, then we auto-select a model based on the parameters
- // in the InputAudioConfig.
- // If an enhanced speech model is enabled for the agent and an enhanced
- // version of the specified model for the language does not exist, then the
- // speech is recognized using the standard version of the specified model.
- // Refer to
- // [Cloud Speech API
- // documentation](https://cloud.google.com/speech-to-text/docs/basics#select-model)
- // for more details.
- string model = 7;
- // Which variant of the [Speech model][google.cloud.dialogflow.v2.InputAudioConfig.model] to use.
- SpeechModelVariant model_variant = 10;
- // If `false` (default), recognition does not cease until the
- // client closes the stream.
- // If `true`, the recognizer will detect a single spoken utterance in input
- // audio. Recognition ceases when it detects the audio's voice has
- // stopped or paused. In this case, once a detected intent is received, the
- // client should close the stream and start a new request with a new stream as
- // needed.
- // Note: This setting is relevant only for streaming methods.
- // Note: When specified, InputAudioConfig.single_utterance takes precedence
- // over StreamingDetectIntentRequest.single_utterance.
- bool single_utterance = 8;
- // Only used in [Participants.AnalyzeContent][google.cloud.dialogflow.v2.Participants.AnalyzeContent] and
- // [Participants.StreamingAnalyzeContent][google.cloud.dialogflow.v2.Participants.StreamingAnalyzeContent].
- // If `false` and recognition doesn't return any result, a
- // `NO_SPEECH_RECOGNIZED` event is triggered and sent to the Dialogflow agent.
- bool disable_no_speech_recognized_event = 14;
- }
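- // A minimal `InputAudioConfig` sketch in protobuf text format for 16 kHz
- // linear PCM English audio; the phrase, boost, and flag values are
- // illustrative assumptions, not required settings:
- //
- //   audio_encoding: AUDIO_ENCODING_LINEAR_16
- //   sample_rate_hertz: 16000
- //   language_code: "en-US"
- //   enable_word_info: true
- //   speech_contexts { phrases: "order status" boost: 10.0 }
- //   model_variant: USE_BEST_AVAILABLE
- //   single_utterance: true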
- // Gender of the voice as described in
- // [SSML voice element](https://www.w3.org/TR/speech-synthesis11/#edef_voice).
- enum SsmlVoiceGender {
- // An unspecified gender, which means that the client doesn't care which
- // gender the selected voice will have.
- SSML_VOICE_GENDER_UNSPECIFIED = 0;
- // A male voice.
- SSML_VOICE_GENDER_MALE = 1;
- // A female voice.
- SSML_VOICE_GENDER_FEMALE = 2;
- // A gender-neutral voice.
- SSML_VOICE_GENDER_NEUTRAL = 3;
- }
- // Description of which voice to use for speech synthesis.
- message VoiceSelectionParams {
- // Optional. The name of the voice. If not set, the service will choose a
- // voice based on the other parameters such as language_code and
- // [ssml_gender][google.cloud.dialogflow.v2.VoiceSelectionParams.ssml_gender].
- string name = 1;
- // Optional. The preferred gender of the voice. If not set, the service will
- // choose a voice based on the other parameters such as language_code and
- // [name][google.cloud.dialogflow.v2.VoiceSelectionParams.name]. Note that this is only a preference, not a requirement. If a
- // voice of the appropriate gender is not available, the synthesizer should
- // substitute a voice with a different gender rather than failing the request.
- SsmlVoiceGender ssml_gender = 2;
- }
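- // An illustrative `VoiceSelectionParams` in protobuf text format; the voice
- // name is a placeholder, actual names come from the Cloud Text-to-Speech
- // voice list:
- //
- //   name: "en-US-Wavenet-D"
- //   ssml_gender: SSML_VOICE_GENDER_MALE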
- // Configuration of how speech should be synthesized.
- message SynthesizeSpeechConfig {
- // Optional. Speaking rate/speed, in the range [0.25, 4.0]. 1.0 is the normal
- // native speed supported by the specific voice. 2.0 is twice as fast, and
- // 0.5 is half as fast. If unset (0.0), defaults to the native 1.0 speed. Any
- // value < 0.25 or > 4.0 will return an error.
- double speaking_rate = 1;
- // Optional. Speaking pitch, in the range [-20.0, 20.0]. 20 means increase 20
- // semitones from the original pitch. -20 means decrease 20 semitones from the
- // original pitch.
- double pitch = 2;
- // Optional. Volume gain (in dB) of the normal native volume supported by the
- // specific voice, in the range [-96.0, 16.0]. If unset, or set to a value of
- // 0.0 (dB), will play at normal native signal amplitude. A value of -6.0 (dB)
- // will play at approximately half the amplitude of the normal native signal
- // amplitude. A value of +6.0 (dB) will play at approximately twice the
- // amplitude of the normal native signal amplitude. We strongly recommend not
- // to exceed +10 (dB) as there's usually no effective increase in loudness for
- // any value greater than that.
- double volume_gain_db = 3;
- // Optional. An identifier which selects 'audio effects' profiles that are
- // applied on (post synthesized) text to speech. Effects are applied on top of
- // each other in the order they are given.
- repeated string effects_profile_id = 5;
- // Optional. The desired voice of the synthesized audio.
- VoiceSelectionParams voice = 4;
- }
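- // A hedged `SynthesizeSpeechConfig` sketch in protobuf text format; the
- // effects profile ID is assumed to come from the Cloud Text-to-Speech audio
- // profile list and all numeric values are illustrative:
- //
- //   speaking_rate: 1.25
- //   pitch: -2.0
- //   volume_gain_db: 0.0
- //   effects_profile_id: "telephony-class-application"
- //   voice { ssml_gender: SSML_VOICE_GENDER_FEMALE }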
- // Audio encoding of the output audio format in Text-To-Speech.
- enum OutputAudioEncoding {
- // Not specified.
- OUTPUT_AUDIO_ENCODING_UNSPECIFIED = 0;
- // Uncompressed 16-bit signed little-endian samples (Linear PCM).
- // Audio content returned as LINEAR16 also contains a WAV header.
- OUTPUT_AUDIO_ENCODING_LINEAR_16 = 1;
- // MP3 audio at 32kbps.
- OUTPUT_AUDIO_ENCODING_MP3 = 2;
- // MP3 audio at 64kbps.
- OUTPUT_AUDIO_ENCODING_MP3_64_KBPS = 4;
- // Opus encoded audio wrapped in an ogg container. The result will be a
- // file which can be played natively on Android, and in browsers (at least
- // Chrome and Firefox). The quality of the encoding is considerably higher
- // than MP3 while using approximately the same bitrate.
- OUTPUT_AUDIO_ENCODING_OGG_OPUS = 3;
- // 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.
- OUTPUT_AUDIO_ENCODING_MULAW = 5;
- }
- // Instructs the speech synthesizer on how to generate the output audio content.
- // If this audio config is supplied in a request, it overrides all existing
- // text-to-speech settings applied to the agent.
- message OutputAudioConfig {
- // Required. Audio encoding of the synthesized audio content.
- OutputAudioEncoding audio_encoding = 1 [(google.api.field_behavior) = REQUIRED];
- // The synthesis sample rate (in hertz) for this audio. If not
- // provided, then the synthesizer will use the default sample rate based on
- // the audio encoding. If this is different from the voice's natural sample
- // rate, then the synthesizer will honor this request by converting to the
- // desired sample rate (which might result in worse audio quality).
- int32 sample_rate_hertz = 2;
- // Configuration of how speech should be synthesized.
- SynthesizeSpeechConfig synthesize_speech_config = 3;
- }
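- // An illustrative `OutputAudioConfig` in protobuf text format; the sample
- // rate is an assumption and may be converted by the synthesizer as described
- // above:
- //
- //   audio_encoding: OUTPUT_AUDIO_ENCODING_OGG_OPUS
- //   sample_rate_hertz: 24000
- //   synthesize_speech_config { speaking_rate: 1.0 }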
- // [DTMF](https://en.wikipedia.org/wiki/Dual-tone_multi-frequency_signaling)
- // digit in Telephony Gateway.
- enum TelephonyDtmf {
- // Not specified. This value may be used to indicate an absent digit.
- TELEPHONY_DTMF_UNSPECIFIED = 0;
- // Number: '1'.
- DTMF_ONE = 1;
- // Number: '2'.
- DTMF_TWO = 2;
- // Number: '3'.
- DTMF_THREE = 3;
- // Number: '4'.
- DTMF_FOUR = 4;
- // Number: '5'.
- DTMF_FIVE = 5;
- // Number: '6'.
- DTMF_SIX = 6;
- // Number: '7'.
- DTMF_SEVEN = 7;
- // Number: '8'.
- DTMF_EIGHT = 8;
- // Number: '9'.
- DTMF_NINE = 9;
- // Number: '0'.
- DTMF_ZERO = 10;
- // Letter: 'A'.
- DTMF_A = 11;
- // Letter: 'B'.
- DTMF_B = 12;
- // Letter: 'C'.
- DTMF_C = 13;
- // Letter: 'D'.
- DTMF_D = 14;
- // Asterisk/star: '*'.
- DTMF_STAR = 15;
- // Pound/diamond/hash/square/gate/octothorpe: '#'.
- DTMF_POUND = 16;
- }
- // A wrapper of repeated TelephonyDtmf digits.
- message TelephonyDtmfEvents {
- // A sequence of TelephonyDtmf digits.
- repeated TelephonyDtmf dtmf_events = 1;
- }
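- // An illustrative `TelephonyDtmfEvents` value in protobuf text format,
- // encoding the key sequence "1 2 #":
- //
- //   dtmf_events: DTMF_ONE
- //   dtmf_events: DTMF_TWO
- //   dtmf_events: DTMF_POUND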
- // Configures speech transcription for [ConversationProfile][google.cloud.dialogflow.v2.ConversationProfile].
- message SpeechToTextConfig {
- // The speech model variant used in speech-to-text. Both
- // `SPEECH_MODEL_VARIANT_UNSPECIFIED` and `USE_BEST_AVAILABLE` are treated as
- // `USE_ENHANCED`. It can be overridden in [AnalyzeContentRequest][google.cloud.dialogflow.v2.AnalyzeContentRequest] and
- // [StreamingAnalyzeContentRequest][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest] requests.
- // If the enhanced model variant is specified and an enhanced
- // version of the specified model for the language does not exist, then an
- // error is returned.
- SpeechModelVariant speech_model_variant = 1;
- // Which Speech model to select. Select the model best suited to your domain
- // to get the best results. If a model is not explicitly specified, then a default
- // model is used.
- // Refer to
- // [Cloud Speech API
- // documentation](https://cloud.google.com/speech-to-text/docs/basics#select-model)
- // for more details.
- string model = 2;
- }
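- // An illustrative `SpeechToTextConfig` in protobuf text format; the
- // "phone_call" model name is an assumption taken from the Cloud Speech model
- // list referenced above:
- //
- //   speech_model_variant: USE_ENHANCED
- //   model: "phone_call"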