- // Copyright 2022 Google LLC
- //
- // Licensed under the Apache License, Version 2.0 (the "License");
- // you may not use this file except in compliance with the License.
- // You may obtain a copy of the License at
- //
- // http://www.apache.org/licenses/LICENSE-2.0
- //
- // Unless required by applicable law or agreed to in writing, software
- // distributed under the License is distributed on an "AS IS" BASIS,
- // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- // See the License for the specific language governing permissions and
- // limitations under the License.
- syntax = "proto3";
- package google.cloud.dialogflow.cx.v3;
- import "google/api/annotations.proto";
- import "google/api/client.proto";
- import "google/api/field_behavior.proto";
- import "google/api/resource.proto";
- import "google/cloud/dialogflow/cx/v3/audio_config.proto";
- import "google/cloud/dialogflow/cx/v3/intent.proto";
- import "google/cloud/dialogflow/cx/v3/page.proto";
- import "google/cloud/dialogflow/cx/v3/response_message.proto";
- import "google/cloud/dialogflow/cx/v3/session_entity_type.proto";
- import "google/protobuf/duration.proto";
- import "google/protobuf/struct.proto";
- import "google/rpc/status.proto";
- import "google/type/latlng.proto";
- option cc_enable_arenas = true;
- option csharp_namespace = "Google.Cloud.Dialogflow.Cx.V3";
- option go_package = "google.golang.org/genproto/googleapis/cloud/dialogflow/cx/v3;cx";
- option java_multiple_files = true;
- option java_outer_classname = "SessionProto";
- option java_package = "com.google.cloud.dialogflow.cx.v3";
- option objc_class_prefix = "DF";
- option ruby_package = "Google::Cloud::Dialogflow::CX::V3";
- option (google.api.resource_definition) = {
- type: "dialogflow.googleapis.com/Session"
- pattern: "projects/{project}/locations/{location}/agents/{agent}/sessions/{session}"
- pattern: "projects/{project}/locations/{location}/agents/{agent}/environments/{environment}/sessions/{session}"
- };
- // A session represents an interaction with a user. You retrieve user input
- // and pass it to the [DetectIntent][google.cloud.dialogflow.cx.v3.Sessions.DetectIntent] method to determine
- // user intent and respond.
- service Sessions {
- option (google.api.default_host) = "dialogflow.googleapis.com";
- option (google.api.oauth_scopes) =
- "https://www.googleapis.com/auth/cloud-platform,"
- "https://www.googleapis.com/auth/dialogflow";
- // Processes a natural language query and returns structured, actionable data
- // as a result. This method is not idempotent, because it may cause session
- // entity types to be updated, which in turn might affect results of future
- // queries.
- //
- // Note: Always use agent versions for production traffic.
- // See [Versions and
- // environments](https://cloud.google.com/dialogflow/cx/docs/concept/version).
- rpc DetectIntent(DetectIntentRequest) returns (DetectIntentResponse) {
- option (google.api.http) = {
- post: "/v3/{session=projects/*/locations/*/agents/*/sessions/*}:detectIntent"
- body: "*"
- additional_bindings {
- post: "/v3/{session=projects/*/locations/*/agents/*/environments/*/sessions/*}:detectIntent"
- body: "*"
- }
- };
- }
- // Processes a natural language query in audio format in a streaming fashion
- // and returns structured, actionable data as a result. This method is only
- // available via the gRPC API (not REST).
- //
- // Note: Always use agent versions for production traffic.
- // See [Versions and
- // environments](https://cloud.google.com/dialogflow/cx/docs/concept/version).
- rpc StreamingDetectIntent(stream StreamingDetectIntentRequest) returns (stream StreamingDetectIntentResponse) {
- }
- // Returns preliminary intent match results without changing the session
- // status.
- rpc MatchIntent(MatchIntentRequest) returns (MatchIntentResponse) {
- option (google.api.http) = {
- post: "/v3/{session=projects/*/locations/*/agents/*/sessions/*}:matchIntent"
- body: "*"
- additional_bindings {
- post: "/v3/{session=projects/*/locations/*/agents/*/environments/*/sessions/*}:matchIntent"
- body: "*"
- }
- };
- }
- // Fulfills a matched intent returned by [MatchIntent][google.cloud.dialogflow.cx.v3.Sessions.MatchIntent].
- // Must be called after [MatchIntent][google.cloud.dialogflow.cx.v3.Sessions.MatchIntent], with input from
- // [MatchIntentResponse][google.cloud.dialogflow.cx.v3.MatchIntentResponse]. Otherwise, the behavior is undefined.
- rpc FulfillIntent(FulfillIntentRequest) returns (FulfillIntentResponse) {
- option (google.api.http) = {
- post: "/v3/{match_intent_request.session=projects/*/locations/*/agents/*/sessions/*}:fulfillIntent"
- body: "*"
- additional_bindings {
- post: "/v3/{match_intent_request.session=projects/*/locations/*/agents/*/environments/*/sessions/*}:fulfillIntent"
- body: "*"
- }
- };
- }
- }
- // The request to detect the user's intent.
- message DetectIntentRequest {
- // Required. The name of the session this query is sent to.
- // Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent
- // ID>/sessions/<Session ID>` or `projects/<Project ID>/locations/<Location
- // ID>/agents/<Agent ID>/environments/<Environment ID>/sessions/<Session ID>`.
- // If `Environment ID` is not specified, we assume the default 'draft'
- // environment.
- // It's up to the API caller to choose an appropriate `Session ID`. It can be
- // a random number or some type of session identifier (preferably hashed).
- // The length of the `Session ID` must not exceed 36 characters.
- //
- // For more information, see the [sessions
- // guide](https://cloud.google.com/dialogflow/cx/docs/concept/session).
- //
- // Note: Always use agent versions for production traffic.
- // See [Versions and
- // environments](https://cloud.google.com/dialogflow/cx/docs/concept/version).
- string session = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "dialogflow.googleapis.com/Session"
- }
- ];
- // The parameters of this query.
- QueryParameters query_params = 2;
- // Required. The input specification.
- QueryInput query_input = 3 [(google.api.field_behavior) = REQUIRED];
- // Instructs the speech synthesizer how to generate the output audio.
- OutputAudioConfig output_audio_config = 4;
- }
- // The message returned from the DetectIntent method.
- message DetectIntentResponse {
- // Represents different DetectIntentResponse types.
- enum ResponseType {
- // Not specified. This should never happen.
- RESPONSE_TYPE_UNSPECIFIED = 0;
- // Partial response. For example, aggregated responses from a Fulfillment that
- // enables `return_partial_response` can be returned as partial responses.
- // WARNING: partial response is not eligible for barge-in.
- PARTIAL = 1;
- // Final response.
- FINAL = 2;
- }
- // Output only. The unique identifier of the response. It can be used to
- // locate a response in the training example set or for reporting issues.
- string response_id = 1;
- // The result of the conversational query.
- QueryResult query_result = 2;
- // The audio data bytes encoded as specified in the request.
- // Note: The output audio is generated based on the values of default platform
- // text responses found in the
- // [`query_result.response_messages`][google.cloud.dialogflow.cx.v3.QueryResult.response_messages] field. If
- // multiple default text responses exist, they will be concatenated when
- // generating audio. If no default platform text responses exist, the
- // generated audio content will be empty.
- //
- // In some scenarios, multiple output audio fields may be present in the
- // response structure. In these cases, only the top-most-level audio output
- // has content.
- bytes output_audio = 4;
- // The config used by the speech synthesizer to generate the output audio.
- OutputAudioConfig output_audio_config = 5;
- // Response type.
- ResponseType response_type = 6;
- // Indicates whether the partial response can be cancelled when a later
- // response arrives. e.g. if the agent specified some music as partial
- // response, it can be cancelled.
- bool allow_cancellation = 7;
- }
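As a usage sketch of the request/response pair above: the snippet below assumes the generated Python client library (`google-cloud-dialogflow-cx`, imported as `dialogflowcx_v3`); the project, agent, and session IDs are placeholders, not values from this file.

```python
# Minimal sketch of a synchronous DetectIntent call with text input.
# Assumes the generated Python client (google-cloud-dialogflow-cx);
# all IDs are placeholders.
from google.cloud import dialogflowcx_v3

client = dialogflowcx_v3.SessionsClient()

# Session name built per the documented format (draft environment implied).
session = "projects/my-project/locations/global/agents/my-agent/sessions/my-session-id"

response = client.detect_intent(
    request=dialogflowcx_v3.DetectIntentRequest(
        session=session,
        query_input=dialogflowcx_v3.QueryInput(
            text=dialogflowcx_v3.TextInput(text="I want to book a flight"),
            language_code="en",
        ),
    )
)
print(response.response_id)
print(response.query_result.match.confidence)
```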
- // The top-level message sent by the client to the
- // [Sessions.StreamingDetectIntent][google.cloud.dialogflow.cx.v3.Sessions.StreamingDetectIntent] method.
- //
- // Multiple request messages should be sent in order:
- //
- // 1. The first message must contain
- // [session][google.cloud.dialogflow.cx.v3.StreamingDetectIntentRequest.session],
- // [query_input][google.cloud.dialogflow.cx.v3.StreamingDetectIntentRequest.query_input] plus optionally
- // [query_params][google.cloud.dialogflow.cx.v3.StreamingDetectIntentRequest.query_params]. If the client
- // wants to receive an audio response, it should also contain
- // [output_audio_config][google.cloud.dialogflow.cx.v3.StreamingDetectIntentRequest.output_audio_config].
- //
- // 2. If [query_input][google.cloud.dialogflow.cx.v3.StreamingDetectIntentRequest.query_input] was set to
- // [query_input.audio.config][google.cloud.dialogflow.cx.v3.AudioInput.config], all subsequent messages
- // must contain [query_input.audio.audio][google.cloud.dialogflow.cx.v3.AudioInput.audio] to continue with
- // Speech recognition.
- // If you decide instead to detect an intent from text
- // input after you have already started Speech recognition, please send a
- // message with [query_input.text][google.cloud.dialogflow.cx.v3.QueryInput.text].
- //
- // However, note that:
- //
- // * Dialogflow will bill you for the audio duration so far.
- // * Dialogflow discards all Speech recognition results in favor of the
- // input text.
- // * Dialogflow will use the language code from the first message.
- //
- // After you have sent all input, you must half-close or abort the request stream.
- message StreamingDetectIntentRequest {
- // The name of the session this query is sent to.
- // Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent
- // ID>/sessions/<Session ID>` or `projects/<Project ID>/locations/<Location
- // ID>/agents/<Agent ID>/environments/<Environment ID>/sessions/<Session ID>`.
- // If `Environment ID` is not specified, we assume the default 'draft'
- // environment.
- // It's up to the API caller to choose an appropriate `Session ID`. It can be
- // a random number or some type of session identifier (preferably hashed).
- // The length of the `Session ID` must not exceed 36 characters.
- // Note: session must be set in the first request.
- //
- // For more information, see the [sessions
- // guide](https://cloud.google.com/dialogflow/cx/docs/concept/session).
- //
- // Note: Always use agent versions for production traffic.
- // See [Versions and
- // environments](https://cloud.google.com/dialogflow/cx/docs/concept/version).
- string session = 1 [(google.api.resource_reference) = {
- type: "dialogflow.googleapis.com/Session"
- }];
- // The parameters of this query.
- QueryParameters query_params = 2;
- // Required. The input specification.
- QueryInput query_input = 3 [(google.api.field_behavior) = REQUIRED];
- // Instructs the speech synthesizer how to generate the output audio.
- OutputAudioConfig output_audio_config = 4;
- // Enable partial detect intent response. If this flag is not enabled, the
- // response stream still contains only one final `DetectIntentResponse` even
- // if some `Fulfillment`s in the agent have been configured to return partial
- // responses.
- bool enable_partial_response = 5;
- }
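The ordering rules above map onto a request generator: one header message carrying the session and the audio config, then audio-only messages. A minimal sketch, assuming the same Python client; `audio_chunks` is a placeholder iterable of raw audio bytes.

```python
from google.cloud import dialogflowcx_v3

def streaming_requests(session, audio_chunks):
    # 1. First message: session plus a query_input carrying only the audio config.
    yield dialogflowcx_v3.StreamingDetectIntentRequest(
        session=session,
        query_input=dialogflowcx_v3.QueryInput(
            audio=dialogflowcx_v3.AudioInput(
                config=dialogflowcx_v3.InputAudioConfig(
                    audio_encoding=dialogflowcx_v3.AudioEncoding.AUDIO_ENCODING_LINEAR_16,
                    sample_rate_hertz=16000,
                )
            ),
            language_code="en",
        ),
        enable_partial_response=True,
    )
    # 2. Subsequent messages: audio content only.
    for chunk in audio_chunks:
        yield dialogflowcx_v3.StreamingDetectIntentRequest(
            query_input=dialogflowcx_v3.QueryInput(
                audio=dialogflowcx_v3.AudioInput(audio=chunk),
                language_code="en",
            )
        )
    # Exhausting the generator lets the client library half-close the stream.
```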
- // The top-level message returned from the
- // [StreamingDetectIntent][google.cloud.dialogflow.cx.v3.Sessions.StreamingDetectIntent] method.
- //
- // Multiple response messages (N) can be returned in order.
- //
- // The first (N-1) responses set either the `recognition_result` or
- // `detect_intent_response` field, depending on the request:
- //
- // * If the `StreamingDetectIntentRequest.query_input.audio` field was
- // set, and the `StreamingDetectIntentRequest.enable_partial_response`
- // field was false, the `recognition_result` field is populated for each
- // of the (N-1) responses.
- // See the [StreamingRecognitionResult][google.cloud.dialogflow.cx.v3.StreamingRecognitionResult] message for details
- // about the result message sequence.
- //
- // * If the `StreamingDetectIntentRequest.enable_partial_response` field was
- // true, the `detect_intent_response` field is populated for each
- // of the (N-1) responses, where 1 <= N <= 4.
- // These responses set the [DetectIntentResponse.response_type][google.cloud.dialogflow.cx.v3.DetectIntentResponse.response_type] field
- // to `PARTIAL`.
- //
- // For the final Nth response message, the `detect_intent_response` is fully
- // populated, and [DetectIntentResponse.response_type][google.cloud.dialogflow.cx.v3.DetectIntentResponse.response_type] is set to `FINAL`.
- message StreamingDetectIntentResponse {
- // The output response.
- oneof response {
- // The result of speech recognition.
- StreamingRecognitionResult recognition_result = 1;
- // The response from detect intent.
- DetectIntentResponse detect_intent_response = 2;
- }
- }
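On the receiving side, each streamed response carries either a recognition result or a (partial or final) detect-intent response. A sketch that continues the assumptions above (`client`, `session`, `streaming_requests`, and `audio_chunks` come from the earlier sketches):

```python
from google.cloud import dialogflowcx_v3

# Consume the response stream; keep the FINAL detect-intent response.
responses = client.streaming_detect_intent(
    requests=streaming_requests(session, audio_chunks)
)

final = None
for response in responses:
    # Interim and finalized speech recognition results.
    if response.recognition_result.transcript:
        print("transcript:", response.recognition_result.transcript)
    # Partial responses carry response_type == PARTIAL; keep only the FINAL one.
    if (
        response.detect_intent_response.response_type
        == dialogflowcx_v3.DetectIntentResponse.ResponseType.FINAL
    ):
        final = response.detect_intent_response
```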
- // Contains a speech recognition result corresponding to a portion of the audio
- // that is currently being processed or an indication that this is the end
- // of the single requested utterance.
- //
- // While end-user audio is being processed, Dialogflow sends a series of
- // results. Each result may contain a `transcript` value. A transcript
- // represents a portion of the utterance. While the recognizer is processing
- // audio, transcript values may be interim values or finalized values.
- // Once a transcript is finalized, the `is_final` value is set to true and
- // processing continues for the next transcript.
- //
- // If `StreamingDetectIntentRequest.query_input.audio.config.single_utterance`
- // was true, and the recognizer has completed processing audio,
- // the `message_type` value is set to `END_OF_SINGLE_UTTERANCE` and the
- // following (last) result contains the last finalized transcript.
- //
- // The complete end-user utterance is determined by concatenating the
- // finalized transcript values received for the series of results.
- //
- // In the following example, single utterance is enabled. In the case where
- // single utterance is not enabled, result 7 would not occur.
- //
- // ```
- // Num | transcript | message_type | is_final
- // --- | ----------------------- | ----------------------- | --------
- // 1 | "tube" | TRANSCRIPT | false
- // 2 | "to be a" | TRANSCRIPT | false
- // 3 | "to be" | TRANSCRIPT | false
- // 4 | "to be or not to be" | TRANSCRIPT | true
- // 5 | "that's" | TRANSCRIPT | false
- // 6 | "that is | TRANSCRIPT | false
- // 7 | unset | END_OF_SINGLE_UTTERANCE | unset
- // 8 | " that is the question" | TRANSCRIPT | true
- // ```
- //
- // Concatenating the finalized transcripts with `is_final` set to true,
- // the complete utterance becomes "to be or not to be that is the question".
- message StreamingRecognitionResult {
- // Type of the response message.
- enum MessageType {
- // Not specified. Should never be used.
- MESSAGE_TYPE_UNSPECIFIED = 0;
- // Message contains a (possibly partial) transcript.
- TRANSCRIPT = 1;
- // Event indicates that the server has detected the end of the user's speech
- // utterance and expects no additional speech. Therefore, the server will
- // not process additional audio (although it may subsequently return
- // additional results). The client should stop sending additional audio
- // data, half-close the gRPC connection, and wait for any additional results
- // until the server closes the gRPC connection. This message is only sent if
- // [`single_utterance`][google.cloud.dialogflow.cx.v3.InputAudioConfig.single_utterance] was set to
- // `true`, and is not used otherwise.
- END_OF_SINGLE_UTTERANCE = 2;
- }
- // Type of the result message.
- MessageType message_type = 1;
- // Transcript text representing the words that the user spoke.
- // Populated if and only if `message_type` = `TRANSCRIPT`.
- string transcript = 2;
- // If `false`, the `StreamingRecognitionResult` represents an
- // interim result that may change. If `true`, the recognizer will not return
- // any further hypotheses about this piece of the audio. May only be populated
- // for `message_type` = `TRANSCRIPT`.
- bool is_final = 3;
- // The Speech confidence between 0.0 and 1.0 for the current portion of audio.
- // A higher number indicates an estimated greater likelihood that the
- // recognized words are correct. The default of 0.0 is a sentinel value
- // indicating that confidence was not set.
- //
- // This field is typically only provided if `is_final` is true and you should
- // not rely on it being accurate or even set.
- float confidence = 4;
- // An estimate of the likelihood that the speech recognizer will
- // not change its guess about this interim recognition result:
- // * If the value is unspecified or 0.0, Dialogflow didn't compute the
- // stability. In particular, Dialogflow will only provide stability for
- // `TRANSCRIPT` results with `is_final = false`.
- // * Otherwise, the value is in (0.0, 1.0] where 0.0 means completely
- // unstable and 1.0 means completely stable.
- float stability = 6;
- // Word-specific information for the words recognized by Speech in
- // [transcript][google.cloud.dialogflow.cx.v3.StreamingRecognitionResult.transcript]. Populated if and only if `message_type` = `TRANSCRIPT` and
- // [InputAudioConfig.enable_word_info] is set.
- repeated SpeechWordInfo speech_word_info = 7;
- // Time offset of the end of this Speech recognition result relative to the
- // beginning of the audio. Only populated for `message_type` =
- // `TRANSCRIPT`.
- google.protobuf.Duration speech_end_offset = 8;
- // Detected language code for the transcript.
- string language_code = 10;
- }
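Following the table above, the complete utterance is recovered by keeping only transcripts flagged `is_final`, and `END_OF_SINGLE_UTTERANCE` is the signal to stop sending audio. A standalone sketch over the same `responses` stream and client assumptions:

```python
from google.cloud import dialogflowcx_v3

finalized = []
for response in responses:  # `responses` from the streaming sketch above
    result = response.recognition_result
    end_of_utterance = (
        dialogflowcx_v3.StreamingRecognitionResult.MessageType.END_OF_SINGLE_UTTERANCE
    )
    if result.message_type == end_of_utterance:
        # The server will not process further audio: stop sending, half-close,
        # and keep reading any remaining results.
        continue
    if result.is_final:
        finalized.append(result.transcript)

# With the example table this yields "to be or not to be that is the question"
# (the last transcript already carries its leading space).
utterance = "".join(finalized)
```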
- // Represents the parameters of a conversational query.
- message QueryParameters {
- // The time zone of this conversational query from the [time zone
- // database](https://www.iana.org/time-zones), e.g., America/New_York,
- // Europe/Paris. If not provided, the time zone specified in the agent is
- // used.
- string time_zone = 1;
- // The geo location of this conversational query.
- google.type.LatLng geo_location = 2;
- // Additional session entity types to replace or extend developer entity types
- // with. The entity synonyms apply to all languages and persist for the
- // session of this query.
- repeated SessionEntityType session_entity_types = 3;
- // This field can be used to pass custom data into the webhook associated with
- // the agent. Arbitrary JSON objects are supported.
- // Some integrations that query a Dialogflow agent may provide additional
- // information in the payload.
- // In particular, for the Dialogflow Phone Gateway integration, this field has
- // the form:
- // ```
- // {
- // "telephony": {
- // "caller_id": "+18558363987"
- // }
- // }
- // ```
- google.protobuf.Struct payload = 4;
- // Additional parameters to be put into [session
- // parameters][SessionInfo.parameters]. To remove a
- // parameter from the session, clients should explicitly set the parameter
- // value to null.
- //
- // You can reference the session parameters in the agent with the following
- // format: $session.params.parameter-id.
- //
- // Depending on your protocol or client library language, this is a
- // map, associative array, symbol table, dictionary, or JSON object
- // composed of a collection of (MapKey, MapValue) pairs:
- //
- // * MapKey type: string
- // * MapKey value: parameter name
- // * MapValue type: If parameter's entity type is a composite entity then use
- // map, otherwise, depending on the parameter value type, it could be one of
- // string, number, boolean, null, list or map.
- // * MapValue value: If parameter's entity type is a composite entity then use
- // map from composite entity property names to property values, otherwise,
- // use parameter value.
- google.protobuf.Struct parameters = 5;
- // The unique identifier of the [page][google.cloud.dialogflow.cx.v3.Page] to override the [current
- // page][QueryResult.current_page] in the session.
- // Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent
- // ID>/flows/<Flow ID>/pages/<Page ID>`.
- //
- // If `current_page` is specified, the previous state of the session will be
- // ignored by Dialogflow, including the [previous
- // page][QueryResult.current_page] and the [previous session
- // parameters][QueryResult.parameters].
- // In most cases, [current_page][google.cloud.dialogflow.cx.v3.QueryParameters.current_page] and
- // [parameters][google.cloud.dialogflow.cx.v3.QueryParameters.parameters] should be configured together to
- // direct a session to a specific state.
- string current_page = 6 [(google.api.resource_reference) = {
- type: "dialogflow.googleapis.com/Page"
- }];
- // Whether to disable webhook calls for this request.
- bool disable_webhook = 7;
- // Configures whether sentiment analysis should be performed. If not
- // provided, sentiment analysis is not performed.
- bool analyze_query_text_sentiment = 8;
- // This field can be used to pass HTTP headers for a webhook
- // call. These headers will be sent to the webhook along with the headers that
- // have been configured through the Dialogflow web console. The headers defined
- // within this field will overwrite the headers configured through the Dialogflow
- // console if there is a conflict. Header names are case-insensitive.
- // Google's specified headers are not allowed, including "Host",
- // "Content-Length", "Connection", "From", "User-Agent", "Accept-Encoding",
- // "If-Modified-Since", "If-None-Match", "X-Forwarded-For", etc.
- map<string, string> webhook_headers = 10;
- // A list of flow versions to override for the request.
- // Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent
- // ID>/flows/<Flow ID>/versions/<Version ID>`.
- //
- // If version 1 of flow X is included in this list, the traffic of
- // flow X will go through version 1 regardless of the version configuration in
- // the environment. Each flow can have at most one version specified in this
- // list.
- repeated string flow_versions = 14 [(google.api.resource_reference) = {
- type: "dialogflow.googleapis.com/Version"
- }];
- }
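A hedged sketch of populating `QueryParameters`, assuming the same Python client; the page name, header, and parameter values are placeholders.

```python
from google.cloud import dialogflowcx_v3
from google.protobuf import struct_pb2

# Session parameters are carried as a Struct (JSON-like object).
session_params = struct_pb2.Struct()
session_params.update({"customer-tier": "gold", "order-id": "12345"})

query_params = dialogflowcx_v3.QueryParameters(
    time_zone="America/New_York",
    parameters=session_params,
    # Override the session's current page (format documented above).
    current_page=(
        "projects/my-project/locations/global/agents/my-agent"
        "/flows/my-flow/pages/my-page"
    ),
    webhook_headers={"x-correlation-id": "abc-123"},
)

request = dialogflowcx_v3.DetectIntentRequest(
    session=session,  # session name from the earlier sketch
    query_params=query_params,
    query_input=dialogflowcx_v3.QueryInput(
        text=dialogflowcx_v3.TextInput(text="where is my order"),
        language_code="en",
    ),
)
```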
- // Represents the query input. It can contain one of:
- //
- // 1. A conversational query in the form of text.
- //
- // 2. An intent query that specifies which intent to trigger.
- //
- // 3. Natural language speech audio to be processed.
- //
- // 4. An event to be triggered.
- //
- // 5. DTMF digits to invoke an intent and fill in parameter values.
- //
- message QueryInput {
- // Required. The input specification.
- oneof input {
- // The natural language text to be processed.
- TextInput text = 2;
- // The intent to be triggered.
- IntentInput intent = 3;
- // The natural language speech audio to be processed.
- AudioInput audio = 5;
- // The event to be triggered.
- EventInput event = 6;
- // The DTMF event to be handled.
- DtmfInput dtmf = 7;
- }
- // Required. The language of the input. See [Language
- // Support](https://cloud.google.com/dialogflow/cx/docs/reference/language)
- // for a list of the currently supported language codes. Note that queries in
- // the same session do not necessarily need to specify the same language.
- string language_code = 4 [(google.api.field_behavior) = REQUIRED];
- }
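Each arm of the `input` oneof is built the same way, and exactly one may be set per request. A brief sketch with placeholder names, under the same client assumptions:

```python
from google.cloud import dialogflowcx_v3

text_query = dialogflowcx_v3.QueryInput(
    text=dialogflowcx_v3.TextInput(text="check my balance"),
    language_code="en",
)
intent_query = dialogflowcx_v3.QueryInput(
    intent=dialogflowcx_v3.IntentInput(
        intent="projects/my-project/locations/global/agents/my-agent/intents/my-intent"
    ),
    language_code="en",
)
event_query = dialogflowcx_v3.QueryInput(
    event=dialogflowcx_v3.EventInput(event="custom-event"),
    language_code="en",
)
dtmf_query = dialogflowcx_v3.QueryInput(
    dtmf=dialogflowcx_v3.DtmfInput(digits="1234", finish_digit="#"),
    language_code="en",
)
```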
- // Represents the result of a conversational query.
- message QueryResult {
- // The original conversational query.
- oneof query {
- // If [natural language text][google.cloud.dialogflow.cx.v3.TextInput] was provided as input, this field
- // will contain a copy of the text.
- string text = 1;
- // If an [intent][google.cloud.dialogflow.cx.v3.IntentInput] was provided as input, this field will
- // contain a copy of the intent identifier.
- // Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent
- // ID>/intents/<Intent ID>`.
- string trigger_intent = 11 [(google.api.resource_reference) = {
- type: "dialogflow.googleapis.com/Intent"
- }];
- // If [natural language speech audio][google.cloud.dialogflow.cx.v3.AudioInput] was provided as input,
- // this field will contain the transcript for the audio.
- string transcript = 12;
- // If an [event][google.cloud.dialogflow.cx.v3.EventInput] was provided as input, this field will contain
- // the name of the event.
- string trigger_event = 14;
- // If a [DTMF][DTMFInput] was provided as input, this field will contain
- // a copy of the [DTMFInput][].
- DtmfInput dtmf = 23;
- }
- // The language that was triggered during intent detection.
- // See [Language
- // Support](https://cloud.google.com/dialogflow/cx/docs/reference/language)
- // for a list of the currently supported language codes.
- string language_code = 2;
- // The collected [session parameters][google.cloud.dialogflow.cx.v3.SessionInfo.parameters].
- //
- // Depending on your protocol or client library language, this is a
- // map, associative array, symbol table, dictionary, or JSON object
- // composed of a collection of (MapKey, MapValue) pairs:
- //
- // * MapKey type: string
- // * MapKey value: parameter name
- // * MapValue type: If parameter's entity type is a composite entity then use
- // map, otherwise, depending on the parameter value type, it could be one of
- // string, number, boolean, null, list or map.
- // * MapValue value: If parameter's entity type is a composite entity then use
- // map from composite entity property names to property values, otherwise,
- // use parameter value.
- google.protobuf.Struct parameters = 3;
- // The list of rich messages returned to the client. Responses vary from
- // simple text messages to more sophisticated, structured payloads used
- // to drive complex logic.
- repeated ResponseMessage response_messages = 4;
- // The list of webhook call statuses in the order of the call sequence.
- repeated google.rpc.Status webhook_statuses = 13;
- // The list of webhook payloads in [WebhookResponse.payload][google.cloud.dialogflow.cx.v3.WebhookResponse.payload], in
- // the order of the call sequence. If a webhook call fails or doesn't return
- // any payload, an empty `Struct` is used instead.
- repeated google.protobuf.Struct webhook_payloads = 6;
- // The current [Page][google.cloud.dialogflow.cx.v3.Page]. Some fields, but not all, are filled in this message,
- // including but not limited to `name` and `display_name`.
- Page current_page = 7;
- // The [Intent][google.cloud.dialogflow.cx.v3.Intent] that matched the conversational query. Some fields, but not
- // all, are filled in this message, including but not limited to `name` and
- // `display_name`.
- // This field is deprecated; please use [QueryResult.match][google.cloud.dialogflow.cx.v3.QueryResult.match] instead.
- Intent intent = 8 [deprecated = true];
- // The intent detection confidence. Values range from 0.0 (completely
- // uncertain) to 1.0 (completely certain).
- // This value is for informational purpose only and is only used to
- // help match the best intent within the classification threshold.
- // This value may change for the same end-user expression at any time due to a
- // model retraining or change in implementation.
- // This field is deprecated; please use [QueryResult.match][google.cloud.dialogflow.cx.v3.QueryResult.match] instead.
- float intent_detection_confidence = 9 [deprecated = true];
- // Intent match result, could be an intent or an event.
- Match match = 15;
- // The free-form diagnostic info. For example, this field could contain
- // webhook call latency. The fields of this data can change without notice,
- // so you should not write code that depends on its structure.
- //
- // One of the fields is called "Alternative Matched Intents", which may
- // aid with debugging. The following describes these intent results:
- //
- // - The list is empty if no intent was matched to end-user input.
- // - Only intents that are referenced in the currently active flow are
- // included.
- // - The matched intent is included.
- // - Other intents that could have matched end-user input, but did not match
- // because they are referenced by intent routes that are out of
- // [scope](https://cloud.google.com/dialogflow/cx/docs/concept/handler#scope),
- // are included.
- // - Other intents referenced by intent routes in scope that matched end-user
- // input, but had a lower confidence score, are also included.
- google.protobuf.Struct diagnostic_info = 10;
- // The sentiment analysis result, which depends on
- // [`analyze_query_text_sentiment`]
- // [google.cloud.dialogflow.cx.v3.QueryParameters.analyze_query_text_sentiment], specified in the request.
- SentimentAnalysisResult sentiment_analysis_result = 17;
- }
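A sketch of reading the commonly used parts of a `QueryResult` (same client assumptions; `response` is the `DetectIntentResponse` from the first sketch):

```python
result = response.query_result

# Agent replies: collect the plain-text response messages.
replies = [
    " ".join(message.text.text)
    for message in result.response_messages
    if message.text.text
]
print("agent said:", replies)

# Prefer `match` over the deprecated `intent` / `intent_detection_confidence`.
print("match type:", result.match.match_type, "confidence:", result.match.confidence)
print("current page:", result.current_page.display_name)
```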
- // Represents the natural language text to be processed.
- message TextInput {
- // Required. The UTF-8 encoded natural language text to be processed. Text length must
- // not exceed 256 characters.
- string text = 1 [(google.api.field_behavior) = REQUIRED];
- }
- // Represents the intent to trigger programmatically rather than as a result of
- // natural language processing.
- message IntentInput {
- // Required. The unique identifier of the intent.
- // Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent
- // ID>/intents/<Intent ID>`.
- string intent = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "dialogflow.googleapis.com/Intent"
- }
- ];
- }
- // Represents the natural speech audio to be processed.
- message AudioInput {
- // Required. Instructs the speech recognizer how to process the speech audio.
- InputAudioConfig config = 1 [(google.api.field_behavior) = REQUIRED];
- // The natural language speech audio to be processed.
- // A single request can contain up to 1 minute of speech audio data.
- // The [transcribed text][google.cloud.dialogflow.cx.v3.QueryResult.transcript] cannot contain more than 256
- // bytes.
- //
- // For non-streaming audio detect intent, both `config` and `audio` must be
- // provided.
- // For streaming audio detect intent, `config` must be provided in
- // the first request and `audio` must be provided in all following requests.
- bytes audio = 2;
- }
- // Represents the event to trigger.
- message EventInput {
- // Name of the event.
- string event = 1;
- }
- // Represents the input for a DTMF event.
- message DtmfInput {
- // The DTMF digits.
- string digits = 1;
- // The finish digit (if any).
- string finish_digit = 2;
- }
- // Represents one match result of [MatchIntent][].
- message Match {
- // Type of a Match.
- enum MatchType {
- // Not specified. Should never be used.
- MATCH_TYPE_UNSPECIFIED = 0;
- // The query was matched to an intent.
- INTENT = 1;
- // The query directly triggered an intent.
- DIRECT_INTENT = 2;
- // The query was used for parameter filling.
- PARAMETER_FILLING = 3;
- // No match was found for the query.
- NO_MATCH = 4;
- // Indicates an empty query.
- NO_INPUT = 5;
- // The query directly triggered an event.
- EVENT = 6;
- }
- // The [Intent][google.cloud.dialogflow.cx.v3.Intent] that matched the query. Some fields, but not all, are filled
- // in this message, including but not limited to `name` and `display_name`. Only
- // filled for the [`INTENT`][google.cloud.dialogflow.cx.v3.Match.MatchType] match type.
- Intent intent = 1;
- // The event that matched the query. Filled for
- // [`EVENT`][google.cloud.dialogflow.cx.v3.Match.MatchType], [`NO_MATCH`][google.cloud.dialogflow.cx.v3.Match.MatchType] and
- // [`NO_INPUT`][google.cloud.dialogflow.cx.v3.Match.MatchType] match types.
- string event = 6;
- // The collection of parameters extracted from the query.
- //
- // Depending on your protocol or client library language, this is a
- // map, associative array, symbol table, dictionary, or JSON object
- // composed of a collection of (MapKey, MapValue) pairs:
- //
- // * MapKey type: string
- // * MapKey value: parameter name
- // * MapValue type: If parameter's entity type is a composite entity then use
- // map, otherwise, depending on the parameter value type, it could be one of
- // string, number, boolean, null, list or map.
- // * MapValue value: If parameter's entity type is a composite entity then use
- // map from composite entity property names to property values, otherwise,
- // use parameter value.
- google.protobuf.Struct parameters = 2;
- // Final text input which was matched during MatchIntent. This value can be
- // different from the original input sent in the request because of spelling
- // correction or other processing.
- string resolved_input = 3;
- // Type of this [Match][google.cloud.dialogflow.cx.v3.Match].
- MatchType match_type = 4;
- // The confidence of this match. Values range from 0.0 (completely uncertain)
- // to 1.0 (completely certain).
- // This value is for informational purpose only and is only used to help match
- // the best intent within the classification threshold. This value may change
- // for the same end-user expression at any time due to a model retraining or
- // change in implementation.
- float confidence = 5;
- }
- // Request of [MatchIntent][].
- message MatchIntentRequest {
- // Required. The name of the session this query is sent to.
- // Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent
- // ID>/sessions/<Session ID>` or `projects/<Project ID>/locations/<Location
- // ID>/agents/<Agent ID>/environments/<Environment ID>/sessions/<Session ID>`.
- // If `Environment ID` is not specified, we assume the default 'draft'
- // environment.
- // It's up to the API caller to choose an appropriate `Session ID`. It can be
- // a random number or some type of session identifier (preferably hashed).
- // The length of the `Session ID` must not exceed 36 characters.
- //
- // For more information, see the [sessions
- // guide](https://cloud.google.com/dialogflow/cx/docs/concept/session).
- string session = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.resource_reference) = {
- type: "dialogflow.googleapis.com/Session"
- }
- ];
- // The parameters of this query.
- QueryParameters query_params = 2;
- // Required. The input specification.
- QueryInput query_input = 3 [(google.api.field_behavior) = REQUIRED];
- }
- // Response of [MatchIntent][].
- message MatchIntentResponse {
- // The original conversational query.
- oneof query {
- // If [natural language text][google.cloud.dialogflow.cx.v3.TextInput] was provided as input, this field
- // will contain a copy of the text.
- string text = 1;
- // If an [intent][google.cloud.dialogflow.cx.v3.IntentInput] was provided as input, this field will
- // contain a copy of the intent identifier.
- // Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent
- // ID>/intents/<Intent ID>`.
- string trigger_intent = 2 [(google.api.resource_reference) = {
- type: "dialogflow.googleapis.com/Intent"
- }];
- // If [natural language speech audio][google.cloud.dialogflow.cx.v3.AudioInput] was provided as input,
- // this field will contain the transcript for the audio.
- string transcript = 3;
- // If an [event][google.cloud.dialogflow.cx.v3.EventInput] was provided as input, this field will
- // contain a copy of the event name.
- string trigger_event = 6;
- }
- // Match results, if more than one, ordered by descending confidence
- // that the particular intent matches the query.
- repeated Match matches = 4;
- // The current [Page][google.cloud.dialogflow.cx.v3.Page]. Some fields, but not all, are filled in this message,
- // including but not limited to `name` and `display_name`.
- Page current_page = 5;
- }
- // Request of [FulfillIntent][].
- message FulfillIntentRequest {
- // Must be the same as the corresponding MatchIntent request; otherwise, the
- // behavior is undefined.
- MatchIntentRequest match_intent_request = 1;
- // The matched intent/event to fulfill.
- Match match = 2;
- // Instructs the speech synthesizer how to generate output audio.
- OutputAudioConfig output_audio_config = 3;
- }
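The two-step flow is: call `MatchIntent`, pick one of the returned matches, then pass the original request together with that match to `FulfillIntent`. A sketch under the same client assumptions:

```python
from google.cloud import dialogflowcx_v3

match_request = dialogflowcx_v3.MatchIntentRequest(
    session=session,  # session name from the earlier sketch
    query_input=dialogflowcx_v3.QueryInput(
        text=dialogflowcx_v3.TextInput(text="talk to a human"),
        language_code="en",
    ),
)
match_response = client.match_intent(request=match_request)

if match_response.matches:
    fulfill_response = client.fulfill_intent(
        request=dialogflowcx_v3.FulfillIntentRequest(
            # Must be the same request that produced the matches.
            match_intent_request=match_request,
            match=match_response.matches[0],
        )
    )
    print(fulfill_response.query_result.response_messages)
```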
- // Response of [FulfillIntent][].
- message FulfillIntentResponse {
- // Output only. The unique identifier of the response. It can be used to
- // locate a response in the training example set or for reporting issues.
- string response_id = 1;
- // The result of the conversational query.
- QueryResult query_result = 2;
- // The audio data bytes encoded as specified in the request.
- // Note: The output audio is generated based on the values of default platform
- // text responses found in the
- // [`query_result.response_messages`][google.cloud.dialogflow.cx.v3.QueryResult.response_messages] field. If
- // multiple default text responses exist, they will be concatenated when
- // generating audio. If no default platform text responses exist, the
- // generated audio content will be empty.
- //
- // In some scenarios, multiple output audio fields may be present in the
- // response structure. In these cases, only the top-most-level audio output
- // has content.
- bytes output_audio = 3;
- // The config used by the speech synthesizer to generate the output audio.
- OutputAudioConfig output_audio_config = 4;
- }
- // The result of sentiment analysis. Sentiment analysis inspects user input
- // and identifies the prevailing subjective opinion, especially to determine a
- // user's attitude as positive, negative, or neutral.
- message SentimentAnalysisResult {
- // Sentiment score between -1.0 (negative sentiment) and 1.0 (positive
- // sentiment).
- float score = 1;
- // A non-negative number in the [0, +inf) range, which represents the absolute
- // magnitude of sentiment, regardless of score (positive or negative).
- float magnitude = 2;
- }
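Sentiment is computed only when `analyze_query_text_sentiment` is set on the query parameters. A final sketch, same client assumptions and placeholder text:

```python
from google.cloud import dialogflowcx_v3

request = dialogflowcx_v3.DetectIntentRequest(
    session=session,  # session name from the earlier sketch
    query_params=dialogflowcx_v3.QueryParameters(analyze_query_text_sentiment=True),
    query_input=dialogflowcx_v3.QueryInput(
        text=dialogflowcx_v3.TextInput(text="this took far too long"),
        language_code="en",
    ),
)
result = client.detect_intent(request=request).query_result
sentiment = result.sentiment_analysis_result
print(sentiment.score)      # -1.0 (negative) .. 1.0 (positive)
print(sentiment.magnitude)  # [0, +inf): overall strength, sign-agnostic
```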