// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.cloud.dialogflow.v2;

import "google/api/annotations.proto";
import "google/api/client.proto";
import "google/api/field_behavior.proto";
import "google/api/resource.proto";
import "google/cloud/dialogflow/v2/audio_config.proto";
import "google/cloud/dialogflow/v2/session.proto";
import "google/protobuf/field_mask.proto";
import "google/protobuf/struct.proto";
import "google/protobuf/timestamp.proto";
import "google/rpc/status.proto";

option cc_enable_arenas = true;
option csharp_namespace = "Google.Cloud.Dialogflow.V2";
option go_package = "google.golang.org/genproto/googleapis/cloud/dialogflow/v2;dialogflow";
option java_multiple_files = true;
option java_outer_classname = "ParticipantProto";
option java_package = "com.google.cloud.dialogflow.v2";
option objc_class_prefix = "DF";

// Service for managing [Participants][google.cloud.dialogflow.v2.Participant].
service Participants {
  option (google.api.default_host) = "dialogflow.googleapis.com";
  option (google.api.oauth_scopes) =
      "https://www.googleapis.com/auth/cloud-platform,"
      "https://www.googleapis.com/auth/dialogflow";

  // Creates a new participant in a conversation.
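  //
  // A minimal REST request sketch (hypothetical project and conversation IDs),
  // derived from the HTTP binding below; the request body carries the
  // `participant` resource:
  //
  //   POST /v2/projects/my-project/conversations/my-conversation/participants
  //   {
  //     "role": "END_USER"
  //   }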
  rpc CreateParticipant(CreateParticipantRequest) returns (Participant) {
    option (google.api.http) = {
      post: "/v2/{parent=projects/*/conversations/*}/participants"
      body: "participant"
      additional_bindings {
        post: "/v2/{parent=projects/*/locations/*/conversations/*}/participants"
        body: "participant"
      }
    };
    option (google.api.method_signature) = "parent,participant";
  }

  // Retrieves a conversation participant.
  rpc GetParticipant(GetParticipantRequest) returns (Participant) {
    option (google.api.http) = {
      get: "/v2/{name=projects/*/conversations/*/participants/*}"
      additional_bindings {
        get: "/v2/{name=projects/*/locations/*/conversations/*/participants/*}"
      }
    };
    option (google.api.method_signature) = "name";
  }

  // Returns the list of all participants in the specified conversation.
  rpc ListParticipants(ListParticipantsRequest) returns (ListParticipantsResponse) {
    option (google.api.http) = {
      get: "/v2/{parent=projects/*/conversations/*}/participants"
      additional_bindings {
        get: "/v2/{parent=projects/*/locations/*/conversations/*}/participants"
      }
    };
    option (google.api.method_signature) = "parent";
  }

  // Updates the specified participant.
  rpc UpdateParticipant(UpdateParticipantRequest) returns (Participant) {
    option (google.api.http) = {
      patch: "/v2/{participant.name=projects/*/conversations/*/participants/*}"
      body: "participant"
      additional_bindings {
        patch: "/v2/{participant.name=projects/*/locations/*/conversations/*/participants/*}"
        body: "participant"
      }
    };
    option (google.api.method_signature) = "participant,update_mask";
  }

  // Adds a text (for example, chat) or audio (for example, phone recording)
  // message from a participant into the conversation.
  //
  // Note: Always use agent versions for production traffic
  // sent to virtual agents. See [Versions and
  // environments](https://cloud.google.com/dialogflow/es/docs/agents-versions).
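  //
  // As a rough illustration (hypothetical resource names), a text turn from an
  // end user could be sent with the `text_input` variant of
  // [AnalyzeContentRequest][google.cloud.dialogflow.v2.AnalyzeContentRequest]:
  //
  //   POST /v2/projects/my-project/conversations/my-conversation/participants/my-participant:analyzeContent
  //   {
  //     "textInput": { "text": "I'd like to check my order status", "languageCode": "en-US" }
  //   }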
  rpc AnalyzeContent(AnalyzeContentRequest) returns (AnalyzeContentResponse) {
    option (google.api.http) = {
      post: "/v2/{participant=projects/*/conversations/*/participants/*}:analyzeContent"
      body: "*"
      additional_bindings {
        post: "/v2/{participant=projects/*/locations/*/conversations/*/participants/*}:analyzeContent"
        body: "*"
      }
    };
    option (google.api.method_signature) = "participant,text_input";
    option (google.api.method_signature) = "participant,event_input";
  }

  // Adds a text (for example, chat) or audio (for example, phone recording)
  // message from a participant into the conversation.
  // Note: This method is only available through the gRPC API (not REST).
  //
  // The top-level message sent to the client by the server is
  // `StreamingAnalyzeContentResponse`. Multiple response messages can be
  // returned in order. The first one or more messages contain the
  // `recognition_result` field. Each result represents a more complete
  // transcript of what the user said. The next message contains the
  // `reply_text` field and potentially the `reply_audio` field. The message can
  // also contain the `automated_agent_reply` field.
  //
  // Note: Always use agent versions for production traffic
  // sent to virtual agents. See [Versions and
  // environments](https://cloud.google.com/dialogflow/es/docs/agents-versions).
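  //
  // A sketch of a typical audio session (illustrative only, not a normative
  // trace):
  //
  //   request 1:    { participant: "<participant name>", audio_config: { ... } }
  //   request 2..N: { input_audio: <next audio chunk> }
  //   then: half-close the request stream and read the remaining responses.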
  rpc StreamingAnalyzeContent(stream StreamingAnalyzeContentRequest) returns (stream StreamingAnalyzeContentResponse) {
  }

  // Gets suggested articles for a participant based on specific historical
  // messages.
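  //
  // A minimal request sketch (hypothetical resource names):
  //
  //   POST /v2/projects/my-project/conversations/my-conversation/participants/my-participant/suggestions:suggestArticles
  //   { "contextSize": 10 }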
  rpc SuggestArticles(SuggestArticlesRequest) returns (SuggestArticlesResponse) {
    option (google.api.http) = {
      post: "/v2/{parent=projects/*/conversations/*/participants/*}/suggestions:suggestArticles"
      body: "*"
      additional_bindings {
        post: "/v2/{parent=projects/*/locations/*/conversations/*/participants/*}/suggestions:suggestArticles"
        body: "*"
      }
    };
    option (google.api.method_signature) = "parent";
  }

  // Gets suggested FAQ answers for a participant based on specific historical
  // messages.
  rpc SuggestFaqAnswers(SuggestFaqAnswersRequest) returns (SuggestFaqAnswersResponse) {
    option (google.api.http) = {
      post: "/v2/{parent=projects/*/conversations/*/participants/*}/suggestions:suggestFaqAnswers"
      body: "*"
      additional_bindings {
        post: "/v2/{parent=projects/*/locations/*/conversations/*/participants/*}/suggestions:suggestFaqAnswers"
        body: "*"
      }
    };
    option (google.api.method_signature) = "parent";
  }

  // Gets smart replies for a participant based on specific historical
  // messages.
  rpc SuggestSmartReplies(SuggestSmartRepliesRequest) returns (SuggestSmartRepliesResponse) {
    option (google.api.http) = {
      post: "/v2/{parent=projects/*/conversations/*/participants/*}/suggestions:suggestSmartReplies"
      body: "*"
      additional_bindings {
        post: "/v2/{parent=projects/*/locations/*/conversations/*/participants/*}/suggestions:suggestSmartReplies"
        body: "*"
      }
    };
    option (google.api.method_signature) = "parent";
  }
}

// Represents a conversation participant (human agent, virtual agent, end-user).
message Participant {
  option (google.api.resource) = {
    type: "dialogflow.googleapis.com/Participant"
    pattern: "projects/{project}/conversations/{conversation}/participants/{participant}"
    pattern: "projects/{project}/locations/{location}/conversations/{conversation}/participants/{participant}"
  };

  // Enumeration of the roles a participant can play in a conversation.
  enum Role {
    // Participant role not set.
    ROLE_UNSPECIFIED = 0;

    // Participant is a human agent.
    HUMAN_AGENT = 1;

    // Participant is an automated agent, such as a Dialogflow agent.
    AUTOMATED_AGENT = 2;

    // Participant is an end user that has called or chatted with
    // Dialogflow services.
    END_USER = 3;
  }

  // Optional. The unique identifier of this participant.
  // Format: `projects/<Project ID>/locations/<Location
  // ID>/conversations/<Conversation ID>/participants/<Participant ID>`.
  string name = 1 [(google.api.field_behavior) = OPTIONAL];

  // Immutable. The role this participant plays in the conversation. This field must be set
  // during participant creation and is then immutable.
  Role role = 2 [(google.api.field_behavior) = IMMUTABLE];

  // Optional. Label applied to streams representing this participant in SIPREC
  // XML metadata and SDP. This is used to assign transcriptions from that
  // media stream to this participant. This field can be updated.
  string sip_recording_media_label = 6 [(google.api.field_behavior) = OPTIONAL];

  // Optional. Obfuscated user id that should be associated with the created participant.
  //
  // You can specify a user id as follows:
  //
  // 1. If you set this field in
  //    [CreateParticipantRequest][google.cloud.dialogflow.v2.CreateParticipantRequest.participant] or
  //    [UpdateParticipantRequest][google.cloud.dialogflow.v2.UpdateParticipantRequest.participant],
  //    Dialogflow adds the obfuscated user id to the participant.
  //
  // 2. If you set this field in
  //    [AnalyzeContent][google.cloud.dialogflow.v2.AnalyzeContentRequest.obfuscated_external_user_id] or
  //    [StreamingAnalyzeContent][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest.obfuscated_external_user_id],
  //    Dialogflow will update [Participant.obfuscated_external_user_id][google.cloud.dialogflow.v2.Participant.obfuscated_external_user_id].
  //
  // Dialogflow returns an error if you try to add a user id for a
  // non-[END_USER][google.cloud.dialogflow.v2.Participant.Role.END_USER] participant.
  //
  // Dialogflow uses this user id for billing and measurement purposes. For
  // example, Dialogflow determines whether a user in one conversation returned
  // in a later conversation.
  //
  // Note:
  //
  // * Never pass raw user ids to Dialogflow. Always obfuscate your user
  //   id first.
  // * Dialogflow only accepts a UTF-8 encoded string, e.g., a hex digest of a
  //   hash function like SHA-512.
  // * The length of the user id must be <= 256 characters.
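  // * For example (an illustration, not a requirement): the lowercase hex
  //   digest of SHA-512 is 128 characters long, which fits within the
  //   256-character limit.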
  string obfuscated_external_user_id = 7 [(google.api.field_behavior) = OPTIONAL];

  // Optional. Key-value filters on the metadata of documents returned by article
  // suggestion. If specified, article suggestion only returns suggested
  // documents that match all filters in their [Document.metadata][google.cloud.dialogflow.v2.Document.metadata]. Multiple
  // values for a metadata key should be concatenated by a comma. For example,
  // filters to match all documents that have 'US' or 'CA' in their market
  // metadata values and 'agent' in their user metadata values would be
  // ```
  // documents_metadata_filters {
  //   key: "market"
  //   value: "US,CA"
  // }
  // documents_metadata_filters {
  //   key: "user"
  //   value: "agent"
  // }
  // ```
  map<string, string> documents_metadata_filters = 8 [(google.api.field_behavior) = OPTIONAL];
}

// Represents a message posted into a conversation.
message Message {
  option (google.api.resource) = {
    type: "dialogflow.googleapis.com/Message"
    pattern: "projects/{project}/conversations/{conversation}/messages/{message}"
    pattern: "projects/{project}/locations/{location}/conversations/{conversation}/messages/{message}"
  };

  // Optional. The unique identifier of the message.
  // Format: `projects/<Project ID>/locations/<Location
  // ID>/conversations/<Conversation ID>/messages/<Message ID>`.
  string name = 1 [(google.api.field_behavior) = OPTIONAL];

  // Required. The message content.
  string content = 2 [(google.api.field_behavior) = REQUIRED];

  // Optional. The message language.
  // This should be a [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
  // language tag. Example: "en-US".
  string language_code = 3 [(google.api.field_behavior) = OPTIONAL];

  // Output only. The participant that sends this message.
  string participant = 4 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. The role of the participant.
  Participant.Role participant_role = 5 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. The time when the message was created in Contact Center AI.
  google.protobuf.Timestamp create_time = 6 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Optional. The time when the message was sent.
  google.protobuf.Timestamp send_time = 9 [(google.api.field_behavior) = OPTIONAL];

  // Output only. The annotation for the message.
  MessageAnnotation message_annotation = 7 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. The sentiment analysis result for the message.
  SentimentAnalysisResult sentiment_analysis = 8 [(google.api.field_behavior) = OUTPUT_ONLY];
}

// The request message for [Participants.CreateParticipant][google.cloud.dialogflow.v2.Participants.CreateParticipant].
message CreateParticipantRequest {
  // Required. Resource identifier of the conversation to add the participant to.
  // Format: `projects/<Project ID>/locations/<Location
  // ID>/conversations/<Conversation ID>`.
  string parent = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      child_type: "dialogflow.googleapis.com/Participant"
    }
  ];

  // Required. The participant to create.
  Participant participant = 2 [(google.api.field_behavior) = REQUIRED];
}

// The request message for [Participants.GetParticipant][google.cloud.dialogflow.v2.Participants.GetParticipant].
message GetParticipantRequest {
  // Required. The name of the participant. Format:
  // `projects/<Project ID>/locations/<Location ID>/conversations/<Conversation
  // ID>/participants/<Participant ID>`.
  string name = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "dialogflow.googleapis.com/Participant"
    }
  ];
}

// The request message for [Participants.ListParticipants][google.cloud.dialogflow.v2.Participants.ListParticipants].
message ListParticipantsRequest {
  // Required. The conversation to list all participants from.
  // Format: `projects/<Project ID>/locations/<Location
  // ID>/conversations/<Conversation ID>`.
  string parent = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      child_type: "dialogflow.googleapis.com/Participant"
    }
  ];

  // Optional. The maximum number of items to return in a single page. By
  // default 100 and at most 1000.
  int32 page_size = 2 [(google.api.field_behavior) = OPTIONAL];

  // Optional. The next_page_token value returned from a previous list request.
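  //
  // For example (hypothetical IDs), a follow-up page could be requested with:
  //
  //   GET /v2/projects/my-project/conversations/my-conversation/participants?pageToken=<next_page_token from the previous response>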
  string page_token = 3 [(google.api.field_behavior) = OPTIONAL];
}

// The response message for [Participants.ListParticipants][google.cloud.dialogflow.v2.Participants.ListParticipants].
message ListParticipantsResponse {
  // The list of participants. There is a maximum number of items
  // returned based on the page_size field in the request.
  repeated Participant participants = 1;

  // Token to retrieve the next page of results or empty if there are no
  // more results in the list.
  string next_page_token = 2;
}

// The request message for [Participants.UpdateParticipant][google.cloud.dialogflow.v2.Participants.UpdateParticipant].
message UpdateParticipantRequest {
  // Required. The participant to update.
  Participant participant = 1 [(google.api.field_behavior) = REQUIRED];

  // Required. The mask to specify which fields to update.
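  //
  // For example, a sketch (hypothetical participant name) that updates only
  // the SIPREC media label:
  //
  //   participant { name: "<participant name>" sip_recording_media_label: "callee" }
  //   update_mask { paths: "sip_recording_media_label" }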
  google.protobuf.FieldMask update_mask = 2 [(google.api.field_behavior) = REQUIRED];
}

// The request message for [Participants.AnalyzeContent][google.cloud.dialogflow.v2.Participants.AnalyzeContent].
message AnalyzeContentRequest {
  // Required. The name of the participant this text comes from.
  // Format: `projects/<Project ID>/locations/<Location
  // ID>/conversations/<Conversation ID>/participants/<Participant ID>`.
  string participant = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "dialogflow.googleapis.com/Participant"
    }
  ];

  // Required. The input content.
  oneof input {
    // The natural language text to be processed.
    TextInput text_input = 6;

    // An input event to send to Dialogflow.
    EventInput event_input = 8;
  }

  // Speech synthesis configuration.
  // The speech synthesis settings for a virtual agent that may be configured
  // for the associated conversation profile are not used when calling
  // AnalyzeContent. If this configuration is not supplied, speech synthesis
  // is disabled.
  OutputAudioConfig reply_audio_config = 5;

  // Parameters for a Dialogflow virtual-agent query.
  QueryParameters query_params = 9;

  // Parameters for a human assist query.
  AssistQueryParameters assist_query_params = 14;

  // Additional parameters to be put into Dialogflow CX session parameters. To
  // remove a parameter from the session, clients should explicitly set the
  // parameter value to null.
  //
  // Note: this field should only be used if you are connecting to a Dialogflow
  // CX agent.
  google.protobuf.Struct cx_parameters = 18;

  // A unique identifier for this request. Restricted to 36 ASCII characters.
  // A random UUID is recommended.
  // This request is only idempotent if a `request_id` is provided.
  string request_id = 11;
}

// The message in the response that indicates the parameters of DTMF.
message DtmfParameters {
  // Indicates whether DTMF input can be handled in the next request.
  bool accepts_dtmf_input = 1;
}

// The response message for [Participants.AnalyzeContent][google.cloud.dialogflow.v2.Participants.AnalyzeContent].
message AnalyzeContentResponse {
  // The output text content.
  // This field is set if the automated agent responded with text to show to
  // the user.
  string reply_text = 1;

  // The audio data bytes encoded as specified in the request.
  // This field is set if:
  //
  // - `reply_audio_config` was specified in the request, or
  // - The automated agent responded with audio to play to the user. In that
  //   case, `reply_audio.config` contains settings used to synthesize the
  //   speech.
  //
  // In some scenarios, multiple output audio fields may be present in the
  // response structure. In these cases, only the top-most-level audio output
  // has content.
  OutputAudio reply_audio = 2;

  // Only set if a Dialogflow automated agent has responded.
  // Note that [AutomatedAgentReply.detect_intent_response.output_audio][]
  // and [AutomatedAgentReply.detect_intent_response.output_audio_config][]
  // are always empty; use [reply_audio][google.cloud.dialogflow.v2.AnalyzeContentResponse.reply_audio] instead.
  AutomatedAgentReply automated_agent_reply = 3;

  // Message analyzed by CCAI.
  Message message = 5;

  // The suggestions for the most recent human agent. The order is the same as
  // [HumanAgentAssistantConfig.SuggestionConfig.feature_configs][google.cloud.dialogflow.v2.HumanAgentAssistantConfig.SuggestionConfig.feature_configs] of
  // [HumanAgentAssistantConfig.human_agent_suggestion_config][google.cloud.dialogflow.v2.HumanAgentAssistantConfig.human_agent_suggestion_config].
  //
  // Note that any failure of Agent Assist features will not lead to the overall
  // failure of an AnalyzeContent API call. Instead, the features will
  // fail silently with the error field set in the corresponding
  // SuggestionResult.
  repeated SuggestionResult human_agent_suggestion_results = 6;

  // The suggestions for the end user. The order is the same as
  // [HumanAgentAssistantConfig.SuggestionConfig.feature_configs][google.cloud.dialogflow.v2.HumanAgentAssistantConfig.SuggestionConfig.feature_configs] of
  // [HumanAgentAssistantConfig.end_user_suggestion_config][google.cloud.dialogflow.v2.HumanAgentAssistantConfig.end_user_suggestion_config].
  //
  // As with human_agent_suggestion_results, any failure of Agent Assist
  // features will not lead to the overall failure of an AnalyzeContent API
  // call. Instead, the features will fail silently with the error field set in
  // the corresponding SuggestionResult.
  repeated SuggestionResult end_user_suggestion_results = 7;

  // Indicates the parameters of DTMF.
  DtmfParameters dtmf_parameters = 9;
}

// The top-level message sent by the client to the
// [Participants.StreamingAnalyzeContent][google.cloud.dialogflow.v2.Participants.StreamingAnalyzeContent] method.
//
// Multiple request messages should be sent in order:
//
// 1. The first message must contain
//    [participant][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest.participant],
//    [config][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest.config] and optionally
//    [query_params][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest.query_params]. If you want
//    to receive an audio response, it should also contain
//    [reply_audio_config][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest.reply_audio_config].
//    The message must not contain
//    [input][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest.input].
//
// 2. If [config][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest.config] in the first message
//    was set to [audio_config][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest.audio_config],
//    all subsequent messages must contain
//    [input_audio][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest.input_audio] to continue
//    with Speech recognition. If you decide to analyze text input instead
//    after you have already started Speech recognition, send a message with
//    [input_text][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest.input_text].
//    However, note that:
//
//    * Dialogflow will bill you for the audio so far.
//    * Dialogflow discards all Speech recognition results in favor of the
//      text input.
//
// 3. If [StreamingAnalyzeContentRequest.config][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest.config] in the first message was set
//    to [StreamingAnalyzeContentRequest.text_config][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest.text_config], then the second message
//    must contain only [input_text][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest.input_text].
//    Moreover, you must not send more than two messages.
//
// After you have sent all input, you must half-close or abort the request stream.
message StreamingAnalyzeContentRequest {
  // Required. The name of the participant this text comes from.
  // Format: `projects/<Project ID>/locations/<Location
  // ID>/conversations/<Conversation ID>/participants/<Participant ID>`.
  string participant = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "dialogflow.googleapis.com/Participant"
    }
  ];

  // The input config.
  oneof config {
    // Instructs the speech recognizer how to process the speech audio.
    InputAudioConfig audio_config = 2;

    // The natural language text to be processed.
    InputTextConfig text_config = 3;
  }

  // Speech synthesis configuration.
  // The speech synthesis settings for a virtual agent that may be configured
  // for the associated conversation profile are not used when calling
  // StreamingAnalyzeContent. If this configuration is not supplied, speech
  // synthesis is disabled.
  OutputAudioConfig reply_audio_config = 4;

  // The input.
  oneof input {
    // The input audio content to be recognized. Must be sent if `audio_config`
    // is set in the first message. The complete audio over all streaming
    // messages must not exceed 1 minute.
    bytes input_audio = 5;

    // The UTF-8 encoded natural language text to be processed. Must be sent if
    // `text_config` is set in the first message. Text length must not exceed
    // 256 bytes for virtual agent interactions. The `input_text` field can be
    // sent only once.
    string input_text = 6;

    // The DTMF digits used to invoke intent and fill in parameter value.
    //
    // This input is ignored if the previous response indicated that DTMF input
    // is not accepted.
    TelephonyDtmfEvents input_dtmf = 9;
  }

  // Parameters for a Dialogflow virtual-agent query.
  QueryParameters query_params = 7;

  // Parameters for a human assist query.
  AssistQueryParameters assist_query_params = 8;

  // Additional parameters to be put into Dialogflow CX session parameters. To
  // remove a parameter from the session, clients should explicitly set the
  // parameter value to null.
  //
  // Note: this field should only be used if you are connecting to a Dialogflow
  // CX agent.
  google.protobuf.Struct cx_parameters = 13;

  // Enable partial virtual agent responses. If this flag is not enabled, the
  // response stream still contains only one final response even if some
  // `Fulfillment`s in the Dialogflow virtual agent have been configured to
  // return partial responses.
  bool enable_partial_automated_agent_reply = 12;
}

// The top-level message returned from the `StreamingAnalyzeContent` method.
//
// Multiple response messages can be returned in order:
//
// 1. If the input was set to streaming audio, the first one or more messages
//    contain `recognition_result`. Each `recognition_result` represents a more
//    complete transcript of what the user said. The last `recognition_result`
//    has `is_final` set to `true`.
//
// 2. In the virtual agent stage: if `enable_partial_automated_agent_reply` is
//    true, the following N (currently 1 <= N <= 4) messages
//    contain `automated_agent_reply` and optionally `reply_audio`
//    returned by the virtual agent. The first (N-1)
//    `automated_agent_reply`s will have `automated_agent_reply_type` set to
//    `PARTIAL`. The last `automated_agent_reply` has
//    `automated_agent_reply_type` set to `FINAL`.
//    If `enable_partial_automated_agent_reply` is not enabled, the response
//    stream contains only the final reply.
//
//    In the human assist stage: the following N (N >= 1) messages contain
//    `human_agent_suggestion_results`, `end_user_suggestion_results` or
//    `message`.
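//
// As an illustrative sketch (not a normative trace), a streamed audio turn
// could produce a response sequence such as:
//
//   { recognition_result: { transcript: "I need" is_final: false } }
//   { recognition_result: { transcript: "I need help" is_final: true } }
//   { reply_text: "Sure, what do you need help with?" reply_audio: { ... } }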
message StreamingAnalyzeContentResponse {
  // The result of speech recognition.
  StreamingRecognitionResult recognition_result = 1;

  // The output text content.
  // This field is set if an automated agent responded with text for the user.
  string reply_text = 2;

  // The audio data bytes encoded as specified in the request.
  // This field is set if:
  //
  // - The `reply_audio_config` field is specified in the request.
  // - The automated agent, which this output comes from, responded with audio.
  //   In that case, the `reply_audio.config` field contains settings used to
  //   synthesize the speech.
  //
  // In some scenarios, multiple output audio fields may be present in the
  // response structure. In these cases, only the top-most-level audio output
  // has content.
  OutputAudio reply_audio = 3;

  // Only set if a Dialogflow automated agent has responded.
  // Note that [AutomatedAgentReply.detect_intent_response.output_audio][]
  // and [AutomatedAgentReply.detect_intent_response.output_audio_config][]
  // are always empty; use [reply_audio][google.cloud.dialogflow.v2.StreamingAnalyzeContentResponse.reply_audio] instead.
  AutomatedAgentReply automated_agent_reply = 4;

  // Message analyzed by CCAI.
  Message message = 6;

  // The suggestions for the most recent human agent. The order is the same as
  // [HumanAgentAssistantConfig.SuggestionConfig.feature_configs][google.cloud.dialogflow.v2.HumanAgentAssistantConfig.SuggestionConfig.feature_configs] of
  // [HumanAgentAssistantConfig.human_agent_suggestion_config][google.cloud.dialogflow.v2.HumanAgentAssistantConfig.human_agent_suggestion_config].
  repeated SuggestionResult human_agent_suggestion_results = 7;

  // The suggestions for the end user. The order is the same as
  // [HumanAgentAssistantConfig.SuggestionConfig.feature_configs][google.cloud.dialogflow.v2.HumanAgentAssistantConfig.SuggestionConfig.feature_configs] of
  // [HumanAgentAssistantConfig.end_user_suggestion_config][google.cloud.dialogflow.v2.HumanAgentAssistantConfig.end_user_suggestion_config].
  repeated SuggestionResult end_user_suggestion_results = 8;

  // Indicates the parameters of DTMF.
  DtmfParameters dtmf_parameters = 10;
}

// The request message for [Participants.SuggestArticles][google.cloud.dialogflow.v2.Participants.SuggestArticles].
message SuggestArticlesRequest {
  // Required. The name of the participant to fetch suggestions for.
  // Format: `projects/<Project ID>/locations/<Location
  // ID>/conversations/<Conversation ID>/participants/<Participant ID>`.
  string parent = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "dialogflow.googleapis.com/Participant"
    }
  ];

  // Optional. The name of the latest conversation message to compile suggestions
  // for. If empty, it will be the latest message of the conversation.
  //
  // Format: `projects/<Project ID>/locations/<Location
  // ID>/conversations/<Conversation ID>/messages/<Message ID>`.
  string latest_message = 2 [
    (google.api.field_behavior) = OPTIONAL,
    (google.api.resource_reference) = {
      type: "dialogflow.googleapis.com/Message"
    }
  ];

  // Optional. Max number of messages prior to and including
  // [latest_message][google.cloud.dialogflow.v2.SuggestArticlesRequest.latest_message] to use as context
  // when compiling the suggestion. By default 20 and at most 50.
  int32 context_size = 3 [(google.api.field_behavior) = OPTIONAL];

  // Parameters for a human assist query.
  AssistQueryParameters assist_query_params = 4;
}

// The response message for [Participants.SuggestArticles][google.cloud.dialogflow.v2.Participants.SuggestArticles].
message SuggestArticlesResponse {
  // Articles ordered by score in descending order.
  repeated ArticleAnswer article_answers = 1;

  // The name of the latest conversation message used to compile the
  // suggestion.
  //
  // Format: `projects/<Project ID>/locations/<Location
  // ID>/conversations/<Conversation ID>/messages/<Message ID>`.
  string latest_message = 2;

  // Number of messages prior to and including
  // [latest_message][google.cloud.dialogflow.v2.SuggestArticlesResponse.latest_message] to compile the
  // suggestion. It may be smaller than the
  // [SuggestArticlesRequest.context_size][google.cloud.dialogflow.v2.SuggestArticlesRequest.context_size] field in the request if there
  // aren't that many messages in the conversation.
  int32 context_size = 3;
}

// The request message for [Participants.SuggestFaqAnswers][google.cloud.dialogflow.v2.Participants.SuggestFaqAnswers].
message SuggestFaqAnswersRequest {
  // Required. The name of the participant to fetch suggestions for.
  // Format: `projects/<Project ID>/locations/<Location
  // ID>/conversations/<Conversation ID>/participants/<Participant ID>`.
  string parent = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "dialogflow.googleapis.com/Participant"
    }
  ];

  // Optional. The name of the latest conversation message to compile suggestions
  // for. If empty, it will be the latest message of the conversation.
  //
  // Format: `projects/<Project ID>/locations/<Location
  // ID>/conversations/<Conversation ID>/messages/<Message ID>`.
  string latest_message = 2 [
    (google.api.field_behavior) = OPTIONAL,
    (google.api.resource_reference) = {
      type: "dialogflow.googleapis.com/Message"
    }
  ];

  // Optional. Max number of messages prior to and including
  // [latest_message] to use as context when compiling the
  // suggestion. By default 20 and at most 50.
  int32 context_size = 3 [(google.api.field_behavior) = OPTIONAL];

  // Parameters for a human assist query.
  AssistQueryParameters assist_query_params = 4;
}

// The response message for [Participants.SuggestFaqAnswers][google.cloud.dialogflow.v2.Participants.SuggestFaqAnswers].
message SuggestFaqAnswersResponse {
  // Answers extracted from FAQ documents.
  repeated FaqAnswer faq_answers = 1;

  // The name of the latest conversation message used to compile the
  // suggestion.
  //
  // Format: `projects/<Project ID>/locations/<Location
  // ID>/conversations/<Conversation ID>/messages/<Message ID>`.
  string latest_message = 2;

  // Number of messages prior to and including
  // [latest_message][google.cloud.dialogflow.v2.SuggestFaqAnswersResponse.latest_message] to compile the
  // suggestion. It may be smaller than the
  // [SuggestFaqAnswersRequest.context_size][google.cloud.dialogflow.v2.SuggestFaqAnswersRequest.context_size] field in the request if there
  // aren't that many messages in the conversation.
  int32 context_size = 3;
}

// The request message for [Participants.SuggestSmartReplies][google.cloud.dialogflow.v2.Participants.SuggestSmartReplies].
message SuggestSmartRepliesRequest {
  // Required. The name of the participant to fetch suggestions for.
  // Format: `projects/<Project ID>/locations/<Location
  // ID>/conversations/<Conversation ID>/participants/<Participant ID>`.
  string parent = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "dialogflow.googleapis.com/Participant"
    }
  ];

  // The current natural language text segment to compile suggestions
  // for. This provides a way for the user to get follow-up smart reply
  // suggestions after a smart reply selection, without sending a text message.
  TextInput current_text_input = 4;

  // The name of the latest conversation message to compile suggestions
  // for. If empty, it will be the latest message of the conversation.
  //
  // Format: `projects/<Project ID>/locations/<Location
  // ID>/conversations/<Conversation ID>/messages/<Message ID>`.
  string latest_message = 2 [(google.api.resource_reference) = {
    type: "dialogflow.googleapis.com/Message"
  }];

  // Max number of messages prior to and including
  // [latest_message] to use as context when compiling the
  // suggestion. By default 20 and at most 50.
  int32 context_size = 3;
}

// The response message for [Participants.SuggestSmartReplies][google.cloud.dialogflow.v2.Participants.SuggestSmartReplies].
message SuggestSmartRepliesResponse {
  // Output only. Multiple reply options provided by the smart reply service.
  // The order is based on the rank of the model prediction.
  // The maximum number of returned replies is set in SmartReplyConfig.
  repeated SmartReplyAnswer smart_reply_answers = 1 [(google.api.field_behavior) = OUTPUT_ONLY];

  // The name of the latest conversation message used to compile the
  // suggestion.
  //
  // Format: `projects/<Project ID>/locations/<Location
  // ID>/conversations/<Conversation ID>/messages/<Message ID>`.
  string latest_message = 2 [(google.api.resource_reference) = {
    type: "dialogflow.googleapis.com/Message"
  }];

  // Number of messages prior to and including
  // [latest_message][google.cloud.dialogflow.v2.SuggestSmartRepliesResponse.latest_message] to compile the
  // suggestion. It may be smaller than the
  // [SuggestSmartRepliesRequest.context_size][google.cloud.dialogflow.v2.SuggestSmartRepliesRequest.context_size] field in the request if there
  // aren't that many messages in the conversation.
  int32 context_size = 3;
}

// Represents the natural language speech audio to be played to the end user.
message OutputAudio {
  // Instructs the speech synthesizer how to generate the speech
  // audio.
  OutputAudioConfig config = 1;

  // The natural language speech audio.
  bytes audio = 2;
}

// Represents a response from an automated agent.
message AutomatedAgentReply {
  // Represents different automated agent reply types.
  enum AutomatedAgentReplyType {
    // Not specified. This should never happen.
    AUTOMATED_AGENT_REPLY_TYPE_UNSPECIFIED = 0;

    // Partial reply. For example, aggregated responses in a `Fulfillment` that
    // enables `return_partial_response` can be returned as a partial reply.
    // WARNING: partial reply is not eligible for barge-in.
    PARTIAL = 1;

    // Final reply.
    FINAL = 2;
  }

  // Response of the Dialogflow [Sessions.DetectIntent][google.cloud.dialogflow.v2.Sessions.DetectIntent] call.
  DetectIntentResponse detect_intent_response = 1;

  // AutomatedAgentReply type.
  AutomatedAgentReplyType automated_agent_reply_type = 7;

  // Indicates whether the partial automated agent reply is interruptible when a
  // later reply message arrives. For example, if the agent specified some music
  // as a partial response, it can be canceled.
  bool allow_cancellation = 8;

  // The unique identifier of the current Dialogflow CX conversation page.
  // Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent
  // ID>/flows/<Flow ID>/pages/<Page ID>`.
  string cx_current_page = 11;
}

// Represents an article answer.
message ArticleAnswer {
  // The article title.
  string title = 1;

  // The article URI.
  string uri = 2;

  // Article snippets.
  repeated string snippets = 3;

  // Article match confidence.
  // The system's confidence score that this article is a good match for this
  // conversation, as a value from 0.0 (completely uncertain) to 1.0
  // (completely certain).
  float confidence = 4;

  // A map that contains metadata about the answer and the
  // document from which it originates.
  map<string, string> metadata = 5;

  // The name of the answer record, in the format
  // "projects/<Project ID>/locations/<Location ID>/answerRecords/<Answer Record
  // ID>".
  string answer_record = 6;
}

// Represents an answer from "frequently asked questions".
message FaqAnswer {
  // The piece of text from the `source` knowledge base document.
  string answer = 1;

  // The system's confidence score that this Knowledge answer is a good match
  // for this conversational query, ranging from 0.0 (completely uncertain)
  // to 1.0 (completely certain).
  float confidence = 2;

  // The corresponding FAQ question.
  string question = 3;

  // Indicates which Knowledge Document this answer was extracted
  // from.
  // Format: `projects/<Project ID>/locations/<Location
  // ID>/agent/knowledgeBases/<Knowledge Base ID>/documents/<Document ID>`.
  string source = 4;

  // A map that contains metadata about the answer and the
  // document from which it originates.
  map<string, string> metadata = 5;

  // The name of the answer record, in the format
  // "projects/<Project ID>/locations/<Location ID>/answerRecords/<Answer Record
  // ID>".
  string answer_record = 6;
}

// Represents a smart reply answer.
message SmartReplyAnswer {
  // The content of the reply.
  string reply = 1;

  // Smart reply confidence.
  // The system's confidence score that this reply is a good match for
  // this conversation, as a value from 0.0 (completely uncertain) to 1.0
  // (completely certain).
  float confidence = 2;

  // The name of the answer record, in the format
  // "projects/<Project ID>/locations/<Location ID>/answerRecords/<Answer Record
  // ID>".
  string answer_record = 3 [(google.api.resource_reference) = {
    type: "dialogflow.googleapis.com/AnswerRecord"
  }];
}

// One response of a particular suggestion type. Used in the responses of
// [Participants.AnalyzeContent][google.cloud.dialogflow.v2.Participants.AnalyzeContent] and
// [Participants.StreamingAnalyzeContent][google.cloud.dialogflow.v2.Participants.StreamingAnalyzeContent], as well as [HumanAgentAssistantEvent][google.cloud.dialogflow.v2.HumanAgentAssistantEvent].
message SuggestionResult {
  // Different types of suggestion response.
  oneof suggestion_response {
    // Error status if the request failed.
    google.rpc.Status error = 1;

    // SuggestArticlesResponse if request is for ARTICLE_SUGGESTION.
    SuggestArticlesResponse suggest_articles_response = 2;

    // SuggestFaqAnswersResponse if request is for FAQ_ANSWER.
    SuggestFaqAnswersResponse suggest_faq_answers_response = 3;

    // SuggestSmartRepliesResponse if request is for SMART_REPLY.
    SuggestSmartRepliesResponse suggest_smart_replies_response = 4;
  }
}

// Defines the language used in the input text.
message InputTextConfig {
  // Required. The language of this conversational query. See [Language
  // Support](https://cloud.google.com/dialogflow/docs/reference/language)
  // for a list of the currently supported language codes.
  string language_code = 1 [(google.api.field_behavior) = REQUIRED];
}

// Represents a part of a message possibly annotated with an entity. The part
// can be an entity or purely a part of the message between two entities or
// message start/end.
message AnnotatedMessagePart {
  // A part of a message possibly annotated with an entity.
  string text = 1;

  // The [Dialogflow system entity
  // type](https://cloud.google.com/dialogflow/docs/reference/system-entities)
  // of this message part. If this is empty, Dialogflow could not annotate the
  // phrase part with a system entity.
  string entity_type = 2;

  // The [Dialogflow system entity formatted value
  // ](https://cloud.google.com/dialogflow/docs/reference/system-entities) of
  // this message part. For example, for a system entity of type
  // `@sys.unit-currency`, this may contain:
  // <pre>
  // {
  //   "amount": 5,
  //   "currency": "USD"
  // }
  // </pre>
  google.protobuf.Value formatted_value = 3;
}

// Represents the result of annotation for the message.
message MessageAnnotation {
  // The collection of annotated message parts ordered by their
  // position in the message. You can recover the annotated message by
  // concatenating [AnnotatedMessagePart.text].
  repeated AnnotatedMessagePart parts = 1;

  // Indicates whether the text message contains entities.
  bool contain_entities = 2;
}

// Represents the parameters of a human assist query.
message AssistQueryParameters {
  // Key-value filters on the metadata of documents returned by article
  // suggestion. If specified, article suggestion only returns suggested
  // documents that match all filters in their [Document.metadata][google.cloud.dialogflow.v2.Document.metadata]. Multiple
  // values for a metadata key should be concatenated by a comma. For example,
  // filters to match all documents that have 'US' or 'CA' in their market
  // metadata values and 'agent' in their user metadata values would be
  // ```
  // documents_metadata_filters {
  //   key: "market"
  //   value: "US,CA"
  // }
  // documents_metadata_filters {
  //   key: "user"
  //   value: "agent"
  // }
  // ```
  map<string, string> documents_metadata_filters = 1;
}