1// Copyright 2023 Google LLC
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7//     http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15syntax = "proto3";
16
17package google.cloud.dialogflow.v2beta1;
18
19import "google/api/annotations.proto";
20import "google/api/client.proto";
21import "google/api/field_behavior.proto";
22import "google/api/resource.proto";
23import "google/cloud/dialogflow/v2beta1/audio_config.proto";
24import "google/cloud/dialogflow/v2beta1/session.proto";
25import "google/protobuf/field_mask.proto";
26import "google/protobuf/struct.proto";
27import "google/protobuf/timestamp.proto";
28import "google/rpc/status.proto";
29
30option cc_enable_arenas = true;
31option csharp_namespace = "Google.Cloud.Dialogflow.V2Beta1";
32option go_package = "cloud.google.com/go/dialogflow/apiv2beta1/dialogflowpb;dialogflowpb";
33option java_multiple_files = true;
34option java_outer_classname = "ParticipantProto";
35option java_package = "com.google.cloud.dialogflow.v2beta1";
36option objc_class_prefix = "DF";
37
38// Service for managing
39// [Participants][google.cloud.dialogflow.v2beta1.Participant].
40service Participants {
41  option (google.api.default_host) = "dialogflow.googleapis.com";
42  option (google.api.oauth_scopes) =
43      "https://www.googleapis.com/auth/cloud-platform,"
44      "https://www.googleapis.com/auth/dialogflow";
45
46  // Creates a new participant in a conversation.
47  rpc CreateParticipant(CreateParticipantRequest) returns (Participant) {
48    option (google.api.http) = {
49      post: "/v2beta1/{parent=projects/*/conversations/*}/participants"
50      body: "participant"
51      additional_bindings {
52        post: "/v2beta1/{parent=projects/*/locations/*/conversations/*}/participants"
53        body: "participant"
54      }
55    };
56    option (google.api.method_signature) = "parent,participant";
57  }
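  // A minimal usage sketch (not part of this file), assuming the published
  // Python client library `google-cloud-dialogflow`; the project and
  // conversation IDs below are illustrative placeholders:
  //
  // ```
  // from google.cloud import dialogflow_v2beta1 as dialogflow
  //
  // client = dialogflow.ParticipantsClient()
  // parent = "projects/my-project/conversations/my-conversation"  # placeholder
  // participant = client.create_participant(
  //     request={
  //         "parent": parent,
  //         "participant": {"role": dialogflow.Participant.Role.END_USER},
  //     }
  // )
  // print(participant.name)
  // ```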
58
59  // Retrieves a conversation participant.
60  rpc GetParticipant(GetParticipantRequest) returns (Participant) {
61    option (google.api.http) = {
62      get: "/v2beta1/{name=projects/*/conversations/*/participants/*}"
63      additional_bindings {
64        get: "/v2beta1/{name=projects/*/locations/*/conversations/*/participants/*}"
65      }
66    };
67    option (google.api.method_signature) = "name";
68  }
69
70  // Returns the list of all participants in the specified conversation.
71  rpc ListParticipants(ListParticipantsRequest)
72      returns (ListParticipantsResponse) {
73    option (google.api.http) = {
74      get: "/v2beta1/{parent=projects/*/conversations/*}/participants"
75      additional_bindings {
76        get: "/v2beta1/{parent=projects/*/locations/*/conversations/*}/participants"
77      }
78    };
79    option (google.api.method_signature) = "parent";
80  }
81
82  // Updates the specified participant.
83  rpc UpdateParticipant(UpdateParticipantRequest) returns (Participant) {
84    option (google.api.http) = {
85      patch: "/v2beta1/{participant.name=projects/*/conversations/*/participants/*}"
86      body: "participant"
87      additional_bindings {
88        patch: "/v2beta1/{participant.name=projects/*/locations/*/conversations/*/participants/*}"
89        body: "participant"
90      }
91    };
92    option (google.api.method_signature) = "participant,update_mask";
93  }
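  // A minimal sketch of a partial update (not part of this file), assuming the
  // published Python client library `google-cloud-dialogflow`; the resource
  // name and filter values are illustrative placeholders. Only the fields named
  // in `update_mask` are written:
  //
  // ```
  // from google.cloud import dialogflow_v2beta1 as dialogflow
  // from google.protobuf import field_mask_pb2
  //
  // client = dialogflow.ParticipantsClient()
  // participant = dialogflow.Participant(
  //     name="projects/my-project/conversations/my-conversation/participants/my-participant",
  //     documents_metadata_filters={"market": "US,CA"},
  // )
  // updated = client.update_participant(
  //     request={
  //         "participant": participant,
  //         "update_mask": field_mask_pb2.FieldMask(paths=["documents_metadata_filters"]),
  //     }
  // )
  // ```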
94
95  // Adds a text (chat, for example), or audio (phone recording, for example)
96  // message from a participant into the conversation.
97  //
98  // Note: Always use agent versions for production traffic
99  // sent to virtual agents. See [Versions and
100  // environments](https://cloud.google.com/dialogflow/es/docs/agents-versions).
101  rpc AnalyzeContent(AnalyzeContentRequest) returns (AnalyzeContentResponse) {
102    option (google.api.http) = {
103      post: "/v2beta1/{participant=projects/*/conversations/*/participants/*}:analyzeContent"
104      body: "*"
105      additional_bindings {
106        post: "/v2beta1/{participant=projects/*/locations/*/conversations/*/participants/*}:analyzeContent"
107        body: "*"
108      }
109    };
110    option (google.api.method_signature) = "participant,text_input";
111    option (google.api.method_signature) = "participant,audio_input";
112    option (google.api.method_signature) = "participant,event_input";
113  }
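  // A minimal sketch of sending a chat message through this RPC (not part of
  // this file), assuming the published Python client library
  // `google-cloud-dialogflow`; the participant resource name is an illustrative
  // placeholder:
  //
  // ```
  // from google.cloud import dialogflow_v2beta1 as dialogflow
  //
  // client = dialogflow.ParticipantsClient()
  // participant = (
  //     "projects/my-project/conversations/my-conversation"
  //     "/participants/my-participant"
  // )
  // response = client.analyze_content(
  //     request={
  //         "participant": participant,
  //         "text_input": {"text": "I'd like to check my order.", "language_code": "en-US"},
  //     }
  // )
  // print(response.reply_text)
  // for result in response.human_agent_suggestion_results:
  //     print(result)
  // ```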
114
115  // Adds a text (e.g., chat) or audio (e.g., phone recording) message from a
116  // participant into the conversation.
117  // Note: This method is only available through the gRPC API (not REST).
118  //
119  // The top-level message sent to the client by the server is
120  // `StreamingAnalyzeContentResponse`. Multiple response messages can be
121  // returned in order. The first one or more messages contain the
122  // `recognition_result` field. Each result represents a more complete
123  // transcript of what the user said. The next message contains the
124  // `reply_text` field, and potentially the `reply_audio` and/or the
125  // `automated_agent_reply` fields.
126  //
127  // Note: Always use agent versions for production traffic
128  // sent to virtual agents. See [Versions and
129  // environments](https://cloud.google.com/dialogflow/es/docs/agents-versions).
130  rpc StreamingAnalyzeContent(stream StreamingAnalyzeContentRequest)
131      returns (stream StreamingAnalyzeContentResponse) {}
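  // A minimal sketch of the request ordering for this bidirectional streaming
  // RPC (first a configuration message, then audio chunks; see
  // `StreamingAnalyzeContentRequest` below), assuming the published Python
  // client library `google-cloud-dialogflow`; the resource name and audio
  // source are illustrative placeholders:
  //
  // ```
  // from google.cloud import dialogflow_v2beta1 as dialogflow
  //
  // client = dialogflow.ParticipantsClient()
  // participant = (
  //     "projects/my-project/conversations/my-conversation"
  //     "/participants/my-participant"
  // )
  // audio_chunks = []  # placeholder: an iterable of raw audio byte strings
  //
  // def request_stream():
  //     # The first message carries the participant and the audio config only.
  //     yield dialogflow.StreamingAnalyzeContentRequest(
  //         participant=participant,
  //         audio_config=dialogflow.InputAudioConfig(
  //             audio_encoding=dialogflow.AudioEncoding.AUDIO_ENCODING_LINEAR_16,
  //             sample_rate_hertz=16000,
  //             language_code="en-US",
  //         ),
  //     )
  //     # Every following message carries only input_audio.
  //     for chunk in audio_chunks:
  //         yield dialogflow.StreamingAnalyzeContentRequest(input_audio=chunk)
  //
  // for response in client.streaming_analyze_content(requests=request_stream()):
  //     if response.recognition_result.transcript:
  //         print(response.recognition_result.transcript)
  //     if response.reply_text:
  //         print(response.reply_text)
  // ```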
132
133  // Gets suggested articles for a participant based on specific historical
134  // messages.
135  //
136  // Note that
137  // [ListSuggestions][google.cloud.dialogflow.v2beta1.Participants.ListSuggestions]
138  // will only list the auto-generated suggestions, while
139  // [CompileSuggestion][google.cloud.dialogflow.v2beta1.Participants.CompileSuggestion]
140  // will try to compile a suggestion based on the provided conversation context
141  // in real time.
142  rpc SuggestArticles(SuggestArticlesRequest)
143      returns (SuggestArticlesResponse) {
144    option (google.api.http) = {
145      post: "/v2beta1/{parent=projects/*/conversations/*/participants/*}/suggestions:suggestArticles"
146      body: "*"
147      additional_bindings {
148        post: "/v2beta1/{parent=projects/*/locations/*/conversations/*/participants/*}/suggestions:suggestArticles"
149        body: "*"
150      }
151    };
152    option (google.api.method_signature) = "parent";
153  }
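  // A minimal sketch (not part of this file), assuming the published Python
  // client library `google-cloud-dialogflow`; the participant resource name is
  // an illustrative placeholder:
  //
  // ```
  // from google.cloud import dialogflow_v2beta1 as dialogflow
  //
  // client = dialogflow.ParticipantsClient()
  // parent = (
  //     "projects/my-project/conversations/my-conversation"
  //     "/participants/my-participant"
  // )
  // response = client.suggest_articles(request={"parent": parent, "context_size": 10})
  // for answer in response.article_answers:
  //     print(answer.title, answer.uri)
  // ```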
154
155  // Gets suggested faq answers for a participant based on specific historical
156  // messages.
157  rpc SuggestFaqAnswers(SuggestFaqAnswersRequest)
158      returns (SuggestFaqAnswersResponse) {
159    option (google.api.http) = {
160      post: "/v2beta1/{parent=projects/*/conversations/*/participants/*}/suggestions:suggestFaqAnswers"
161      body: "*"
162      additional_bindings {
163        post: "/v2beta1/{parent=projects/*/locations/*/conversations/*/participants/*}/suggestions:suggestFaqAnswers"
164        body: "*"
165      }
166    };
167    option (google.api.method_signature) = "parent";
168  }
169
170  // Gets smart replies for a participant based on specific historical
171  // messages.
172  rpc SuggestSmartReplies(SuggestSmartRepliesRequest)
173      returns (SuggestSmartRepliesResponse) {
174    option (google.api.http) = {
175      post: "/v2beta1/{parent=projects/*/conversations/*/participants/*}/suggestions:suggestSmartReplies"
176      body: "*"
177      additional_bindings {
178        post: "/v2beta1/{parent=projects/*/locations/*/conversations/*/participants/*}/suggestions:suggestSmartReplies"
179        body: "*"
180      }
181    };
182    option (google.api.method_signature) = "parent";
183  }
184
185  // Deprecated: Use inline suggestion, event-based suggestion, or the
186  // Suggestion* API instead.
187  // See
188  // [HumanAgentAssistantConfig.name][google.cloud.dialogflow.v2beta1.HumanAgentAssistantConfig.name]
189  // for more details. Removal Date: 2020-09-01.
190  //
191  // Retrieves suggestions for live agents.
192  //
193  // This method should be used by human agent client software to fetch
194  // auto-generated suggestions in real time, while the conversation with an end user
195  // is in progress. The functionality is implemented in terms of the
196  // [list
197  // pagination](https://cloud.google.com/apis/design/design_patterns#list_pagination)
198  // design pattern. The client app should use the `next_page_token` field
199  // to fetch the next batch of suggestions. `suggestions` are sorted by
200  // `create_time` in descending order.
201  // To fetch the latest suggestion, set `page_size` to 1.
202  // To fetch new suggestions without duplication, send a request with the filter
203  // `create_time_epoch_microseconds > [first item's create_time of previous
204  // request]` and an empty page_token.
205  rpc ListSuggestions(ListSuggestionsRequest)
206      returns (ListSuggestionsResponse) {
207    option deprecated = true;
208    option (google.api.http) = {
209      get: "/v2beta1/{parent=projects/*/conversations/*/participants/*}/suggestions"
210    };
211  }
212
213  // Deprecated. Use
214  // [SuggestArticles][google.cloud.dialogflow.v2beta1.Participants.SuggestArticles]
215  // and
216  // [SuggestFaqAnswers][google.cloud.dialogflow.v2beta1.Participants.SuggestFaqAnswers]
217  // instead.
218  //
219  // Gets suggestions for a participant based on specific historical
220  // messages.
221  //
222  // Note that
223  // [ListSuggestions][google.cloud.dialogflow.v2beta1.Participants.ListSuggestions]
224  // will only list the auto-generated suggestions, while
225  // [CompileSuggestion][google.cloud.dialogflow.v2beta1.Participants.CompileSuggestion]
226  // will try to compile a suggestion based on the provided conversation context
227  // in real time.
228  rpc CompileSuggestion(CompileSuggestionRequest)
229      returns (CompileSuggestionResponse) {
230    option deprecated = true;
231    option (google.api.http) = {
232      post: "/v2beta1/{parent=projects/*/conversations/*/participants/*}/suggestions:compile"
233      body: "*"
234    };
235  }
236}
237
238// Represents a conversation participant (human agent, virtual agent, end-user).
239message Participant {
240  option (google.api.resource) = {
241    type: "dialogflow.googleapis.com/Participant"
242    pattern: "projects/{project}/conversations/{conversation}/participants/{participant}"
243    pattern: "projects/{project}/locations/{location}/conversations/{conversation}/participants/{participant}"
244  };
245
246  // Enumeration of the roles a participant can play in a conversation.
247  enum Role {
248    // Participant role not set.
249    ROLE_UNSPECIFIED = 0;
250
251    // Participant is a human agent.
252    HUMAN_AGENT = 1;
253
254    // Participant is an automated agent, such as a Dialogflow agent.
255    AUTOMATED_AGENT = 2;
256
257    // Participant is an end user that has called or chatted with
258    // Dialogflow services.
259    END_USER = 3;
260  }
261
262  // Optional. The unique identifier of this participant.
263  // Format: `projects/<Project ID>/locations/<Location
264  // ID>/conversations/<Conversation ID>/participants/<Participant ID>`.
265  string name = 1 [(google.api.field_behavior) = OPTIONAL];
266
267  // Immutable. The role this participant plays in the conversation. This field
268  // must be set during participant creation and is then immutable.
269  Role role = 2 [(google.api.field_behavior) = IMMUTABLE];
270
271  // Optional. Obfuscated user id that should be associated with the created
272  // participant.
273  //
274  // You can specify a user id as follows:
275  //
276  // 1. If you set this field in
277  //    [CreateParticipantRequest][google.cloud.dialogflow.v2beta1.CreateParticipantRequest.participant]
278  //    or
279  //    [UpdateParticipantRequest][google.cloud.dialogflow.v2beta1.UpdateParticipantRequest.participant],
280  //    Dialogflow adds the obfuscated user id with the participant.
281  //
282  // 2. If you set this field in
283  //    [AnalyzeContent][google.cloud.dialogflow.v2beta1.AnalyzeContentRequest.obfuscated_external_user_id]
284  //    or
285  //    [StreamingAnalyzeContent][google.cloud.dialogflow.v2beta1.StreamingAnalyzeContentRequest.obfuscated_external_user_id],
286  //    Dialogflow will update
287  //    [Participant.obfuscated_external_user_id][google.cloud.dialogflow.v2beta1.Participant.obfuscated_external_user_id].
288  //
289  // Dialogflow uses this user id for billing and measurement. If a user with
290  // the same obfuscated_external_user_id is created in a later conversation,
291  // Dialogflow will know it's the same user.
292  //
293  // Dialogflow also uses this user id for Agent Assist suggestion
294  // personalization. For example, Dialogflow can use it to provide personalized
295  // smart reply suggestions for this user.
296  //
297  // Note:
298  //
299  // * Please never pass raw user ids to Dialogflow. Always obfuscate your user
300  //   id first.
301  // * Dialogflow only accepts a UTF-8 encoded string, e.g., a hex digest of a
302  //   hash function like SHA-512.
303  // * The length of the user id must be <= 256 characters.
304  string obfuscated_external_user_id = 7
305      [(google.api.field_behavior) = OPTIONAL];
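  // For the field above, a minimal sketch of producing an obfuscated id as a
  // hex digest using only the Python standard library; the raw user id is an
  // illustrative placeholder and must never be sent to Dialogflow directly:
  //
  // ```
  // import hashlib
  //
  // raw_user_id = "user-12345@example.com"  # placeholder; obfuscate before sending
  // obfuscated_external_user_id = hashlib.sha512(
  //     raw_user_id.encode("utf-8")).hexdigest()
  // # A SHA-512 hex digest is 128 ASCII characters, well under the 256-character limit.
  // ```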
306
307  // Optional. Key-value filters on the metadata of documents returned by
308  // article suggestion. If specified, article suggestion only returns suggested
309  // documents that match all filters in their
310  // [Document.metadata][google.cloud.dialogflow.v2beta1.Document.metadata].
311  // Multiple values for a metadata key should be separated by commas. For
312  // example, filters to match all documents that have 'US' or 'CA' in their
313  // market metadata values and 'agent' in their user metadata values will be:
314  // ```
315  // documents_metadata_filters {
316  //   key: "market"
317  //   value: "US,CA"
318  // }
319  // documents_metadata_filters {
320  //   key: "user"
321  //   value: "agent"
322  // }
323  // ```
324  map<string, string> documents_metadata_filters = 8
325      [(google.api.field_behavior) = OPTIONAL];
326}
327
328// Represents a message posted into a conversation.
329message Message {
330  option (google.api.resource) = {
331    type: "dialogflow.googleapis.com/Message"
332    pattern: "projects/{project}/conversations/{conversation}/messages/{message}"
333    pattern: "projects/{project}/locations/{location}/conversations/{conversation}/messages/{message}"
334  };
335
336  // Optional. The unique identifier of the message.
337  // Format: `projects/<Project ID>/locations/<Location
338  // ID>/conversations/<Conversation ID>/messages/<Message ID>`.
339  string name = 1 [(google.api.field_behavior) = OPTIONAL];
340
341  // Required. The message content.
342  string content = 2 [(google.api.field_behavior) = REQUIRED];
343
344  // Optional. The message language.
345  // This should be a [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
346  // language tag. Example: "en-US".
347  string language_code = 3 [(google.api.field_behavior) = OPTIONAL];
348
349  // Output only. The participant that sends this message.
350  string participant = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
351
352  // Output only. The role of the participant.
353  Participant.Role participant_role = 5
354      [(google.api.field_behavior) = OUTPUT_ONLY];
355
356  // Output only. The time when the message was created in Contact Center AI.
357  google.protobuf.Timestamp create_time = 6
358      [(google.api.field_behavior) = OUTPUT_ONLY];
359
360  // Optional. The time when the message was sent.
361  google.protobuf.Timestamp send_time = 9
362      [(google.api.field_behavior) = OPTIONAL];
363
364  // Output only. The annotation for the message.
365  MessageAnnotation message_annotation = 7
366      [(google.api.field_behavior) = OUTPUT_ONLY];
367
368  // Output only. The sentiment analysis result for the message.
369  SentimentAnalysisResult sentiment_analysis = 8
370      [(google.api.field_behavior) = OUTPUT_ONLY];
371}
372
373// The request message for
374// [Participants.CreateParticipant][google.cloud.dialogflow.v2beta1.Participants.CreateParticipant].
375message CreateParticipantRequest {
376  // Required. Resource identifier of the conversation adding the participant.
377  // Format: `projects/<Project ID>/locations/<Location
378  // ID>/conversations/<Conversation ID>`.
379  string parent = 1 [
380    (google.api.field_behavior) = REQUIRED,
381    (google.api.resource_reference) = {
382      child_type: "dialogflow.googleapis.com/Participant"
383    }
384  ];
385
386  // Required. The participant to create.
387  Participant participant = 2 [(google.api.field_behavior) = REQUIRED];
388}
389
390// The request message for
391// [Participants.GetParticipant][google.cloud.dialogflow.v2beta1.Participants.GetParticipant].
392message GetParticipantRequest {
393  // Required. The name of the participant. Format:
394  // `projects/<Project ID>/locations/<Location ID>/conversations/<Conversation
395  // ID>/participants/<Participant ID>`.
396  string name = 1 [
397    (google.api.field_behavior) = REQUIRED,
398    (google.api.resource_reference) = {
399      type: "dialogflow.googleapis.com/Participant"
400    }
401  ];
402}
403
404// The request message for
405// [Participants.ListParticipants][google.cloud.dialogflow.v2beta1.Participants.ListParticipants].
406message ListParticipantsRequest {
407  // Required. The conversation to list all participants from.
408  // Format: `projects/<Project ID>/locations/<Location
409  // ID>/conversations/<Conversation ID>`.
410  string parent = 1 [
411    (google.api.field_behavior) = REQUIRED,
412    (google.api.resource_reference) = {
413      child_type: "dialogflow.googleapis.com/Participant"
414    }
415  ];
416
417  // Optional. The maximum number of items to return in a single page. By
418  // default 100 and at most 1000.
419  int32 page_size = 2 [(google.api.field_behavior) = OPTIONAL];
420
421  // Optional. The next_page_token value returned from a previous list request.
422  string page_token = 3 [(google.api.field_behavior) = OPTIONAL];
423}
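// A minimal sketch of paging through participants (not part of this file),
// assuming the published Python client library `google-cloud-dialogflow`, whose
// generated pager follows `next_page_token` automatically; the conversation
// name is an illustrative placeholder:
//
// ```
// from google.cloud import dialogflow_v2beta1 as dialogflow
//
// client = dialogflow.ParticipantsClient()
// parent = "projects/my-project/conversations/my-conversation"  # placeholder
// for participant in client.list_participants(
//         request={"parent": parent, "page_size": 100}):
//     print(participant.name, participant.role)
// ```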
424
425// The response message for
426// [Participants.ListParticipants][google.cloud.dialogflow.v2beta1.Participants.ListParticipants].
427message ListParticipantsResponse {
428  // The list of participants. There is a maximum number of items
429  // returned based on the page_size field in the request.
430  repeated Participant participants = 1;
431
432  // Token to retrieve the next page of results or empty if there are no
433  // more results in the list.
434  string next_page_token = 2;
435}
436
437// The request message for
438// [Participants.UpdateParticipant][google.cloud.dialogflow.v2beta1.Participants.UpdateParticipant].
439message UpdateParticipantRequest {
440  // Required. The participant to update.
441  Participant participant = 1 [(google.api.field_behavior) = REQUIRED];
442
443  // Required. The mask to specify which fields to update.
444  google.protobuf.FieldMask update_mask = 2
445      [(google.api.field_behavior) = REQUIRED];
446}
447
448// Represents the natural language speech audio to be processed.
449message AudioInput {
450  // Required. Instructs the speech recognizer how to process the speech audio.
451  InputAudioConfig config = 1;
452
453  // Required. The natural language speech audio to be processed.
454  // A single request can contain up to 1 minute of speech audio data.
455  // The transcribed text cannot contain more than 256 bytes for virtual agent
456  // interactions.
457  bytes audio = 2;
458}
459
460// Represents the natural language speech audio to be played to the end user.
461message OutputAudio {
462  // Required. Instructs the speech synthesizer how to generate the speech
463  // audio.
464  OutputAudioConfig config = 1;
465
466  // Required. The natural language speech audio.
467  bytes audio = 2;
468}
469
470// Represents a response from an automated agent.
471message AutomatedAgentReply {
472  // Represents different automated agent reply types.
473  enum AutomatedAgentReplyType {
474    // Not specified. This should never happen.
475    AUTOMATED_AGENT_REPLY_TYPE_UNSPECIFIED = 0;
476
477    // Partial reply. For example, aggregated responses in a `Fulfillment` that
478    // enables `return_partial_response` can be returned as a partial reply.
479    // WARNING: a partial reply is not eligible for barge-in.
480    PARTIAL = 1;
481
482    // Final reply.
483    FINAL = 2;
484  }
485
486  // Required.
487  oneof response {
488    // Response of the Dialogflow
489    // [Sessions.DetectIntent][google.cloud.dialogflow.v2beta1.Sessions.DetectIntent]
490    // call.
491    DetectIntentResponse detect_intent_response = 1;
492  }
493
494  // Response messages from the automated agent.
495  repeated ResponseMessage response_messages = 3;
496
497  // Info on the query match for the automated agent response.
498  oneof match {
499    // Name of the intent if an intent is matched for the query.
500    // For a V2 query, the value format is `projects/<Project ID>/locations/
501    // <Location ID>/agent/intents/<Intent ID>`.
502    // For a V3 query, the value format is `projects/<Project ID>/locations/
503    // <Location ID>/agents/<Agent ID>/intents/<Intent ID>`.
504    string intent = 4 [(google.api.resource_reference) = {
505      type: "dialogflow.googleapis.com/Intent"
506    }];
507
508    // Event name if an event is triggered for the query.
509    string event = 5;
510  }
511
512  // The confidence of the match. Values range from 0.0 (completely uncertain)
513  // to 1.0 (completely certain).
514  // This value is for informational purposes only and is only used to help match
515  // the best intent within the classification threshold. This value may change
516  // for the same end-user expression at any time due to a model retraining or
517  // change in implementation.
518  float match_confidence = 9;
519
520  // The collection of current parameters at the time of this response.
521  google.protobuf.Struct parameters = 10;
522
523  // The collection of current Dialogflow CX agent session parameters at the
524  // time of this response.
525  // Deprecated: Use `parameters` instead.
526  google.protobuf.Struct cx_session_parameters = 6 [deprecated = true];
527
528  // AutomatedAgentReply type.
529  AutomatedAgentReplyType automated_agent_reply_type = 7;
530
531  // Indicates whether the partial automated agent reply is interruptible when a
532  // later reply message arrives. For example, if the agent specified some music
533  // as a partial response, it can be cancelled.
534  bool allow_cancellation = 8;
535
536  // The unique identifier of the current Dialogflow CX conversation page.
537  // Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent
538  // ID>/flows/<Flow ID>/pages/<Page ID>`.
539  string cx_current_page = 11;
540}
541
542// Represents the selection of a suggestion.
543message SuggestionInput {
544  // Required. The ID of a suggestion selected by the human agent.
545  // The suggestion(s) were generated in a previous call to
546  // request Dialogflow assist.
547  // The format is:
548  // `projects/<Project ID>/locations/<Location ID>/answerRecords/<Answer Record
549  // ID>` where <Answer Record ID> is an alphanumeric string.
550  string answer_record = 1;
551
552  // Optional. If the customer edited the suggestion before using it, include
553  // the revised text here.
554  TextInput text_override = 2;
555
556  // In Dialogflow assist for v3, the user can submit a form by sending
557  // a [SuggestionInput][google.cloud.dialogflow.v2beta1.SuggestionInput]. The
558  // form is uniquely determined by the
559  // [answer_record][google.cloud.dialogflow.v2beta1.SuggestionInput.answer_record]
560  // field, which identifies a v3
561  // [QueryResult][google.cloud.dialogflow.v3alpha1.QueryResult] containing the
562  // current [page][google.cloud.dialogflow.v3alpha1.Page]. The form parameters
563  // are specified via the
564  // [parameters][google.cloud.dialogflow.v2beta1.SuggestionInput.parameters]
565  // field.
566  //
567  // Depending on your protocol or client library language, this is a
568  // map, associative array, symbol table, dictionary, or JSON object
569  // composed of a collection of (MapKey, MapValue) pairs:
570  //
571  // * MapKey type: string
572  // * MapKey value: parameter name
573  // * MapValue type: If the parameter's entity type is a composite entity, use
574  // a map; otherwise, depending on the parameter value type, it could be one of
575  // string, number, boolean, null, list, or map.
576  // * MapValue value: If the parameter's entity type is a composite entity, use
577  // a map from composite entity property names to property values; otherwise,
578  // use the parameter value.
579  google.protobuf.Struct parameters = 4;
580
581  // The intent to be triggered on V3 agent.
582  IntentInput intent_input = 6;
583}
584
585// Represents the intent to trigger programmatically rather than as a result of
586// natural language processing. The intent input is only used for V3 agent.
587message IntentInput {
588  // Required. The unique identifier of the intent in V3 agent.
589  // Format: `projects/<Project ID>/locations/<Location
590  // ID>/agents/<Agent ID>/intents/<Intent ID>`.
591  string intent = 1 [(google.api.field_behavior) = REQUIRED];
592
593  // Required. The language of this conversational query. See [Language
594  // Support](https://cloud.google.com/dialogflow/docs/reference/language)
595  // for a list of the currently supported language codes.
596  string language_code = 3 [(google.api.field_behavior) = REQUIRED];
597}
598
599// The type of Human Agent Assistant API suggestion to perform, and the maximum
600// number of results to return for that type. Multiple `Feature` objects can
601// be specified in the `features` list.
602message SuggestionFeature {
603  // Defines the type of Human Agent Assistant feature.
604  enum Type {
605    // Unspecified feature type.
606    TYPE_UNSPECIFIED = 0;
607
608    // Run article suggestion model for chat.
609    ARTICLE_SUGGESTION = 1;
610
611    // Run FAQ model.
612    FAQ = 2;
613
614    // Run smart reply model for chat.
615    SMART_REPLY = 3;
616
617    // Run Dialogflow assist model for chat, which will return the automated agent
618    // response as a suggestion.
619    DIALOGFLOW_ASSIST = 4;
620
621    // Run conversation summarization model for chat.
622    CONVERSATION_SUMMARIZATION = 8;
623
624    // Run knowledge search with text input from agent or text generated query.
625    KNOWLEDGE_SEARCH = 14;
626  }
627
628  // Type of Human Agent Assistant API feature to request.
629  Type type = 1;
630}
631
632// Represents the parameters of a human assist query.
633message AssistQueryParameters {
634  // Key-value filters on the metadata of documents returned by article
635  // suggestion. If specified, article suggestion only returns suggested
636  // documents that match all filters in their
637  // [Document.metadata][google.cloud.dialogflow.v2beta1.Document.metadata].
638  // Multiple values for a metadata key should be separated by commas. For
639  // example, filters to match all documents that have 'US' or 'CA' in their
640  // market metadata values and 'agent' in their user metadata values will be:
641  // ```
642  // documents_metadata_filters {
643  //   key: "market"
644  //   value: "US,CA"
645  // }
646  // documents_metadata_filters {
647  //   key: "user"
648  //   value: "agent"
649  // }
650  // ```
651  map<string, string> documents_metadata_filters = 1;
652}
653
654// The request message for
655// [Participants.AnalyzeContent][google.cloud.dialogflow.v2beta1.Participants.AnalyzeContent].
656message AnalyzeContentRequest {
657  // Required. The name of the participant this text comes from.
658  // Format: `projects/<Project ID>/locations/<Location
659  // ID>/conversations/<Conversation ID>/participants/<Participant ID>`.
660  string participant = 1 [
661    (google.api.field_behavior) = REQUIRED,
662    (google.api.resource_reference) = {
663      type: "dialogflow.googleapis.com/Participant"
664    }
665  ];
666
667  // Required. The input content.
668  oneof input {
669    // The natural language text to be processed.
670    TextInput text_input = 6;
671
672    // The natural language speech audio to be processed.
673    AudioInput audio_input = 7;
674
675    // An input event to send to Dialogflow.
676    EventInput event_input = 8;
677
678    // An input representing the selection of a suggestion.
679    SuggestionInput suggestion_input = 12;
680
681    // The intent to be triggered on V3 agent.
682    IntentInput intent_input = 13;
683  }
684
685  // Speech synthesis configuration.
686  // The speech synthesis settings for a virtual agent that may be configured
687  // for the associated conversation profile are not used when calling
688  // AnalyzeContent. If this configuration is not supplied, speech synthesis
689  // is disabled.
690  OutputAudioConfig reply_audio_config = 5;
691
692  // Parameters for a Dialogflow virtual-agent query.
693  QueryParameters query_params = 9;
694
695  // Parameters for a human assist query.
696  AssistQueryParameters assist_query_params = 14;
697
698  // Additional parameters to be put into Dialogflow CX session parameters. To
699  // remove a parameter from the session, clients should explicitly set the
700  // parameter value to null.
701  //
702  // Note: this field should only be used if you are connecting to a Dialogflow
703  // CX agent.
704  google.protobuf.Struct cx_parameters = 18;
705
706  // The unique identifier of the CX page to override the `current_page` in the
707  // session.
708  // Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent
709  // ID>/flows/<Flow ID>/pages/<Page ID>`.
710  //
711  // If `cx_current_page` is specified, the previous state of the session will
712  // be ignored by Dialogflow CX, including the [previous
713  // page][QueryResult.current_page] and the [previous session
714  // parameters][QueryResult.parameters]. In most cases, `cx_current_page` and
715  // `cx_parameters` should be configured together to direct a session to a
716  // specific state.
717  //
718  // Note: this field should only be used if you are connecting to a Dialogflow
719  // CX agent.
720  string cx_current_page = 20;
721
722  // Optional. The send time of the message from end user or human agent's
723  // perspective. It is used for identifying the same message under one
724  // participant.
725  //
726  // Given two messages under the same participant:
727  // * If the send times are different, the conversation will regard them as
728  // two distinct messages sent by the participant, regardless of whether the
729  // content of the messages is exactly the same.
730  // * If the send times are the same, the conversation will regard them as the
731  // same message and ignore the message received later, regardless of whether
732  // the content of the messages is exactly the same.
733  //
734  // If the value is not provided, a new request will always be regarded as a
735  // new message without any de-duplication.
736  google.protobuf.Timestamp message_send_time = 10;
737
738  // A unique identifier for this request. Restricted to 36 ASCII characters.
739  // A random UUID is recommended.
740  // This request is only idempotent if a `request_id` is provided.
741  string request_id = 11;
742}
743
744// The message in the response that indicates the parameters of DTMF.
745message DtmfParameters {
746  // Indicates whether DTMF input can be handled in the next request.
747  bool accepts_dtmf_input = 1;
748}
749
750// The response message for
751// [Participants.AnalyzeContent][google.cloud.dialogflow.v2beta1.Participants.AnalyzeContent].
752message AnalyzeContentResponse {
753  // Output only. The output text content.
754  // This field is set if the automated agent responded with text to show to
755  // the user.
756  string reply_text = 1;
757
758  // Optional. The audio data bytes encoded as specified in the request.
759  // This field is set if:
760  //
761  //  - `reply_audio_config` was specified in the request, or
762  //  - The automated agent responded with audio to play to the user. In such
763  //    case, `reply_audio.config` contains settings used to synthesize the
764  //    speech.
765  //
766  // In some scenarios, multiple output audio fields may be present in the
767  // response structure. In these cases, only the top-most-level audio output
768  // has content.
769  OutputAudio reply_audio = 2;
770
771  // Optional. Only set if a Dialogflow automated agent has responded.
772  // Note that: [AutomatedAgentReply.detect_intent_response.output_audio][]
773  // and [AutomatedAgentReply.detect_intent_response.output_audio_config][]
774  // are always empty; use
775  // [reply_audio][google.cloud.dialogflow.v2beta1.AnalyzeContentResponse.reply_audio]
776  // instead.
777  AutomatedAgentReply automated_agent_reply = 3;
778
779  // Output only. Message analyzed by CCAI.
780  Message message = 5;
781
782  // The suggestions for the most recent human agent. The order is the same as
783  // [HumanAgentAssistantConfig.SuggestionConfig.feature_configs][google.cloud.dialogflow.v2beta1.HumanAgentAssistantConfig.SuggestionConfig.feature_configs]
784  // of
785  // [HumanAgentAssistantConfig.human_agent_suggestion_config][google.cloud.dialogflow.v2beta1.HumanAgentAssistantConfig.human_agent_suggestion_config].
786  //
787  // Note that any failure of Agent Assist features will not lead to the overall
788  // failure of an AnalyzeContent API call. Instead, the features will
789  // fail silently with the error field set in the corresponding
790  // SuggestionResult.
791  repeated SuggestionResult human_agent_suggestion_results = 6;
792
793  // The suggestions for the end user. The order is the same as
794  // [HumanAgentAssistantConfig.SuggestionConfig.feature_configs][google.cloud.dialogflow.v2beta1.HumanAgentAssistantConfig.SuggestionConfig.feature_configs]
795  // of
796  // [HumanAgentAssistantConfig.end_user_suggestion_config][google.cloud.dialogflow.v2beta1.HumanAgentAssistantConfig.end_user_suggestion_config].
797  //
798  // As with human_agent_suggestion_results, any failure of Agent Assist
799  // features will not lead to the overall failure of an AnalyzeContent API
800  // call. Instead, the features will fail silently with the error field set in
801  // the corresponding SuggestionResult.
802  repeated SuggestionResult end_user_suggestion_results = 7;
803
804  // Indicates the parameters of DTMF.
805  DtmfParameters dtmf_parameters = 9;
806}
807
808// Defines the language used in the input text.
809message InputTextConfig {
810  // Required. The language of this conversational query. See [Language
811  // Support](https://cloud.google.com/dialogflow/docs/reference/language)
812  // for a list of the currently supported language codes.
813  string language_code = 1;
814}
815
816// The top-level message sent by the client to the
817// [Participants.StreamingAnalyzeContent][google.cloud.dialogflow.v2beta1.Participants.StreamingAnalyzeContent]
818// method.
819//
820// Multiple request messages should be sent in order:
821//
822// 1.  The first message must contain
823//     [participant][google.cloud.dialogflow.v2beta1.StreamingAnalyzeContentRequest.participant],
824//     [config][google.cloud.dialogflow.v2beta1.StreamingAnalyzeContentRequest.config]
825//     and optionally
826//     [query_params][google.cloud.dialogflow.v2beta1.StreamingAnalyzeContentRequest.query_params].
827//     If you want to receive an audio response, it should also contain
828//     [reply_audio_config][google.cloud.dialogflow.v2beta1.StreamingAnalyzeContentRequest.reply_audio_config].
829//     The message must not contain
830//     [input][google.cloud.dialogflow.v2beta1.StreamingAnalyzeContentRequest.input].
831//
832// 2.  If
833//     [config][google.cloud.dialogflow.v2beta1.StreamingAnalyzeContentRequest.config]
834//     in the first message
835//     was set to
836//     [audio_config][google.cloud.dialogflow.v2beta1.StreamingAnalyzeContentRequest.audio_config],
837//     all subsequent messages must contain
838//     [input_audio][google.cloud.dialogflow.v2beta1.StreamingAnalyzeContentRequest.input_audio]
839//     to continue with Speech recognition. If you decide instead to analyze text
840//     input after you have already started Speech recognition, send a message
841//     with
842//     [StreamingAnalyzeContentRequest.input_text][google.cloud.dialogflow.v2beta1.StreamingAnalyzeContentRequest.input_text].
843//
844//     However, note that:
845//
846//     * Dialogflow will bill you for the audio so far.
847//     * Dialogflow discards all Speech recognition results in favor of the
848//       text input.
849//
850// 3.  If
851//     [StreamingAnalyzeContentRequest.config][google.cloud.dialogflow.v2beta1.StreamingAnalyzeContentRequest.config]
852//     in the first message was set
853//     to
854//     [StreamingAnalyzeContentRequest.text_config][google.cloud.dialogflow.v2beta1.StreamingAnalyzeContentRequest.text_config],
855//     then the second message must contain only
856//     [input_text][google.cloud.dialogflow.v2beta1.StreamingAnalyzeContentRequest.input_text].
857//     Moreover, you must not send more than two messages.
858//
859// After you have sent all input, you must half-close or abort the request stream.
860message StreamingAnalyzeContentRequest {
861  // Required. The name of the participant this text comes from.
862  // Format: `projects/<Project ID>/locations/<Location
863  // ID>/conversations/<Conversation ID>/participants/<Participant ID>`.
864  string participant = 1 [
865    (google.api.field_behavior) = REQUIRED,
866    (google.api.resource_reference) = {
867      type: "dialogflow.googleapis.com/Participant"
868    }
869  ];
870
871  // Required. The input config.
872  oneof config {
873    // Instructs the speech recognizer how to process the speech audio.
874    InputAudioConfig audio_config = 2;
875
876    // The natural language text to be processed.
877    InputTextConfig text_config = 3;
878  }
879
880  // Speech synthesis configuration.
881  // The speech synthesis settings for a virtual agent that may be configured
882  // for the associated conversation profile are not used when calling
883  // StreamingAnalyzeContent. If this configuration is not supplied, speech
884  // synthesis is disabled.
885  OutputAudioConfig reply_audio_config = 4;
886
887  // Required. The input.
888  oneof input {
889    // The input audio content to be recognized. Must be sent if `audio_config`
890    // is set in the first message. The complete audio over all streaming
891    // messages must not exceed 1 minute.
892    bytes input_audio = 5;
893
894    // The UTF-8 encoded natural language text to be processed. Must be sent if
895    // `text_config` is set in the first message. Text length must not exceed
896    // 256 bytes for virtual agent interactions. The `input_text` field can be
897    // only sent once, and would cancel the speech recognition if any ongoing.
898    string input_text = 6;
899
900    // The DTMF digits used to invoke intent and fill in parameter value.
901    //
902    // This input is ignored if the previous response indicated that DTMF input
903    // is not accepted.
904    TelephonyDtmfEvents input_dtmf = 9;
905
906    // The intent to be triggered on V3 agent.
907    // Format: `projects/<Project ID>/locations/<Location
908    // ID>/agents/<Agent ID>/intents/<Intent ID>`.
909    string input_intent = 17;
910
911    // The input event name.
912    // This can only be sent once and cancels any ongoing speech
913    // recognition.
914    string input_event = 20;
915  }
916
917  // Parameters for a Dialogflow virtual-agent query.
918  QueryParameters query_params = 7;
919
920  // Parameters for a human assist query.
921  AssistQueryParameters assist_query_params = 8;
922
923  // Additional parameters to be put into Dialogflow CX session parameters. To
924  // remove a parameter from the session, clients should explicitly set the
925  // parameter value to null.
926  //
927  // Note: this field should only be used if you are connecting to a Dialogflow
928  // CX agent.
929  google.protobuf.Struct cx_parameters = 13;
930
931  // The unique identifier of the CX page to override the `current_page` in the
932  // session.
933  // Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent
934  // ID>/flows/<Flow ID>/pages/<Page ID>`.
935  //
936  // If `cx_current_page` is specified, the previous state of the session will
937  // be ignored by Dialogflow CX, including the [previous
938  // page][QueryResult.current_page] and the [previous session
939  // parameters][QueryResult.parameters]. In most cases, `cx_current_page` and
940  // `cx_parameters` should be configured together to direct a session to a
941  // specific state.
942  //
943  // Note: this field should only be used if you are connecting to a Dialogflow
944  // CX agent.
945  string cx_current_page = 15;
946
947  // Optional. Enable full bidirectional streaming. You can keep streaming the
948  // audio until timeout, and there's no need to half-close the stream to get
949  // the response.
950  //
951  // Restrictions:
952  //
953  // - Timeout: 3 mins.
954  // - Audio Encoding: only supports
955  // [AudioEncoding.AUDIO_ENCODING_LINEAR_16][google.cloud.dialogflow.v2beta1.AudioEncoding.AUDIO_ENCODING_LINEAR_16]
956  // and
957  // [AudioEncoding.AUDIO_ENCODING_MULAW][google.cloud.dialogflow.v2beta1.AudioEncoding.AUDIO_ENCODING_MULAW]
958  // - Lifecycle: conversation should be in `Assist Stage`, go to
959  //   [Conversation.CreateConversation][] for more information.
960  //
961  // An InvalidArgument error will be returned if one of the restriction checks
962  // fails.
963  //
964  // You can find more details in
965  // https://cloud.google.com/agent-assist/docs/extended-streaming
966  bool enable_extended_streaming = 11 [(google.api.field_behavior) = OPTIONAL];
967
968  // Enable partial virtual agent responses. If this flag is not enabled,
969  // the response stream still contains only one final response even if some
970  // `Fulfillment`s in the Dialogflow virtual agent have been configured to return
971  // partial responses.
972  bool enable_partial_automated_agent_reply = 12;
973
974  // If true, `StreamingAnalyzeContentResponse.debugging_info` will be
975  // populated.
976  bool enable_debugging_info = 19;
977}
978
979// The top-level message returned from the `StreamingAnalyzeContent` method.
980//
981// Multiple response messages can be returned in order:
982//
983// 1.  If the input was set to streaming audio, the first one or more messages
984//     contain `recognition_result`. Each `recognition_result` represents a more
985//     complete transcript of what the user said. The last `recognition_result`
986//     has `is_final` set to `true`.
987//
988// 2.  In virtual agent stage: if `enable_partial_automated_agent_reply` is
989//     true, the following N (currently 1 <= N <= 4) messages
990//     contain `automated_agent_reply` and optionally `reply_audio`
991//     returned by the virtual agent. The first (N-1)
992//     `automated_agent_reply`s will have `automated_agent_reply_type` set to
993//     `PARTIAL`. The last `automated_agent_reply` has
994//     `automated_agent_reply_type` set to `FINAL`.
995//     If `enable_partial_automated_agent_reply` is not enabled, response stream
996//     only contains the final reply.
997//
998//     In human assist stage: the following N (N >= 1) messages contain
999//     `human_agent_suggestion_results`, `end_user_suggestion_results` or
1000//     `message`.
1001message StreamingAnalyzeContentResponse {
1002  // The result of speech recognition.
1003  StreamingRecognitionResult recognition_result = 1;
1004
1005  // Optional. The output text content.
1006  // This field is set if an automated agent responded with a text for the user.
1007  string reply_text = 2;
1008
1009  // Optional. The audio data bytes encoded as specified in the request.
1010  // This field is set if:
1011  //
1012  //  - The `reply_audio_config` field is specified in the request.
1013  //  - The automated agent, which this output comes from, responded with audio.
1014  //    In such case, the `reply_audio.config` field contains settings used to
1015  //    synthesize the speech.
1016  //
1017  // In some scenarios, multiple output audio fields may be present in the
1018  // response structure. In these cases, only the top-most-level audio output
1019  // has content.
1020  OutputAudio reply_audio = 3;
1021
1022  // Optional. Only set if a Dialogflow automated agent has responded.
1023  // Note that: [AutomatedAgentReply.detect_intent_response.output_audio][]
1024  // and [AutomatedAgentReply.detect_intent_response.output_audio_config][]
1025  // are always empty; use
1026  // [reply_audio][google.cloud.dialogflow.v2beta1.StreamingAnalyzeContentResponse.reply_audio]
1027  // instead.
1028  AutomatedAgentReply automated_agent_reply = 4;
1029
1030  // Output only. Message analyzed by CCAI.
1031  Message message = 6;
1032
1033  // The suggestions for the most recent human agent. The order is the same as
1034  // [HumanAgentAssistantConfig.SuggestionConfig.feature_configs][google.cloud.dialogflow.v2beta1.HumanAgentAssistantConfig.SuggestionConfig.feature_configs]
1035  // of
1036  // [HumanAgentAssistantConfig.human_agent_suggestion_config][google.cloud.dialogflow.v2beta1.HumanAgentAssistantConfig.human_agent_suggestion_config].
1037  repeated SuggestionResult human_agent_suggestion_results = 7;
1038
1039  // The suggestions for the end user. The order is the same as
1040  // [HumanAgentAssistantConfig.SuggestionConfig.feature_configs][google.cloud.dialogflow.v2beta1.HumanAgentAssistantConfig.SuggestionConfig.feature_configs]
1041  // of
1042  // [HumanAgentAssistantConfig.end_user_suggestion_config][google.cloud.dialogflow.v2beta1.HumanAgentAssistantConfig.end_user_suggestion_config].
1043  repeated SuggestionResult end_user_suggestion_results = 8;
1044
1045  // Indicates the parameters of DTMF.
1046  DtmfParameters dtmf_parameters = 10;
1047
1048  // Debugging info that would get populated when
1049  // `StreamingAnalyzeContentRequest.enable_debugging_info` is set to true.
1050  CloudConversationDebuggingInfo debugging_info = 11;
1051}
1052
1053// Represents a part of a message possibly annotated with an entity. The part
1054// can be an entity or purely a part of the message between two entities or
1055// message start/end.
1056message AnnotatedMessagePart {
1057  // Required. A part of a message possibly annotated with an entity.
1058  string text = 1;
1059
1060  // Optional. The [Dialogflow system entity
1061  // type](https://cloud.google.com/dialogflow/docs/reference/system-entities)
1062  // of this message part. If this is empty, Dialogflow could not annotate the
1063  // phrase part with a system entity.
1064  string entity_type = 2;
1065
1066  // Optional. The [Dialogflow system entity formatted value
1067  // ](https://cloud.google.com/dialogflow/docs/reference/system-entities) of
1068  // this message part. For example for a system entity of type
1069  // `@sys.unit-currency`, this may contain:
1070  // <pre>
1071  // {
1072  //   "amount": 5,
1073  //   "currency": "USD"
1074  // }
1075  // </pre>
1076  google.protobuf.Value formatted_value = 3;
1077}
1078
1079// Represents the result of annotation for the message.
1080message MessageAnnotation {
1081  // Optional. The collection of annotated message parts ordered by their
1082  // position in the message. You can recover the annotated message by
1083  // concatenating [AnnotatedMessagePart.text].
1084  repeated AnnotatedMessagePart parts = 1;
1085
1086  // Required. Indicates whether the text message contains entities.
1087  bool contain_entities = 2;
1088}
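// As noted above, the annotated message text can be recovered by concatenating
// the `text` of each part. A minimal Python sketch, assuming `annotation` is a
// `MessageAnnotation` already obtained from the API:
//
// ```
// recovered_text = "".join(part.text for part in annotation.parts)
// annotated_parts = [part for part in annotation.parts if part.entity_type]
// ```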
1089
1090// Represents an article answer.
1091message ArticleAnswer {
1092  // The article title.
1093  string title = 1;
1094
1095  // The article URI.
1096  string uri = 2;
1097
1098  // Output only. Article snippets.
1099  repeated string snippets = 3;
1100
1101  // A map that contains metadata about the answer and the
1102  // document from which it originates.
1103  map<string, string> metadata = 5;
1104
1105  // The name of answer record, in the format of
1106  // "projects/<Project ID>/locations/<Location ID>/answerRecords/<Answer Record
1107  // ID>"
1108  string answer_record = 6;
1109}
1110
1111// Represents an answer from "frequently asked questions".
1112message FaqAnswer {
1113  // The piece of text from the `source` knowledge base document.
1114  string answer = 1;
1115
1116  // The system's confidence score that this Knowledge answer is a good match
1117  // for this conversational query, ranging from 0.0 (completely uncertain)
1118  // to 1.0 (completely certain).
1119  float confidence = 2;
1120
1121  // The corresponding FAQ question.
1122  string question = 3;
1123
1124  // Indicates which Knowledge Document this answer was extracted
1125  // from.
1126  // Format: `projects/<Project ID>/locations/<Location
1127  // ID>/agent/knowledgeBases/<Knowledge Base ID>/documents/<Document ID>`.
1128  string source = 4;
1129
1130  // A map that contains metadata about the answer and the
1131  // document from which it originates.
1132  map<string, string> metadata = 5;
1133
1134  // The name of answer record, in the format of
1135  // "projects/<Project ID>/locations/<Location ID>/answerRecords/<Answer Record
1136  // ID>"
1137  string answer_record = 6;
1138}
1139
1140// Represents a smart reply answer.
1141message SmartReplyAnswer {
1142  // The content of the reply.
1143  string reply = 1;
1144
1145  // Smart reply confidence.
1146  // The system's confidence score that this reply is a good match for
1147  // this conversation, as a value from 0.0 (completely uncertain) to 1.0
1148  // (completely certain).
1149  float confidence = 2;
1150
1151  // The name of answer record, in the format of
1152  // "projects/<Project ID>/locations/<Location ID>/answerRecords/<Answer Record
1153  // ID>"
1154  string answer_record = 3 [(google.api.resource_reference) = {
1155    type: "dialogflow.googleapis.com/AnswerRecord"
1156  }];
1157}
1158
1159// Represents an intent suggestion.
1160message IntentSuggestion {
1161  // The display name of the intent.
1162  string display_name = 1;
1163
1164  // The name of the intent.
1165  oneof intent {
1166    // The unique identifier of this
1167    // [intent][google.cloud.dialogflow.v2beta1.Intent]. Format:
1168    // `projects/<Project ID>/locations/<Location ID>/agent/intents/<Intent
1169    // ID>`.
1170    string intent_v2 = 2;
1171  }
1172
1173  // Human-readable description for better understanding an intent, such as its
1174  // scope, content, result, etc. Maximum character limit: 140 characters.
1175  string description = 5;
1176}
1177
1178// Represents a Dialogflow assist answer.
1179message DialogflowAssistAnswer {
1180  // Result from DetectIntent for one matched intent.
1181  oneof result {
1182    // Result from v2 agent.
1183    QueryResult query_result = 1;
1184
1185    // An intent suggestion generated from conversation.
1186    IntentSuggestion intent_suggestion = 5;
1187  }
1188
1189  // The name of answer record, in the format of
1190  // "projects/<Project ID>/locations/<Location ID>/answerRecords/<Answer Record
1191  // ID>"
1192  string answer_record = 2;
1193}
1194
1195// One suggestion response, of one of several types, which is used in
1196// the response of
1197// [Participants.AnalyzeContent][google.cloud.dialogflow.v2beta1.Participants.AnalyzeContent]
1198// and
1199// [Participants.StreamingAnalyzeContent][google.cloud.dialogflow.v2beta1.Participants.StreamingAnalyzeContent],
1200// as well as
1201// [HumanAgentAssistantEvent][google.cloud.dialogflow.v2beta1.HumanAgentAssistantEvent].
1202message SuggestionResult {
1203  // Different type of suggestion response.
1204  oneof suggestion_response {
1205    // Error status if the request failed.
1206    google.rpc.Status error = 1;
1207
1208    // SuggestArticlesResponse if request is for ARTICLE_SUGGESTION.
1209    SuggestArticlesResponse suggest_articles_response = 2;
1210
1211    // SuggestFaqAnswersResponse if request is for FAQ_ANSWER.
1212    SuggestFaqAnswersResponse suggest_faq_answers_response = 3;
1213
1214    // SuggestSmartRepliesResponse if request is for SMART_REPLY.
1215    SuggestSmartRepliesResponse suggest_smart_replies_response = 4;
1216
1217    // SuggestDialogflowAssistsResponse if request is for DIALOGFLOW_ASSIST.
1218    SuggestDialogflowAssistsResponse suggest_dialogflow_assists_response = 5;
1219
1220    // SuggestDialogflowAssistsResponse if request is for ENTITY_EXTRACTION.
1221    SuggestDialogflowAssistsResponse suggest_entity_extraction_response = 7;
1222  }
1223}
1224
1225// The request message for
1226// [Participants.SuggestArticles][google.cloud.dialogflow.v2beta1.Participants.SuggestArticles].
1227message SuggestArticlesRequest {
1228  // Required. The name of the participant to fetch suggestion for.
1229  // Format: `projects/<Project ID>/locations/<Location
1230  // ID>/conversations/<Conversation ID>/participants/<Participant ID>`.
1231  string parent = 1 [
1232    (google.api.field_behavior) = REQUIRED,
1233    (google.api.resource_reference) = {
1234      type: "dialogflow.googleapis.com/Participant"
1235    }
1236  ];
1237
1238  // Optional. The name of the latest conversation message to compile suggestion
1239  // for. If empty, it will be the latest message of the conversation.
1240  //
1241  // Format: `projects/<Project ID>/locations/<Location
1242  // ID>/conversations/<Conversation ID>/messages/<Message ID>`.
1243  string latest_message = 2 [
1244    (google.api.field_behavior) = OPTIONAL,
1245    (google.api.resource_reference) = {
1246      type: "dialogflow.googleapis.com/Message"
1247    }
1248  ];
1249
1250  // Optional. Max number of messages prior to and including
1251  // [latest_message][google.cloud.dialogflow.v2beta1.SuggestArticlesRequest.latest_message]
1252  // to use as context when compiling the suggestion. By default 20 and at
1253  // most 50.
1254  int32 context_size = 3 [(google.api.field_behavior) = OPTIONAL];
1255
1256  // Optional. Parameters for a human assist query.
1257  AssistQueryParameters assist_query_params = 4
1258      [(google.api.field_behavior) = OPTIONAL];
1259}
1260
1261// The response message for
1262// [Participants.SuggestArticles][google.cloud.dialogflow.v2beta1.Participants.SuggestArticles].
1263message SuggestArticlesResponse {
1264  // Output only. Articles ordered by score in descending order.
1265  repeated ArticleAnswer article_answers = 1;
1266
1267  // The name of the latest conversation message used to compile the
1268  // suggestion.
1269  //
1270  // Format: `projects/<Project ID>/locations/<Location
1271  // ID>/conversations/<Conversation ID>/messages/<Message ID>`.
1272  string latest_message = 2;
1273
1274  // Number of messages prior to and including
1275  // [latest_message][google.cloud.dialogflow.v2beta1.SuggestArticlesResponse.latest_message]
1276  // to compile the suggestion. It may be smaller than the
1277  // [SuggestArticlesRequest.context_size][google.cloud.dialogflow.v2beta1.SuggestArticlesRequest.context_size]
1278  // field in the request if there aren't that many messages in the
1279  // conversation.
1280  int32 context_size = 3;
1281}
1282
1283// The request message for
1284// [Participants.SuggestFaqAnswers][google.cloud.dialogflow.v2beta1.Participants.SuggestFaqAnswers].
1285message SuggestFaqAnswersRequest {
1286  // Required. The name of the participant to fetch suggestion for.
1287  // Format: `projects/<Project ID>/locations/<Location
1288  // ID>/conversations/<Conversation ID>/participants/<Participant ID>`.
1289  string parent = 1 [
1290    (google.api.field_behavior) = REQUIRED,
1291    (google.api.resource_reference) = {
1292      type: "dialogflow.googleapis.com/Participant"
1293    }
1294  ];
1295
1296  // Optional. The name of the latest conversation message to compile suggestion
1297  // for. If empty, it will be the latest message of the conversation.
1298  //
1299  // Format: `projects/<Project ID>/locations/<Location
1300  // ID>/conversations/<Conversation ID>/messages/<Message ID>`.
1301  string latest_message = 2 [
1302    (google.api.field_behavior) = OPTIONAL,
1303    (google.api.resource_reference) = {
1304      type: "dialogflow.googleapis.com/Message"
1305    }
1306  ];
1307
1308  // Optional. Max number of messages prior to and including
1309  // [latest_message][google.cloud.dialogflow.v2beta1.SuggestFaqAnswersRequest.latest_message]
1310  // to use as context when compiling the suggestion. By default 20 and at most 50.
1311  int32 context_size = 3 [(google.api.field_behavior) = OPTIONAL];
1312
1313  // Optional. Parameters for a human assist query.
1314  AssistQueryParameters assist_query_params = 4
1315      [(google.api.field_behavior) = OPTIONAL];
1316}
1317
1318// The response message for
1319// [Participants.SuggestFaqAnswers][google.cloud.dialogflow.v2beta1.Participants.SuggestFaqAnswers].
1320message SuggestFaqAnswersResponse {
1321  // Output only. Answers extracted from FAQ documents.
1322  repeated FaqAnswer faq_answers = 1;
1323
1324  // The name of the latest conversation message used to compile
1325  // suggestion for.
1326  //
1327  // Format: `projects/<Project ID>/locations/<Location
1328  // ID>/conversations/<Conversation ID>/messages/<Message ID>`.
1329  string latest_message = 2;
1330
1331  // Number of messages prior to and including
1332  // [latest_message][google.cloud.dialogflow.v2beta1.SuggestFaqAnswersResponse.latest_message]
1333  // to compile the suggestion. It may be smaller than the
1334  // [SuggestFaqAnswersRequest.context_size][google.cloud.dialogflow.v2beta1.SuggestFaqAnswersRequest.context_size]
1335  // field in the request if there aren't that many messages in the
1336  // conversation.
1337  int32 context_size = 3;
1338}
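
// A hedged sketch of the FAQ flow, mirroring the article sketch above (same
// client and import assumptions; resource IDs are placeholders).
//
//   response = client.suggest_faq_answers(
//       request=dialogflow_v2beta1.SuggestFaqAnswersRequest(
//           parent=(
//               "projects/my-project/locations/global/conversations/conv-1"
//               "/participants/part-1"
//           ),
//           context_size=20,
//       )
//   )
//   for faq in response.faq_answers:
//       # `question`, `answer`, and `confidence` are FaqAnswer fields
//       # defined earlier in this file.
//       print(f"{faq.confidence:.2f}  {faq.question} -> {faq.answer}")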
1339
1340// The request message for
1341// [Participants.SuggestSmartReplies][google.cloud.dialogflow.v2beta1.Participants.SuggestSmartReplies].
1342message SuggestSmartRepliesRequest {
1343  // Required. The name of the participant to fetch suggestion for.
1344  // Format: `projects/<Project ID>/locations/<Location
1345  // ID>/conversations/<Conversation ID>/participants/<Participant ID>`.
1346  string parent = 1 [
1347    (google.api.field_behavior) = REQUIRED,
1348    (google.api.resource_reference) = {
1349      type: "dialogflow.googleapis.com/Participant"
1350    }
1351  ];
1352
1353  // The current natural language text segment to compile suggestion
1354  // for. This provides a way for the user to get a follow-up smart reply
1355  // suggestion after a smart reply selection, without sending a text message.
1356  TextInput current_text_input = 4;
1357
1358  // The name of the latest conversation message to compile suggestion
1359  // for. If empty, it will be the latest message of the conversation.
1360  //
1361  // Format: `projects/<Project ID>/locations/<Location
1362  // ID>/conversations/<Conversation ID>/messages/<Message ID>`.
1363  string latest_message = 2 [(google.api.resource_reference) = {
1364    type: "dialogflow.googleapis.com/Message"
1365  }];
1366
1367  // Optional. Max number of messages prior to and including
1368  // [latest_message][google.cloud.dialogflow.v2beta1.SuggestSmartRepliesRequest.latest_message]
1369  // to use as context when compiling the suggestion. By default 20 and at most 50.
1370  int32 context_size = 3;
1371}
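
// A sketch of asking for follow-up smart replies right after an agent picked
// a reply, using `current_text_input` instead of posting a new message
// (same client assumptions as above; the text and IDs are made up).
//
//   request = dialogflow_v2beta1.SuggestSmartRepliesRequest(
//       parent=(
//           "projects/my-project/locations/global/conversations/conv-1"
//           "/participants/part-1"
//       ),
//       current_text_input=dialogflow_v2beta1.TextInput(
//           text="Sure, I can help with that.",
//           language_code="en-US",
//       ),
//       context_size=20,
//   )
//   response = client.suggest_smart_replies(request=request)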
1372
1373// The response message for
1374// [Participants.SuggestSmartReplies][google.cloud.dialogflow.v2beta1.Participants.SuggestSmartReplies].
1375message SuggestSmartRepliesResponse {
1376  // Output only. Multiple reply options provided by the smart reply service.
1377  // The order is based on the rank of the model prediction.
1378  // The maximum number of returned replies is set in SmartReplyConfig.
1379  repeated SmartReplyAnswer smart_reply_answers = 1;
1380
1381  // The name of the latest conversation message used to compile
1382  // suggestion for.
1383  //
1384  // Format: `projects/<Project ID>/locations/<Location
1385  // ID>/conversations/<Conversation ID>/messages/<Message ID>`.
1386  string latest_message = 2 [(google.api.resource_reference) = {
1387    type: "dialogflow.googleapis.com/Message"
1388  }];
1389
1390  // Number of messages prior to and including
1391  // [latest_message][google.cloud.dialogflow.v2beta1.SuggestSmartRepliesResponse.latest_message]
1392  // to compile the suggestion. It may be smaller than the
1393  // [SuggestSmartRepliesRequest.context_size][google.cloud.dialogflow.v2beta1.SuggestSmartRepliesRequest.context_size]
1394  // field in the request if there aren't that many messages in the
1395  // conversation.
1396  int32 context_size = 3;
1397}
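
// Continuing the smart-reply sketch: each `SmartReplyAnswer` (defined earlier
// in this file) carries the suggested `reply` text and a `confidence` score.
//
//   for answer in response.smart_reply_answers:
//       print(f"{answer.confidence:.2f}  {answer.reply}")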
1398
1399// The response message for
1400// [Participants.SuggestDialogflowAssists][google.cloud.dialogflow.v2beta1.Participants.SuggestDialogflowAssists].
1401message SuggestDialogflowAssistsResponse {
1402  // Output only. Multiple reply options provided by the Dialogflow assist
1403  // service. The order is based on the rank of the model prediction.
1404  repeated DialogflowAssistAnswer dialogflow_assist_answers = 1;
1405
1406  // The name of the latest conversation message used to suggest answer.
1407  //
1408  // Format: `projects/<Project ID>/locations/<Location
1409  // ID>/conversations/<Conversation ID>/messages/<Message ID>`.
1410  string latest_message = 2;
1411
1412  // Number of messages prior to and including
1413  // [latest_message][google.cloud.dialogflow.v2beta1.SuggestDialogflowAssistsResponse.latest_message]
1414  // to compile the suggestion. It may be smaller than the
1415  // [SuggestDialogflowAssistsRequest.context_size][google.cloud.dialogflow.v2beta1.SuggestDialogflowAssistsRequest.context_size]
1416  // field in the request if there aren't that many messages in the
1417  // conversation.
1418  int32 context_size = 3;
1419}
1420
1421// Represents a suggestion for a human agent.
1422message Suggestion {
1423  option deprecated = true;
1424
1425  // Represents suggested article.
1426  message Article {
1427    // Output only. The article title.
1428    string title = 1;
1429
1430    // Output only. The article URI.
1431    string uri = 2;
1432
1433    // Output only. Article snippets.
1434    repeated string snippets = 3;
1435
1436    // Output only. A map that contains metadata about the answer and the
1437    // document from which it originates.
1438    map<string, string> metadata = 5;
1439
1440    // Output only. The name of the answer record, in the format of
1441    // "projects/<Project ID>/locations/<Location ID>/answerRecords/<Answer
1442    // Record ID>"
1443    string answer_record = 6;
1444  }
1445
1446  // Represents suggested answer from "frequently asked questions".
1447  message FaqAnswer {
1448    // Output only. The piece of text from the `source` knowledge base document.
1449    string answer = 1;
1450
1451    // The system's confidence score that this Knowledge answer is a good match
1452    // for this conversational query, ranging from 0.0 (completely uncertain)
1453    // to 1.0 (completely certain).
1454    float confidence = 2;
1455
1456    // Output only. The corresponding FAQ question.
1457    string question = 3;
1458
1459    // Output only. Indicates which Knowledge Document this answer was extracted
1460    // from.
1461    // Format: `projects/<Project ID>/locations/<Location
1462    // ID>/agent/knowledgeBases/<Knowledge Base ID>/documents/<Document ID>`.
1463    string source = 4;
1464
1465    // Output only. A map that contains metadata about the answer and the
1466    // document from which it originates.
1467    map<string, string> metadata = 5;
1468
1469    // Output only. The name of the answer record, in the format of
1470    // "projects/<Project ID>/locations/<Location ID>/answerRecords/<Answer
1471    // Record ID>"
1472    string answer_record = 6;
1473  }
1474
1475  // Output only. The name of this suggestion.
1476  // Format:
1477  // `projects/<Project ID>/locations/<Location ID>/conversations/<Conversation
1478  // ID>/participants/*/suggestions/<Suggestion ID>`.
1479  string name = 1;
1480
1481  // Output only. Articles ordered by score in descending order.
1482  repeated Article articles = 2;
1483
1484  // Output only. Answers extracted from FAQ documents.
1485  repeated FaqAnswer faq_answers = 4;
1486
1487  // Output only. The time the suggestion was created.
1488  google.protobuf.Timestamp create_time = 5;
1489
1490  // Output only. Latest message used as context to compile this suggestion.
1491  //
1492  // Format: `projects/<Project ID>/locations/<Location
1493  // ID>/conversations/<Conversation ID>/messages/<Message ID>`.
1494  string latest_message = 7;
1495}
1496
1497// The request message for
1498// [Participants.ListSuggestions][google.cloud.dialogflow.v2beta1.Participants.ListSuggestions].
1499message ListSuggestionsRequest {
1500  option deprecated = true;
1501
1502  // Required. The name of the participant to fetch suggestions for.
1503  // Format: `projects/<Project ID>/locations/<Location
1504  // ID>/conversations/<Conversation ID>/participants/<Participant ID>`.
1505  string parent = 1;
1506
1507  // Optional. The maximum number of items to return in a single page. The
1508  // default value is 100; the maximum value is 1000.
1509  int32 page_size = 2;
1510
1511  // Optional. The next_page_token value returned from a previous list request.
1512  string page_token = 3;
1513
1514  // Optional. Filter on suggestions fields. Currently predicates on
1515  // `create_time` and `create_time_epoch_microseconds` are supported.
1516  // `create_time` only supports millisecond accuracy. E.g.,
1517  // `create_time_epoch_microseconds > 1551790877964485` or
1518  // `create_time > "2017-01-15T01:30:15.01Z"`
1519  //
1520  // For more information about filtering, see
1521  // [API Filtering](https://aip.dev/160).
1522  string filter = 4;
1523}
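
// Purely illustrative, since this message is deprecated: building the request
// with a `create_time` filter as described above (Python types generated from
// this file; values are placeholders).
//
//   request = dialogflow_v2beta1.ListSuggestionsRequest(
//       parent=(
//           "projects/my-project/locations/global/conversations/conv-1"
//           "/participants/part-1"
//       ),
//       page_size=100,
//       filter='create_time > "2017-01-15T01:30:15.01Z"',
//   )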
1524
1525// The response message for
1526// [Participants.ListSuggestions][google.cloud.dialogflow.v2beta1.Participants.ListSuggestions].
1527message ListSuggestionsResponse {
1528  option deprecated = true;
1529
1530  // Required. The list of suggestions. The number of items returned is
1531  // capped by the page_size field in the request. `suggestions` is
1532  // sorted by `create_time` in descending order.
1533  repeated Suggestion suggestions = 1;
1534
1535  // Optional. Token to retrieve the next page of results or empty if there are
1536  // no more results in the list.
1537  string next_page_token = 2;
1538}
1539
1540// The request message for
1541// [Participants.CompileSuggestion][google.cloud.dialogflow.v2beta1.Participants.CompileSuggestion].
1542message CompileSuggestionRequest {
1543  option deprecated = true;
1544
1545  // Required. The name of the participant to fetch suggestion for.
1546  // Format: `projects/<Project ID>/locations/<Location
1547  // ID>/conversations/<Conversation ID>/participants/<Participant ID>`.
1548  string parent = 1;
1549
1550  // Optional. The name of the latest conversation message to compile suggestion
1551  // for. If empty, it will be the latest message of the conversation.
1552  //
1553  // Format: `projects/<Project ID>/locations/<Location
1554  // ID>/conversations/<Conversation ID>/messages/<Message ID>`.
1555  string latest_message = 2;
1556
1557  // Optional. Max number of messages prior to and including
1558  // [latest_message][google.cloud.dialogflow.v2beta1.CompileSuggestionRequest.latest_message]
1559  // to use as context when compiling the suggestion. If zero or negative, 20 is used.
1560  int32 context_size = 3;
1561}
1562
1563// The response message for
1564// [Participants.CompileSuggestion][google.cloud.dialogflow.v2beta1.Participants.CompileSuggestion].
1565message CompileSuggestionResponse {
1566  option deprecated = true;
1567
1568  // The compiled suggestion.
1569  Suggestion suggestion = 1;
1570
1571  // The name of the latest conversation message used to compile
1572  // suggestion for.
1573  //
1574  // Format: `projects/<Project ID>/locations/<Location
1575  // ID>/conversations/<Conversation ID>/messages/<Message ID>`.
1576  string latest_message = 2;
1577
1578  // Number of messages prior to and including
1579  // [latest_message][google.cloud.dialogflow.v2beta1.CompileSuggestionResponse.latest_message]
1580  // to compile the suggestion. It may be smaller than the
1581  // [CompileSuggestionRequest.context_size][google.cloud.dialogflow.v2beta1.CompileSuggestionRequest.context_size]
1582  // field in the request if there aren't that many messages in the
1583  // conversation.
1584  int32 context_size = 3;
1585}
1586
1587// Response messages from an automated agent.
1588message ResponseMessage {
1589  // The text response message.
1590  message Text {
1591    // A collection of text responses.
1592    repeated string text = 1;
1593  }
1594
1595  // Indicates that the conversation should be handed off to a human agent.
1596  //
1597  // Dialogflow only uses this to determine which conversations were handed off
1598  // to a human agent for measurement purposes. What else to do with this signal
1599  // is up to you and your handoff procedures.
1600  //
1601  // You may set this, for example:
1602  //
1603  // * In the entry fulfillment of a CX Page if entering the page indicates
1604  //   something went extremely wrong in the conversation.
1605  // * In a webhook response when you determine that the customer issue can only
1606  //   be handled by a human.
1607  message LiveAgentHandoff {
1608    // Custom metadata for your handoff procedure. Dialogflow doesn't impose
1609    // any structure on this.
1610    google.protobuf.Struct metadata = 1;
1611  }
1612
1613  // Indicates that interaction with the Dialogflow agent has ended.
1614  message EndInteraction {}
1615
1616  // Represents an audio message that is composed of both segments
1617  // synthesized from the Dialogflow agent prompts and ones hosted externally
1618  // at the specified URIs.
1619  message MixedAudio {
1620    // Represents one segment of audio.
1621    message Segment {
1622      // Content of the segment.
1623      oneof content {
1624        // Raw audio synthesized from the Dialogflow agent's response using
1625        // the output config specified in the request.
1626        bytes audio = 1;
1627
1628        // Client-specific URI that points to an audio clip accessible to the
1629        // client.
1630        string uri = 2;
1631      }
1632
1633      // Whether the playback of this segment can be interrupted by the end
1634      // user's speech, in which case the client should then start the next
1635      // Dialogflow request.
1636      bool allow_playback_interruption = 3;
1637    }
1638
1639    // Segments this audio response is composed of.
1640    repeated Segment segments = 1;
1641  }
1642
1643  // Represents the signal that tells the client to transfer the phone call
1644  // connected to the agent to a third-party endpoint.
1645  message TelephonyTransferCall {
1646    // Endpoint to transfer the call to.
1647    oneof endpoint {
1648      // Transfer the call to a phone number
1649      // in [E.164 format](https://en.wikipedia.org/wiki/E.164).
1650      string phone_number = 1;
1651
1652      // Transfer the call to a SIP endpoint.
1653      string sip_uri = 2;
1654    }
1655  }
1656
1657  // Required. The rich response message.
1658  oneof message {
1659    // Returns a text response.
1660    Text text = 1;
1661
1662    // Returns a response containing a custom, platform-specific payload.
1663    google.protobuf.Struct payload = 2;
1664
1665    // Hands off conversation to a live agent.
1666    LiveAgentHandoff live_agent_handoff = 3;
1667
1668    // A signal that indicates the interaction with the Dialogflow agent has
1669    // ended.
1670    EndInteraction end_interaction = 4;
1671
1672    // An audio response message composed of both the synthesized Dialogflow
1673    // agent responses and audio clips hosted at URIs known to the client.
1674    MixedAudio mixed_audio = 5;
1675
1676    // A signal that the client should transfer the phone call connected to
1677    // this agent to a third-party endpoint.
1678    TelephonyTransferCall telephony_transfer_call = 6;
1679  }
1680}
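
// A minimal sketch of building the `message` oneof with the Python types
// generated from this file (same import as the earlier sketches). The
// Struct-typed `metadata` can typically be assigned a plain dict in the
// proto-plus client; the keys shown are made-up examples.
//
//   text_reply = dialogflow_v2beta1.ResponseMessage(
//       text=dialogflow_v2beta1.ResponseMessage.Text(
//           text=["Thanks! Transferring you to a specialist now."]
//       )
//   )
//   handoff = dialogflow_v2beta1.ResponseMessage(
//       live_agent_handoff=dialogflow_v2beta1.ResponseMessage.LiveAgentHandoff(
//           metadata={"reason": "billing_escalation", "priority": "high"}
//       )
//   )
//   transfer = dialogflow_v2beta1.ResponseMessage(
//       telephony_transfer_call=dialogflow_v2beta1.ResponseMessage.TelephonyTransferCall(
//           phone_number="+16505550123"  # E.164 format
//       )
//   )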
1681