<html><body>
<style>
body, h1, h2, h3, div, span, p, pre, a {
margin: 0;
padding: 0;
border: 0;
font-weight: inherit;
font-style: inherit;
font-size: 100%;
font-family: inherit;
vertical-align: baseline;
}
body {
font-size: 13px;
padding: 1em;
}
h1 {
font-size: 26px;
margin-bottom: 1em;
}
h2 {
font-size: 24px;
margin-bottom: 1em;
}
h3 {
font-size: 20px;
margin-bottom: 1em;
margin-top: 1em;
}
pre, code {
line-height: 1.5;
font-family: Monaco, 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', 'Lucida Console', monospace;
}
pre {
margin-top: 0.5em;
}
h1, h2, h3, p {
font-family: Arial, sans-serif;
}
h1, h2, h3 {
border-bottom: solid #CCC 1px;
}
.toc_element {
margin-top: 0.5em;
}
.firstline {
margin-left: 2em;
}
.method {
margin-top: 1em;
border: solid 1px #CCC;
padding: 1em;
background: #EEE;
}
.details {
font-weight: bold;
font-size: 14px;
}
</style>
<h1><a href="dialogflow_v3.html">Dialogflow API</a> . <a href="dialogflow_v3.projects.html">projects</a> . <a href="dialogflow_v3.projects.locations.html">locations</a> . <a href="dialogflow_v3.projects.locations.agents.html">agents</a> . <a href="dialogflow_v3.projects.locations.agents.flows.html">flows</a></h1>
<h2>Instance Methods</h2>
<p class="toc_element">
<code><a href="dialogflow_v3.projects.locations.agents.flows.pages.html">pages()</a></code>
</p>
<p class="firstline">Returns the pages Resource.</p>
<p class="toc_element">
<code><a href="dialogflow_v3.projects.locations.agents.flows.transitionRouteGroups.html">transitionRouteGroups()</a></code>
</p>
<p class="firstline">Returns the transitionRouteGroups Resource.</p>
<p class="toc_element">
<code><a href="dialogflow_v3.projects.locations.agents.flows.versions.html">versions()</a></code>
</p>
<p class="firstline">Returns the versions Resource.</p>
<p class="toc_element">
<code><a href="#close">close()</a></code></p>
<p class="firstline">Close httplib2 connections.</p>
<p class="toc_element">
<code><a href="#create">create(parent, body=None, languageCode=None, x__xgafv=None)</a></code></p>
<p class="firstline">Creates a flow in the specified agent. Note: You should always train a flow prior to sending it queries. See the [training documentation](https://cloud.google.com/dialogflow/cx/docs/concept/training).</p>
<p class="toc_element">
<code><a href="#delete">delete(name, force=None, x__xgafv=None)</a></code></p>
<p class="firstline">Deletes a specified flow.</p>
<p class="toc_element">
<code><a href="#export">export(name, body=None, x__xgafv=None)</a></code></p>
<p class="firstline">Exports the specified flow to a binary file. This method is a [long-running operation](https://cloud.google.com/dialogflow/cx/docs/how/long-running-operation). The returned `Operation` type has the following method-specific fields: - `metadata`: An empty [Struct message](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#struct) - `response`: ExportFlowResponse Note that resources (e.g. intents, entities, webhooks) that the flow references will also be exported.</p>
<p class="toc_element">
<code><a href="#get">get(name, languageCode=None, x__xgafv=None)</a></code></p>
<p class="firstline">Retrieves the specified flow.</p>
<p class="toc_element">
<code><a href="#getValidationResult">getValidationResult(name, languageCode=None, x__xgafv=None)</a></code></p>
<p class="firstline">Gets the latest flow validation result. Flow validation is performed when ValidateFlow is called.</p>
<p class="toc_element">
<code><a href="#import_">import_(parent, body=None, x__xgafv=None)</a></code></p>
<p class="firstline">Imports the specified flow to the specified agent from a binary file. This method is a [long-running operation](https://cloud.google.com/dialogflow/cx/docs/how/long-running-operation). The returned `Operation` type has the following method-specific fields: - `metadata`: An empty [Struct message](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#struct) - `response`: ImportFlowResponse Note: You should always train a flow prior to sending it queries. See the [training documentation](https://cloud.google.com/dialogflow/cx/docs/concept/training).</p>
<p class="toc_element">
<code><a href="#list">list(parent, languageCode=None, pageSize=None, pageToken=None, x__xgafv=None)</a></code></p>
<p class="firstline">Returns the list of all flows in the specified agent.</p>
<p class="toc_element">
<code><a href="#list_next">list_next()</a></code></p>
<p class="firstline">Retrieves the next page of results.</p>
<p class="toc_element">
<code><a href="#patch">patch(name, body=None, languageCode=None, updateMask=None, x__xgafv=None)</a></code></p>
<p class="firstline">Updates the specified flow. Note: You should always train a flow prior to sending it queries. See the [training documentation](https://cloud.google.com/dialogflow/cx/docs/concept/training).</p>
<p class="toc_element">
<code><a href="#train">train(name, body=None, x__xgafv=None)</a></code></p>
<p class="firstline">Trains the specified flow. Note that only the flow in 'draft' environment is trained. This method is a [long-running operation](https://cloud.google.com/dialogflow/cx/docs/how/long-running-operation). The returned `Operation` type has the following method-specific fields: - `metadata`: An empty [Struct message](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#struct) - `response`: An [Empty message](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#empty) Note: You should always train a flow prior to sending it queries. See the [training documentation](https://cloud.google.com/dialogflow/cx/docs/concept/training).</p>
<p class="toc_element">
<code><a href="#validate">validate(name, body=None, x__xgafv=None)</a></code></p>
<p class="firstline">Validates the specified flow and creates or updates validation results. Please call this API after the training is completed to get the complete validation results.</p>
<h3>Method Details</h3>
<div class="method">
<code class="details" id="close">close()</code>
<pre>Close httplib2 connections.</pre>
</div>
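<p>Before the full request schema below, here is a short illustrative sketch of calling <code>create</code>. It is an assumption-laden example, not canonical usage: the parent path uses placeholder IDs, and the body is deliberately minimal (<code>displayName</code> is the only required body field in the schema that follows).</p>
<pre>
from googleapiclient.discovery import build

service = build("dialogflow", "v3")  # assumes default credentials

# Placeholder parent in the documented format projects//locations//agents/
parent = "projects/my-project/locations/global/agents/my-agent-id"

# Minimal body: displayName is required; all other schema fields are optional.
body = {
    "displayName": "Billing questions",
    "description": "Handles questions about invoices and payments.",
}

flow = service.projects().locations().agents().flows().create(
    parent=parent, body=body, languageCode="en"
).execute()
print(flow["name"])  # the server-assigned flow identifier

service.close()
</pre>
<p>As the method description notes, train the flow before sending it queries.</p>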
<div class="method">
<code class="details" id="create">create(parent, body=None, languageCode=None, x__xgafv=None)</code>
<pre>Creates a flow in the specified agent. Note: You should always train a flow prior to sending it queries. See the [training documentation](https://cloud.google.com/dialogflow/cx/docs/concept/training).
Args:
parent: string, Required. The agent to create a flow for. Format: `projects//locations//agents/`. (required)
body: object, The request body.
The object takes the form of:
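# How to read the listing below: values such as "A String", 42, 3.14, and
# True or False are type placeholders produced by the documentation
# generator, not literal defaults to send.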
{ # Flows represent the conversation flows of your chatbot agent. A flow consists of many pages connected by transition routes. Conversations always start with the built-in Start Flow (with an all-0 ID). Transition routes can direct the conversation session from the current flow (parent flow) to another flow (sub flow). When the sub flow is finished, Dialogflow brings the session back to the parent flow, at the point where the sub flow was started. Usually, when a transition route is followed by a matched intent, the intent will be "consumed". This means the intent won't activate more transition routes. However, when the followed transition route moves the conversation session into a different flow, the matched intent can be carried over and consumed in the target flow.
"advancedSettings": { # Hierarchical advanced settings for agent/flow/page/fulfillment/parameter. Settings exposed at lower level overrides the settings exposed at higher level. Overriding occurs at the sub-setting level. For example, the playback_interruption_settings at fulfillment level only overrides the playback_interruption_settings at the agent level, leaving other settings at the agent level unchanged. DTMF settings does not override each other. DTMF settings set at different levels define DTMF detections running in parallel. Hierarchy: Agent->Flow->Page->Fulfillment/Parameter. # Hierarchical advanced settings for this flow. The settings exposed at the lower level overrides the settings exposed at the higher level.
"audioExportGcsDestination": { # Google Cloud Storage location for a Dialogflow operation that writes or exports objects (e.g. exported agent or transcripts) outside of Dialogflow. # If present, incoming audio is exported by Dialogflow to the configured Google Cloud Storage destination. Exposed at the following levels: - Agent level - Flow level
"uri": "A String", # Required. The Google Cloud Storage URI for the exported objects. A URI is of the form: `gs://bucket/object-name-or-prefix` Whether a full object name, or just a prefix, its usage depends on the Dialogflow operation.
},
"dtmfSettings": { # Define behaviors for DTMF (dual tone multi frequency). # Settings for DTMF. Exposed at the following levels: - Agent level - Flow level - Page level - Parameter level.
"enabled": True or False, # If true, incoming audio is processed for DTMF (dual tone multi frequency) events. For example, if the caller presses a button on their telephone keypad and DTMF processing is enabled, Dialogflow will detect the event (e.g. a "3" was pressed) in the incoming audio and pass the event to the bot to drive business logic (e.g. when 3 is pressed, return the account balance).
"finishDigit": "A String", # The digit that terminates a DTMF digit sequence.
"maxDigits": 42, # Max length of DTMF digits.
},
"loggingSettings": { # Define behaviors on logging. # Settings for logging. Settings for Dialogflow History, Contact Center messages, StackDriver logs, and speech logging. Exposed at the following levels: - Agent level.
"enableInteractionLogging": True or False, # If true, DF Interaction logging is currently enabled.
"enableStackdriverLogging": True or False, # If true, StackDriver logging is currently enabled.
},
"speechSettings": { # Define behaviors of speech to text detection. # Settings for speech to text detection. Exposed at the following levels: - Agent level - Flow level - Page level - Parameter level
"endpointerSensitivity": 42, # Sensitivity of the speech model that detects the end of speech. Scale from 0 to 100.
"models": { # Mapping from language to Speech-to-Text model. The mapped Speech-to-Text model will be selected for requests from its corresponding language. For more information, see [Speech models](https://cloud.google.com/dialogflow/cx/docs/concept/speech-models).
"a_key": "A String",
},
"noSpeechTimeout": "A String", # Timeout before detecting no speech.
"useTimeoutBasedEndpointing": True or False, # Use timeout based endpointing, interpreting endpointer sensitivy as seconds of timeout value.
},
},
"description": "A String", # The description of the flow. The maximum length is 500 characters. If exceeded, the request is rejected.
"displayName": "A String", # Required. The human-readable name of the flow.
"eventHandlers": [ # A flow's event handlers serve two purposes: * They are responsible for handling events (e.g. no match, webhook errors) in the flow. * They are inherited by every page's event handlers, which can be used to handle common events regardless of the current page. Event handlers defined in the page have higher priority than those defined in the flow. Unlike transition_routes, these handlers are evaluated on a first-match basis. The first one that matches the event get executed, with the rest being ignored.
{ # An event handler specifies an event that can be handled during a session. When the specified event happens, the following actions are taken in order: * If there is a `trigger_fulfillment` associated with the event, it will be called. * If there is a `target_page` associated with the event, the session will transition into the specified page. * If there is a `target_flow` associated with the event, the session will transition into the specified flow.
"event": "A String", # Required. The name of the event to handle.
"name": "A String", # Output only. The unique identifier of this event handler.
"targetFlow": "A String", # The target flow to transition to. Format: `projects//locations//agents//flows/`.
"targetPage": "A String", # The target page to transition to. Format: `projects//locations//agents//flows//pages/`.
"triggerFulfillment": { # A fulfillment can do one or more of the following actions at the same time: * Generate rich message responses. * Set parameter values. * Call the webhook. Fulfillments can be called at various stages in the Page or Form lifecycle. For example, when a DetectIntentRequest drives a session to enter a new page, the page's entry fulfillment can add a static response to the QueryResult in the returning DetectIntentResponse, call the webhook (for example, to load user data from a database), or both. # The fulfillment to call when the event occurs. Handling webhook errors with a fulfillment enabled with webhook could cause infinite loop. It is invalid to specify such fulfillment for a handler handling webhooks.
"advancedSettings": { # Hierarchical advanced settings for agent/flow/page/fulfillment/parameter. Settings exposed at lower level overrides the settings exposed at higher level. Overriding occurs at the sub-setting level. For example, the playback_interruption_settings at fulfillment level only overrides the playback_interruption_settings at the agent level, leaving other settings at the agent level unchanged. DTMF settings does not override each other. DTMF settings set at different levels define DTMF detections running in parallel. Hierarchy: Agent->Flow->Page->Fulfillment/Parameter. # Hierarchical advanced settings for this fulfillment. The settings exposed at the lower level overrides the settings exposed at the higher level.
"audioExportGcsDestination": { # Google Cloud Storage location for a Dialogflow operation that writes or exports objects (e.g. exported agent or transcripts) outside of Dialogflow. # If present, incoming audio is exported by Dialogflow to the configured Google Cloud Storage destination. Exposed at the following levels: - Agent level - Flow level
"uri": "A String", # Required. The Google Cloud Storage URI for the exported objects. A URI is of the form: `gs://bucket/object-name-or-prefix` Whether a full object name, or just a prefix, its usage depends on the Dialogflow operation.
},
"dtmfSettings": { # Define behaviors for DTMF (dual tone multi frequency). # Settings for DTMF. Exposed at the following levels: - Agent level - Flow level - Page level - Parameter level.
"enabled": True or False, # If true, incoming audio is processed for DTMF (dual tone multi frequency) events. For example, if the caller presses a button on their telephone keypad and DTMF processing is enabled, Dialogflow will detect the event (e.g. a "3" was pressed) in the incoming audio and pass the event to the bot to drive business logic (e.g. when 3 is pressed, return the account balance).
"finishDigit": "A String", # The digit that terminates a DTMF digit sequence.
"maxDigits": 42, # Max length of DTMF digits.
},
"loggingSettings": { # Define behaviors on logging. # Settings for logging. Settings for Dialogflow History, Contact Center messages, StackDriver logs, and speech logging. Exposed at the following levels: - Agent level.
"enableInteractionLogging": True or False, # If true, DF Interaction logging is currently enabled.
"enableStackdriverLogging": True or False, # If true, StackDriver logging is currently enabled.
},
"speechSettings": { # Define behaviors of speech to text detection. # Settings for speech to text detection. Exposed at the following levels: - Agent level - Flow level - Page level - Parameter level
"endpointerSensitivity": 42, # Sensitivity of the speech model that detects the end of speech. Scale from 0 to 100.
"models": { # Mapping from language to Speech-to-Text model. The mapped Speech-to-Text model will be selected for requests from its corresponding language. For more information, see [Speech models](https://cloud.google.com/dialogflow/cx/docs/concept/speech-models).
"a_key": "A String",
},
"noSpeechTimeout": "A String", # Timeout before detecting no speech.
"useTimeoutBasedEndpointing": True or False, # Use timeout based endpointing, interpreting endpointer sensitivy as seconds of timeout value.
},
},
"conditionalCases": [ # Conditional cases for this fulfillment.
{ # A list of cascading if-else conditions. Cases are mutually exclusive. The first one with a matching condition is selected; all the rest are ignored.
"cases": [ # A list of cascading if-else conditions.
{ # Each case has a Boolean condition. When it is evaluated to be True, the corresponding messages will be selected and evaluated recursively.
"caseContent": [ # A list of case content.
{ # The list of messages or conditional cases to activate for this case.
"additionalCases": # Object with schema name: GoogleCloudDialogflowCxV3FulfillmentConditionalCases # Additional cases to be evaluated.
"message": { # Represents a response message that can be returned by a conversational agent. Response messages are also used for output audio synthesis. The approach is as follows: * If at least one OutputAudioText response is present, then all OutputAudioText responses are linearly concatenated, and the result is used for output audio synthesis. * If the OutputAudioText responses are a mixture of text and SSML, then the concatenated result is treated as SSML; otherwise, the result is treated as either text or SSML as appropriate. The agent designer should ideally use either text or SSML consistently throughout the bot design. * Otherwise, all Text responses are linearly concatenated, and the result is used for output audio synthesis. This approach allows for more sophisticated user experience scenarios, where the text displayed to the user may differ from what is heard. # Returned message.
"channel": "A String", # The channel which the response is associated with. Clients can specify the channel via QueryParameters.channel, and only associated channel response will be returned.
"conversationSuccess": { # Indicates that the conversation succeeded, i.e., the bot handled the issue that the customer talked to it about. Dialogflow only uses this to determine which conversations should be counted as successful and doesn't process the metadata in this message in any way. Note that Dialogflow also considers conversations that get to the conversation end page as successful even if they don't return ConversationSuccess. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates that the conversation succeeded. * In a webhook response when you determine that you handled the customer issue. # Indicates that the conversation succeeded.
"metadata": { # Custom metadata. Dialogflow doesn't impose any structure on this.
"a_key": "", # Properties of the object.
},
},
"endInteraction": { # Indicates that interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only. A signal that indicates the interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only when the conversation reaches `END_SESSION` page. It is not supposed to be defined by the user. It's guaranteed that there is at most one such message in each response.
},
"knowledgeInfoCard": { # Represents info card response. If the response contains generative knowledge prediction, Dialogflow will return a payload with Infobot Messenger compatible info card. Otherwise, the info card response is skipped. # Represents info card for knowledge answers, to be better rendered in Dialogflow Messenger.
},
"liveAgentHandoff": { # Indicates that the conversation should be handed off to a live agent. Dialogflow only uses this to determine which conversations were handed off to a human agent for measurement purposes. What else to do with this signal is up to you and your handoff procedures. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates something went extremely wrong in the conversation. * In a webhook response when you determine that the customer issue can only be handled by a human. # Hands off conversation to a human agent.
"metadata": { # Custom metadata for your handoff procedure. Dialogflow doesn't impose any structure on this.
"a_key": "", # Properties of the object.
},
},
"mixedAudio": { # Represents an audio message that is composed of both segments synthesized from the Dialogflow agent prompts and ones hosted externally at the specified URIs. The external URIs are specified via play_audio. This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only. An audio response message composed of both the synthesized Dialogflow agent responses and responses defined via play_audio. This message is generated by Dialogflow only and not supposed to be defined by the user.
"segments": [ # Segments this audio response is composed of.
{ # Represents one segment of audio.
"allowPlaybackInterruption": True or False, # Output only. Whether the playback of this segment can be interrupted by the end user's speech and the client should then start the next Dialogflow request.
"audio": "A String", # Raw audio synthesized from the Dialogflow agent's response using the output config specified in the request.
"uri": "A String", # Client-specific URI that points to an audio clip accessible to the client. Dialogflow does not impose any validation on it.
},
],
},
"outputAudioText": { # A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. # A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message.
"allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request.
"ssml": "A String", # The SSML text to be synthesized. For more information, see [SSML](/speech/text-to-speech/docs/ssml).
"text": "A String", # The raw text to be synthesized.
},
"payload": { # Returns a response containing a custom, platform-specific payload.
"a_key": "", # Properties of the object.
},
"playAudio": { # Specifies an audio clip to be played by the client as part of the response. # Signal that the client should play an audio clip hosted at a client-specific URI. Dialogflow uses this to construct mixed_audio. However, Dialogflow itself does not try to read or process the URI in any way.
"allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request.
"audioUri": "A String", # Required. URI of the audio clip. Dialogflow does not impose any validation on this value. It is specific to the client that reads it.
},
"responseType": "A String", # Response type.
"telephonyTransferCall": { # Represents the signal that telles the client to transfer the phone call connected to the agent to a third-party endpoint. # A signal that the client should transfer the phone call connected to this agent to a third-party endpoint.
"phoneNumber": "A String", # Transfer the call to a phone number in [E.164 format](https://en.wikipedia.org/wiki/E.164).
},
"text": { # The text response message. # Returns a text response.
"allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request.
"text": [ # Required. A collection of text responses.
"A String",
],
},
},
},
],
"condition": "A String", # The condition to activate and select this case. Empty means the condition is always true. The condition is evaluated against form parameters or session parameters. See the [conditions reference](https://cloud.google.com/dialogflow/cx/docs/reference/condition).
},
],
},
],
"enableGenerativeFallback": True or False, # If the flag is true, the agent will utilize LLM to generate a text response. If LLM generation fails, the defined responses in the fulfillment will be respected. This flag is only useful for fulfillments associated with no-match event handlers.
"messages": [ # The list of rich message responses to present to the user.
{ # Represents a response message that can be returned by a conversational agent. Response messages are also used for output audio synthesis. The approach is as follows: * If at least one OutputAudioText response is present, then all OutputAudioText responses are linearly concatenated, and the result is used for output audio synthesis. * If the OutputAudioText responses are a mixture of text and SSML, then the concatenated result is treated as SSML; otherwise, the result is treated as either text or SSML as appropriate. The agent designer should ideally use either text or SSML consistently throughout the bot design. * Otherwise, all Text responses are linearly concatenated, and the result is used for output audio synthesis. This approach allows for more sophisticated user experience scenarios, where the text displayed to the user may differ from what is heard.
"channel": "A String", # The channel which the response is associated with. Clients can specify the channel via QueryParameters.channel, and only associated channel response will be returned.
"conversationSuccess": { # Indicates that the conversation succeeded, i.e., the bot handled the issue that the customer talked to it about. Dialogflow only uses this to determine which conversations should be counted as successful and doesn't process the metadata in this message in any way. Note that Dialogflow also considers conversations that get to the conversation end page as successful even if they don't return ConversationSuccess. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates that the conversation succeeded. * In a webhook response when you determine that you handled the customer issue. # Indicates that the conversation succeeded.
"metadata": { # Custom metadata. Dialogflow doesn't impose any structure on this.
"a_key": "", # Properties of the object.
},
},
"endInteraction": { # Indicates that interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only. A signal that indicates the interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only when the conversation reaches `END_SESSION` page. It is not supposed to be defined by the user. It's guaranteed that there is at most one such message in each response.
},
"knowledgeInfoCard": { # Represents info card response. If the response contains generative knowledge prediction, Dialogflow will return a payload with Infobot Messenger compatible info card. Otherwise, the info card response is skipped. # Represents info card for knowledge answers, to be better rendered in Dialogflow Messenger.
},
"liveAgentHandoff": { # Indicates that the conversation should be handed off to a live agent. Dialogflow only uses this to determine which conversations were handed off to a human agent for measurement purposes. What else to do with this signal is up to you and your handoff procedures. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates something went extremely wrong in the conversation. * In a webhook response when you determine that the customer issue can only be handled by a human. # Hands off conversation to a human agent.
"metadata": { # Custom metadata for your handoff procedure. Dialogflow doesn't impose any structure on this.
"a_key": "", # Properties of the object.
},
},
"mixedAudio": { # Represents an audio message that is composed of both segments synthesized from the Dialogflow agent prompts and ones hosted externally at the specified URIs. The external URIs are specified via play_audio. This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only. An audio response message composed of both the synthesized Dialogflow agent responses and responses defined via play_audio. This message is generated by Dialogflow only and not supposed to be defined by the user.
"segments": [ # Segments this audio response is composed of.
{ # Represents one segment of audio.
"allowPlaybackInterruption": True or False, # Output only. Whether the playback of this segment can be interrupted by the end user's speech and the client should then start the next Dialogflow request.
"audio": "A String", # Raw audio synthesized from the Dialogflow agent's response using the output config specified in the request.
"uri": "A String", # Client-specific URI that points to an audio clip accessible to the client. Dialogflow does not impose any validation on it.
},
],
},
"outputAudioText": { # A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. # A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message.
"allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request.
"ssml": "A String", # The SSML text to be synthesized. For more information, see [SSML](/speech/text-to-speech/docs/ssml).
"text": "A String", # The raw text to be synthesized.
},
"payload": { # Returns a response containing a custom, platform-specific payload.
"a_key": "", # Properties of the object.
},
"playAudio": { # Specifies an audio clip to be played by the client as part of the response. # Signal that the client should play an audio clip hosted at a client-specific URI. Dialogflow uses this to construct mixed_audio. However, Dialogflow itself does not try to read or process the URI in any way.
"allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request.
"audioUri": "A String", # Required. URI of the audio clip. Dialogflow does not impose any validation on this value. It is specific to the client that reads it.
},
"responseType": "A String", # Response type.
"telephonyTransferCall": { # Represents the signal that telles the client to transfer the phone call connected to the agent to a third-party endpoint. # A signal that the client should transfer the phone call connected to this agent to a third-party endpoint.
"phoneNumber": "A String", # Transfer the call to a phone number in [E.164 format](https://en.wikipedia.org/wiki/E.164).
},
"text": { # The text response message. # Returns a text response.
"allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request.
"text": [ # Required. A collection of text responses.
"A String",
],
},
},
],
"returnPartialResponses": True or False, # Whether Dialogflow should return currently queued fulfillment response messages in streaming APIs. If a webhook is specified, it happens before Dialogflow invokes webhook. Warning: 1) This flag only affects streaming API. Responses are still queued and returned once in non-streaming API. 2) The flag can be enabled in any fulfillment but only the first 3 partial responses will be returned. You may only want to apply it to fulfillments that have slow webhooks.
"setParameterActions": [ # Set parameter values before executing the webhook.
{ # Setting a parameter value.
"parameter": "A String", # Display name of the parameter.
"value": "", # The new value of the parameter. A null value clears the parameter.
},
],
"tag": "A String", # The value of this field will be populated in the WebhookRequest `fulfillmentInfo.tag` field by Dialogflow when the associated webhook is called. The tag is typically used by the webhook service to identify which fulfillment is being called, but it could be used for other purposes. This field is required if `webhook` is specified.
"webhook": "A String", # The webhook to call. Format: `projects//locations//agents//webhooks/`.
},
},
],
"knowledgeConnectorSettings": { # The Knowledge Connector settings for this page or flow. This includes information such as the attached Knowledge Bases, and the way to execute fulfillment. # Optional. Knowledge connector configuration.
"dataStoreConnections": [ # Optional. List of related data store connections.
{ # A data store connection. It represents a data store in Discovery Engine and the type of the contents it contains.
"dataStore": "A String", # The full name of the referenced data store. Formats: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}` `projects/{project}/locations/{location}/dataStores/{data_store}`
"dataStoreType": "A String", # The type of the connected data store.
},
],
"enabled": True or False, # Whether Knowledge Connector is enabled or not.
"targetFlow": "A String", # The target flow to transition to. Format: `projects//locations//agents//flows/`.
"targetPage": "A String", # The target page to transition to. Format: `projects//locations//agents//flows//pages/`.
"triggerFulfillment": { # A fulfillment can do one or more of the following actions at the same time: * Generate rich message responses. * Set parameter values. * Call the webhook. Fulfillments can be called at various stages in the Page or Form lifecycle. For example, when a DetectIntentRequest drives a session to enter a new page, the page's entry fulfillment can add a static response to the QueryResult in the returning DetectIntentResponse, call the webhook (for example, to load user data from a database), or both. # The fulfillment to be triggered. When the answers from the Knowledge Connector are selected by Dialogflow, you can utitlize the request scoped parameter `$request.knowledge.answers` (contains up to the 5 highest confidence answers) and `$request.knowledge.questions` (contains the corresponding questions) to construct the fulfillment.
"advancedSettings": { # Hierarchical advanced settings for agent/flow/page/fulfillment/parameter. Settings exposed at lower level overrides the settings exposed at higher level. Overriding occurs at the sub-setting level. For example, the playback_interruption_settings at fulfillment level only overrides the playback_interruption_settings at the agent level, leaving other settings at the agent level unchanged. DTMF settings does not override each other. DTMF settings set at different levels define DTMF detections running in parallel. Hierarchy: Agent->Flow->Page->Fulfillment/Parameter. # Hierarchical advanced settings for this fulfillment. The settings exposed at the lower level overrides the settings exposed at the higher level.
"audioExportGcsDestination": { # Google Cloud Storage location for a Dialogflow operation that writes or exports objects (e.g. exported agent or transcripts) outside of Dialogflow. # If present, incoming audio is exported by Dialogflow to the configured Google Cloud Storage destination. Exposed at the following levels: - Agent level - Flow level
"uri": "A String", # Required. The Google Cloud Storage URI for the exported objects. A URI is of the form: `gs://bucket/object-name-or-prefix` Whether a full object name, or just a prefix, its usage depends on the Dialogflow operation.
},
"dtmfSettings": { # Define behaviors for DTMF (dual tone multi frequency). # Settings for DTMF. Exposed at the following levels: - Agent level - Flow level - Page level - Parameter level.
"enabled": True or False, # If true, incoming audio is processed for DTMF (dual tone multi frequency) events. For example, if the caller presses a button on their telephone keypad and DTMF processing is enabled, Dialogflow will detect the event (e.g. a "3" was pressed) in the incoming audio and pass the event to the bot to drive business logic (e.g. when 3 is pressed, return the account balance).
"finishDigit": "A String", # The digit that terminates a DTMF digit sequence.
"maxDigits": 42, # Max length of DTMF digits.
},
"loggingSettings": { # Define behaviors on logging. # Settings for logging. Settings for Dialogflow History, Contact Center messages, StackDriver logs, and speech logging. Exposed at the following levels: - Agent level.
"enableInteractionLogging": True or False, # If true, DF Interaction logging is currently enabled.
"enableStackdriverLogging": True or False, # If true, StackDriver logging is currently enabled.
},
"speechSettings": { # Define behaviors of speech to text detection. # Settings for speech to text detection. Exposed at the following levels: - Agent level - Flow level - Page level - Parameter level
"endpointerSensitivity": 42, # Sensitivity of the speech model that detects the end of speech. Scale from 0 to 100.
"models": { # Mapping from language to Speech-to-Text model. The mapped Speech-to-Text model will be selected for requests from its corresponding language. For more information, see [Speech models](https://cloud.google.com/dialogflow/cx/docs/concept/speech-models).
"a_key": "A String",
},
"noSpeechTimeout": "A String", # Timeout before detecting no speech.
"useTimeoutBasedEndpointing": True or False, # Use timeout based endpointing, interpreting endpointer sensitivy as seconds of timeout value.
},
},
"conditionalCases": [ # Conditional cases for this fulfillment.
{ # A list of cascading if-else conditions. Cases are mutually exclusive. The first one with a matching condition is selected; all the rest are ignored.
"cases": [ # A list of cascading if-else conditions.
{ # Each case has a Boolean condition. When it is evaluated to be True, the corresponding messages will be selected and evaluated recursively.
"caseContent": [ # A list of case content.
{ # The list of messages or conditional cases to activate for this case.
"additionalCases": # Object with schema name: GoogleCloudDialogflowCxV3FulfillmentConditionalCases # Additional cases to be evaluated.
"message": { # Represents a response message that can be returned by a conversational agent. Response messages are also used for output audio synthesis. The approach is as follows: * If at least one OutputAudioText response is present, then all OutputAudioText responses are linearly concatenated, and the result is used for output audio synthesis. * If the OutputAudioText responses are a mixture of text and SSML, then the concatenated result is treated as SSML; otherwise, the result is treated as either text or SSML as appropriate. The agent designer should ideally use either text or SSML consistently throughout the bot design. * Otherwise, all Text responses are linearly concatenated, and the result is used for output audio synthesis. This approach allows for more sophisticated user experience scenarios, where the text displayed to the user may differ from what is heard. # Returned message.
"channel": "A String", # The channel which the response is associated with. Clients can specify the channel via QueryParameters.channel, and only associated channel response will be returned.
"conversationSuccess": { # Indicates that the conversation succeeded, i.e., the bot handled the issue that the customer talked to it about. Dialogflow only uses this to determine which conversations should be counted as successful and doesn't process the metadata in this message in any way. Note that Dialogflow also considers conversations that get to the conversation end page as successful even if they don't return ConversationSuccess. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates that the conversation succeeded. * In a webhook response when you determine that you handled the customer issue. # Indicates that the conversation succeeded.
"metadata": { # Custom metadata. Dialogflow doesn't impose any structure on this.
"a_key": "", # Properties of the object.
},
},
"endInteraction": { # Indicates that interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only. A signal that indicates the interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only when the conversation reaches `END_SESSION` page. It is not supposed to be defined by the user. It's guaranteed that there is at most one such message in each response.
},
"knowledgeInfoCard": { # Represents info card response. If the response contains generative knowledge prediction, Dialogflow will return a payload with Infobot Messenger compatible info card. Otherwise, the info card response is skipped. # Represents info card for knowledge answers, to be better rendered in Dialogflow Messenger.
},
"liveAgentHandoff": { # Indicates that the conversation should be handed off to a live agent. Dialogflow only uses this to determine which conversations were handed off to a human agent for measurement purposes. What else to do with this signal is up to you and your handoff procedures. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates something went extremely wrong in the conversation. * In a webhook response when you determine that the customer issue can only be handled by a human. # Hands off conversation to a human agent.
"metadata": { # Custom metadata for your handoff procedure. Dialogflow doesn't impose any structure on this.
"a_key": "", # Properties of the object.
},
},
"mixedAudio": { # Represents an audio message that is composed of both segments synthesized from the Dialogflow agent prompts and ones hosted externally at the specified URIs. The external URIs are specified via play_audio. This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only. An audio response message composed of both the synthesized Dialogflow agent responses and responses defined via play_audio. This message is generated by Dialogflow only and not supposed to be defined by the user.
"segments": [ # Segments this audio response is composed of.
{ # Represents one segment of audio.
"allowPlaybackInterruption": True or False, # Output only. Whether the playback of this segment can be interrupted by the end user's speech and the client should then start the next Dialogflow request.
"audio": "A String", # Raw audio synthesized from the Dialogflow agent's response using the output config specified in the request.
"uri": "A String", # Client-specific URI that points to an audio clip accessible to the client. Dialogflow does not impose any validation on it.
},
],
},
"outputAudioText": { # A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. # A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message.
"allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request.
"ssml": "A String", # The SSML text to be synthesized. For more information, see [SSML](/speech/text-to-speech/docs/ssml).
"text": "A String", # The raw text to be synthesized.
},
"payload": { # Returns a response containing a custom, platform-specific payload.
"a_key": "", # Properties of the object.
},
"playAudio": { # Specifies an audio clip to be played by the client as part of the response. # Signal that the client should play an audio clip hosted at a client-specific URI. Dialogflow uses this to construct mixed_audio. However, Dialogflow itself does not try to read or process the URI in any way.
"allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request.
"audioUri": "A String", # Required. URI of the audio clip. Dialogflow does not impose any validation on this value. It is specific to the client that reads it.
},
"responseType": "A String", # Response type.
"telephonyTransferCall": { # Represents the signal that telles the client to transfer the phone call connected to the agent to a third-party endpoint. # A signal that the client should transfer the phone call connected to this agent to a third-party endpoint.
"phoneNumber": "A String", # Transfer the call to a phone number in [E.164 format](https://en.wikipedia.org/wiki/E.164).
},
"text": { # The text response message. # Returns a text response.
"allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request.
"text": [ # Required. A collection of text responses.
"A String",
],
},
},
},
],
"condition": "A String", # The condition to activate and select this case. Empty means the condition is always true. The condition is evaluated against form parameters or session parameters. See the [conditions reference](https://cloud.google.com/dialogflow/cx/docs/reference/condition).
},
],
},
],
"enableGenerativeFallback": True or False, # If the flag is true, the agent will utilize LLM to generate a text response. If LLM generation fails, the defined responses in the fulfillment will be respected. This flag is only useful for fulfillments associated with no-match event handlers.
"messages": [ # The list of rich message responses to present to the user.
{ # Represents a response message that can be returned by a conversational agent. Response messages are also used for output audio synthesis. The approach is as follows: * If at least one OutputAudioText response is present, then all OutputAudioText responses are linearly concatenated, and the result is used for output audio synthesis. * If the OutputAudioText responses are a mixture of text and SSML, then the concatenated result is treated as SSML; otherwise, the result is treated as either text or SSML as appropriate. The agent designer should ideally use either text or SSML consistently throughout the bot design. * Otherwise, all Text responses are linearly concatenated, and the result is used for output audio synthesis. This approach allows for more sophisticated user experience scenarios, where the text displayed to the user may differ from what is heard.
"channel": "A String", # The channel which the response is associated with. Clients can specify the channel via QueryParameters.channel, and only associated channel response will be returned.
"conversationSuccess": { # Indicates that the conversation succeeded, i.e., the bot handled the issue that the customer talked to it about. Dialogflow only uses this to determine which conversations should be counted as successful and doesn't process the metadata in this message in any way. Note that Dialogflow also considers conversations that get to the conversation end page as successful even if they don't return ConversationSuccess. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates that the conversation succeeded. * In a webhook response when you determine that you handled the customer issue. # Indicates that the conversation succeeded.
"metadata": { # Custom metadata. Dialogflow doesn't impose any structure on this.
"a_key": "", # Properties of the object.
},
},
"endInteraction": { # Indicates that interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only. A signal that indicates the interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only when the conversation reaches `END_SESSION` page. It is not supposed to be defined by the user. It's guaranteed that there is at most one such message in each response.
},
"knowledgeInfoCard": { # Represents info card response. If the response contains generative knowledge prediction, Dialogflow will return a payload with Infobot Messenger compatible info card. Otherwise, the info card response is skipped. # Represents info card for knowledge answers, to be better rendered in Dialogflow Messenger.
},
"liveAgentHandoff": { # Indicates that the conversation should be handed off to a live agent. Dialogflow only uses this to determine which conversations were handed off to a human agent for measurement purposes. What else to do with this signal is up to you and your handoff procedures. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates something went extremely wrong in the conversation. * In a webhook response when you determine that the customer issue can only be handled by a human. # Hands off conversation to a human agent.
"metadata": { # Custom metadata for your handoff procedure. Dialogflow doesn't impose any structure on this.
"a_key": "", # Properties of the object.
},
},
"mixedAudio": { # Represents an audio message that is composed of both segments synthesized from the Dialogflow agent prompts and ones hosted externally at the specified URIs. The external URIs are specified via play_audio. This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only. An audio response message composed of both the synthesized Dialogflow agent responses and responses defined via play_audio. This message is generated by Dialogflow only and not supposed to be defined by the user.
"segments": [ # Segments this audio response is composed of.
{ # Represents one segment of audio.
"allowPlaybackInterruption": True or False, # Output only. Whether the playback of this segment can be interrupted by the end user's speech and the client should then start the next Dialogflow request.
"audio": "A String", # Raw audio synthesized from the Dialogflow agent's response using the output config specified in the request.
"uri": "A String", # Client-specific URI that points to an audio clip accessible to the client. Dialogflow does not impose any validation on it.
},
],
},
"outputAudioText": { # A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. # A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message.
"allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request.
"ssml": "A String", # The SSML text to be synthesized. For more information, see [SSML](/speech/text-to-speech/docs/ssml).
"text": "A String", # The raw text to be synthesized.
},
"payload": { # Returns a response containing a custom, platform-specific payload.
"a_key": "", # Properties of the object.
},
"playAudio": { # Specifies an audio clip to be played by the client as part of the response. # Signal that the client should play an audio clip hosted at a client-specific URI. Dialogflow uses this to construct mixed_audio. However, Dialogflow itself does not try to read or process the URI in any way.
"allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request.
"audioUri": "A String", # Required. URI of the audio clip. Dialogflow does not impose any validation on this value. It is specific to the client that reads it.
},
"responseType": "A String", # Response type.
"telephonyTransferCall": { # Represents the signal that telles the client to transfer the phone call connected to the agent to a third-party endpoint. # A signal that the client should transfer the phone call connected to this agent to a third-party endpoint.
"phoneNumber": "A String", # Transfer the call to a phone number in [E.164 format](https://en.wikipedia.org/wiki/E.164).
},
"text": { # The text response message. # Returns a text response.
"allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request.
"text": [ # Required. A collection of text responses.
"A String",
],
},
},
],
"returnPartialResponses": True or False, # Whether Dialogflow should return currently queued fulfillment response messages in streaming APIs. If a webhook is specified, it happens before Dialogflow invokes webhook. Warning: 1) This flag only affects streaming API. Responses are still queued and returned once in non-streaming API. 2) The flag can be enabled in any fulfillment but only the first 3 partial responses will be returned. You may only want to apply it to fulfillments that have slow webhooks.
"setParameterActions": [ # Set parameter values before executing the webhook.
{ # Setting a parameter value.
"parameter": "A String", # Display name of the parameter.
"value": "", # The new value of the parameter. A null value clears the parameter.
},
],
"tag": "A String", # The value of this field will be populated in the WebhookRequest `fulfillmentInfo.tag` field by Dialogflow when the associated webhook is called. The tag is typically used by the webhook service to identify which fulfillment is being called, but it could be used for other purposes. This field is required if `webhook` is specified.
"webhook": "A String", # The webhook to call. Format: `projects//locations//agents//webhooks/`.
},
},
"multiLanguageSettings": { # Settings for multi-lingual agents. # Optional. Multi-lingual agent settings for this flow.
"enableMultiLanguageDetection": True or False, # Optional. Enable multi-language detection for this flow. This can be set only if agent level multi language setting is enabled.
"supportedResponseLanguageCodes": [ # Optional. Agent will respond in the detected language if the detected language code is in the supported resolved languages for this flow. This will be used only if multi-language training is enabled in the agent and multi-language detection is enabled in the flow. The supported languages must be a subset of the languages supported by the agent.
"A String",
],
},
"name": "A String", # The unique identifier of the flow. Format: `projects//locations//agents//flows/`.
"nluSettings": { # Settings related to NLU. # NLU related settings of the flow.
"classificationThreshold": 3.14, # To filter out false positive results and still get variety in matched natural language inputs for your agent, you can tune the machine learning classification threshold. If the returned score value is less than the threshold value, then a no-match event will be triggered. The score values range from 0.0 (completely uncertain) to 1.0 (completely certain). If set to 0.0, the default of 0.3 is used.
"modelTrainingMode": "A String", # Indicates NLU model training mode.
"modelType": "A String", # Indicates the type of NLU model.
},
"transitionRouteGroups": [ # A flow's transition route group serve two purposes: * They are responsible for matching the user's first utterances in the flow. * They are inherited by every page's transition route groups. Transition route groups defined in the page have higher priority than those defined in the flow. Format:`projects//locations//agents//flows//transitionRouteGroups/` or `projects//locations//agents//transitionRouteGroups/` for agent-level groups.
"A String",
],
"transitionRoutes": [ # A flow's transition routes serve two purposes: * They are responsible for matching the user's first utterances in the flow. * They are inherited by every page's transition routes and can support use cases such as the user saying "help" or "can I talk to a human?", which can be handled in a common way regardless of the current page. Transition routes defined in the page have higher priority than those defined in the flow. TransitionRoutes are evalauted in the following order: * TransitionRoutes with intent specified. * TransitionRoutes with only condition specified. TransitionRoutes with intent specified are inherited by pages in the flow.
{ # A transition route specifies an intent that can be matched and/or a data condition that can be evaluated during a session. When a specified transition is matched, the following actions are taken in order: * If there is a `trigger_fulfillment` associated with the transition, it will be called. * If there is a `target_page` associated with the transition, the session will transition into the specified page. * If there is a `target_flow` associated with the transition, the session will transition into the specified flow.
"condition": "A String", # The condition to evaluate against form parameters or session parameters. See the [conditions reference](https://cloud.google.com/dialogflow/cx/docs/reference/condition). At least one of `intent` or `condition` must be specified. When both `intent` and `condition` are specified, the transition can only happen when both are fulfilled.
"description": "A String", # Optional. The description of the transition route. The maximum length is 500 characters.
"intent": "A String", # The unique identifier of an Intent. Format: `projects//locations//agents//intents/`. Indicates that the transition can only happen when the given intent is matched. At least one of `intent` or `condition` must be specified. When both `intent` and `condition` are specified, the transition can only happen when both are fulfilled.
"name": "A String", # Output only. The unique identifier of this transition route.
"targetFlow": "A String", # The target flow to transition to. Format: `projects//locations//agents//flows/`.
"targetPage": "A String", # The target page to transition to. Format: `projects//locations//agents//flows//pages/`.
"triggerFulfillment": { # A fulfillment can do one or more of the following actions at the same time: * Generate rich message responses. * Set parameter values. * Call the webhook. Fulfillments can be called at various stages in the Page or Form lifecycle. For example, when a DetectIntentRequest drives a session to enter a new page, the page's entry fulfillment can add a static response to the QueryResult in the returning DetectIntentResponse, call the webhook (for example, to load user data from a database), or both. # The fulfillment to call when the condition is satisfied. At least one of `trigger_fulfillment` and `target` must be specified. When both are defined, `trigger_fulfillment` is executed first.
"advancedSettings": { # Hierarchical advanced settings for agent/flow/page/fulfillment/parameter. Settings exposed at lower level overrides the settings exposed at higher level. Overriding occurs at the sub-setting level. For example, the playback_interruption_settings at fulfillment level only overrides the playback_interruption_settings at the agent level, leaving other settings at the agent level unchanged. DTMF settings does not override each other. DTMF settings set at different levels define DTMF detections running in parallel. Hierarchy: Agent->Flow->Page->Fulfillment/Parameter. # Hierarchical advanced settings for this fulfillment. The settings exposed at the lower level overrides the settings exposed at the higher level.
"audioExportGcsDestination": { # Google Cloud Storage location for a Dialogflow operation that writes or exports objects (e.g. exported agent or transcripts) outside of Dialogflow. # If present, incoming audio is exported by Dialogflow to the configured Google Cloud Storage destination. Exposed at the following levels: - Agent level - Flow level
"uri": "A String", # Required. The Google Cloud Storage URI for the exported objects. A URI is of the form: `gs://bucket/object-name-or-prefix` Whether a full object name, or just a prefix, its usage depends on the Dialogflow operation.
},
"dtmfSettings": { # Define behaviors for DTMF (dual tone multi frequency). # Settings for DTMF. Exposed at the following levels: - Agent level - Flow level - Page level - Parameter level.
"enabled": True or False, # If true, incoming audio is processed for DTMF (dual tone multi frequency) events. For example, if the caller presses a button on their telephone keypad and DTMF processing is enabled, Dialogflow will detect the event (e.g. a "3" was pressed) in the incoming audio and pass the event to the bot to drive business logic (e.g. when 3 is pressed, return the account balance).
"finishDigit": "A String", # The digit that terminates a DTMF digit sequence.
"maxDigits": 42, # Max length of DTMF digits.
},
"loggingSettings": { # Define behaviors on logging. # Settings for logging. Settings for Dialogflow History, Contact Center messages, StackDriver logs, and speech logging. Exposed at the following levels: - Agent level.
"enableInteractionLogging": True or False, # If true, DF Interaction logging is currently enabled.
"enableStackdriverLogging": True or False, # If true, StackDriver logging is currently enabled.
},
"speechSettings": { # Define behaviors of speech to text detection. # Settings for speech to text detection. Exposed at the following levels: - Agent level - Flow level - Page level - Parameter level
"endpointerSensitivity": 42, # Sensitivity of the speech model that detects the end of speech. Scale from 0 to 100.
"models": { # Mapping from language to Speech-to-Text model. The mapped Speech-to-Text model will be selected for requests from its corresponding language. For more information, see [Speech models](https://cloud.google.com/dialogflow/cx/docs/concept/speech-models).
"a_key": "A String",
},
"noSpeechTimeout": "A String", # Timeout before detecting no speech.
"useTimeoutBasedEndpointing": True or False, # Use timeout based endpointing, interpreting endpointer sensitivy as seconds of timeout value.
},
},
"conditionalCases": [ # Conditional cases for this fulfillment.
{ # A list of cascading if-else conditions. Cases are mutually exclusive. The first one with a matching condition is selected; all the rest are ignored.
"cases": [ # A list of cascading if-else conditions.
{ # Each case has a Boolean condition. When it is evaluated to be True, the corresponding messages will be selected and evaluated recursively.
"caseContent": [ # A list of case content.
{ # The list of messages or conditional cases to activate for this case.
"additionalCases": # Object with schema name: GoogleCloudDialogflowCxV3FulfillmentConditionalCases # Additional cases to be evaluated.
"message": { # Represents a response message that can be returned by a conversational agent. Response messages are also used for output audio synthesis. The approach is as follows: * If at least one OutputAudioText response is present, then all OutputAudioText responses are linearly concatenated, and the result is used for output audio synthesis. * If the OutputAudioText responses are a mixture of text and SSML, then the concatenated result is treated as SSML; otherwise, the result is treated as either text or SSML as appropriate. The agent designer should ideally use either text or SSML consistently throughout the bot design. * Otherwise, all Text responses are linearly concatenated, and the result is used for output audio synthesis. This approach allows for more sophisticated user experience scenarios, where the text displayed to the user may differ from what is heard. # Returned message.
"channel": "A String", # The channel which the response is associated with. Clients can specify the channel via QueryParameters.channel, and only associated channel response will be returned.
"conversationSuccess": { # Indicates that the conversation succeeded, i.e., the bot handled the issue that the customer talked to it about. Dialogflow only uses this to determine which conversations should be counted as successful and doesn't process the metadata in this message in any way. Note that Dialogflow also considers conversations that get to the conversation end page as successful even if they don't return ConversationSuccess. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates that the conversation succeeded. * In a webhook response when you determine that you handled the customer issue. # Indicates that the conversation succeeded.
"metadata": { # Custom metadata. Dialogflow doesn't impose any structure on this.
"a_key": "", # Properties of the object.
},
},
"endInteraction": { # Indicates that interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only. A signal that indicates the interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only when the conversation reaches `END_SESSION` page. It is not supposed to be defined by the user. It's guaranteed that there is at most one such message in each response.
},
"knowledgeInfoCard": { # Represents info card response. If the response contains generative knowledge prediction, Dialogflow will return a payload with Infobot Messenger compatible info card. Otherwise, the info card response is skipped. # Represents info card for knowledge answers, to be better rendered in Dialogflow Messenger.
},
"liveAgentHandoff": { # Indicates that the conversation should be handed off to a live agent. Dialogflow only uses this to determine which conversations were handed off to a human agent for measurement purposes. What else to do with this signal is up to you and your handoff procedures. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates something went extremely wrong in the conversation. * In a webhook response when you determine that the customer issue can only be handled by a human. # Hands off conversation to a human agent.
"metadata": { # Custom metadata for your handoff procedure. Dialogflow doesn't impose any structure on this.
"a_key": "", # Properties of the object.
},
},
"mixedAudio": { # Represents an audio message that is composed of both segments synthesized from the Dialogflow agent prompts and ones hosted externally at the specified URIs. The external URIs are specified via play_audio. This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only. An audio response message composed of both the synthesized Dialogflow agent responses and responses defined via play_audio. This message is generated by Dialogflow only and not supposed to be defined by the user.
"segments": [ # Segments this audio response is composed of.
{ # Represents one segment of audio.
"allowPlaybackInterruption": True or False, # Output only. Whether the playback of this segment can be interrupted by the end user's speech and the client should then start the next Dialogflow request.
"audio": "A String", # Raw audio synthesized from the Dialogflow agent's response using the output config specified in the request.
"uri": "A String", # Client-specific URI that points to an audio clip accessible to the client. Dialogflow does not impose any validation on it.
},
],
},
"outputAudioText": { # A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. # A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message.
"allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request.
"ssml": "A String", # The SSML text to be synthesized. For more information, see [SSML](/speech/text-to-speech/docs/ssml).
"text": "A String", # The raw text to be synthesized.
},
"payload": { # Returns a response containing a custom, platform-specific payload.
"a_key": "", # Properties of the object.
},
"playAudio": { # Specifies an audio clip to be played by the client as part of the response. # Signal that the client should play an audio clip hosted at a client-specific URI. Dialogflow uses this to construct mixed_audio. However, Dialogflow itself does not try to read or process the URI in any way.
"allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request.
"audioUri": "A String", # Required. URI of the audio clip. Dialogflow does not impose any validation on this value. It is specific to the client that reads it.
},
"responseType": "A String", # Response type.
"telephonyTransferCall": { # Represents the signal that telles the client to transfer the phone call connected to the agent to a third-party endpoint. # A signal that the client should transfer the phone call connected to this agent to a third-party endpoint.
"phoneNumber": "A String", # Transfer the call to a phone number in [E.164 format](https://en.wikipedia.org/wiki/E.164).
},
"text": { # The text response message. # Returns a text response.
"allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request.
"text": [ # Required. A collection of text responses.
"A String",
],
},
},
},
],
"condition": "A String", # The condition to activate and select this case. Empty means the condition is always true. The condition is evaluated against form parameters or session parameters. See the [conditions reference](https://cloud.google.com/dialogflow/cx/docs/reference/condition).
},
],
},
],
"enableGenerativeFallback": True or False, # If the flag is true, the agent will utilize LLM to generate a text response. If LLM generation fails, the defined responses in the fulfillment will be respected. This flag is only useful for fulfillments associated with no-match event handlers.
"messages": [ # The list of rich message responses to present to the user.
{ # Represents a response message that can be returned by a conversational agent. Response messages are also used for output audio synthesis. The approach is as follows: * If at least one OutputAudioText response is present, then all OutputAudioText responses are linearly concatenated, and the result is used for output audio synthesis. * If the OutputAudioText responses are a mixture of text and SSML, then the concatenated result is treated as SSML; otherwise, the result is treated as either text or SSML as appropriate. The agent designer should ideally use either text or SSML consistently throughout the bot design. * Otherwise, all Text responses are linearly concatenated, and the result is used for output audio synthesis. This approach allows for more sophisticated user experience scenarios, where the text displayed to the user may differ from what is heard.
"channel": "A String", # The channel which the response is associated with. Clients can specify the channel via QueryParameters.channel, and only associated channel response will be returned.
"conversationSuccess": { # Indicates that the conversation succeeded, i.e., the bot handled the issue that the customer talked to it about. Dialogflow only uses this to determine which conversations should be counted as successful and doesn't process the metadata in this message in any way. Note that Dialogflow also considers conversations that get to the conversation end page as successful even if they don't return ConversationSuccess. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates that the conversation succeeded. * In a webhook response when you determine that you handled the customer issue. # Indicates that the conversation succeeded.
"metadata": { # Custom metadata. Dialogflow doesn't impose any structure on this.
"a_key": "", # Properties of the object.
},
},
"endInteraction": { # Indicates that interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only. A signal that indicates the interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only when the conversation reaches `END_SESSION` page. It is not supposed to be defined by the user. It's guaranteed that there is at most one such message in each response.
},
"knowledgeInfoCard": { # Represents info card response. If the response contains generative knowledge prediction, Dialogflow will return a payload with Infobot Messenger compatible info card. Otherwise, the info card response is skipped. # Represents info card for knowledge answers, to be better rendered in Dialogflow Messenger.
},
"liveAgentHandoff": { # Indicates that the conversation should be handed off to a live agent. Dialogflow only uses this to determine which conversations were handed off to a human agent for measurement purposes. What else to do with this signal is up to you and your handoff procedures. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates something went extremely wrong in the conversation. * In a webhook response when you determine that the customer issue can only be handled by a human. # Hands off conversation to a human agent.
"metadata": { # Custom metadata for your handoff procedure. Dialogflow doesn't impose any structure on this.
"a_key": "", # Properties of the object.
},
},
"mixedAudio": { # Represents an audio message that is composed of both segments synthesized from the Dialogflow agent prompts and ones hosted externally at the specified URIs. The external URIs are specified via play_audio. This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only. An audio response message composed of both the synthesized Dialogflow agent responses and responses defined via play_audio. This message is generated by Dialogflow only and not supposed to be defined by the user.
"segments": [ # Segments this audio response is composed of.
{ # Represents one segment of audio.
"allowPlaybackInterruption": True or False, # Output only. Whether the playback of this segment can be interrupted by the end user's speech and the client should then start the next Dialogflow request.
"audio": "A String", # Raw audio synthesized from the Dialogflow agent's response using the output config specified in the request.
"uri": "A String", # Client-specific URI that points to an audio clip accessible to the client. Dialogflow does not impose any validation on it.
},
],
},
"outputAudioText": { # A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. # A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message.
"allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request.
"ssml": "A String", # The SSML text to be synthesized. For more information, see [SSML](/speech/text-to-speech/docs/ssml).
"text": "A String", # The raw text to be synthesized.
},
"payload": { # Returns a response containing a custom, platform-specific payload.
"a_key": "", # Properties of the object.
},
"playAudio": { # Specifies an audio clip to be played by the client as part of the response. # Signal that the client should play an audio clip hosted at a client-specific URI. Dialogflow uses this to construct mixed_audio. However, Dialogflow itself does not try to read or process the URI in any way.
"allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request.
"audioUri": "A String", # Required. URI of the audio clip. Dialogflow does not impose any validation on this value. It is specific to the client that reads it.
},
"responseType": "A String", # Response type.
"telephonyTransferCall": { # Represents the signal that telles the client to transfer the phone call connected to the agent to a third-party endpoint. # A signal that the client should transfer the phone call connected to this agent to a third-party endpoint.
"phoneNumber": "A String", # Transfer the call to a phone number in [E.164 format](https://en.wikipedia.org/wiki/E.164).
},
"text": { # The text response message. # Returns a text response.
"allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request.
"text": [ # Required. A collection of text responses.
"A String",
],
},
},
],
"returnPartialResponses": True or False, # Whether Dialogflow should return currently queued fulfillment response messages in streaming APIs. If a webhook is specified, it happens before Dialogflow invokes webhook. Warning: 1) This flag only affects streaming API. Responses are still queued and returned once in non-streaming API. 2) The flag can be enabled in any fulfillment but only the first 3 partial responses will be returned. You may only want to apply it to fulfillments that have slow webhooks.
"setParameterActions": [ # Set parameter values before executing the webhook.
{ # Setting a parameter value.
"parameter": "A String", # Display name of the parameter.
"value": "", # The new value of the parameter. A null value clears the parameter.
},
],
"tag": "A String", # The value of this field will be populated in the WebhookRequest `fulfillmentInfo.tag` field by Dialogflow when the associated webhook is called. The tag is typically used by the webhook service to identify which fulfillment is being called, but it could be used for other purposes. This field is required if `webhook` is specified.
"webhook": "A String", # The webhook to call. Format: `projects//locations//agents//webhooks/`.
},
},
],
}
languageCode: string, The language of the following fields in `flow`: * `Flow.event_handlers.trigger_fulfillment.messages` * `Flow.event_handlers.trigger_fulfillment.conditional_cases` * `Flow.transition_routes.trigger_fulfillment.messages` * `Flow.transition_routes.trigger_fulfillment.conditional_cases` If not specified, the agent's default language is used. [Many languages](https://cloud.google.com/dialogflow/cx/docs/reference/language) are supported. Note: languages must be enabled in the agent before they can be used.
x__xgafv: string, V1 error format.
Allowed values
1 - v1 error format
2 - v2 error format
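
A minimal usage sketch (not an official sample): it assumes this page documents the flows `create` method, that the `googleapiclient` discovery client is installed with credentials available, and that the project, location, agent IDs, parameter names, and display name below are hypothetical placeholders.

  from googleapiclient.discovery import build

  # Build the Dialogflow CX v3 client from the public discovery document.
  service = build("dialogflow", "v3")

  # Hypothetical resource path; substitute your own project/location/agent IDs.
  parent = "projects/my-project/locations/global/agents/my-agent-id"

  # A minimal Flow body: only displayName is required. The transition route
  # below fires on a session-parameter condition and returns a text message.
  body = {
      "displayName": "Order status flow",
      "nluSettings": {
          # Intent matches scoring below 0.4 trigger a no-match event instead.
          "classificationThreshold": 0.4,
      },
      "transitionRoutes": [
          {
              # At least one of `intent` or `condition` must be specified.
              "condition": "$session.params.order_id != null",
              "triggerFulfillment": {
                  "messages": [
                      {"text": {"text": ["Looking up your order now."]}},
                  ],
                  "setParameterActions": [
                      # Sets a value; a null value would clear the parameter.
                      {"parameter": "order_status", "value": "pending"},
                  ],
              },
          },
      ],
  }

  response = (
      service.projects()
      .locations()
      .agents()
      .flows()
      .create(parent=parent, body=body, languageCode="en")
      .execute()
  )
  print(response["name"])  # projects/.../agents/.../flows/<generated flow ID>
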
Returns:
An object of the form:
{ # A flow represents a conversation flow when you build your chatbot agent. A flow consists of many pages connected by transition routes. Conversations always start with the built-in Start Flow (with an all-0 ID). Transition routes can direct the conversation session from the current flow (parent flow) to another flow (sub flow). When the sub flow is finished, Dialogflow will bring the session back to the parent flow, at the point where the sub flow was started. Usually, when a transition route is followed by a matched intent, the intent will be "consumed". This means the intent won't activate more transition routes. However, when the followed transition route moves the conversation session into a different flow, the matched intent can be carried over and consumed in the target flow.
"advancedSettings": { # Hierarchical advanced settings for agent/flow/page/fulfillment/parameter. Settings exposed at lower level overrides the settings exposed at higher level. Overriding occurs at the sub-setting level. For example, the playback_interruption_settings at fulfillment level only overrides the playback_interruption_settings at the agent level, leaving other settings at the agent level unchanged. DTMF settings does not override each other. DTMF settings set at different levels define DTMF detections running in parallel. Hierarchy: Agent->Flow->Page->Fulfillment/Parameter. # Hierarchical advanced settings for this flow. The settings exposed at the lower level overrides the settings exposed at the higher level.
"audioExportGcsDestination": { # Google Cloud Storage location for a Dialogflow operation that writes or exports objects (e.g. exported agent or transcripts) outside of Dialogflow. # If present, incoming audio is exported by Dialogflow to the configured Google Cloud Storage destination. Exposed at the following levels: - Agent level - Flow level
"uri": "A String", # Required. The Google Cloud Storage URI for the exported objects. A URI is of the form: `gs://bucket/object-name-or-prefix` Whether a full object name, or just a prefix, its usage depends on the Dialogflow operation.
},
"dtmfSettings": { # Define behaviors for DTMF (dual tone multi frequency). # Settings for DTMF. Exposed at the following levels: - Agent level - Flow level - Page level - Parameter level.
"enabled": True or False, # If true, incoming audio is processed for DTMF (dual tone multi frequency) events. For example, if the caller presses a button on their telephone keypad and DTMF processing is enabled, Dialogflow will detect the event (e.g. a "3" was pressed) in the incoming audio and pass the event to the bot to drive business logic (e.g. when 3 is pressed, return the account balance).
"finishDigit": "A String", # The digit that terminates a DTMF digit sequence.
"maxDigits": 42, # Max length of DTMF digits.
},
"loggingSettings": { # Define behaviors on logging. # Settings for logging. Settings for Dialogflow History, Contact Center messages, StackDriver logs, and speech logging. Exposed at the following levels: - Agent level.
"enableInteractionLogging": True or False, # If true, DF Interaction logging is currently enabled.
"enableStackdriverLogging": True or False, # If true, StackDriver logging is currently enabled.
},
"speechSettings": { # Define behaviors of speech to text detection. # Settings for speech to text detection. Exposed at the following levels: - Agent level - Flow level - Page level - Parameter level
"endpointerSensitivity": 42, # Sensitivity of the speech model that detects the end of speech. Scale from 0 to 100.
"models": { # Mapping from language to Speech-to-Text model. The mapped Speech-to-Text model will be selected for requests from its corresponding language. For more information, see [Speech models](https://cloud.google.com/dialogflow/cx/docs/concept/speech-models).
"a_key": "A String",
},
"noSpeechTimeout": "A String", # Timeout before detecting no speech.
"useTimeoutBasedEndpointing": True or False, # Use timeout based endpointing, interpreting endpointer sensitivy as seconds of timeout value.
},
},
"description": "A String", # The description of the flow. The maximum length is 500 characters. If exceeded, the request is rejected.
"displayName": "A String", # Required. The human-readable name of the flow.
"eventHandlers": [ # A flow's event handlers serve two purposes: * They are responsible for handling events (e.g. no match, webhook errors) in the flow. * They are inherited by every page's event handlers, which can be used to handle common events regardless of the current page. Event handlers defined in the page have higher priority than those defined in the flow. Unlike transition_routes, these handlers are evaluated on a first-match basis. The first one that matches the event get executed, with the rest being ignored.
{ # An event handler specifies an event that can be handled during a session. When the specified event happens, the following actions are taken in order: * If there is a `trigger_fulfillment` associated with the event, it will be called. * If there is a `target_page` associated with the event, the session will transition into the specified page. * If there is a `target_flow` associated with the event, the session will transition into the specified flow.
"event": "A String", # Required. The name of the event to handle.
"name": "A String", # Output only. The unique identifier of this event handler.
"targetFlow": "A String", # The target flow to transition to. Format: `projects//locations//agents//flows/`.
"targetPage": "A String", # The target page to transition to. Format: `projects//locations//agents//flows//pages/`.
"triggerFulfillment": { # A fulfillment can do one or more of the following actions at the same time: * Generate rich message responses. * Set parameter values. * Call the webhook. Fulfillments can be called at various stages in the Page or Form lifecycle. For example, when a DetectIntentRequest drives a session to enter a new page, the page's entry fulfillment can add a static response to the QueryResult in the returning DetectIntentResponse, call the webhook (for example, to load user data from a database), or both. # The fulfillment to call when the event occurs. Handling webhook errors with a fulfillment enabled with webhook could cause infinite loop. It is invalid to specify such fulfillment for a handler handling webhooks.
"advancedSettings": { # Hierarchical advanced settings for agent/flow/page/fulfillment/parameter. Settings exposed at lower level overrides the settings exposed at higher level. Overriding occurs at the sub-setting level. For example, the playback_interruption_settings at fulfillment level only overrides the playback_interruption_settings at the agent level, leaving other settings at the agent level unchanged. DTMF settings does not override each other. DTMF settings set at different levels define DTMF detections running in parallel. Hierarchy: Agent->Flow->Page->Fulfillment/Parameter. # Hierarchical advanced settings for this fulfillment. The settings exposed at the lower level overrides the settings exposed at the higher level.
"audioExportGcsDestination": { # Google Cloud Storage location for a Dialogflow operation that writes or exports objects (e.g. exported agent or transcripts) outside of Dialogflow. # If present, incoming audio is exported by Dialogflow to the configured Google Cloud Storage destination. Exposed at the following levels: - Agent level - Flow level
"uri": "A String", # Required. The Google Cloud Storage URI for the exported objects. A URI is of the form: `gs://bucket/object-name-or-prefix` Whether a full object name, or just a prefix, its usage depends on the Dialogflow operation.
},
"dtmfSettings": { # Define behaviors for DTMF (dual tone multi frequency). # Settings for DTMF. Exposed at the following levels: - Agent level - Flow level - Page level - Parameter level.
"enabled": True or False, # If true, incoming audio is processed for DTMF (dual tone multi frequency) events. For example, if the caller presses a button on their telephone keypad and DTMF processing is enabled, Dialogflow will detect the event (e.g. a "3" was pressed) in the incoming audio and pass the event to the bot to drive business logic (e.g. when 3 is pressed, return the account balance).
"finishDigit": "A String", # The digit that terminates a DTMF digit sequence.
"maxDigits": 42, # Max length of DTMF digits.
},
"loggingSettings": { # Define behaviors on logging. # Settings for logging. Settings for Dialogflow History, Contact Center messages, StackDriver logs, and speech logging. Exposed at the following levels: - Agent level.
"enableInteractionLogging": True or False, # If true, DF Interaction logging is currently enabled.
"enableStackdriverLogging": True or False, # If true, StackDriver logging is currently enabled.
},
"speechSettings": { # Define behaviors of speech to text detection. # Settings for speech to text detection. Exposed at the following levels: - Agent level - Flow level - Page level - Parameter level
"endpointerSensitivity": 42, # Sensitivity of the speech model that detects the end of speech. Scale from 0 to 100.
"models": { # Mapping from language to Speech-to-Text model. The mapped Speech-to-Text model will be selected for requests from its corresponding language. For more information, see [Speech models](https://cloud.google.com/dialogflow/cx/docs/concept/speech-models).
"a_key": "A String",
},
"noSpeechTimeout": "A String", # Timeout before detecting no speech.
"useTimeoutBasedEndpointing": True or False, # Use timeout based endpointing, interpreting endpointer sensitivy as seconds of timeout value.
},
},
"conditionalCases": [ # Conditional cases for this fulfillment.
{ # A list of cascading if-else conditions. Cases are mutually exclusive. The first one with a matching condition is selected; all the rest are ignored.
"cases": [ # A list of cascading if-else conditions.
{ # Each case has a Boolean condition. When it is evaluated to be True, the corresponding messages will be selected and evaluated recursively.
"caseContent": [ # A list of case content.
{ # The list of messages or conditional cases to activate for this case.
"additionalCases": # Object with schema name: GoogleCloudDialogflowCxV3FulfillmentConditionalCases # Additional cases to be evaluated.
"message": { # Represents a response message that can be returned by a conversational agent. Response messages are also used for output audio synthesis. The approach is as follows: * If at least one OutputAudioText response is present, then all OutputAudioText responses are linearly concatenated, and the result is used for output audio synthesis. * If the OutputAudioText responses are a mixture of text and SSML, then the concatenated result is treated as SSML; otherwise, the result is treated as either text or SSML as appropriate. The agent designer should ideally use either text or SSML consistently throughout the bot design. * Otherwise, all Text responses are linearly concatenated, and the result is used for output audio synthesis. This approach allows for more sophisticated user experience scenarios, where the text displayed to the user may differ from what is heard. # Returned message.
"channel": "A String", # The channel which the response is associated with. Clients can specify the channel via QueryParameters.channel, and only associated channel response will be returned.
"conversationSuccess": { # Indicates that the conversation succeeded, i.e., the bot handled the issue that the customer talked to it about. Dialogflow only uses this to determine which conversations should be counted as successful and doesn't process the metadata in this message in any way. Note that Dialogflow also considers conversations that get to the conversation end page as successful even if they don't return ConversationSuccess. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates that the conversation succeeded. * In a webhook response when you determine that you handled the customer issue. # Indicates that the conversation succeeded.
"metadata": { # Custom metadata. Dialogflow doesn't impose any structure on this.
"a_key": "", # Properties of the object.
},
},
"endInteraction": { # Indicates that interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only. A signal that indicates the interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only when the conversation reaches `END_SESSION` page. It is not supposed to be defined by the user. It's guaranteed that there is at most one such message in each response.
},
"knowledgeInfoCard": { # Represents info card response. If the response contains generative knowledge prediction, Dialogflow will return a payload with Infobot Messenger compatible info card. Otherwise, the info card response is skipped. # Represents info card for knowledge answers, to be better rendered in Dialogflow Messenger.
},
"liveAgentHandoff": { # Indicates that the conversation should be handed off to a live agent. Dialogflow only uses this to determine which conversations were handed off to a human agent for measurement purposes. What else to do with this signal is up to you and your handoff procedures. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates something went extremely wrong in the conversation. * In a webhook response when you determine that the customer issue can only be handled by a human. # Hands off conversation to a human agent.
"metadata": { # Custom metadata for your handoff procedure. Dialogflow doesn't impose any structure on this.
"a_key": "", # Properties of the object.
},
},
"mixedAudio": { # Represents an audio message that is composed of both segments synthesized from the Dialogflow agent prompts and ones hosted externally at the specified URIs. The external URIs are specified via play_audio. This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only. An audio response message composed of both the synthesized Dialogflow agent responses and responses defined via play_audio. This message is generated by Dialogflow only and not supposed to be defined by the user.
"segments": [ # Segments this audio response is composed of.
{ # Represents one segment of audio.
"allowPlaybackInterruption": True or False, # Output only. Whether the playback of this segment can be interrupted by the end user's speech and the client should then start the next Dialogflow request.
"audio": "A String", # Raw audio synthesized from the Dialogflow agent's response using the output config specified in the request.
"uri": "A String", # Client-specific URI that points to an audio clip accessible to the client. Dialogflow does not impose any validation on it.
},
],
},
"outputAudioText": { # A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. # A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message.
"allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request.
"ssml": "A String", # The SSML text to be synthesized. For more information, see [SSML](/speech/text-to-speech/docs/ssml).
"text": "A String", # The raw text to be synthesized.
},
"payload": { # Returns a response containing a custom, platform-specific payload.
"a_key": "", # Properties of the object.
},
"playAudio": { # Specifies an audio clip to be played by the client as part of the response. # Signal that the client should play an audio clip hosted at a client-specific URI. Dialogflow uses this to construct mixed_audio. However, Dialogflow itself does not try to read or process the URI in any way.
"allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request.
"audioUri": "A String", # Required. URI of the audio clip. Dialogflow does not impose any validation on this value. It is specific to the client that reads it.
},
"responseType": "A String", # Response type.
"telephonyTransferCall": { # Represents the signal that telles the client to transfer the phone call connected to the agent to a third-party endpoint. # A signal that the client should transfer the phone call connected to this agent to a third-party endpoint.
"phoneNumber": "A String", # Transfer the call to a phone number in [E.164 format](https://en.wikipedia.org/wiki/E.164).
},
"text": { # The text response message. # Returns a text response.
"allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request.
"text": [ # Required. A collection of text responses.
"A String",
],
},
},
},
],
"condition": "A String", # The condition to activate and select this case. Empty means the condition is always true. The condition is evaluated against form parameters or session parameters. See the [conditions reference](https://cloud.google.com/dialogflow/cx/docs/reference/condition).
},
],
},
],
"enableGenerativeFallback": True or False, # If the flag is true, the agent will utilize LLM to generate a text response. If LLM generation fails, the defined responses in the fulfillment will be respected. This flag is only useful for fulfillments associated with no-match event handlers.
"messages": [ # The list of rich message responses to present to the user.
{ # Represents a response message that can be returned by a conversational agent. Response messages are also used for output audio synthesis. The approach is as follows: * If at least one OutputAudioText response is present, then all OutputAudioText responses are linearly concatenated, and the result is used for output audio synthesis. * If the OutputAudioText responses are a mixture of text and SSML, then the concatenated result is treated as SSML; otherwise, the result is treated as either text or SSML as appropriate. The agent designer should ideally use either text or SSML consistently throughout the bot design. * Otherwise, all Text responses are linearly concatenated, and the result is used for output audio synthesis. This approach allows for more sophisticated user experience scenarios, where the text displayed to the user may differ from what is heard.
"channel": "A String", # The channel which the response is associated with. Clients can specify the channel via QueryParameters.channel, and only associated channel response will be returned.
"conversationSuccess": { # Indicates that the conversation succeeded, i.e., the bot handled the issue that the customer talked to it about. Dialogflow only uses this to determine which conversations should be counted as successful and doesn't process the metadata in this message in any way. Note that Dialogflow also considers conversations that get to the conversation end page as successful even if they don't return ConversationSuccess. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates that the conversation succeeded. * In a webhook response when you determine that you handled the customer issue. # Indicates that the conversation succeeded.
"metadata": { # Custom metadata. Dialogflow doesn't impose any structure on this.
"a_key": "", # Properties of the object.
},
},
"endInteraction": { # Indicates that interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only. A signal that indicates the interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only when the conversation reaches `END_SESSION` page. It is not supposed to be defined by the user. It's guaranteed that there is at most one such message in each response.
},
"knowledgeInfoCard": { # Represents info card response. If the response contains generative knowledge prediction, Dialogflow will return a payload with Infobot Messenger compatible info card. Otherwise, the info card response is skipped. # Represents info card for knowledge answers, to be better rendered in Dialogflow Messenger.
},
"liveAgentHandoff": { # Indicates that the conversation should be handed off to a live agent. Dialogflow only uses this to determine which conversations were handed off to a human agent for measurement purposes. What else to do with this signal is up to you and your handoff procedures. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates something went extremely wrong in the conversation. * In a webhook response when you determine that the customer issue can only be handled by a human. # Hands off conversation to a human agent.
"metadata": { # Custom metadata for your handoff procedure. Dialogflow doesn't impose any structure on this.
"a_key": "", # Properties of the object.
},
},
"mixedAudio": { # Represents an audio message that is composed of both segments synthesized from the Dialogflow agent prompts and ones hosted externally at the specified URIs. The external URIs are specified via play_audio. This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only. An audio response message composed of both the synthesized Dialogflow agent responses and responses defined via play_audio. This message is generated by Dialogflow only and not supposed to be defined by the user.
"segments": [ # Segments this audio response is composed of.
{ # Represents one segment of audio.
"allowPlaybackInterruption": True or False, # Output only. Whether the playback of this segment can be interrupted by the end user's speech and the client should then start the next Dialogflow request.
"audio": "A String", # Raw audio synthesized from the Dialogflow agent's response using the output config specified in the request.
"uri": "A String", # Client-specific URI that points to an audio clip accessible to the client. Dialogflow does not impose any validation on it.
},
],
},
"outputAudioText": { # A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. # A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message.
"allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request.
"ssml": "A String", # The SSML text to be synthesized. For more information, see [SSML](/speech/text-to-speech/docs/ssml).
"text": "A String", # The raw text to be synthesized.
},
"payload": { # Returns a response containing a custom, platform-specific payload.
"a_key": "", # Properties of the object.
},
"playAudio": { # Specifies an audio clip to be played by the client as part of the response. # Signal that the client should play an audio clip hosted at a client-specific URI. Dialogflow uses this to construct mixed_audio. However, Dialogflow itself does not try to read or process the URI in any way.
"allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request.
"audioUri": "A String", # Required. URI of the audio clip. Dialogflow does not impose any validation on this value. It is specific to the client that reads it.
},
"responseType": "A String", # Response type.
"telephonyTransferCall": { # Represents the signal that telles the client to transfer the phone call connected to the agent to a third-party endpoint. # A signal that the client should transfer the phone call connected to this agent to a third-party endpoint.
"phoneNumber": "A String", # Transfer the call to a phone number in [E.164 format](https://en.wikipedia.org/wiki/E.164).
},
"text": { # The text response message. # Returns a text response.
"allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request.
"text": [ # Required. A collection of text responses.
"A String",
],
},
},
],
"returnPartialResponses": True or False, # Whether Dialogflow should return currently queued fulfillment response messages in streaming APIs. If a webhook is specified, it happens before Dialogflow invokes webhook. Warning: 1) This flag only affects streaming API. Responses are still queued and returned once in non-streaming API. 2) The flag can be enabled in any fulfillment but only the first 3 partial responses will be returned. You may only want to apply it to fulfillments that have slow webhooks.
"setParameterActions": [ # Set parameter values before executing the webhook.
{ # Setting a parameter value.
"parameter": "A String", # Display name of the parameter.
"value": "", # The new value of the parameter. A null value clears the parameter.
},
],
"tag": "A String", # The value of this field will be populated in the WebhookRequest `fulfillmentInfo.tag` field by Dialogflow when the associated webhook is called. The tag is typically used by the webhook service to identify which fulfillment is being called, but it could be used for other purposes. This field is required if `webhook` is specified.
"webhook": "A String", # The webhook to call. Format: `projects//locations//agents//webhooks/`.
},
},
],
"knowledgeConnectorSettings": { # The Knowledge Connector settings for this page or flow. This includes information such as the attached Knowledge Bases, and the way to execute fulfillment. # Optional. Knowledge connector configuration.
"dataStoreConnections": [ # Optional. List of related data store connections.
{ # A data store connection. It represents a data store in Discovery Engine and the type of the contents it contains.
"dataStore": "A String", # The full name of the referenced data store. Formats: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}` `projects/{project}/locations/{location}/dataStores/{data_store}`
"dataStoreType": "A String", # The type of the connected data store.
},
],
"enabled": True or False, # Whether Knowledge Connector is enabled or not.
"targetFlow": "A String", # The target flow to transition to. Format: `projects//locations//agents//flows/`.
"targetPage": "A String", # The target page to transition to. Format: `projects//locations//agents//flows//pages/`.
"triggerFulfillment": { # A fulfillment can do one or more of the following actions at the same time: * Generate rich message responses. * Set parameter values. * Call the webhook. Fulfillments can be called at various stages in the Page or Form lifecycle. For example, when a DetectIntentRequest drives a session to enter a new page, the page's entry fulfillment can add a static response to the QueryResult in the returning DetectIntentResponse, call the webhook (for example, to load user data from a database), or both. # The fulfillment to be triggered. When the answers from the Knowledge Connector are selected by Dialogflow, you can utitlize the request scoped parameter `$request.knowledge.answers` (contains up to the 5 highest confidence answers) and `$request.knowledge.questions` (contains the corresponding questions) to construct the fulfillment.
"advancedSettings": { # Hierarchical advanced settings for agent/flow/page/fulfillment/parameter. Settings exposed at lower level overrides the settings exposed at higher level. Overriding occurs at the sub-setting level. For example, the playback_interruption_settings at fulfillment level only overrides the playback_interruption_settings at the agent level, leaving other settings at the agent level unchanged. DTMF settings does not override each other. DTMF settings set at different levels define DTMF detections running in parallel. Hierarchy: Agent->Flow->Page->Fulfillment/Parameter. # Hierarchical advanced settings for this fulfillment. The settings exposed at the lower level overrides the settings exposed at the higher level.
"audioExportGcsDestination": { # Google Cloud Storage location for a Dialogflow operation that writes or exports objects (e.g. exported agent or transcripts) outside of Dialogflow. # If present, incoming audio is exported by Dialogflow to the configured Google Cloud Storage destination. Exposed at the following levels: - Agent level - Flow level
"uri": "A String", # Required. The Google Cloud Storage URI for the exported objects. A URI is of the form: `gs://bucket/object-name-or-prefix` Whether a full object name, or just a prefix, its usage depends on the Dialogflow operation.
},
"dtmfSettings": { # Define behaviors for DTMF (dual tone multi frequency). # Settings for DTMF. Exposed at the following levels: - Agent level - Flow level - Page level - Parameter level.
"enabled": True or False, # If true, incoming audio is processed for DTMF (dual tone multi frequency) events. For example, if the caller presses a button on their telephone keypad and DTMF processing is enabled, Dialogflow will detect the event (e.g. a "3" was pressed) in the incoming audio and pass the event to the bot to drive business logic (e.g. when 3 is pressed, return the account balance).
"finishDigit": "A String", # The digit that terminates a DTMF digit sequence.
"maxDigits": 42, # Max length of DTMF digits.
},
"loggingSettings": { # Define behaviors on logging. # Settings for logging. Settings for Dialogflow History, Contact Center messages, StackDriver logs, and speech logging. Exposed at the following levels: - Agent level.
"enableInteractionLogging": True or False, # If true, DF Interaction logging is currently enabled.
"enableStackdriverLogging": True or False, # If true, StackDriver logging is currently enabled.
},
"speechSettings": { # Define behaviors of speech to text detection. # Settings for speech to text detection. Exposed at the following levels: - Agent level - Flow level - Page level - Parameter level
"endpointerSensitivity": 42, # Sensitivity of the speech model that detects the end of speech. Scale from 0 to 100.
"models": { # Mapping from language to Speech-to-Text model. The mapped Speech-to-Text model will be selected for requests from its corresponding language. For more information, see [Speech models](https://cloud.google.com/dialogflow/cx/docs/concept/speech-models).
"a_key": "A String",
},
"noSpeechTimeout": "A String", # Timeout before detecting no speech.
"useTimeoutBasedEndpointing": True or False, # Use timeout based endpointing, interpreting endpointer sensitivy as seconds of timeout value.
},
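# A minimal sketch (assumed values; "phone_call" is one of the documented
# Speech-to-Text models, and duration strings take the form "5s"):
#   "speechSettings": {"endpointerSensitivity": 30, "models": {"en": "phone_call"},
#                      "noSpeechTimeout": "5s", "useTimeoutBasedEndpointing": True}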
},
"conditionalCases": [ # Conditional cases for this fulfillment.
{ # A list of cascading if-else conditions. Cases are mutually exclusive. The first one with a matching condition is selected; all the rest are ignored.
"cases": [ # A list of cascading if-else conditions.
{ # Each case has a Boolean condition. When it is evaluated to be True, the corresponding messages will be selected and evaluated recursively.
"caseContent": [ # A list of case content.
{ # The list of messages or conditional cases to activate for this case.
"additionalCases": # Object with schema name: GoogleCloudDialogflowCxV3FulfillmentConditionalCases # Additional cases to be evaluated.
"message": { # Represents a response message that can be returned by a conversational agent. Response messages are also used for output audio synthesis. The approach is as follows: * If at least one OutputAudioText response is present, then all OutputAudioText responses are linearly concatenated, and the result is used for output audio synthesis. * If the OutputAudioText responses are a mixture of text and SSML, then the concatenated result is treated as SSML; otherwise, the result is treated as either text or SSML as appropriate. The agent designer should ideally use either text or SSML consistently throughout the bot design. * Otherwise, all Text responses are linearly concatenated, and the result is used for output audio synthesis. This approach allows for more sophisticated user experience scenarios, where the text displayed to the user may differ from what is heard. # Returned message.
"channel": "A String", # The channel which the response is associated with. Clients can specify the channel via QueryParameters.channel, and only associated channel response will be returned.
"conversationSuccess": { # Indicates that the conversation succeeded, i.e., the bot handled the issue that the customer talked to it about. Dialogflow only uses this to determine which conversations should be counted as successful and doesn't process the metadata in this message in any way. Note that Dialogflow also considers conversations that get to the conversation end page as successful even if they don't return ConversationSuccess. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates that the conversation succeeded. * In a webhook response when you determine that you handled the customer issue. # Indicates that the conversation succeeded.
"metadata": { # Custom metadata. Dialogflow doesn't impose any structure on this.
"a_key": "", # Properties of the object.
},
},
"endInteraction": { # Indicates that interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only. A signal that indicates the interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only when the conversation reaches `END_SESSION` page. It is not supposed to be defined by the user. It's guaranteed that there is at most one such message in each response.
},
"knowledgeInfoCard": { # Represents info card response. If the response contains generative knowledge prediction, Dialogflow will return a payload with Infobot Messenger compatible info card. Otherwise, the info card response is skipped. # Represents info card for knowledge answers, to be better rendered in Dialogflow Messenger.
},
"liveAgentHandoff": { # Indicates that the conversation should be handed off to a live agent. Dialogflow only uses this to determine which conversations were handed off to a human agent for measurement purposes. What else to do with this signal is up to you and your handoff procedures. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates something went extremely wrong in the conversation. * In a webhook response when you determine that the customer issue can only be handled by a human. # Hands off conversation to a human agent.
"metadata": { # Custom metadata for your handoff procedure. Dialogflow doesn't impose any structure on this.
"a_key": "", # Properties of the object.
},
},
"mixedAudio": { # Represents an audio message that is composed of both segments synthesized from the Dialogflow agent prompts and ones hosted externally at the specified URIs. The external URIs are specified via play_audio. This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only. An audio response message composed of both the synthesized Dialogflow agent responses and responses defined via play_audio. This message is generated by Dialogflow only and not supposed to be defined by the user.
"segments": [ # Segments this audio response is composed of.
{ # Represents one segment of audio.
"allowPlaybackInterruption": True or False, # Output only. Whether the playback of this segment can be interrupted by the end user's speech and the client should then start the next Dialogflow request.
"audio": "A String", # Raw audio synthesized from the Dialogflow agent's response using the output config specified in the request.
"uri": "A String", # Client-specific URI that points to an audio clip accessible to the client. Dialogflow does not impose any validation on it.
},
],
},
"outputAudioText": { # A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. # A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message.
"allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request.
"ssml": "A String", # The SSML text to be synthesized. For more information, see [SSML](/speech/text-to-speech/docs/ssml).
"text": "A String", # The raw text to be synthesized.
},
"payload": { # Returns a response containing a custom, platform-specific payload.
"a_key": "", # Properties of the object.
},
"playAudio": { # Specifies an audio clip to be played by the client as part of the response. # Signal that the client should play an audio clip hosted at a client-specific URI. Dialogflow uses this to construct mixed_audio. However, Dialogflow itself does not try to read or process the URI in any way.
"allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request.
"audioUri": "A String", # Required. URI of the audio clip. Dialogflow does not impose any validation on this value. It is specific to the client that reads it.
},
"responseType": "A String", # Response type.
"telephonyTransferCall": { # Represents the signal that telles the client to transfer the phone call connected to the agent to a third-party endpoint. # A signal that the client should transfer the phone call connected to this agent to a third-party endpoint.
"phoneNumber": "A String", # Transfer the call to a phone number in [E.164 format](https://en.wikipedia.org/wiki/E.164).
},
"text": { # The text response message. # Returns a text response.
"allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request.
"text": [ # Required. A collection of text responses.
"A String",
],
},
},
},
],
"condition": "A String", # The condition to activate and select this case. Empty means the condition is always true. The condition is evaluated against form parameters or session parameters. See the [conditions reference](https://cloud.google.com/dialogflow/cx/docs/reference/condition).
},
],
},
],
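# A minimal sketch of a single conditional case (the parameter name is
# hypothetical; the condition syntax follows the conditions reference linked above):
#   "conditionalCases": [{"cases": [{
#       "condition": '$session.params.order-status = "shipped"',
#       "caseContent": [{"message": {"text": {"text": ["Your order is on the way."]}}}]}]}]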
"enableGenerativeFallback": True or False, # If the flag is true, the agent will utilize LLM to generate a text response. If LLM generation fails, the defined responses in the fulfillment will be respected. This flag is only useful for fulfillments associated with no-match event handlers.
"messages": [ # The list of rich message responses to present to the user.
{ # Represents a response message that can be returned by a conversational agent. Response messages are also used for output audio synthesis. The approach is as follows: * If at least one OutputAudioText response is present, then all OutputAudioText responses are linearly concatenated, and the result is used for output audio synthesis. * If the OutputAudioText responses are a mixture of text and SSML, then the concatenated result is treated as SSML; otherwise, the result is treated as either text or SSML as appropriate. The agent designer should ideally use either text or SSML consistently throughout the bot design. * Otherwise, all Text responses are linearly concatenated, and the result is used for output audio synthesis. This approach allows for more sophisticated user experience scenarios, where the text displayed to the user may differ from what is heard.
"channel": "A String", # The channel which the response is associated with. Clients can specify the channel via QueryParameters.channel, and only associated channel response will be returned.
"conversationSuccess": { # Indicates that the conversation succeeded, i.e., the bot handled the issue that the customer talked to it about. Dialogflow only uses this to determine which conversations should be counted as successful and doesn't process the metadata in this message in any way. Note that Dialogflow also considers conversations that get to the conversation end page as successful even if they don't return ConversationSuccess. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates that the conversation succeeded. * In a webhook response when you determine that you handled the customer issue. # Indicates that the conversation succeeded.
"metadata": { # Custom metadata. Dialogflow doesn't impose any structure on this.
"a_key": "", # Properties of the object.
},
},
"endInteraction": { # Indicates that interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only. A signal that indicates the interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only when the conversation reaches `END_SESSION` page. It is not supposed to be defined by the user. It's guaranteed that there is at most one such message in each response.
},
"knowledgeInfoCard": { # Represents info card response. If the response contains generative knowledge prediction, Dialogflow will return a payload with Infobot Messenger compatible info card. Otherwise, the info card response is skipped. # Represents info card for knowledge answers, to be better rendered in Dialogflow Messenger.
},
"liveAgentHandoff": { # Indicates that the conversation should be handed off to a live agent. Dialogflow only uses this to determine which conversations were handed off to a human agent for measurement purposes. What else to do with this signal is up to you and your handoff procedures. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates something went extremely wrong in the conversation. * In a webhook response when you determine that the customer issue can only be handled by a human. # Hands off conversation to a human agent.
"metadata": { # Custom metadata for your handoff procedure. Dialogflow doesn't impose any structure on this.
"a_key": "", # Properties of the object.
},
},
"mixedAudio": { # Represents an audio message that is composed of both segments synthesized from the Dialogflow agent prompts and ones hosted externally at the specified URIs. The external URIs are specified via play_audio. This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only. An audio response message composed of both the synthesized Dialogflow agent responses and responses defined via play_audio. This message is generated by Dialogflow only and not supposed to be defined by the user.
"segments": [ # Segments this audio response is composed of.
{ # Represents one segment of audio.
"allowPlaybackInterruption": True or False, # Output only. Whether the playback of this segment can be interrupted by the end user's speech and the client should then start the next Dialogflow request.
"audio": "A String", # Raw audio synthesized from the Dialogflow agent's response using the output config specified in the request.
"uri": "A String", # Client-specific URI that points to an audio clip accessible to the client. Dialogflow does not impose any validation on it.
},
],
},
"outputAudioText": { # A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. # A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message.
"allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request.
"ssml": "A String", # The SSML text to be synthesized. For more information, see [SSML](/speech/text-to-speech/docs/ssml).
"text": "A String", # The raw text to be synthesized.
},
"payload": { # Returns a response containing a custom, platform-specific payload.
"a_key": "", # Properties of the object.
},
"playAudio": { # Specifies an audio clip to be played by the client as part of the response. # Signal that the client should play an audio clip hosted at a client-specific URI. Dialogflow uses this to construct mixed_audio. However, Dialogflow itself does not try to read or process the URI in any way.
"allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request.
"audioUri": "A String", # Required. URI of the audio clip. Dialogflow does not impose any validation on this value. It is specific to the client that reads it.
},
"responseType": "A String", # Response type.
"telephonyTransferCall": { # Represents the signal that telles the client to transfer the phone call connected to the agent to a third-party endpoint. # A signal that the client should transfer the phone call connected to this agent to a third-party endpoint.
"phoneNumber": "A String", # Transfer the call to a phone number in [E.164 format](https://en.wikipedia.org/wiki/E.164).
},
"text": { # The text response message. # Returns a text response.
"allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request.
"text": [ # Required. A collection of text responses.
"A String",
],
},
},
],
"returnPartialResponses": True or False, # Whether Dialogflow should return currently queued fulfillment response messages in streaming APIs. If a webhook is specified, it happens before Dialogflow invokes webhook. Warning: 1) This flag only affects streaming API. Responses are still queued and returned once in non-streaming API. 2) The flag can be enabled in any fulfillment but only the first 3 partial responses will be returned. You may only want to apply it to fulfillments that have slow webhooks.
"setParameterActions": [ # Set parameter values before executing the webhook.
{ # Setting a parameter value.
"parameter": "A String", # Display name of the parameter.
"value": "", # The new value of the parameter. A null value clears the parameter.
},
],
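# For example (hypothetical parameter names): clear one parameter and preset
# another before the webhook is invoked:
#   "setParameterActions": [{"parameter": "cart", "value": None},
#                           {"parameter": "retry-count", "value": 0}]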
"tag": "A String", # The value of this field will be populated in the WebhookRequest `fulfillmentInfo.tag` field by Dialogflow when the associated webhook is called. The tag is typically used by the webhook service to identify which fulfillment is being called, but it could be used for other purposes. This field is required if `webhook` is specified.
"webhook": "A String", # The webhook to call. Format: `projects//locations//agents//webhooks/`.
},
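# Putting the pieces together, a compact illustrative fulfillment (all resource
# IDs and the tag are hypothetical placeholders):
#   "triggerFulfillment": {
#     "webhook": "projects/<project>/locations/<location>/agents/<agent>/webhooks/<webhook>",
#     "tag": "order-status",  # surfaced to the webhook as fulfillmentInfo.tag
#     "messages": [{"text": {"text": ["One moment while I check on that."]}}],
#     "returnPartialResponses": True,
#   }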
},
"multiLanguageSettings": { # Settings for multi-lingual agents. # Optional. Multi-lingual agent settings for this flow.
"enableMultiLanguageDetection": True or False, # Optional. Enable multi-language detection for this flow. This can be set only if agent level multi language setting is enabled.
"supportedResponseLanguageCodes": [ # Optional. Agent will respond in the detected language if the detected language code is in the supported resolved languages for this flow. This will be used only if multi-language training is enabled in the agent and multi-language detection is enabled in the flow. The supported languages must be a subset of the languages supported by the agent.
"A String",