From 0693eaef11a0aef49f6964636f3330857f1c0745 Mon Sep 17 00:00:00 2001
From: Yoshi Automation
Date: Tue, 2 Apr 2024 07:08:25 +0000
Subject: [PATCH] feat(discoveryengine): update the api

#### discoveryengine:v1alpha

The following keys were added:
- resources.projects.resources.locations.resources.rankingConfigs.methods.rank (Total Keys: 12)
- schemas.GoogleCloudDiscoveryengineV1alphaBigtableOptions (Total Keys: 19)
- schemas.GoogleCloudDiscoveryengineV1alphaBigtableSource (Total Keys: 6)
- schemas.GoogleCloudDiscoveryengineV1alphaChunk.properties.chunkMetadata (Total Keys: 2)
- schemas.GoogleCloudDiscoveryengineV1alphaChunk.properties.pageSpan.$ref (Total Keys: 1)
- schemas.GoogleCloudDiscoveryengineV1alphaChunkChunkMetadata (Total Keys: 6)
- schemas.GoogleCloudDiscoveryengineV1alphaChunkPageSpan (Total Keys: 6)
- schemas.GoogleCloudDiscoveryengineV1alphaCloudSqlSource (Total Keys: 8)
- schemas.GoogleCloudDiscoveryengineV1alphaFhirStoreSource (Total Keys: 4)
- schemas.GoogleCloudDiscoveryengineV1alphaFirestoreSource (Total Keys: 6)
- schemas.GoogleCloudDiscoveryengineV1alphaGroundingConfig (Total Keys: 3)
- schemas.GoogleCloudDiscoveryengineV1alphaImportCompletionSuggestionsMetadata (Total Keys: 6)
- schemas.GoogleCloudDiscoveryengineV1alphaImportCompletionSuggestionsResponse (Total Keys: 8)
- schemas.GoogleCloudDiscoveryengineV1alphaImportDocumentsRequest.properties.bigtableSource.$ref (Total Keys: 1)
- schemas.GoogleCloudDiscoveryengineV1alphaImportDocumentsRequest.properties.cloudSqlSource.$ref (Total Keys: 1)
- schemas.GoogleCloudDiscoveryengineV1alphaImportDocumentsRequest.properties.fhirStoreSource.$ref (Total Keys: 1)
- schemas.GoogleCloudDiscoveryengineV1alphaImportDocumentsRequest.properties.firestoreSource.$ref (Total Keys: 1)
- schemas.GoogleCloudDiscoveryengineV1alphaImportDocumentsRequest.properties.spannerSource.$ref (Total Keys: 1)
- schemas.GoogleCloudDiscoveryengineV1alphaRankRequest (Total Keys: 9)
- schemas.GoogleCloudDiscoveryengineV1alphaRankResponse (Total Keys: 4)
- schemas.GoogleCloudDiscoveryengineV1alphaRankingRecord (Total Keys: 6)
- schemas.GoogleCloudDiscoveryengineV1alphaSearchRequestContentSearchSpec.properties.chunkSpec.$ref (Total Keys: 1)
- schemas.GoogleCloudDiscoveryengineV1alphaSearchRequestContentSearchSpecChunkSpec (Total Keys: 6)
- schemas.GoogleCloudDiscoveryengineV1alphaSearchResponseSummaryReference.properties.chunkContents (Total Keys: 2)
- schemas.GoogleCloudDiscoveryengineV1alphaSearchResponseSummaryReferenceChunkContent (Total Keys: 4)
- schemas.GoogleCloudDiscoveryengineV1alphaSpannerSource (Total Keys: 7)
- schemas.GoogleCloudDiscoveryengineV1alphaTrainCustomModelResponse.properties.metrics (Total Keys: 3)
- schemas.GoogleCloudDiscoveryengineV1alphaWidgetConfig.properties.enableSearchAsYouType.type (Total Keys: 1)
- schemas.GoogleCloudDiscoveryengineV1betaTrainCustomModelResponse.properties.metrics (Total Keys: 3)
- schemas.GoogleCloudDiscoveryengineV1betaTuneEngineMetadata (Total Keys: 3)

#### discoveryengine:v1beta

The following keys were added:
- resources.projects.resources.locations.resources.collections.resources.engines.methods.pause (Total Keys: 12)
- resources.projects.resources.locations.resources.collections.resources.engines.methods.resume (Total Keys: 12)
- resources.projects.resources.locations.resources.collections.resources.engines.methods.tune (Total Keys: 12)
- resources.projects.resources.locations.resources.rankingConfigs.methods.rank (Total Keys: 12)
- schemas.GoogleCloudDiscoveryengineV1alphaGroundingConfig (Total Keys: 3)
- schemas.GoogleCloudDiscoveryengineV1alphaImportCompletionSuggestionsMetadata (Total Keys: 6)
- schemas.GoogleCloudDiscoveryengineV1alphaImportCompletionSuggestionsResponse (Total Keys: 8)
- schemas.GoogleCloudDiscoveryengineV1alphaTrainCustomModelResponse.properties.metrics (Total Keys: 3)
- schemas.GoogleCloudDiscoveryengineV1betaBigtableOptions (Total Keys: 19)
- schemas.GoogleCloudDiscoveryengineV1betaBigtableSource (Total Keys: 6)
- schemas.GoogleCloudDiscoveryengineV1betaCloudSqlSource (Total Keys: 8)
- schemas.GoogleCloudDiscoveryengineV1betaFhirStoreSource (Total Keys: 4)
- schemas.GoogleCloudDiscoveryengineV1betaFirestoreSource (Total Keys: 6)
- schemas.GoogleCloudDiscoveryengineV1betaImportDocumentsRequest.properties.bigtableSource.$ref (Total Keys: 1)
- schemas.GoogleCloudDiscoveryengineV1betaImportDocumentsRequest.properties.cloudSqlSource.$ref (Total Keys: 1)
- schemas.GoogleCloudDiscoveryengineV1betaImportDocumentsRequest.properties.fhirStoreSource.$ref (Total Keys: 1)
- schemas.GoogleCloudDiscoveryengineV1betaImportDocumentsRequest.properties.firestoreSource.$ref (Total Keys: 1)
- schemas.GoogleCloudDiscoveryengineV1betaImportDocumentsRequest.properties.spannerSource.$ref (Total Keys: 1)
- schemas.GoogleCloudDiscoveryengineV1betaPauseEngineRequest (Total Keys: 2)
- schemas.GoogleCloudDiscoveryengineV1betaRankRequest (Total Keys: 9)
- schemas.GoogleCloudDiscoveryengineV1betaRankResponse (Total Keys: 4)
- schemas.GoogleCloudDiscoveryengineV1betaRankingRecord (Total Keys: 6)
- schemas.GoogleCloudDiscoveryengineV1betaResumeEngineRequest (Total Keys: 2)
- schemas.GoogleCloudDiscoveryengineV1betaSearchResponseSummaryReference.properties.chunkContents (Total Keys: 2)
- schemas.GoogleCloudDiscoveryengineV1betaSearchResponseSummaryReferenceChunkContent (Total Keys: 4)
- schemas.GoogleCloudDiscoveryengineV1betaSpannerSource (Total Keys: 7)
- schemas.GoogleCloudDiscoveryengineV1betaTrainCustomModelResponse.properties.metrics (Total Keys: 3)
- schemas.GoogleCloudDiscoveryengineV1betaTuneEngineMetadata (Total Keys: 3)
- schemas.GoogleCloudDiscoveryengineV1betaTuneEngineRequest (Total Keys: 2)
---
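For context, a minimal sketch of calling the new `rankingConfigs.rank` method through `google-api-python-client`, assuming Application Default Credentials; the project and ranking-config names are placeholders, and the request fields follow the `RankRequest`/`RankingRecord` schemas added above:

```python
# Sketch: score candidate records against a query with the new
# rankingConfigs.rank method (v1alpha). Names below are placeholders.
from googleapiclient.discovery import build

client = build("discoveryengine", "v1alpha")

ranking_config = (
    "projects/my-project/locations/global"
    "/rankingConfigs/default_ranking_config"
)

response = (
    client.projects()
    .locations()
    .rankingConfigs()
    .rank(
        rankingConfig=ranking_config,
        body={
            "query": "how is quarterly revenue reported",
            "topN": 2,  # Return only the two best-scoring records.
            "records": [  # Each RankingRecord carries an id plus title and/or content.
                {"id": "doc-1", "title": "Revenue recognition policy"},
                {"id": "doc-2", "title": "Office relocation FAQ"},
            ],
        },
    )
    .execute()
)

for record in response.get("records", []):
    print(record["id"], record.get("score"))
```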
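Similarly, a sketch of `ImportDocuments` with one of the new structured sources; `firestoreSource` is shown here, and `bigtableSource`, `cloudSqlSource`, `fhirStoreSource`, and `spannerSource` occupy the same slot in the request body. All resource names are placeholders:

```python
# Sketch: import documents from a Firestore collection via the new
# firestoreSource field on ImportDocumentsRequest (v1alpha).
from googleapiclient.discovery import build

client = build("discoveryengine", "v1alpha")

parent = (
    "projects/my-project/locations/global/collections/default_collection"
    "/dataStores/my-data-store/branches/default_branch"
)

operation = (
    client.projects()
    .locations()
    .collections()
    .dataStores()
    .branches()
    .documents()
    .import_(
        parent=parent,
        body={
            "firestoreSource": {
                "projectId": "my-project",
                "databaseId": "(default)",
                "collectionId": "articles",
            },
            # FirestoreSource is listed as supporting hashed IDs, per the
            # updated autoGenerateIds documentation in this change.
            "autoGenerateIds": True,
            "reconciliationMode": "INCREMENTAL",
        },
    )
    .execute()
)

print(operation.get("name"))  # Long-running operation name to poll.
```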
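Finally, a sketch of the new `chunkSpec` knob on `contentSearchSpec`; per the updated docs it only takes effect when `searchResultMode` is set to `CHUNKS`, and it assumes a data store created with chunk-mode search enabled. The serving config name is a placeholder:

```python
# Sketch: return one neighboring chunk on each side of every matched
# chunk using the new chunkSpec (v1alpha).
from googleapiclient.discovery import build

client = build("discoveryengine", "v1alpha")

serving_config = (
    "projects/my-project/locations/global/collections/default_collection"
    "/dataStores/my-data-store/servingConfigs/default_search"
)

response = (
    client.projects()
    .locations()
    .collections()
    .dataStores()
    .servingConfigs()
    .search(
        servingConfig=serving_config,
        body={
            "query": "termination clause",
            "contentSearchSpec": {
                "searchResultMode": "CHUNKS",
                "chunkSpec": {
                    "numPreviousChunks": 1,  # Maximum allowed value is 3.
                    "numNextChunks": 1,
                },
            },
        },
    )
    .execute()
)

for result in response.get("results", []):
    chunk = result.get("chunk", {})
    metadata = chunk.get("chunkMetadata", {})
    print(chunk.get("id"), chunk.get("pageSpan"), len(metadata.get("nextChunks", [])))
```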
 .../discoveryengine_v1alpha.locations.html         |  63 +-
 ....dataStores.branches.documents.chunks.html      |  24 +
 ...ections.dataStores.branches.documents.html      |  52 +-
 ....collections.dataStores.conversations.html      |  66 ++
 ...collections.dataStores.servingConfigs.html      |  38 +
 ...ons.collections.engines.conversations.html      |  66 ++
 ...ns.collections.engines.servingConfigs.html      |  38 +
 ....dataStores.branches.documents.chunks.html      |  24 +
 ...cations.dataStores.branches.documents.html      |  52 +-
 ...ts.locations.dataStores.conversations.html      |  66 ++
 ...s.locations.dataStores.servingConfigs.html      |  38 +
 ...veryengine_v1alpha.projects.locations.html      |   5 +
 ...pha.projects.locations.rankingConfigs.html      | 133 ++++
 ...ections.dataStores.branches.documents.html      |  52 +-
 ....collections.dataStores.conversations.html      |  54 ++
 ...collections.dataStores.servingConfigs.html      |   6 +
 ...ons.collections.engines.conversations.html      |  54 ++
 ...rojects.locations.collections.engines.html      | 158 +++++
 ...ns.collections.engines.servingConfigs.html      |   6 +
 ...cations.dataStores.branches.documents.html      |  52 +-
 ...ts.locations.dataStores.conversations.html      |  54 ++
 ...s.locations.dataStores.servingConfigs.html      |   6 +
 ...overyengine_v1beta.projects.locations.html      |   5 +
 ...eta.projects.locations.rankingConfigs.html      | 133 ++++
 .../documents/discoveryengine.v1alpha.json         | 652 ++++++++++++++++-
 .../documents/discoveryengine.v1beta.json          | 664 +++++++++++++++++-
 26 files changed, 2485 insertions(+), 76 deletions(-)
 create mode 100644 docs/dyn/discoveryengine_v1alpha.projects.locations.rankingConfigs.html
 create mode 100644 docs/dyn/discoveryengine_v1beta.projects.locations.rankingConfigs.html

diff --git a/docs/dyn/discoveryengine_v1alpha.locations.html b/docs/dyn/discoveryengine_v1alpha.locations.html
index b8311a14d26..b7a6a108b20 100644
--- a/docs/dyn/discoveryengine_v1alpha.locations.html
+++ b/docs/dyn/discoveryengine_v1alpha.locations.html
@@ -128,16 +128,20 @@

Method Details

{ # Read-only data store component that contains data stores fields that may be used for filtering, it's the child of `CollectionComponent`. "displayName": "A String", # The display name of the data store. "id": "A String", # Output only. the identifier of the data store, used for widget service. For now it refers to data_store_id, in the future we will migrate the field to encrypted data store name UUID. - "name": "A String", # The name of the data store. It should be data store resource name Format: `projects/{project_number}/locations/{location}/collections/{collection_id}/dataStores/{data_store_id}`. For widget service usage, such look up widget config, returned name should be skipped. + "name": "A String", # The name of the data store. It should be data store resource name Format: `projects/{project_number}/locations/{location}/collections/{collection_id}/dataStores/{data_store_id}`. For APIs under WidgetService, such as LookUpWidgetConfig, the project number and location part is erased in this field. }, ], "displayName": "A String", # The display name of the collection. "id": "A String", # Output only. the identifier of the collection, used for widget service. For now it refers to collection_id, in the future we will migrate the field to encrypted collection name UUID. - "name": "A String", # The name of the collection. It should be collection resource name. Format: `projects/{project_number}/locations/{location}/collections/{collection_id}`. For widget service usage, such look up widget config, returned name should be skipped. + "name": "A String", # The name of the collection. It should be collection resource name. Format: `projects/{project_number}/locations/{location}/collections/{collection_id}`. For APIs under WidgetService, such as LookUpWidgetConfig, the project number and location part is erased in this field. }, ], "configId": "A String", # Output only. Unique obfuscated identifier of a WidgetConfig. "contentSearchSpec": { # A specification for configuring the behavior of content search. # The content search spec that configs the desired behavior of content search. + "chunkSpec": { # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS + "numNextChunks": 42, # The number of next chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no next chunks will be returned. + "numPreviousChunks": 42, # The number of previous chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no previous chunks will be returned. + }, "extractiveContentSpec": { # A specification for configuring the extractive content in a search response. # If there is no extractive_content_spec provided, there will be no extractive answer in the search response. "maxExtractiveAnswerCount": 42, # The maximum number of extractive answers returned in each search result. An extractive answer is a verbatim answer extracted from the original document, which provides a precise and contextually relevant answer to the search query. If the number of matching answers is less than the `max_extractive_answer_count`, return all of the answers. Otherwise, return the `max_extractive_answer_count`. At most five answers are returned for each SearchResult. 
"maxExtractiveSegmentCount": 42, # The max number of extractive segments returned in each search result. Only applied if the DataStore is set to DataStore.ContentConfig.CONTENT_REQUIRED or DataStore.solution_types is SOLUTION_TYPE_CHAT. An extractive segment is a text segment extracted from the original document that is relevant to the search query, and, in general, more verbose than an extractive answer. The segment could then be used as input for LLMs to generate summaries and answers. If the number of matching segments is less than `max_extractive_segment_count`, return all of the segments. Otherwise, return the `max_extractive_segment_count`. @@ -186,7 +190,7 @@

Method Details

}, }, "id": "A String", # Output only. the identifier of the data store, used for widget service. For now it refers to data_store_id, in the future we will migrate the field to encrypted data store name UUID. - "name": "A String", # The name of the data store. It should be data store resource name Format: `projects/{project_number}/locations/{location}/collections/{collection_id}/dataStores/{data_store_id}`. For widget service usage, such look up widget config, returned name should be skipped. + "name": "A String", # The name of the data store. It should be data store resource name Format: `projects/{project_number}/locations/{location}/collections/{collection_id}/dataStores/{data_store_id}`. For APIs under WidgetService, such as LookUpWidgetConfig, the project number and location part is erased in this field. }, ], "displayName": "A String", # Required. The human readable widget config display name. Used in Discovery UI. This field must be a UTF-8 encoded string with a length limit of 128 characters. Otherwise, an INVALID_ARGUMENT error is returned. @@ -195,6 +199,7 @@

Method Details

"enableQualityFeedback": True or False, # Turn on or off collecting the search result quality feedback from end users. "enableResultScore": True or False, # Whether to show the result score. "enableSafeSearch": True or False, # Whether to enable safe search. + "enableSearchAsYouType": True or False, # Whether to enable search-as-you-type behavior for the search widget "enableSnippetResultSummary": True or False, # Turn on or off summary for each snippets result. "enableSummarization": True or False, # Turn on or off summarization for the search response. "enableWebApp": True or False, # Whether to enable standalone web app. @@ -333,6 +338,12 @@

Method Details

}, "references": [ # Document References. { # Document reference. + "chunkContents": [ # List of cited chunk contents derived from document content. + { # Chunk content. + "content": "A String", # Chunk textual content. + "pageIdentifier": "A String", # Page identifier. + }, + ], "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`. "title": "A String", # Title of the document. "uri": "A String", # Cloud Storage or HTTP uri for the document. @@ -446,6 +457,12 @@

Method Details

}, "references": [ # Document References. { # Document reference. + "chunkContents": [ # List of cited chunk contents derived from document content. + { # Chunk content. + "content": "A String", # Chunk textual content. + "pageIdentifier": "A String", # Page identifier. + }, + ], "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`. "title": "A String", # Title of the document. "uri": "A String", # Cloud Storage or HTTP uri for the document. @@ -513,6 +530,12 @@

Method Details

}, "references": [ # Document References. { # Document reference. + "chunkContents": [ # List of cited chunk contents derived from document content. + { # Chunk content. + "content": "A String", # Chunk textual content. + "pageIdentifier": "A String", # Page identifier. + }, + ], "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`. "title": "A String", # Title of the document. "uri": "A String", # Cloud Storage or HTTP uri for the document. @@ -525,6 +548,14 @@

Method Details

"searchResults": [ # Search Results. { # Represents the search results. "chunk": { # Chunk captures all raw metadata information of items to be recommended or searched in the chunk mode. # The chunk data in the search response if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS. + "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk. + "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API. + # Object with schema name: GoogleCloudDiscoveryengineV1alphaChunk + ], + "previousChunks": [ # The previous chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_previous_chunks. This field is only populated on SearchService.Search API. + # Object with schema name: GoogleCloudDiscoveryengineV1alphaChunk + ], + }, "content": "A String", # Content is a string from a document (parsed content). "derivedStructData": { # Output only. This field is OUTPUT_ONLY. It contains derived data that are not in the original input document. "a_key": "", # Properties of the object. @@ -535,6 +566,10 @@

Method Details

}, "id": "A String", # Unique chunk id of the current chunk. "name": "A String", # The full resource name of the chunk. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document_id}/chunks/{chunk_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters. + "pageSpan": { # Page span of the chunk. # Page span of the chunk. + "pageEnd": 42, # The end page of the chunk. + "pageStart": 42, # The start page of the chunk. + }, }, "document": { # Document captures all raw metadata information of items to be recommended or searched. # The document data snippet in the search response. Only fields that are marked as retrievable are populated. "aclInfo": { # ACL Information of the Document. # Access control information for the document. @@ -608,6 +643,10 @@

Method Details

"branch": "A String", # The branch resource name, such as `projects/*/locations/global/collections/default_collection/dataStores/default_data_store/branches/0`. Use `default_branch` as the branch ID or leave this field empty, to search documents under the default branch. "canonicalFilter": "A String", # The default filter that is applied when a user performs a search without checking any filters on the search page. The filter applied to every search request when quality improvement such as query expansion is needed. In the case a query does not have a sufficient amount of results this filter will be used to determine whether or not to enable the query expansion flow. The original filter will still be used for the query expanded search. This field is strongly recommended to achieve high search quality. For more information about filter syntax, see SearchRequest.filter. "contentSearchSpec": { # A specification for configuring the behavior of content search. # A specification for configuring the behavior of content search. + "chunkSpec": { # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS + "numNextChunks": 42, # The number of next chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no next chunks will be returned. + "numPreviousChunks": 42, # The number of previous chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no previous chunks will be returned. + }, "extractiveContentSpec": { # A specification for configuring the extractive content in a search response. # If there is no extractive_content_spec provided, there will be no extractive answer in the search response. "maxExtractiveAnswerCount": 42, # The maximum number of extractive answers returned in each search result. An extractive answer is a verbatim answer extracted from the original document, which provides a precise and contextually relevant answer to the search query. If the number of matching answers is less than the `max_extractive_answer_count`, return all of the answers. Otherwise, return the `max_extractive_answer_count`. At most five answers are returned for each SearchResult. "maxExtractiveSegmentCount": 42, # The max number of extractive segments returned in each search result. Only applied if the DataStore is set to DataStore.ContentConfig.CONTENT_REQUIRED or DataStore.solution_types is SOLUTION_TYPE_CHAT. An extractive segment is a text segment extracted from the original document that is relevant to the search query, and, in general, more verbose than an extractive answer. The segment could then be used as input for LLMs to generate summaries and answers. If the number of matching segments is less than `max_extractive_segment_count`, return all of the segments. Otherwise, return the `max_extractive_segment_count`. @@ -777,6 +816,14 @@

Method Details

"results": [ # A list of matched documents. The order represents the ranking. { # Represents the search results. "chunk": { # Chunk captures all raw metadata information of items to be recommended or searched in the chunk mode. # The chunk data in the search response if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS. + "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk. + "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API. + # Object with schema name: GoogleCloudDiscoveryengineV1alphaChunk + ], + "previousChunks": [ # The previous chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_previous_chunks. This field is only populated on SearchService.Search API. + # Object with schema name: GoogleCloudDiscoveryengineV1alphaChunk + ], + }, "content": "A String", # Content is a string from a document (parsed content). "derivedStructData": { # Output only. This field is OUTPUT_ONLY. It contains derived data that are not in the original input document. "a_key": "", # Properties of the object. @@ -787,6 +834,10 @@

Method Details

}, "id": "A String", # Unique chunk id of the current chunk. "name": "A String", # The full resource name of the chunk. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document_id}/chunks/{chunk_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters. + "pageSpan": { # Page span of the chunk. # Page span of the chunk. + "pageEnd": 42, # The end page of the chunk. + "pageStart": 42, # The start page of the chunk. + }, }, "document": { # Document captures all raw metadata information of items to be recommended or searched. # The document data snippet in the search response. Only fields that are marked as retrievable are populated. "aclInfo": { # ACL Information of the Document. # Access control information for the document. @@ -858,6 +909,12 @@

Method Details

}, "references": [ # Document References. { # Document reference. + "chunkContents": [ # List of cited chunk contents derived from document content. + { # Chunk content. + "content": "A String", # Chunk textual content. + "pageIdentifier": "A String", # Page identifier. + }, + ], "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`. "title": "A String", # Title of the document. "uri": "A String", # Cloud Storage or HTTP uri for the document. diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.branches.documents.chunks.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.branches.documents.chunks.html index d529b95ef0d..f88cf00daa5 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.branches.documents.chunks.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.branches.documents.chunks.html @@ -107,6 +107,14 @@

Method Details

An object of the form: { # Chunk captures all raw metadata information of items to be recommended or searched in the chunk mode. + "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk. + "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API. + # Object with schema name: GoogleCloudDiscoveryengineV1alphaChunk + ], + "previousChunks": [ # The previous chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_previous_chunks. This field is only populated on SearchService.Search API. + # Object with schema name: GoogleCloudDiscoveryengineV1alphaChunk + ], + }, "content": "A String", # Content is a string from a document (parsed content). "derivedStructData": { # Output only. This field is OUTPUT_ONLY. It contains derived data that are not in the original input document. "a_key": "", # Properties of the object. @@ -117,6 +125,10 @@

Method Details

}, "id": "A String", # Unique chunk id of the current chunk. "name": "A String", # The full resource name of the chunk. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document_id}/chunks/{chunk_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters. + "pageSpan": { # Page span of the chunk. # Page span of the chunk. + "pageEnd": 42, # The end page of the chunk. + "pageStart": 42, # The start page of the chunk. + }, } @@ -139,6 +151,14 @@

Method Details

{ # Response message for ChunkService.ListChunks method. "chunks": [ # The Chunks. { # Chunk captures all raw metadata information of items to be recommended or searched in the chunk mode. + "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk. + "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API. + # Object with schema name: GoogleCloudDiscoveryengineV1alphaChunk + ], + "previousChunks": [ # The previous chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_previous_chunks. This field is only populated on SearchService.Search API. + # Object with schema name: GoogleCloudDiscoveryengineV1alphaChunk + ], + }, "content": "A String", # Content is a string from a document (parsed content). "derivedStructData": { # Output only. This field is OUTPUT_ONLY. It contains derived data that are not in the original input document. "a_key": "", # Properties of the object. @@ -149,6 +169,10 @@

Method Details

}, "id": "A String", # Unique chunk id of the current chunk. "name": "A String", # The full resource name of the chunk. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document_id}/chunks/{chunk_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters. + "pageSpan": { # Page span of the chunk. # Page span of the chunk. + "pageEnd": 42, # The end page of the chunk. + "pageStart": 42, # The start page of the chunk. + }, }, ], "nextPageToken": "A String", # A token that can be sent as ListChunksRequest.page_token to retrieve the next page. If this field is omitted, there are no subsequent pages. diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.branches.documents.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.branches.documents.html index b223bf33ab4..f6216154677 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.branches.documents.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.branches.documents.html @@ -302,7 +302,7 @@

Method Details

The object takes the form of: { # Request message for Import methods. - "autoGenerateIds": True or False, # Whether to automatically generate IDs for the documents if absent. If set to `true`, Document.ids are automatically generated based on the hash of the payload, where IDs may not be consistent during multiple imports. In which case ReconciliationMode.FULL is highly recommended to avoid duplicate contents. If unset or set to `false`, Document.ids have to be specified using id_field, otherwise, documents without IDs fail to be imported. Only set this field when using GcsSource or BigQuerySource, and when GcsSource.data_schema or BigQuerySource.data_schema is `custom` or `csv`. Otherwise, an INVALID_ARGUMENT error is thrown. + "autoGenerateIds": True or False, # Whether to automatically generate IDs for the documents if absent. If set to `true`, Document.ids are automatically generated based on the hash of the payload, where IDs may not be consistent during multiple imports. In which case ReconciliationMode.FULL is highly recommended to avoid duplicate contents. If unset or set to `false`, Document.ids have to be specified using id_field, otherwise, documents without IDs fail to be imported. Supported data sources: * GcsSource. GcsSource.data_schema must be `custom` or `csv`. Otherwise, an INVALID_ARGUMENT error is thrown. * BigQuerySource. BigQuerySource.data_schema must be `custom` or `csv`. Otherwise, an INVALID_ARGUMENT error is thrown. * SpannerSource * CloudSqlSource * FirestoreSource * BigtableSource "bigquerySource": { # BigQuery source import data from. # BigQuery input source. "dataSchema": "A String", # The schema to use when parsing the data from the source. Supported values for user event imports: * `user_event` (default): One UserEvent per row. Supported values for document imports: * `document` (default): One Document format per row. Each document must have a valid Document.id and one of Document.json_data or Document.struct_data. * `custom`: One custom data per row in arbitrary format that conforms to the defined Schema of the data store. This can only be used by Gen App Builder. "datasetId": "A String", # Required. The BigQuery data set to copy the data from with a length limit of 1,024 characters. @@ -315,16 +315,57 @@

Method Details

"projectId": "A String", # The project ID (can be project # or ID) that the BigQuery source is in with a length limit of 128 characters. If not specified, inherits the project ID from the parent request. "tableId": "A String", # Required. The BigQuery table to copy the data from with a length limit of 1,024 characters. }, + "bigtableSource": { # The Cloud Bigtable source for importing data # Cloud Bigtable input source. + "bigtableOptions": { # The Bigtable Options object that contains information to support the import. # Required. Bigtable options that contains information needed when parsing data into typed structures. For example, column type annotations. + "families": { # The mapping from family names to an object that contains column families level information for the given column family. If a family is not present in this map it will be ignored. + "a_key": { + "columns": [ # The list of objects that contains column level information for each column. If a column is not present in this list it will be ignored. + { + "encoding": "A String", # Optional. The encoding mode of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. This can be overridden for a specific column by listing that column in 'columns' and specifying an encoding for it. + "fieldName": "A String", # The field name to use for this column in the UCS document. The name has to match a-zA-Z0-9* If not set, we will parse it from the qualifier bytes with best effort. However, field name collisions could happen, where parsing behavior is undefined. + "qualifier": "A String", # Required. Qualifier of the column. If cannot decode with utf-8, store a base-64 encoded string. + "type": "A String", # Optional. The type of values in this column family. The values are expected to be encoded using HBase Bytes.toBytes function when the encoding value is set to BINARY. + }, + ], + "encoding": "A String", # Optional. The encoding mode of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. This can be overridden for a specific column by listing that column in 'columns' and specifying an encoding for it. + "fieldName": "A String", # The field name to use for this column family in the UCS document. The name has to match a-zA-Z0-9* If not set, we will parse it from the family name with best effort. However, due to difference naming pattern, there could be field name collisions, where parsing behavior is undefined. + "type": "A String", # Optional. The type of values in this column family. The values are expected to be encoded using HBase Bytes.toBytes function when the encoding value is set to BINARY. + }, + }, + "keyFieldName": "A String", # The field name used for saving row key value in the UCS document. The name has to match a-zA-Z0-9* + }, + "instanceId": "A String", # Required. The instance ID of the Cloud Bigtable that needs to be exported. + "projectId": "A String", # The project ID (can be project # or ID) that the Bigtable source is in with a length limit of 128 characters. If not specified, inherits the project ID from the parent request. + "tableId": "A String", # Required. The table ID of the Cloud Bigtable that needs to be exported. + }, + "cloudSqlSource": { # Cloud SQL source import data from. 
# Cloud SQL input source. + "databaseId": "A String", # Required. The Cloud SQL database to copy the data from with a length limit of 256 characters. + "gcsStagingDir": "A String", # Optional. Intermediate Cloud Storage directory used for the import with a length limit of 2,000 characters. Can be specified if one wants to have the Cloud SQL export to a specific Cloud Storage directory. Please ensure that the Cloud SQL service account has the necessary GCS Storage Admin permissions to access the specified GCS directory. + "instanceId": "A String", # Required. The Cloud SQL instance to copy the data from with a length limit of 256 characters. + "offload": True or False, # Optional. Option for serverless export. Enabling this option will incur additional cost. More info: https://cloud.google.com/sql/pricing#serverless + "projectId": "A String", # Optional. The project ID (can be project # or ID) that the Cloud SQL source is in with a length limit of 128 characters. If not specified, inherits the project ID from the parent request. + "tableId": "A String", # Required. The Cloud SQL table to copy the data from with a length limit of 256 characters. + }, "errorConfig": { # Configuration of destination for Import related errors. # The desired location of errors incurred during the Import. "gcsPrefix": "A String", # Cloud Storage prefix for import errors. This must be an empty, existing Cloud Storage directory. Import errors are written to sharded files in this directory, one per line, as a JSON-encoded `google.rpc.Status` message. }, + "fhirStoreSource": { # Cloud FhirStore source import data from. # FhirStore input source. + "fhirStore": "A String", # Required. The full resource name of the FHIR store to import data from, in the format of `projects/{project}/locations/{location}/datasets/{dataset}/fhirStores/{fhir_store}`. + "gcsStagingDir": "A String", # Intermediate Cloud Storage directory used for the import with a length limit of 2,000 characters. Can be specified if one wants to have the FhirStore export to a specific Cloud Storage directory. + }, + "firestoreSource": { # Firestore source import data from. # Firestore input source. + "collectionId": "A String", # Required. The Firestore collection to copy the data from with a length limit of 1500 characters. + "databaseId": "A String", # Required. The Firestore database to copy the data from with a length limit of 256 characters. + "gcsStagingDir": "A String", # Optional. Intermediate Cloud Storage directory used for the import with a length limit of 2,000 characters. Can be specified if one wants to have the Firestore export to a specific Cloud Storage directory. Please ensure that the Firestore service account has the necessary GCS Storage Admin permissions to access the specified GCS directory. + "projectId": "A String", # Optional. The project ID (can be project # or ID) that the Cloud SQL source is in with a length limit of 128 characters. If not specified, inherits the project ID from the parent request. + }, "gcsSource": { # Cloud Storage location for input content. # Cloud Storage location for the input content. "dataSchema": "A String", # The schema to use when parsing the data from the source. Supported values for document imports: * `document` (default): One JSON Document per line. Each document must have a valid Document.id. * `content`: Unstructured data (e.g. PDF, HTML). Each file matched by `input_uris` becomes a document, with the ID set to the first 128 bits of SHA256(URI) encoded as a hex string. 
* `custom`: One custom data JSON per row in arbitrary format that conforms to the defined Schema of the data store. This can only be used by Gen App Builder. * `csv`: A CSV file with header conforming to the defined Schema of the data store. Each entry after the header is imported as a Document. This can only be used by Gen App Builder. Supported values for user even imports: * `user_event` (default): One JSON UserEvent per line. "inputUris": [ # Required. Cloud Storage URIs to input files. URI can be up to 2000 characters long. URIs can match the full object path (for example, `gs://bucket/directory/object.json`) or a pattern matching one or more files, such as `gs://bucket/directory/*.json`. A request can contain at most 100 files (or 100,000 files if `data_schema` is `content`). Each file can be up to 2 GB (or 100 MB if `data_schema` is `content`). "A String", ], }, - "idField": "A String", # The field in the Cloud Storage and BigQuery sources that indicates the unique IDs of the documents. For GcsSource it is the key of the JSON field. For instance, `my_id` for JSON `{"my_id": "some_uuid"}`. For BigQuerySource it is the column name of the BigQuery table where the unique ids are stored. The values of the JSON field or the BigQuery column are used as the Document.ids. The JSON field or the BigQuery column must be of string type, and the values must be set as valid strings conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) with 1-63 characters. Otherwise, documents without valid IDs fail to be imported. Only set this field when using GcsSource or BigQuerySource, and when GcsSource.data_schema or BigQuerySource.data_schema is `custom`. And only set this field when auto_generate_ids is unset or set as `false`. Otherwise, an INVALID_ARGUMENT error is thrown. If it is unset, a default value `_id` is used when importing from the allowed data sources. + "idField": "A String", # The field indicates the ID field or column to be used as unique IDs of the documents. For GcsSource it is the key of the JSON field. For instance, `my_id` for JSON `{"my_id": "some_uuid"}`. For others, it may be the column name of the table where the unique ids are stored. The values of the JSON field or the table column are used as the Document.ids. The JSON field or the table column must be of string type, and the values must be set as valid strings conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) with 1-63 characters. Otherwise, documents without valid IDs fail to be imported. Only set this field when auto_generate_ids is unset or set as `false`. Otherwise, an INVALID_ARGUMENT error is thrown. If it is unset, a default value `_id` is used when importing from the allowed data sources. Supported data sources: * GcsSource. GcsSource.data_schema must be `custom` or `csv`. Otherwise, an INVALID_ARGUMENT error is thrown. * BigQuerySource. BigQuerySource.data_schema must be `custom` or `csv`. Otherwise, an INVALID_ARGUMENT error is thrown. * SpannerSource * CloudSqlSource * FirestoreSource * BigtableSource "inlineSource": { # The inline source for the input config for ImportDocuments method. # The Inline source for the input content for documents. "documents": [ # Required. A list of documents to update/create. Each document must have a valid Document.id. Recommended max of 100 items. { # Document captures all raw metadata information of items to be recommended or searched. @@ -361,6 +402,13 @@

Method Details

], }, "reconciliationMode": "A String", # The mode of reconciliation between existing documents and the documents to be imported. Defaults to ReconciliationMode.INCREMENTAL. + "spannerSource": { # The Spanner source for importing data # Spanner input source. + "databaseId": "A String", # Required. The database ID of the source Spanner table. + "enableDataBoost": True or False, # Optional. Whether to apply data boost on Spanner export. Enabling this option will incur additional cost. More info: https://cloud.google.com/spanner/docs/databoost/databoost-overview#billing_and_quotas + "instanceId": "A String", # Required. The instance ID of the source Spanner table. + "projectId": "A String", # The project ID that the Spanner source is in with a length limit of 128 characters. If not specified, inherits the project ID from the parent request. + "tableId": "A String", # Required. The table name of the Spanner database that needs to be imported. + }, } x__xgafv: string, V1 error format. diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.conversations.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.conversations.html index 8db311a98d1..5dffaf97606 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.conversations.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.conversations.html @@ -166,6 +166,12 @@

Method Details

}, "references": [ # Document References. { # Document reference. + "chunkContents": [ # List of cited chunk contents derived from document content. + { # Chunk content. + "content": "A String", # Chunk textual content. + "pageIdentifier": "A String", # Page identifier. + }, + ], "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`. "title": "A String", # Title of the document. "uri": "A String", # Cloud Storage or HTTP uri for the document. @@ -276,6 +282,12 @@

Method Details

}, "references": [ # Document References. { # Document reference. + "chunkContents": [ # List of cited chunk contents derived from document content. + { # Chunk content. + "content": "A String", # Chunk textual content. + "pageIdentifier": "A String", # Page identifier. + }, + ], "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`. "title": "A String", # Title of the document. "uri": "A String", # Cloud Storage or HTTP uri for the document. @@ -343,6 +355,12 @@

Method Details

}, "references": [ # Document References. { # Document reference. + "chunkContents": [ # List of cited chunk contents derived from document content. + { # Chunk content. + "content": "A String", # Chunk textual content. + "pageIdentifier": "A String", # Page identifier. + }, + ], "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`. "title": "A String", # Title of the document. "uri": "A String", # Cloud Storage or HTTP uri for the document. @@ -355,6 +373,14 @@

Method Details

"searchResults": [ # Search Results. { # Represents the search results. "chunk": { # Chunk captures all raw metadata information of items to be recommended or searched in the chunk mode. # The chunk data in the search response if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS. + "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk. + "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API. + # Object with schema name: GoogleCloudDiscoveryengineV1alphaChunk + ], + "previousChunks": [ # The previous chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_previous_chunks. This field is only populated on SearchService.Search API. + # Object with schema name: GoogleCloudDiscoveryengineV1alphaChunk + ], + }, "content": "A String", # Content is a string from a document (parsed content). "derivedStructData": { # Output only. This field is OUTPUT_ONLY. It contains derived data that are not in the original input document. "a_key": "", # Properties of the object. @@ -365,6 +391,10 @@

Method Details

}, "id": "A String", # Unique chunk id of the current chunk. "name": "A String", # The full resource name of the chunk. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document_id}/chunks/{chunk_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters. + "pageSpan": { # Page span of the chunk. # Page span of the chunk. + "pageEnd": 42, # The end page of the chunk. + "pageStart": 42, # The start page of the chunk. + }, }, "document": { # Document captures all raw metadata information of items to be recommended or searched. # The document data snippet in the search response. Only fields that are marked as retrievable are populated. "aclInfo": { # ACL Information of the Document. # Access control information for the document. @@ -463,6 +493,12 @@

Method Details

}, "references": [ # Document References. { # Document reference. + "chunkContents": [ # List of cited chunk contents derived from document content. + { # Chunk content. + "content": "A String", # Chunk textual content. + "pageIdentifier": "A String", # Page identifier. + }, + ], "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`. "title": "A String", # Title of the document. "uri": "A String", # Cloud Storage or HTTP uri for the document. @@ -541,6 +577,12 @@

Method Details

}, "references": [ # Document References. { # Document reference. + "chunkContents": [ # List of cited chunk contents derived from document content. + { # Chunk content. + "content": "A String", # Chunk textual content. + "pageIdentifier": "A String", # Page identifier. + }, + ], "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`. "title": "A String", # Title of the document. "uri": "A String", # Cloud Storage or HTTP uri for the document. @@ -644,6 +686,12 @@

Method Details

}, "references": [ # Document References. { # Document reference. + "chunkContents": [ # List of cited chunk contents derived from document content. + { # Chunk content. + "content": "A String", # Chunk textual content. + "pageIdentifier": "A String", # Page identifier. + }, + ], "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`. "title": "A String", # Title of the document. "uri": "A String", # Cloud Storage or HTTP uri for the document. @@ -735,6 +783,12 @@

Method Details

}, "references": [ # Document References. { # Document reference. + "chunkContents": [ # List of cited chunk contents derived from document content. + { # Chunk content. + "content": "A String", # Chunk textual content. + "pageIdentifier": "A String", # Page identifier. + }, + ], "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`. "title": "A String", # Title of the document. "uri": "A String", # Cloud Storage or HTTP uri for the document. @@ -832,6 +886,12 @@

Method Details

}, "references": [ # Document References. { # Document reference. + "chunkContents": [ # List of cited chunk contents derived from document content. + { # Chunk content. + "content": "A String", # Chunk textual content. + "pageIdentifier": "A String", # Page identifier. + }, + ], "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`. "title": "A String", # Title of the document. "uri": "A String", # Cloud Storage or HTTP uri for the document. @@ -911,6 +971,12 @@

Method Details

}, "references": [ # Document References. { # Document reference. + "chunkContents": [ # List of cited chunk contents derived from document content. + { # Chunk content. + "content": "A String", # Chunk textual content. + "pageIdentifier": "A String", # Page identifier. + }, + ], "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`. "title": "A String", # Title of the document. "uri": "A String", # Cloud Storage or HTTP uri for the document. diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.servingConfigs.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.servingConfigs.html index 2b3d32fb51d..100afbb556d 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.servingConfigs.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.servingConfigs.html @@ -139,6 +139,10 @@

Method Details

], "genericConfig": { # Specifies the configurations needed for Generic Discovery.Currently we support: * `content_search_spec`: configuration for generic content search. # The GenericConfig of the serving configuration. "contentSearchSpec": { # A specification for configuring the behavior of content search. # Specifies the expected behavior of content search. Only valid for content-search enabled data store. + "chunkSpec": { # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS + "numNextChunks": 42, # The number of next chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no next chunks will be returned. + "numPreviousChunks": 42, # The number of previous chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no previous chunks will be returned. + }, "extractiveContentSpec": { # A specification for configuring the extractive content in a search response. # If there is no extractive_content_spec provided, there will be no extractive answer in the search response. "maxExtractiveAnswerCount": 42, # The maximum number of extractive answers returned in each search result. An extractive answer is a verbatim answer extracted from the original document, which provides a precise and contextually relevant answer to the search query. If the number of matching answers is less than the `max_extractive_answer_count`, return all of the answers. Otherwise, return the `max_extractive_answer_count`. At most five answers are returned for each SearchResult. "maxExtractiveSegmentCount": 42, # The max number of extractive segments returned in each search result. Only applied if the DataStore is set to DataStore.ContentConfig.CONTENT_REQUIRED or DataStore.solution_types is SOLUTION_TYPE_CHAT. An extractive segment is a text segment extracted from the original document that is relevant to the search query, and, in general, more verbose than an extractive answer. The segment could then be used as input for LLMs to generate summaries and answers. If the number of matching segments is less than `max_extractive_segment_count`, return all of the segments. Otherwise, return the `max_extractive_segment_count`. @@ -242,6 +246,10 @@

Method Details

], "genericConfig": { # Specifies the configurations needed for Generic Discovery.Currently we support: * `content_search_spec`: configuration for generic content search. # The GenericConfig of the serving configuration. "contentSearchSpec": { # A specification for configuring the behavior of content search. # Specifies the expected behavior of content search. Only valid for content-search enabled data store. + "chunkSpec": { # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS + "numNextChunks": 42, # The number of next chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no next chunks will be returned. + "numPreviousChunks": 42, # The number of previous chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no previous chunks will be returned. + }, "extractiveContentSpec": { # A specification for configuring the extractive content in a search response. # If there is no extractive_content_spec provided, there will be no extractive answer in the search response. "maxExtractiveAnswerCount": 42, # The maximum number of extractive answers returned in each search result. An extractive answer is a verbatim answer extracted from the original document, which provides a precise and contextually relevant answer to the search query. If the number of matching answers is less than the `max_extractive_answer_count`, return all of the answers. Otherwise, return the `max_extractive_answer_count`. At most five answers are returned for each SearchResult. "maxExtractiveSegmentCount": 42, # The max number of extractive segments returned in each search result. Only applied if the DataStore is set to DataStore.ContentConfig.CONTENT_REQUIRED or DataStore.solution_types is SOLUTION_TYPE_CHAT. An extractive segment is a text segment extracted from the original document that is relevant to the search query, and, in general, more verbose than an extractive answer. The segment could then be used as input for LLMs to generate summaries and answers. If the number of matching segments is less than `max_extractive_segment_count`, return all of the segments. Otherwise, return the `max_extractive_segment_count`. @@ -351,6 +359,10 @@

Method Details

], "genericConfig": { # Specifies the configurations needed for Generic Discovery.Currently we support: * `content_search_spec`: configuration for generic content search. # The GenericConfig of the serving configuration. "contentSearchSpec": { # A specification for configuring the behavior of content search. # Specifies the expected behavior of content search. Only valid for content-search enabled data store. + "chunkSpec": { # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS + "numNextChunks": 42, # The number of next chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no next chunks will be returned. + "numPreviousChunks": 42, # The number of previous chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no previous chunks will be returned. + }, "extractiveContentSpec": { # A specification for configuring the extractive content in a search response. # If there is no extractive_content_spec provided, there will be no extractive answer in the search response. "maxExtractiveAnswerCount": 42, # The maximum number of extractive answers returned in each search result. An extractive answer is a verbatim answer extracted from the original document, which provides a precise and contextually relevant answer to the search query. If the number of matching answers is less than the `max_extractive_answer_count`, return all of the answers. Otherwise, return the `max_extractive_answer_count`. At most five answers are returned for each SearchResult. "maxExtractiveSegmentCount": 42, # The max number of extractive segments returned in each search result. Only applied if the DataStore is set to DataStore.ContentConfig.CONTENT_REQUIRED or DataStore.solution_types is SOLUTION_TYPE_CHAT. An extractive segment is a text segment extracted from the original document that is relevant to the search query, and, in general, more verbose than an extractive answer. The segment could then be used as input for LLMs to generate summaries and answers. If the number of matching segments is less than `max_extractive_segment_count`, return all of the segments. Otherwise, return the `max_extractive_segment_count`. @@ -443,6 +455,10 @@

@@ -443,6 +455,10 @@ Method Details
     ],
     "genericConfig": { # Specifies the configurations needed for Generic Discovery.Currently we support: * `content_search_spec`: configuration for generic content search. # The GenericConfig of the serving configuration.
       "contentSearchSpec": { # A specification for configuring the behavior of content search. # Specifies the expected behavior of content search. Only valid for content-search enabled data store.
+        "chunkSpec": { # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS
+          "numNextChunks": 42, # The number of next chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no next chunks will be returned.
+          "numPreviousChunks": 42, # The number of previous chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no previous chunks will be returned.
+        },
         "extractiveContentSpec": { # A specification for configuring the extractive content in a search response. # If there is no extractive_content_spec provided, there will be no extractive answer in the search response.
           "maxExtractiveAnswerCount": 42, # The maximum number of extractive answers returned in each search result. An extractive answer is a verbatim answer extracted from the original document, which provides a precise and contextually relevant answer to the search query. If the number of matching answers is less than the `max_extractive_answer_count`, return all of the answers. Otherwise, return the `max_extractive_answer_count`. At most five answers are returned for each SearchResult.
           "maxExtractiveSegmentCount": 42, # The max number of extractive segments returned in each search result. Only applied if the DataStore is set to DataStore.ContentConfig.CONTENT_REQUIRED or DataStore.solution_types is SOLUTION_TYPE_CHAT. An extractive segment is a text segment extracted from the original document that is relevant to the search query, and, in general, more verbose than an extractive answer. The segment could then be used as input for LLMs to generate summaries and answers. If the number of matching segments is less than `max_extractive_segment_count`, return all of the segments. Otherwise, return the `max_extractive_segment_count`.

@@ -677,6 +693,10 @@ Method Details
     "branch": "A String", # The branch resource name, such as `projects/*/locations/global/collections/default_collection/dataStores/default_data_store/branches/0`. Use `default_branch` as the branch ID or leave this field empty, to search documents under the default branch.
     "canonicalFilter": "A String", # The default filter that is applied when a user performs a search without checking any filters on the search page. The filter applied to every search request when quality improvement such as query expansion is needed. In the case a query does not have a sufficient amount of results this filter will be used to determine whether or not to enable the query expansion flow. The original filter will still be used for the query expanded search. This field is strongly recommended to achieve high search quality. For more information about filter syntax, see SearchRequest.filter.
     "contentSearchSpec": { # A specification for configuring the behavior of content search. # A specification for configuring the behavior of content search.
+      "chunkSpec": { # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS
+        "numNextChunks": 42, # The number of next chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no next chunks will be returned.
+        "numPreviousChunks": 42, # The number of previous chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no previous chunks will be returned.
+      },
       "extractiveContentSpec": { # A specification for configuring the extractive content in a search response. # If there is no extractive_content_spec provided, there will be no extractive answer in the search response.
         "maxExtractiveAnswerCount": 42, # The maximum number of extractive answers returned in each search result. An extractive answer is a verbatim answer extracted from the original document, which provides a precise and contextually relevant answer to the search query. If the number of matching answers is less than the `max_extractive_answer_count`, return all of the answers. Otherwise, return the `max_extractive_answer_count`. At most five answers are returned for each SearchResult.
         "maxExtractiveSegmentCount": 42, # The max number of extractive segments returned in each search result. Only applied if the DataStore is set to DataStore.ContentConfig.CONTENT_REQUIRED or DataStore.solution_types is SOLUTION_TYPE_CHAT. An extractive segment is a text segment extracted from the original document that is relevant to the search query, and, in general, more verbose than an extractive answer. The segment could then be used as input for LLMs to generate summaries and answers. If the number of matching segments is less than `max_extractive_segment_count`, return all of the segments. Otherwise, return the `max_extractive_segment_count`.

@@ -844,6 +864,14 @@ Method Details
     "results": [ # A list of matched documents. The order represents the ranking.
       { # Represents the search results.
         "chunk": { # Chunk captures all raw metadata information of items to be recommended or searched in the chunk mode. # The chunk data in the search response if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS.
+          "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk.
+            "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API.
+              # Object with schema name: GoogleCloudDiscoveryengineV1alphaChunk
+            ],
+            "previousChunks": [ # The previous chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_previous_chunks. This field is only populated on SearchService.Search API.
+              # Object with schema name: GoogleCloudDiscoveryengineV1alphaChunk
+            ],
+          },
           "content": "A String", # Content is a string from a document (parsed content).
           "derivedStructData": { # Output only. This field is OUTPUT_ONLY. It contains derived data that are not in the original input document.
             "a_key": "", # Properties of the object.

@@ -854,6 +882,10 @@ Method Details
           },
           "id": "A String", # Unique chunk id of the current chunk.
           "name": "A String", # The full resource name of the chunk. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document_id}/chunks/{chunk_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters.
+          "pageSpan": { # Page span of the chunk. # Page span of the chunk.
+            "pageEnd": 42, # The end page of the chunk.
+            "pageStart": 42, # The start page of the chunk.
+          },
         },
         "document": { # Document captures all raw metadata information of items to be recommended or searched. # The document data snippet in the search response. Only fields that are marked as retrievable are populated.
           "aclInfo": { # ACL Information of the Document. # Access control information for the document.

@@ -925,6 +957,12 @@ Method Details
     },
     "references": [ # Document References.
       { # Document reference.
+        "chunkContents": [ # List of cited chunk contents derived from document content.
+          { # Chunk content.
+            "content": "A String", # Chunk textual content.
+            "pageIdentifier": "A String", # Page identifier.
+          },
+        ],
         "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`.
         "title": "A String", # Title of the document.
         "uri": "A String", # Cloud Storage or HTTP uri for the document.
diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.conversations.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.conversations.html
index 9392db65911..b59db40688d 100644
--- a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.conversations.html
+++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.conversations.html

@@ -166,6 +172,12 @@ Method Details
     },
     "references": [ # Document References.
       { # Document reference.
+        "chunkContents": [ # List of cited chunk contents derived from document content.
+          { # Chunk content.
+            "content": "A String", # Chunk textual content.
+            "pageIdentifier": "A String", # Page identifier.
+          },
+        ],
         "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`.
         "title": "A String", # Title of the document.
         "uri": "A String", # Cloud Storage or HTTP uri for the document.

@@ -276,6 +282,12 @@ Method Details
     },
     "references": [ # Document References.
       { # Document reference.
+        "chunkContents": [ # List of cited chunk contents derived from document content.
+          { # Chunk content.
+            "content": "A String", # Chunk textual content.
+            "pageIdentifier": "A String", # Page identifier.
+          },
+        ],
         "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`.
         "title": "A String", # Title of the document.
         "uri": "A String", # Cloud Storage or HTTP uri for the document.

@@ -343,6 +355,12 @@ Method Details
     },
     "references": [ # Document References.
       { # Document reference.
+        "chunkContents": [ # List of cited chunk contents derived from document content.
+          { # Chunk content.
+            "content": "A String", # Chunk textual content.
+            "pageIdentifier": "A String", # Page identifier.
+          },
+        ],
         "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`.
         "title": "A String", # Title of the document.
         "uri": "A String", # Cloud Storage or HTTP uri for the document.

@@ -355,6 +373,14 @@ Method Details
     "searchResults": [ # Search Results.
       { # Represents the search results.
         "chunk": { # Chunk captures all raw metadata information of items to be recommended or searched in the chunk mode. # The chunk data in the search response if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS.
+          "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk.
+            "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API.
+              # Object with schema name: GoogleCloudDiscoveryengineV1alphaChunk
+            ],
+            "previousChunks": [ # The previous chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_previous_chunks. This field is only populated on SearchService.Search API.
+              # Object with schema name: GoogleCloudDiscoveryengineV1alphaChunk
+            ],
+          },
           "content": "A String", # Content is a string from a document (parsed content).
           "derivedStructData": { # Output only. This field is OUTPUT_ONLY. It contains derived data that are not in the original input document.
             "a_key": "", # Properties of the object.

@@ -365,6 +391,10 @@ Method Details
           },
           "id": "A String", # Unique chunk id of the current chunk.
           "name": "A String", # The full resource name of the chunk. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document_id}/chunks/{chunk_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters.
+          "pageSpan": { # Page span of the chunk. # Page span of the chunk.
+            "pageEnd": 42, # The end page of the chunk.
+            "pageStart": 42, # The start page of the chunk.
+          },
         },
         "document": { # Document captures all raw metadata information of items to be recommended or searched. # The document data snippet in the search response. Only fields that are marked as retrievable are populated.
           "aclInfo": { # ACL Information of the Document. # Access control information for the document.

@@ -463,6 +493,12 @@ Method Details
     },
     "references": [ # Document References.
       { # Document reference.
+        "chunkContents": [ # List of cited chunk contents derived from document content.
+          { # Chunk content.
+            "content": "A String", # Chunk textual content.
+            "pageIdentifier": "A String", # Page identifier.
+          },
+        ],
         "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`.
         "title": "A String", # Title of the document.
         "uri": "A String", # Cloud Storage or HTTP uri for the document.

@@ -541,6 +577,12 @@ Method Details
     },
     "references": [ # Document References.
       { # Document reference.
+        "chunkContents": [ # List of cited chunk contents derived from document content.
+          { # Chunk content.
+            "content": "A String", # Chunk textual content.
+            "pageIdentifier": "A String", # Page identifier.
+          },
+        ],
         "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`.
         "title": "A String", # Title of the document.
         "uri": "A String", # Cloud Storage or HTTP uri for the document.

@@ -644,6 +686,12 @@ Method Details
     },
     "references": [ # Document References.
       { # Document reference.
+        "chunkContents": [ # List of cited chunk contents derived from document content.
+          { # Chunk content.
+            "content": "A String", # Chunk textual content.
+            "pageIdentifier": "A String", # Page identifier.
+          },
+        ],
         "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`.
         "title": "A String", # Title of the document.
         "uri": "A String", # Cloud Storage or HTTP uri for the document.

@@ -735,6 +783,12 @@ Method Details
     },
     "references": [ # Document References.
       { # Document reference.
+        "chunkContents": [ # List of cited chunk contents derived from document content.
+          { # Chunk content.
+            "content": "A String", # Chunk textual content.
+            "pageIdentifier": "A String", # Page identifier.
+          },
+        ],
         "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`.
         "title": "A String", # Title of the document.
         "uri": "A String", # Cloud Storage or HTTP uri for the document.

@@ -832,6 +886,12 @@ Method Details
     },
     "references": [ # Document References.
       { # Document reference.
+        "chunkContents": [ # List of cited chunk contents derived from document content.
+          { # Chunk content.
+            "content": "A String", # Chunk textual content.
+            "pageIdentifier": "A String", # Page identifier.
+          },
+        ],
         "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`.
         "title": "A String", # Title of the document.
         "uri": "A String", # Cloud Storage or HTTP uri for the document.

@@ -911,6 +971,12 @@ Method Details
     },
     "references": [ # Document References.
       { # Document reference.
+        "chunkContents": [ # List of cited chunk contents derived from document content.
+          { # Chunk content.
+            "content": "A String", # Chunk textual content.
+            "pageIdentifier": "A String", # Page identifier.
+          },
+        ],
         "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`.
         "title": "A String", # Title of the document.
         "uri": "A String", # Cloud Storage or HTTP uri for the document.
diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.servingConfigs.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.servingConfigs.html
index 371a3e02f41..fb860aa6081 100644
--- a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.servingConfigs.html
+++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.servingConfigs.html

@@ -139,6 +139,10 @@ Method Details
     ],
     "genericConfig": { # Specifies the configurations needed for Generic Discovery.Currently we support: * `content_search_spec`: configuration for generic content search. # The GenericConfig of the serving configuration.
       "contentSearchSpec": { # A specification for configuring the behavior of content search. # Specifies the expected behavior of content search. Only valid for content-search enabled data store.
+        "chunkSpec": { # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS
+          "numNextChunks": 42, # The number of next chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no next chunks will be returned.
+          "numPreviousChunks": 42, # The number of previous chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no previous chunks will be returned.
+        },
         "extractiveContentSpec": { # A specification for configuring the extractive content in a search response. # If there is no extractive_content_spec provided, there will be no extractive answer in the search response.
           "maxExtractiveAnswerCount": 42, # The maximum number of extractive answers returned in each search result. An extractive answer is a verbatim answer extracted from the original document, which provides a precise and contextually relevant answer to the search query. If the number of matching answers is less than the `max_extractive_answer_count`, return all of the answers. Otherwise, return the `max_extractive_answer_count`. At most five answers are returned for each SearchResult.
           "maxExtractiveSegmentCount": 42, # The max number of extractive segments returned in each search result. Only applied if the DataStore is set to DataStore.ContentConfig.CONTENT_REQUIRED or DataStore.solution_types is SOLUTION_TYPE_CHAT. An extractive segment is a text segment extracted from the original document that is relevant to the search query, and, in general, more verbose than an extractive answer. The segment could then be used as input for LLMs to generate summaries and answers. If the number of matching segments is less than `max_extractive_segment_count`, return all of the segments. Otherwise, return the `max_extractive_segment_count`.

@@ -242,6 +246,10 @@ Method Details
     ],
     "genericConfig": { # Specifies the configurations needed for Generic Discovery.Currently we support: * `content_search_spec`: configuration for generic content search. # The GenericConfig of the serving configuration.
       "contentSearchSpec": { # A specification for configuring the behavior of content search. # Specifies the expected behavior of content search. Only valid for content-search enabled data store.
+        "chunkSpec": { # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS
+          "numNextChunks": 42, # The number of next chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no next chunks will be returned.
+          "numPreviousChunks": 42, # The number of previous chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no previous chunks will be returned.
+        },
         "extractiveContentSpec": { # A specification for configuring the extractive content in a search response. # If there is no extractive_content_spec provided, there will be no extractive answer in the search response.
           "maxExtractiveAnswerCount": 42, # The maximum number of extractive answers returned in each search result. An extractive answer is a verbatim answer extracted from the original document, which provides a precise and contextually relevant answer to the search query. If the number of matching answers is less than the `max_extractive_answer_count`, return all of the answers. Otherwise, return the `max_extractive_answer_count`. At most five answers are returned for each SearchResult.
           "maxExtractiveSegmentCount": 42, # The max number of extractive segments returned in each search result. Only applied if the DataStore is set to DataStore.ContentConfig.CONTENT_REQUIRED or DataStore.solution_types is SOLUTION_TYPE_CHAT. An extractive segment is a text segment extracted from the original document that is relevant to the search query, and, in general, more verbose than an extractive answer. The segment could then be used as input for LLMs to generate summaries and answers. If the number of matching segments is less than `max_extractive_segment_count`, return all of the segments. Otherwise, return the `max_extractive_segment_count`.

@@ -351,6 +359,10 @@ Method Details
     ],
     "genericConfig": { # Specifies the configurations needed for Generic Discovery.Currently we support: * `content_search_spec`: configuration for generic content search. # The GenericConfig of the serving configuration.
       "contentSearchSpec": { # A specification for configuring the behavior of content search. # Specifies the expected behavior of content search. Only valid for content-search enabled data store.
+        "chunkSpec": { # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS
+          "numNextChunks": 42, # The number of next chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no next chunks will be returned.
+          "numPreviousChunks": 42, # The number of previous chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no previous chunks will be returned.
+        },
         "extractiveContentSpec": { # A specification for configuring the extractive content in a search response. # If there is no extractive_content_spec provided, there will be no extractive answer in the search response.
           "maxExtractiveAnswerCount": 42, # The maximum number of extractive answers returned in each search result. An extractive answer is a verbatim answer extracted from the original document, which provides a precise and contextually relevant answer to the search query. If the number of matching answers is less than the `max_extractive_answer_count`, return all of the answers. Otherwise, return the `max_extractive_answer_count`. At most five answers are returned for each SearchResult.
           "maxExtractiveSegmentCount": 42, # The max number of extractive segments returned in each search result. Only applied if the DataStore is set to DataStore.ContentConfig.CONTENT_REQUIRED or DataStore.solution_types is SOLUTION_TYPE_CHAT. An extractive segment is a text segment extracted from the original document that is relevant to the search query, and, in general, more verbose than an extractive answer. The segment could then be used as input for LLMs to generate summaries and answers. If the number of matching segments is less than `max_extractive_segment_count`, return all of the segments. Otherwise, return the `max_extractive_segment_count`.

@@ -443,6 +455,10 @@ Method Details
     ],
     "genericConfig": { # Specifies the configurations needed for Generic Discovery.Currently we support: * `content_search_spec`: configuration for generic content search. # The GenericConfig of the serving configuration.
       "contentSearchSpec": { # A specification for configuring the behavior of content search. # Specifies the expected behavior of content search. Only valid for content-search enabled data store.
+        "chunkSpec": { # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS
+          "numNextChunks": 42, # The number of next chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no next chunks will be returned.
+          "numPreviousChunks": 42, # The number of previous chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no previous chunks will be returned.
+        },
         "extractiveContentSpec": { # A specification for configuring the extractive content in a search response. # If there is no extractive_content_spec provided, there will be no extractive answer in the search response.
           "maxExtractiveAnswerCount": 42, # The maximum number of extractive answers returned in each search result. An extractive answer is a verbatim answer extracted from the original document, which provides a precise and contextually relevant answer to the search query. If the number of matching answers is less than the `max_extractive_answer_count`, return all of the answers. Otherwise, return the `max_extractive_answer_count`. At most five answers are returned for each SearchResult.
           "maxExtractiveSegmentCount": 42, # The max number of extractive segments returned in each search result. Only applied if the DataStore is set to DataStore.ContentConfig.CONTENT_REQUIRED or DataStore.solution_types is SOLUTION_TYPE_CHAT. An extractive segment is a text segment extracted from the original document that is relevant to the search query, and, in general, more verbose than an extractive answer. The segment could then be used as input for LLMs to generate summaries and answers. If the number of matching segments is less than `max_extractive_segment_count`, return all of the segments. Otherwise, return the `max_extractive_segment_count`.

@@ -677,6 +693,10 @@ Method Details
     "branch": "A String", # The branch resource name, such as `projects/*/locations/global/collections/default_collection/dataStores/default_data_store/branches/0`. Use `default_branch` as the branch ID or leave this field empty, to search documents under the default branch.
     "canonicalFilter": "A String", # The default filter that is applied when a user performs a search without checking any filters on the search page. The filter applied to every search request when quality improvement such as query expansion is needed. In the case a query does not have a sufficient amount of results this filter will be used to determine whether or not to enable the query expansion flow. The original filter will still be used for the query expanded search. This field is strongly recommended to achieve high search quality. For more information about filter syntax, see SearchRequest.filter.
     "contentSearchSpec": { # A specification for configuring the behavior of content search. # A specification for configuring the behavior of content search.
+      "chunkSpec": { # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS
+        "numNextChunks": 42, # The number of next chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no next chunks will be returned.
+        "numPreviousChunks": 42, # The number of previous chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no previous chunks will be returned.
+      },
       "extractiveContentSpec": { # A specification for configuring the extractive content in a search response. # If there is no extractive_content_spec provided, there will be no extractive answer in the search response.
         "maxExtractiveAnswerCount": 42, # The maximum number of extractive answers returned in each search result. An extractive answer is a verbatim answer extracted from the original document, which provides a precise and contextually relevant answer to the search query. If the number of matching answers is less than the `max_extractive_answer_count`, return all of the answers. Otherwise, return the `max_extractive_answer_count`. At most five answers are returned for each SearchResult.
         "maxExtractiveSegmentCount": 42, # The max number of extractive segments returned in each search result. Only applied if the DataStore is set to DataStore.ContentConfig.CONTENT_REQUIRED or DataStore.solution_types is SOLUTION_TYPE_CHAT. An extractive segment is a text segment extracted from the original document that is relevant to the search query, and, in general, more verbose than an extractive answer. The segment could then be used as input for LLMs to generate summaries and answers. If the number of matching segments is less than `max_extractive_segment_count`, return all of the segments. Otherwise, return the `max_extractive_segment_count`.

@@ -844,6 +864,14 @@ Method Details
     "results": [ # A list of matched documents. The order represents the ranking.
       { # Represents the search results.
         "chunk": { # Chunk captures all raw metadata information of items to be recommended or searched in the chunk mode. # The chunk data in the search response if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS.
+          "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk.
+            "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API.
+              # Object with schema name: GoogleCloudDiscoveryengineV1alphaChunk
+            ],
+            "previousChunks": [ # The previous chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_previous_chunks. This field is only populated on SearchService.Search API.
+              # Object with schema name: GoogleCloudDiscoveryengineV1alphaChunk
+            ],
+          },
           "content": "A String", # Content is a string from a document (parsed content).
           "derivedStructData": { # Output only. This field is OUTPUT_ONLY. It contains derived data that are not in the original input document.
             "a_key": "", # Properties of the object.

@@ -854,6 +882,10 @@ Method Details
           },
           "id": "A String", # Unique chunk id of the current chunk.
           "name": "A String", # The full resource name of the chunk. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document_id}/chunks/{chunk_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters.
+          "pageSpan": { # Page span of the chunk. # Page span of the chunk.
+            "pageEnd": 42, # The end page of the chunk.
+            "pageStart": 42, # The start page of the chunk.
+          },
         },
         "document": { # Document captures all raw metadata information of items to be recommended or searched. # The document data snippet in the search response. Only fields that are marked as retrievable are populated.
           "aclInfo": { # ACL Information of the Document. # Access control information for the document.

@@ -925,6 +957,12 @@ Method Details
     },
     "references": [ # Document References.
       { # Document reference.
+        "chunkContents": [ # List of cited chunk contents derived from document content.
+          { # Chunk content.
+            "content": "A String", # Chunk textual content.
+            "pageIdentifier": "A String", # Page identifier.
+          },
+        ],
         "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`.
         "title": "A String", # Title of the document.
         "uri": "A String", # Cloud Storage or HTTP uri for the document.
diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.branches.documents.chunks.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.branches.documents.chunks.html
index 61dc5da73bd..dd2e4d7a8b3 100644
--- a/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.branches.documents.chunks.html
+++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.branches.documents.chunks.html

@@ -107,6 +107,14 @@ Method Details
   An object of the form:

     { # Chunk captures all raw metadata information of items to be recommended or searched in the chunk mode.
+      "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk.
+        "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API.
+          # Object with schema name: GoogleCloudDiscoveryengineV1alphaChunk
+        ],
+        "previousChunks": [ # The previous chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_previous_chunks. This field is only populated on SearchService.Search API.
+          # Object with schema name: GoogleCloudDiscoveryengineV1alphaChunk
+        ],
+      },
       "content": "A String", # Content is a string from a document (parsed content).
       "derivedStructData": { # Output only. This field is OUTPUT_ONLY. It contains derived data that are not in the original input document.
         "a_key": "", # Properties of the object.

@@ -117,6 +125,10 @@ Method Details
       },
       "id": "A String", # Unique chunk id of the current chunk.
       "name": "A String", # The full resource name of the chunk. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document_id}/chunks/{chunk_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters.
+      "pageSpan": { # Page span of the chunk. # Page span of the chunk.
+        "pageEnd": 42, # The end page of the chunk.
+        "pageStart": 42, # The start page of the chunk.
+      },
     }

@@ -139,6 +151,14 @@ Method Details
     { # Response message for ChunkService.ListChunks method.
       "chunks": [ # The Chunks.
         { # Chunk captures all raw metadata information of items to be recommended or searched in the chunk mode.
+          "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk.
+            "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API.
+              # Object with schema name: GoogleCloudDiscoveryengineV1alphaChunk
+            ],
+            "previousChunks": [ # The previous chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_previous_chunks. This field is only populated on SearchService.Search API.
+              # Object with schema name: GoogleCloudDiscoveryengineV1alphaChunk
+            ],
+          },
           "content": "A String", # Content is a string from a document (parsed content).
           "derivedStructData": { # Output only. This field is OUTPUT_ONLY. It contains derived data that are not in the original input document.
             "a_key": "", # Properties of the object.
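Editor's note: a hedged sketch of paging through a document's chunks with the ChunkService surface documented here; resource names are placeholders, and `client` is the v1alpha client built earlier. `list_next` is the generated client's standard paging helper.

    parent = (
        "projects/my-project/locations/global/dataStores/my-data-store"
        "/branches/default_branch/documents/my-document"
    )
    chunks_api = (
        client.projects().locations().dataStores()
        .branches().documents().chunks()
    )
    request = chunks_api.list(parent=parent)
    while request is not None:
        page = request.execute()
        for chunk in page.get("chunks", []):
            span = chunk.get("pageSpan", {})
            print(chunk["name"], span.get("pageStart"), span.get("pageEnd"))
        request = chunks_api.list_next(
            previous_request=request, previous_response=page
        )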

@@ -149,6 +169,10 @@ Method Details
           },
           "id": "A String", # Unique chunk id of the current chunk.
           "name": "A String", # The full resource name of the chunk. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document_id}/chunks/{chunk_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters.
+          "pageSpan": { # Page span of the chunk. # Page span of the chunk.
+            "pageEnd": 42, # The end page of the chunk.
+            "pageStart": 42, # The start page of the chunk.
+          },
         },
       ],
       "nextPageToken": "A String", # A token that can be sent as ListChunksRequest.page_token to retrieve the next page. If this field is omitted, there are no subsequent pages.
diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.branches.documents.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.branches.documents.html
index 717f4cd2759..8373303694e 100644
--- a/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.branches.documents.html
+++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.branches.documents.html

@@ -302,7 +302,7 @@ Method Details
   The object takes the form of:

     { # Request message for Import methods.
-      "autoGenerateIds": True or False, # Whether to automatically generate IDs for the documents if absent. If set to `true`, Document.ids are automatically generated based on the hash of the payload, where IDs may not be consistent during multiple imports. In which case ReconciliationMode.FULL is highly recommended to avoid duplicate contents. If unset or set to `false`, Document.ids have to be specified using id_field, otherwise, documents without IDs fail to be imported. Only set this field when using GcsSource or BigQuerySource, and when GcsSource.data_schema or BigQuerySource.data_schema is `custom` or `csv`. Otherwise, an INVALID_ARGUMENT error is thrown.
+      "autoGenerateIds": True or False, # Whether to automatically generate IDs for the documents if absent. If set to `true`, Document.ids are automatically generated based on the hash of the payload, where IDs may not be consistent during multiple imports. In which case ReconciliationMode.FULL is highly recommended to avoid duplicate contents. If unset or set to `false`, Document.ids have to be specified using id_field, otherwise, documents without IDs fail to be imported. Supported data sources: * GcsSource. GcsSource.data_schema must be `custom` or `csv`. Otherwise, an INVALID_ARGUMENT error is thrown. * BigQuerySource. BigQuerySource.data_schema must be `custom` or `csv`. Otherwise, an INVALID_ARGUMENT error is thrown. * SpannerSource * CloudSqlSource * FirestoreSource * BigtableSource
       "bigquerySource": { # BigQuery source import data from. # BigQuery input source.
         "dataSchema": "A String", # The schema to use when parsing the data from the source. Supported values for user event imports: * `user_event` (default): One UserEvent per row. Supported values for document imports: * `document` (default): One Document format per row. Each document must have a valid Document.id and one of Document.json_data or Document.struct_data. * `custom`: One custom data per row in arbitrary format that conforms to the defined Schema of the data store. This can only be used by Gen App Builder.
         "datasetId": "A String", # Required. The BigQuery data set to copy the data from with a length limit of 1,024 characters.

@@ -315,16 +315,57 @@ Method Details
         "projectId": "A String", # The project ID (can be project # or ID) that the BigQuery source is in with a length limit of 128 characters. If not specified, inherits the project ID from the parent request.
         "tableId": "A String", # Required. The BigQuery table to copy the data from with a length limit of 1,024 characters.
       },
+      "bigtableSource": { # The Cloud Bigtable source for importing data # Cloud Bigtable input source.
+        "bigtableOptions": { # The Bigtable Options object that contains information to support the import. # Required. Bigtable options that contains information needed when parsing data into typed structures. For example, column type annotations.
+          "families": { # The mapping from family names to an object that contains column families level information for the given column family. If a family is not present in this map it will be ignored.
+            "a_key": {
+              "columns": [ # The list of objects that contains column level information for each column. If a column is not present in this list it will be ignored.
+                {
+                  "encoding": "A String", # Optional. The encoding mode of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. This can be overridden for a specific column by listing that column in 'columns' and specifying an encoding for it.
+                  "fieldName": "A String", # The field name to use for this column in the UCS document. The name has to match a-zA-Z0-9* If not set, we will parse it from the qualifier bytes with best effort. However, field name collisions could happen, where parsing behavior is undefined.
+                  "qualifier": "A String", # Required. Qualifier of the column. If cannot decode with utf-8, store a base-64 encoded string.
+                  "type": "A String", # Optional. The type of values in this column family. The values are expected to be encoded using HBase Bytes.toBytes function when the encoding value is set to BINARY.
+                },
+              ],
+              "encoding": "A String", # Optional. The encoding mode of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. This can be overridden for a specific column by listing that column in 'columns' and specifying an encoding for it.
+              "fieldName": "A String", # The field name to use for this column family in the UCS document. The name has to match a-zA-Z0-9* If not set, we will parse it from the family name with best effort. However, due to difference naming pattern, there could be field name collisions, where parsing behavior is undefined.
+              "type": "A String", # Optional. The type of values in this column family. The values are expected to be encoded using HBase Bytes.toBytes function when the encoding value is set to BINARY.
+            },
+          },
+          "keyFieldName": "A String", # The field name used for saving row key value in the UCS document. The name has to match a-zA-Z0-9*
+        },
+        "instanceId": "A String", # Required. The instance ID of the Cloud Bigtable that needs to be exported.
+        "projectId": "A String", # The project ID (can be project # or ID) that the Bigtable source is in with a length limit of 128 characters. If not specified, inherits the project ID from the parent request.
+        "tableId": "A String", # Required. The table ID of the Cloud Bigtable that needs to be exported.
+      },
+      "cloudSqlSource": { # Cloud SQL source import data from. # Cloud SQL input source.
+        "databaseId": "A String", # Required. The Cloud SQL database to copy the data from with a length limit of 256 characters.
+        "gcsStagingDir": "A String", # Optional. Intermediate Cloud Storage directory used for the import with a length limit of 2,000 characters. Can be specified if one wants to have the Cloud SQL export to a specific Cloud Storage directory. Please ensure that the Cloud SQL service account has the necessary GCS Storage Admin permissions to access the specified GCS directory.
+        "instanceId": "A String", # Required. The Cloud SQL instance to copy the data from with a length limit of 256 characters.
+        "offload": True or False, # Optional. Option for serverless export. Enabling this option will incur additional cost. More info: https://cloud.google.com/sql/pricing#serverless
+        "projectId": "A String", # Optional. The project ID (can be project # or ID) that the Cloud SQL source is in with a length limit of 128 characters. If not specified, inherits the project ID from the parent request.
+        "tableId": "A String", # Required. The Cloud SQL table to copy the data from with a length limit of 256 characters.
+      },
       "errorConfig": { # Configuration of destination for Import related errors. # The desired location of errors incurred during the Import.
         "gcsPrefix": "A String", # Cloud Storage prefix for import errors. This must be an empty, existing Cloud Storage directory. Import errors are written to sharded files in this directory, one per line, as a JSON-encoded `google.rpc.Status` message.
       },
+      "fhirStoreSource": { # Cloud FhirStore source import data from. # FhirStore input source.
+        "fhirStore": "A String", # Required. The full resource name of the FHIR store to import data from, in the format of `projects/{project}/locations/{location}/datasets/{dataset}/fhirStores/{fhir_store}`.
+        "gcsStagingDir": "A String", # Intermediate Cloud Storage directory used for the import with a length limit of 2,000 characters. Can be specified if one wants to have the FhirStore export to a specific Cloud Storage directory.
+      },
+      "firestoreSource": { # Firestore source import data from. # Firestore input source.
+        "collectionId": "A String", # Required. The Firestore collection to copy the data from with a length limit of 1500 characters.
+        "databaseId": "A String", # Required. The Firestore database to copy the data from with a length limit of 256 characters.
+        "gcsStagingDir": "A String", # Optional. Intermediate Cloud Storage directory used for the import with a length limit of 2,000 characters. Can be specified if one wants to have the Firestore export to a specific Cloud Storage directory. Please ensure that the Firestore service account has the necessary GCS Storage Admin permissions to access the specified GCS directory.
+        "projectId": "A String", # Optional. The project ID (can be project # or ID) that the Cloud SQL source is in with a length limit of 128 characters. If not specified, inherits the project ID from the parent request.
+      },
       "gcsSource": { # Cloud Storage location for input content. # Cloud Storage location for the input content.
         "dataSchema": "A String", # The schema to use when parsing the data from the source. Supported values for document imports: * `document` (default): One JSON Document per line. Each document must have a valid Document.id. * `content`: Unstructured data (e.g. PDF, HTML). Each file matched by `input_uris` becomes a document, with the ID set to the first 128 bits of SHA256(URI) encoded as a hex string. * `custom`: One custom data JSON per row in arbitrary format that conforms to the defined Schema of the data store. This can only be used by Gen App Builder. * `csv`: A CSV file with header conforming to the defined Schema of the data store. Each entry after the header is imported as a Document. This can only be used by Gen App Builder. Supported values for user even imports: * `user_event` (default): One JSON UserEvent per line.
         "inputUris": [ # Required. Cloud Storage URIs to input files. URI can be up to 2000 characters long. URIs can match the full object path (for example, `gs://bucket/directory/object.json`) or a pattern matching one or more files, such as `gs://bucket/directory/*.json`. A request can contain at most 100 files (or 100,000 files if `data_schema` is `content`). Each file can be up to 2 GB (or 100 MB if `data_schema` is `content`).
           "A String",
         ],
       },
-      "idField": "A String", # The field in the Cloud Storage and BigQuery sources that indicates the unique IDs of the documents. For GcsSource it is the key of the JSON field. For instance, `my_id` for JSON `{"my_id": "some_uuid"}`. For BigQuerySource it is the column name of the BigQuery table where the unique ids are stored. The values of the JSON field or the BigQuery column are used as the Document.ids. The JSON field or the BigQuery column must be of string type, and the values must be set as valid strings conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) with 1-63 characters. Otherwise, documents without valid IDs fail to be imported. Only set this field when using GcsSource or BigQuerySource, and when GcsSource.data_schema or BigQuerySource.data_schema is `custom`. And only set this field when auto_generate_ids is unset or set as `false`. Otherwise, an INVALID_ARGUMENT error is thrown. If it is unset, a default value `_id` is used when importing from the allowed data sources.
+      "idField": "A String", # The field indicates the ID field or column to be used as unique IDs of the documents. For GcsSource it is the key of the JSON field. For instance, `my_id` for JSON `{"my_id": "some_uuid"}`. For others, it may be the column name of the table where the unique ids are stored. The values of the JSON field or the table column are used as the Document.ids. The JSON field or the table column must be of string type, and the values must be set as valid strings conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) with 1-63 characters. Otherwise, documents without valid IDs fail to be imported. Only set this field when auto_generate_ids is unset or set as `false`. Otherwise, an INVALID_ARGUMENT error is thrown. If it is unset, a default value `_id` is used when importing from the allowed data sources. Supported data sources: * GcsSource. GcsSource.data_schema must be `custom` or `csv`. Otherwise, an INVALID_ARGUMENT error is thrown. * BigQuerySource. BigQuerySource.data_schema must be `custom` or `csv`. Otherwise, an INVALID_ARGUMENT error is thrown. * SpannerSource * CloudSqlSource * FirestoreSource * BigtableSource
       "inlineSource": { # The inline source for the input config for ImportDocuments method. # The Inline source for the input content for documents.
         "documents": [ # Required. A list of documents to update/create. Each document must have a valid Document.id. Recommended max of 100 items.
           { # Document captures all raw metadata information of items to be recommended or searched.
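Editor's note: a hedged sketch of an import request using one of the newly documented sources (Cloud SQL here). The resource IDs are placeholders, `client` is the v1alpha client built earlier, and `import_` is how google-api-python-client exposes the reserved word `import`.

    parent = (
        "projects/my-project/locations/global/dataStores/my-data-store"
        "/branches/default_branch"
    )
    body = {
        "cloudSqlSource": {
            "projectId": "my-project",
            "instanceId": "my-sql-instance",
            "databaseId": "my-database",
            "tableId": "documents",
            "offload": False,  # serverless export costs extra when enabled
        },
        "idField": "doc_id",  # column holding RFC-1034-conforming string IDs
        "reconciliationMode": "INCREMENTAL",
    }
    operation = (
        client.projects().locations().dataStores()
        .branches().documents()
        .import_(parent=parent, body=body)
        .execute()
    )
    print(operation.get("name"))  # long-running operation name to poll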

@@ -361,6 +402,13 @@ Method Details
       ],
     },
     "reconciliationMode": "A String", # The mode of reconciliation between existing documents and the documents to be imported. Defaults to ReconciliationMode.INCREMENTAL.
+    "spannerSource": { # The Spanner source for importing data # Spanner input source.
+      "databaseId": "A String", # Required. The database ID of the source Spanner table.
+      "enableDataBoost": True or False, # Optional. Whether to apply data boost on Spanner export. Enabling this option will incur additional cost. More info: https://cloud.google.com/spanner/docs/databoost/databoost-overview#billing_and_quotas
+      "instanceId": "A String", # Required. The instance ID of the source Spanner table.
+      "projectId": "A String", # The project ID that the Spanner source is in with a length limit of 128 characters. If not specified, inherits the project ID from the parent request.
+      "tableId": "A String", # Required. The table name of the Spanner database that needs to be imported.
+    },
   }

   x__xgafv: string, V1 error format.
diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.conversations.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.conversations.html
index 11729ca9760..cb07c733607 100644
--- a/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.conversations.html
+++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.conversations.html

@@ -166,6 +172,12 @@ Method Details
     },
     "references": [ # Document References.
       { # Document reference.
+        "chunkContents": [ # List of cited chunk contents derived from document content.
+          { # Chunk content.
+            "content": "A String", # Chunk textual content.
+            "pageIdentifier": "A String", # Page identifier.
+          },
+        ],
         "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`.
         "title": "A String", # Title of the document.
         "uri": "A String", # Cloud Storage or HTTP uri for the document.

@@ -276,6 +282,12 @@ Method Details
     },
     "references": [ # Document References.
       { # Document reference.
+        "chunkContents": [ # List of cited chunk contents derived from document content.
+          { # Chunk content.
+            "content": "A String", # Chunk textual content.
+            "pageIdentifier": "A String", # Page identifier.
+          },
+        ],
         "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`.
         "title": "A String", # Title of the document.
         "uri": "A String", # Cloud Storage or HTTP uri for the document.

@@ -343,6 +355,12 @@ Method Details
     },
     "references": [ # Document References.
       { # Document reference.
+        "chunkContents": [ # List of cited chunk contents derived from document content.
+          { # Chunk content.
+            "content": "A String", # Chunk textual content.
+            "pageIdentifier": "A String", # Page identifier.
+          },
+        ],
         "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`.
         "title": "A String", # Title of the document.
         "uri": "A String", # Cloud Storage or HTTP uri for the document.

@@ -355,6 +373,14 @@ Method Details
     "searchResults": [ # Search Results.
       { # Represents the search results.
         "chunk": { # Chunk captures all raw metadata information of items to be recommended or searched in the chunk mode. # The chunk data in the search response if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS.
+          "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk.
+            "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API.
+              # Object with schema name: GoogleCloudDiscoveryengineV1alphaChunk
+            ],
+            "previousChunks": [ # The previous chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_previous_chunks. This field is only populated on SearchService.Search API.
+              # Object with schema name: GoogleCloudDiscoveryengineV1alphaChunk
+            ],
+          },
           "content": "A String", # Content is a string from a document (parsed content).
           "derivedStructData": { # Output only. This field is OUTPUT_ONLY. It contains derived data that are not in the original input document.
             "a_key": "", # Properties of the object.

@@ -365,6 +391,10 @@ Method Details
           },
           "id": "A String", # Unique chunk id of the current chunk.
           "name": "A String", # The full resource name of the chunk. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document_id}/chunks/{chunk_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters.
+          "pageSpan": { # Page span of the chunk. # Page span of the chunk.
+            "pageEnd": 42, # The end page of the chunk.
+            "pageStart": 42, # The start page of the chunk.
+          },
         },
         "document": { # Document captures all raw metadata information of items to be recommended or searched. # The document data snippet in the search response. Only fields that are marked as retrievable are populated.
           "aclInfo": { # ACL Information of the Document. # Access control information for the document.

@@ -463,6 +493,12 @@ Method Details
     },
     "references": [ # Document References.
       { # Document reference.
+        "chunkContents": [ # List of cited chunk contents derived from document content.
+          { # Chunk content.
+            "content": "A String", # Chunk textual content.
+            "pageIdentifier": "A String", # Page identifier.
+          },
+        ],
         "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`.
         "title": "A String", # Title of the document.
         "uri": "A String", # Cloud Storage or HTTP uri for the document.

@@ -541,6 +577,12 @@ Method Details
     },
     "references": [ # Document References.
       { # Document reference.
+        "chunkContents": [ # List of cited chunk contents derived from document content.
+          { # Chunk content.
+            "content": "A String", # Chunk textual content.
+            "pageIdentifier": "A String", # Page identifier.
+          },
+        ],
         "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`.
         "title": "A String", # Title of the document.
         "uri": "A String", # Cloud Storage or HTTP uri for the document.

Method Details

}, "references": [ # Document References. { # Document reference. + "chunkContents": [ # List of cited chunk contents derived from document content. + { # Chunk content. + "content": "A String", # Chunk textual content. + "pageIdentifier": "A String", # Page identifier. + }, + ], "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`. "title": "A String", # Title of the document. "uri": "A String", # Cloud Storage or HTTP uri for the document. @@ -735,6 +783,12 @@

Method Details

}, "references": [ # Document References. { # Document reference. + "chunkContents": [ # List of cited chunk contents derived from document content. + { # Chunk content. + "content": "A String", # Chunk textual content. + "pageIdentifier": "A String", # Page identifier. + }, + ], "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`. "title": "A String", # Title of the document. "uri": "A String", # Cloud Storage or HTTP uri for the document. @@ -832,6 +886,12 @@

Method Details

}, "references": [ # Document References. { # Document reference. + "chunkContents": [ # List of cited chunk contents derived from document content. + { # Chunk content. + "content": "A String", # Chunk textual content. + "pageIdentifier": "A String", # Page identifier. + }, + ], "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`. "title": "A String", # Title of the document. "uri": "A String", # Cloud Storage or HTTP uri for the document. @@ -911,6 +971,12 @@

Method Details

}, "references": [ # Document References. { # Document reference. + "chunkContents": [ # List of cited chunk contents derived from document content. + { # Chunk content. + "content": "A String", # Chunk textual content. + "pageIdentifier": "A String", # Page identifier. + }, + ], "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`. "title": "A String", # Title of the document. "uri": "A String", # Cloud Storage or HTTP uri for the document. diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.servingConfigs.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.servingConfigs.html index ff5b3931a5c..b81c12d6827 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.servingConfigs.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.servingConfigs.html @@ -139,6 +139,10 @@

Method Details

], "genericConfig": { # Specifies the configurations needed for Generic Discovery.Currently we support: * `content_search_spec`: configuration for generic content search. # The GenericConfig of the serving configuration. "contentSearchSpec": { # A specification for configuring the behavior of content search. # Specifies the expected behavior of content search. Only valid for content-search enabled data store. + "chunkSpec": { # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS + "numNextChunks": 42, # The number of next chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no next chunks will be returned. + "numPreviousChunks": 42, # The number of previous chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no previous chunks will be returned. + }, "extractiveContentSpec": { # A specification for configuring the extractive content in a search response. # If there is no extractive_content_spec provided, there will be no extractive answer in the search response. "maxExtractiveAnswerCount": 42, # The maximum number of extractive answers returned in each search result. An extractive answer is a verbatim answer extracted from the original document, which provides a precise and contextually relevant answer to the search query. If the number of matching answers is less than the `max_extractive_answer_count`, return all of the answers. Otherwise, return the `max_extractive_answer_count`. At most five answers are returned for each SearchResult. "maxExtractiveSegmentCount": 42, # The max number of extractive segments returned in each search result. Only applied if the DataStore is set to DataStore.ContentConfig.CONTENT_REQUIRED or DataStore.solution_types is SOLUTION_TYPE_CHAT. An extractive segment is a text segment extracted from the original document that is relevant to the search query, and, in general, more verbose than an extractive answer. The segment could then be used as input for LLMs to generate summaries and answers. If the number of matching segments is less than `max_extractive_segment_count`, return all of the segments. Otherwise, return the `max_extractive_segment_count`. @@ -242,6 +246,10 @@

Method Details

], "genericConfig": { # Specifies the configurations needed for Generic Discovery.Currently we support: * `content_search_spec`: configuration for generic content search. # The GenericConfig of the serving configuration. "contentSearchSpec": { # A specification for configuring the behavior of content search. # Specifies the expected behavior of content search. Only valid for content-search enabled data store. + "chunkSpec": { # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS + "numNextChunks": 42, # The number of next chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no next chunks will be returned. + "numPreviousChunks": 42, # The number of previous chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no previous chunks will be returned. + }, "extractiveContentSpec": { # A specification for configuring the extractive content in a search response. # If there is no extractive_content_spec provided, there will be no extractive answer in the search response. "maxExtractiveAnswerCount": 42, # The maximum number of extractive answers returned in each search result. An extractive answer is a verbatim answer extracted from the original document, which provides a precise and contextually relevant answer to the search query. If the number of matching answers is less than the `max_extractive_answer_count`, return all of the answers. Otherwise, return the `max_extractive_answer_count`. At most five answers are returned for each SearchResult. "maxExtractiveSegmentCount": 42, # The max number of extractive segments returned in each search result. Only applied if the DataStore is set to DataStore.ContentConfig.CONTENT_REQUIRED or DataStore.solution_types is SOLUTION_TYPE_CHAT. An extractive segment is a text segment extracted from the original document that is relevant to the search query, and, in general, more verbose than an extractive answer. The segment could then be used as input for LLMs to generate summaries and answers. If the number of matching segments is less than `max_extractive_segment_count`, return all of the segments. Otherwise, return the `max_extractive_segment_count`. @@ -351,6 +359,10 @@

Method Details

], "genericConfig": { # Specifies the configurations needed for Generic Discovery.Currently we support: * `content_search_spec`: configuration for generic content search. # The GenericConfig of the serving configuration. "contentSearchSpec": { # A specification for configuring the behavior of content search. # Specifies the expected behavior of content search. Only valid for content-search enabled data store. + "chunkSpec": { # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS + "numNextChunks": 42, # The number of next chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no next chunks will be returned. + "numPreviousChunks": 42, # The number of previous chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no previous chunks will be returned. + }, "extractiveContentSpec": { # A specification for configuring the extractive content in a search response. # If there is no extractive_content_spec provided, there will be no extractive answer in the search response. "maxExtractiveAnswerCount": 42, # The maximum number of extractive answers returned in each search result. An extractive answer is a verbatim answer extracted from the original document, which provides a precise and contextually relevant answer to the search query. If the number of matching answers is less than the `max_extractive_answer_count`, return all of the answers. Otherwise, return the `max_extractive_answer_count`. At most five answers are returned for each SearchResult. "maxExtractiveSegmentCount": 42, # The max number of extractive segments returned in each search result. Only applied if the DataStore is set to DataStore.ContentConfig.CONTENT_REQUIRED or DataStore.solution_types is SOLUTION_TYPE_CHAT. An extractive segment is a text segment extracted from the original document that is relevant to the search query, and, in general, more verbose than an extractive answer. The segment could then be used as input for LLMs to generate summaries and answers. If the number of matching segments is less than `max_extractive_segment_count`, return all of the segments. Otherwise, return the `max_extractive_segment_count`. @@ -443,6 +455,10 @@

Method Details

], "genericConfig": { # Specifies the configurations needed for Generic Discovery.Currently we support: * `content_search_spec`: configuration for generic content search. # The GenericConfig of the serving configuration. "contentSearchSpec": { # A specification for configuring the behavior of content search. # Specifies the expected behavior of content search. Only valid for content-search enabled data store. + "chunkSpec": { # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS + "numNextChunks": 42, # The number of next chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no next chunks will be returned. + "numPreviousChunks": 42, # The number of previous chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no previous chunks will be returned. + }, "extractiveContentSpec": { # A specification for configuring the extractive content in a search response. # If there is no extractive_content_spec provided, there will be no extractive answer in the search response. "maxExtractiveAnswerCount": 42, # The maximum number of extractive answers returned in each search result. An extractive answer is a verbatim answer extracted from the original document, which provides a precise and contextually relevant answer to the search query. If the number of matching answers is less than the `max_extractive_answer_count`, return all of the answers. Otherwise, return the `max_extractive_answer_count`. At most five answers are returned for each SearchResult. "maxExtractiveSegmentCount": 42, # The max number of extractive segments returned in each search result. Only applied if the DataStore is set to DataStore.ContentConfig.CONTENT_REQUIRED or DataStore.solution_types is SOLUTION_TYPE_CHAT. An extractive segment is a text segment extracted from the original document that is relevant to the search query, and, in general, more verbose than an extractive answer. The segment could then be used as input for LLMs to generate summaries and answers. If the number of matching segments is less than `max_extractive_segment_count`, return all of the segments. Otherwise, return the `max_extractive_segment_count`. @@ -677,6 +693,10 @@

Method Details

"branch": "A String", # The branch resource name, such as `projects/*/locations/global/collections/default_collection/dataStores/default_data_store/branches/0`. Use `default_branch` as the branch ID or leave this field empty, to search documents under the default branch. "canonicalFilter": "A String", # The default filter that is applied when a user performs a search without checking any filters on the search page. The filter applied to every search request when quality improvement such as query expansion is needed. In the case a query does not have a sufficient amount of results this filter will be used to determine whether or not to enable the query expansion flow. The original filter will still be used for the query expanded search. This field is strongly recommended to achieve high search quality. For more information about filter syntax, see SearchRequest.filter. "contentSearchSpec": { # A specification for configuring the behavior of content search. # A specification for configuring the behavior of content search. + "chunkSpec": { # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS # Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS + "numNextChunks": 42, # The number of next chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no next chunks will be returned. + "numPreviousChunks": 42, # The number of previous chunks to be returned of the current chunk. The maximum allowed value is 3. If not specified, no previous chunks will be returned. + }, "extractiveContentSpec": { # A specification for configuring the extractive content in a search response. # If there is no extractive_content_spec provided, there will be no extractive answer in the search response. "maxExtractiveAnswerCount": 42, # The maximum number of extractive answers returned in each search result. An extractive answer is a verbatim answer extracted from the original document, which provides a precise and contextually relevant answer to the search query. If the number of matching answers is less than the `max_extractive_answer_count`, return all of the answers. Otherwise, return the `max_extractive_answer_count`. At most five answers are returned for each SearchResult. "maxExtractiveSegmentCount": 42, # The max number of extractive segments returned in each search result. Only applied if the DataStore is set to DataStore.ContentConfig.CONTENT_REQUIRED or DataStore.solution_types is SOLUTION_TYPE_CHAT. An extractive segment is a text segment extracted from the original document that is relevant to the search query, and, in general, more verbose than an extractive answer. The segment could then be used as input for LLMs to generate summaries and answers. If the number of matching segments is less than `max_extractive_segment_count`, return all of the segments. Otherwise, return the `max_extractive_segment_count`. @@ -844,6 +864,14 @@

Method Details

"results": [ # A list of matched documents. The order represents the ranking. { # Represents the search results. "chunk": { # Chunk captures all raw metadata information of items to be recommended or searched in the chunk mode. # The chunk data in the search response if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS. + "chunkMetadata": { # Metadata of the current chunk. This field is only populated on SearchService.Search API. # Output only. Metadata of the current chunk. + "nextChunks": [ # The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API. + # Object with schema name: GoogleCloudDiscoveryengineV1alphaChunk + ], + "previousChunks": [ # The previous chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_previous_chunks. This field is only populated on SearchService.Search API. + # Object with schema name: GoogleCloudDiscoveryengineV1alphaChunk + ], + }, "content": "A String", # Content is a string from a document (parsed content). "derivedStructData": { # Output only. This field is OUTPUT_ONLY. It contains derived data that are not in the original input document. "a_key": "", # Properties of the object. @@ -854,6 +882,10 @@

Method Details

}, "id": "A String", # Unique chunk id of the current chunk. "name": "A String", # The full resource name of the chunk. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document_id}/chunks/{chunk_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters. + "pageSpan": { # Page span of the chunk. # Page span of the chunk. + "pageEnd": 42, # The end page of the chunk. + "pageStart": 42, # The start page of the chunk. + }, }, "document": { # Document captures all raw metadata information of items to be recommended or searched. # The document data snippet in the search response. Only fields that are marked as retrievable are populated. "aclInfo": { # ACL Information of the Document. # Access control information for the document. @@ -925,6 +957,12 @@

Method Details

}, "references": [ # Document References. { # Document reference. + "chunkContents": [ # List of cited chunk contents derived from document content. + { # Chunk content. + "content": "A String", # Chunk textual content. + "pageIdentifier": "A String", # Page identifier. + }, + ], "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`. "title": "A String", # Title of the document. "uri": "A String", # Cloud Storage or HTTP uri for the document. diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.html index 4efd4f02600..da26113633c 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.html @@ -89,6 +89,11 @@

Instance Methods

Returns the operations Resource.

+

+ rankingConfigs() +

+

Returns the rankingConfigs Resource.

+

close()

Close httplib2 connections.

diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.rankingConfigs.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.rankingConfigs.html new file mode 100644 index 00000000000..2be33504e7e --- /dev/null +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.rankingConfigs.html @@ -0,0 +1,133 @@ + + + +

Discovery Engine API . projects . locations . rankingConfigs

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ rank(rankingConfig, body=None, x__xgafv=None)

+

Ranks a list of text records based on the given input query.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ rank(rankingConfig, body=None, x__xgafv=None) +
Ranks a list of text records based on the given input query.
+
+Args:
+  rankingConfig: string, Required. The resource name of the rank service config, such as `projects/{project_num}/locations/{location_id}/rankingConfigs/default_ranking_config`. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request message for RankService.Rank method.
+  "ignoreRecordDetailsInResponse": True or False, # If true, the response will contain only record ID and score. By default, it is false, the response will contain record details.
+  "model": "A String", # The identifier of the model to use. It is one of: * `semantic-ranker-512@latest`: Semantic ranking model with maxiumn input token size 512. It is set to `semantic-ranker-512@latest` by default if unspecified.
+  "query": "A String", # The query to use.
+  "records": [ # Required. A list of records to rank.
+    { # Record message for RankService.Rank method.
+      "content": "A String", # The content of the record. Empty by default. At least one of title or content should be set otherwise an INVALID_ARGUMENT error is thrown.
+      "id": "A String", # The unique ID to represent the record.
+      "score": 3.14, # The score of this record based on the given query and selected model.
+      "title": "A String", # The title of the record. Empty by default. At least one of title or content should be set otherwise an INVALID_ARGUMENT error is thrown.
+    },
+  ],
+  "topN": 42, # The number of results to return. If this is unset or no bigger than zero, returns all results.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response message for RankService.Rank method.
+  "records": [ # A list of records sorted by descending score.
+    { # Record message for RankService.Rank method.
+      "content": "A String", # The content of the record. Empty by default. At least one of title or content should be set otherwise an INVALID_ARGUMENT error is thrown.
+      "id": "A String", # The unique ID to represent the record.
+      "score": 3.14, # The score of this record based on the given query and selected model.
+      "title": "A String", # The title of the record. Empty by default. At least one of title or content should be set otherwise an INVALID_ARGUMENT error is thrown.
+    },
+  ],
+}
+
+ + \ No newline at end of file diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.branches.documents.html b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.branches.documents.html index 40c6aeb5ee2..0244fa4502f 100644 --- a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.branches.documents.html +++ b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.branches.documents.html @@ -229,7 +229,7 @@

Method Details

The object takes the form of: { # Request message for Import methods. - "autoGenerateIds": True or False, # Whether to automatically generate IDs for the documents if absent. If set to `true`, Document.ids are automatically generated based on the hash of the payload, where IDs may not be consistent during multiple imports. In which case ReconciliationMode.FULL is highly recommended to avoid duplicate contents. If unset or set to `false`, Document.ids have to be specified using id_field, otherwise, documents without IDs fail to be imported. Only set this field when using GcsSource or BigQuerySource, and when GcsSource.data_schema or BigQuerySource.data_schema is `custom` or `csv`. Otherwise, an INVALID_ARGUMENT error is thrown. + "autoGenerateIds": True or False, # Whether to automatically generate IDs for the documents if absent. If set to `true`, Document.ids are automatically generated based on the hash of the payload, where IDs may not be consistent during multiple imports. In which case ReconciliationMode.FULL is highly recommended to avoid duplicate contents. If unset or set to `false`, Document.ids have to be specified using id_field, otherwise, documents without IDs fail to be imported. Supported data sources: * GcsSource. GcsSource.data_schema must be `custom` or `csv`. Otherwise, an INVALID_ARGUMENT error is thrown. * BigQuerySource. BigQuerySource.data_schema must be `custom` or `csv`. Otherwise, an INVALID_ARGUMENT error is thrown. * SpannerSource * CloudSqlSource * FirestoreSource * BigtableSource "bigquerySource": { # BigQuery source import data from. # BigQuery input source. "dataSchema": "A String", # The schema to use when parsing the data from the source. Supported values for user event imports: * `user_event` (default): One UserEvent per row. Supported values for document imports: * `document` (default): One Document format per row. Each document must have a valid Document.id and one of Document.json_data or Document.struct_data. * `custom`: One custom data per row in arbitrary format that conforms to the defined Schema of the data store. This can only be used by Gen App Builder. "datasetId": "A String", # Required. The BigQuery data set to copy the data from with a length limit of 1,024 characters. @@ -242,16 +242,57 @@

Method Details

"projectId": "A String", # The project ID (can be project # or ID) that the BigQuery source is in with a length limit of 128 characters. If not specified, inherits the project ID from the parent request. "tableId": "A String", # Required. The BigQuery table to copy the data from with a length limit of 1,024 characters. }, + "bigtableSource": { # The Cloud Bigtable source for importing data # Cloud Bigtable input source. + "bigtableOptions": { # The Bigtable Options object that contains information to support the import. # Required. Bigtable options that contains information needed when parsing data into typed structures. For example, column type annotations. + "families": { # The mapping from family names to an object that contains column families level information for the given column family. If a family is not present in this map it will be ignored. + "a_key": { + "columns": [ # The list of objects that contains column level information for each column. If a column is not present in this list it will be ignored. + { + "encoding": "A String", # Optional. The encoding mode of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. This can be overridden for a specific column by listing that column in 'columns' and specifying an encoding for it. + "fieldName": "A String", # The field name to use for this column in the UCS document. The name has to match a-zA-Z0-9* If not set, we will parse it from the qualifier bytes with best effort. However, field name collisions could happen, where parsing behavior is undefined. + "qualifier": "A String", # Required. Qualifier of the column. If cannot decode with utf-8, store a base-64 encoded string. + "type": "A String", # Optional. The type of values in this column family. The values are expected to be encoded using HBase Bytes.toBytes function when the encoding value is set to BINARY. + }, + ], + "encoding": "A String", # Optional. The encoding mode of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. This can be overridden for a specific column by listing that column in 'columns' and specifying an encoding for it. + "fieldName": "A String", # The field name to use for this column family in the UCS document. The name has to match a-zA-Z0-9* If not set, we will parse it from the family name with best effort. However, due to difference naming pattern, there could be field name collisions, where parsing behavior is undefined. + "type": "A String", # Optional. The type of values in this column family. The values are expected to be encoded using HBase Bytes.toBytes function when the encoding value is set to BINARY. + }, + }, + "keyFieldName": "A String", # The field name used for saving row key value in the UCS document. The name has to match a-zA-Z0-9* + }, + "instanceId": "A String", # Required. The instance ID of the Cloud Bigtable that needs to be exported. + "projectId": "A String", # The project ID (can be project # or ID) that the Bigtable source is in with a length limit of 128 characters. If not specified, inherits the project ID from the parent request. + "tableId": "A String", # Required. The table ID of the Cloud Bigtable that needs to be exported. + }, + "cloudSqlSource": { # Cloud SQL source import data from. 
# Cloud SQL input source. + "databaseId": "A String", # Required. The Cloud SQL database to copy the data from with a length limit of 256 characters. + "gcsStagingDir": "A String", # Optional. Intermediate Cloud Storage directory used for the import with a length limit of 2,000 characters. Can be specified if one wants to have the Cloud SQL export to a specific Cloud Storage directory. Please ensure that the Cloud SQL service account has the necessary GCS Storage Admin permissions to access the specified GCS directory. + "instanceId": "A String", # Required. The Cloud SQL instance to copy the data from with a length limit of 256 characters. + "offload": True or False, # Optional. Option for serverless export. Enabling this option will incur additional cost. More info: https://cloud.google.com/sql/pricing#serverless + "projectId": "A String", # Optional. The project ID (can be project # or ID) that the Cloud SQL source is in with a length limit of 128 characters. If not specified, inherits the project ID from the parent request. + "tableId": "A String", # Required. The Cloud SQL table to copy the data from with a length limit of 256 characters. + }, "errorConfig": { # Configuration of destination for Import related errors. # The desired location of errors incurred during the Import. "gcsPrefix": "A String", # Cloud Storage prefix for import errors. This must be an empty, existing Cloud Storage directory. Import errors are written to sharded files in this directory, one per line, as a JSON-encoded `google.rpc.Status` message. }, + "fhirStoreSource": { # Cloud FhirStore source to import data from. # FhirStore input source. + "fhirStore": "A String", # Required. The full resource name of the FHIR store to import data from, in the format of `projects/{project}/locations/{location}/datasets/{dataset}/fhirStores/{fhir_store}`. + "gcsStagingDir": "A String", # Intermediate Cloud Storage directory used for the import with a length limit of 2,000 characters. Can be specified if one wants to have the FhirStore export to a specific Cloud Storage directory. + }, + "firestoreSource": { # Firestore source to import data from. # Firestore input source. + "collectionId": "A String", # Required. The Firestore collection to copy the data from with a length limit of 1500 characters. + "databaseId": "A String", # Required. The Firestore database to copy the data from with a length limit of 256 characters. + "gcsStagingDir": "A String", # Optional. Intermediate Cloud Storage directory used for the import with a length limit of 2,000 characters. Can be specified if one wants to have the Firestore export to a specific Cloud Storage directory. Please ensure that the Firestore service account has the necessary GCS Storage Admin permissions to access the specified GCS directory. + "projectId": "A String", # Optional. The project ID (can be project # or ID) that the Firestore source is in with a length limit of 128 characters. If not specified, inherits the project ID from the parent request. + }, "gcsSource": { # Cloud Storage location for input content. # Cloud Storage location for the input content. "dataSchema": "A String", # The schema to use when parsing the data from the source. Supported values for document imports: * `document` (default): One JSON Document per line. Each document must have a valid Document.id. * `content`: Unstructured data (e.g. PDF, HTML). Each file matched by `input_uris` becomes a document, with the ID set to the first 128 bits of SHA256(URI) encoded as a hex string. 
* `custom`: One custom data JSON per row in arbitrary format that conforms to the defined Schema of the data store. This can only be used by Gen App Builder. * `csv`: A CSV file with header conforming to the defined Schema of the data store. Each entry after the header is imported as a Document. This can only be used by Gen App Builder. Supported values for user even imports: * `user_event` (default): One JSON UserEvent per line. "inputUris": [ # Required. Cloud Storage URIs to input files. URI can be up to 2000 characters long. URIs can match the full object path (for example, `gs://bucket/directory/object.json`) or a pattern matching one or more files, such as `gs://bucket/directory/*.json`. A request can contain at most 100 files (or 100,000 files if `data_schema` is `content`). Each file can be up to 2 GB (or 100 MB if `data_schema` is `content`). "A String", ], }, - "idField": "A String", # The field in the Cloud Storage and BigQuery sources that indicates the unique IDs of the documents. For GcsSource it is the key of the JSON field. For instance, `my_id` for JSON `{"my_id": "some_uuid"}`. For BigQuerySource it is the column name of the BigQuery table where the unique ids are stored. The values of the JSON field or the BigQuery column are used as the Document.ids. The JSON field or the BigQuery column must be of string type, and the values must be set as valid strings conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) with 1-63 characters. Otherwise, documents without valid IDs fail to be imported. Only set this field when using GcsSource or BigQuerySource, and when GcsSource.data_schema or BigQuerySource.data_schema is `custom`. And only set this field when auto_generate_ids is unset or set as `false`. Otherwise, an INVALID_ARGUMENT error is thrown. If it is unset, a default value `_id` is used when importing from the allowed data sources. + "idField": "A String", # The field indicates the ID field or column to be used as unique IDs of the documents. For GcsSource it is the key of the JSON field. For instance, `my_id` for JSON `{"my_id": "some_uuid"}`. For others, it may be the column name of the table where the unique ids are stored. The values of the JSON field or the table column are used as the Document.ids. The JSON field or the table column must be of string type, and the values must be set as valid strings conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) with 1-63 characters. Otherwise, documents without valid IDs fail to be imported. Only set this field when auto_generate_ids is unset or set as `false`. Otherwise, an INVALID_ARGUMENT error is thrown. If it is unset, a default value `_id` is used when importing from the allowed data sources. Supported data sources: * GcsSource. GcsSource.data_schema must be `custom` or `csv`. Otherwise, an INVALID_ARGUMENT error is thrown. * BigQuerySource. BigQuerySource.data_schema must be `custom` or `csv`. Otherwise, an INVALID_ARGUMENT error is thrown. * SpannerSource * CloudSqlSource * FirestoreSource * BigtableSource "inlineSource": { # The inline source for the input config for ImportDocuments method. # The Inline source for the input content for documents. "documents": [ # Required. A list of documents to update/create. Each document must have a valid Document.id. Recommended max of 100 items. { # Document captures all raw metadata information of items to be recommended or searched. @@ -276,6 +317,13 @@

Method Details

], }, "reconciliationMode": "A String", # The mode of reconciliation between existing documents and the documents to be imported. Defaults to ReconciliationMode.INCREMENTAL. + "spannerSource": { # The Spanner source for importing data # Spanner input source. + "databaseId": "A String", # Required. The database ID of the source Spanner table. + "enableDataBoost": True or False, # Optional. Whether to apply data boost on Spanner export. Enabling this option will incur additional cost. More info: https://cloud.google.com/spanner/docs/databoost/databoost-overview#billing_and_quotas + "instanceId": "A String", # Required. The instance ID of the source Spanner table. + "projectId": "A String", # The project ID that the Spanner source is in with a length limit of 128 characters. If not specified, inherits the project ID from the parent request. + "tableId": "A String", # Required. The table name of the Spanner database that needs to be imported. + }, } x__xgafv: string, V1 error format. diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.conversations.html b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.conversations.html index cd06e40f57c..2d8d4e15af8 100644 --- a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.conversations.html +++ b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.conversations.html @@ -166,6 +166,12 @@

Method Details

}, "references": [ # Document References. { # Document reference. + "chunkContents": [ # List of cited chunk contents derived from document content. + { # Chunk content. + "content": "A String", # Chunk textual content. + "pageIdentifier": "A String", # Page identifier. + }, + ], "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`. "title": "A String", # Title of the document. "uri": "A String", # Cloud Storage or HTTP uri for the document. @@ -275,6 +281,12 @@

Method Details

}, "references": [ # Document References. { # Document reference. + "chunkContents": [ # List of cited chunk contents derived from document content. + { # Chunk content. + "content": "A String", # Chunk textual content. + "pageIdentifier": "A String", # Page identifier. + }, + ], "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`. "title": "A String", # Title of the document. "uri": "A String", # Cloud Storage or HTTP uri for the document. @@ -342,6 +354,12 @@

Method Details

}, "references": [ # Document References. { # Document reference. + "chunkContents": [ # List of cited chunk contents derived from document content. + { # Chunk content. + "content": "A String", # Chunk textual content. + "pageIdentifier": "A String", # Page identifier. + }, + ], "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`. "title": "A String", # Title of the document. "uri": "A String", # Cloud Storage or HTTP uri for the document. @@ -438,6 +456,12 @@

Method Details

}, "references": [ # Document References. { # Document reference. + "chunkContents": [ # List of cited chunk contents derived from document content. + { # Chunk content. + "content": "A String", # Chunk textual content. + "pageIdentifier": "A String", # Page identifier. + }, + ], "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`. "title": "A String", # Title of the document. "uri": "A String", # Cloud Storage or HTTP uri for the document. @@ -516,6 +540,12 @@

Method Details

}, "references": [ # Document References. { # Document reference. + "chunkContents": [ # List of cited chunk contents derived from document content. + { # Chunk content. + "content": "A String", # Chunk textual content. + "pageIdentifier": "A String", # Page identifier. + }, + ], "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`. "title": "A String", # Title of the document. "uri": "A String", # Cloud Storage or HTTP uri for the document. @@ -619,6 +649,12 @@

Method Details

}, "references": [ # Document References. { # Document reference. + "chunkContents": [ # List of cited chunk contents derived from document content. + { # Chunk content. + "content": "A String", # Chunk textual content. + "pageIdentifier": "A String", # Page identifier. + }, + ], "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`. "title": "A String", # Title of the document. "uri": "A String", # Cloud Storage or HTTP uri for the document. @@ -710,6 +746,12 @@

Method Details

}, "references": [ # Document References. { # Document reference. + "chunkContents": [ # List of cited chunk contents derived from document content. + { # Chunk content. + "content": "A String", # Chunk textual content. + "pageIdentifier": "A String", # Page identifier. + }, + ], "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`. "title": "A String", # Title of the document. "uri": "A String", # Cloud Storage or HTTP uri for the document. @@ -807,6 +849,12 @@

Method Details

}, "references": [ # Document References. { # Document reference. + "chunkContents": [ # List of cited chunk contents derived from document content. + { # Chunk content. + "content": "A String", # Chunk textual content. + "pageIdentifier": "A String", # Page identifier. + }, + ], "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`. "title": "A String", # Title of the document. "uri": "A String", # Cloud Storage or HTTP uri for the document. @@ -886,6 +934,12 @@

Method Details

}, "references": [ # Document References. { # Document reference. + "chunkContents": [ # List of cited chunk contents derived from document content. + { # Chunk content. + "content": "A String", # Chunk textual content. + "pageIdentifier": "A String", # Page identifier. + }, + ], "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`. "title": "A String", # Title of the document. "uri": "A String", # Cloud Storage or HTTP uri for the document. diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.servingConfigs.html b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.servingConfigs.html index 92bd82c7830..3110214f98b 100644 --- a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.servingConfigs.html +++ b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.servingConfigs.html @@ -848,6 +848,12 @@

Method Details

}, "references": [ # Document References. { # Document reference. + "chunkContents": [ # List of cited chunk contents derived from document content. + { # Chunk content. + "content": "A String", # Chunk textual content. + "pageIdentifier": "A String", # Page identifier. + }, + ], "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`. "title": "A String", # Title of the document. "uri": "A String", # Cloud Storage or HTTP uri for the document. diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.engines.conversations.html b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.engines.conversations.html index afe938d7107..6e4409c9c4f 100644 --- a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.engines.conversations.html +++ b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.engines.conversations.html @@ -166,6 +166,12 @@

Method Details

}, "references": [ # Document References. { # Document reference. + "chunkContents": [ # List of cited chunk contents derived from document content. + { # Chunk content. + "content": "A String", # Chunk textual content. + "pageIdentifier": "A String", # Page identifier. + }, + ], "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`. "title": "A String", # Title of the document. "uri": "A String", # Cloud Storage or HTTP uri for the document. @@ -275,6 +281,12 @@

Method Details

}, "references": [ # Document References. { # Document reference. + "chunkContents": [ # List of cited chunk contents derived from document content. + { # Chunk content. + "content": "A String", # Chunk textual content. + "pageIdentifier": "A String", # Page identifier. + }, + ], "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`. "title": "A String", # Title of the document. "uri": "A String", # Cloud Storage or HTTP uri for the document. @@ -342,6 +354,12 @@

Method Details

}, "references": [ # Document References. { # Document reference. + "chunkContents": [ # List of cited chunk contents derived from document content. + { # Chunk content. + "content": "A String", # Chunk textual content. + "pageIdentifier": "A String", # Page identifier. + }, + ], "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`. "title": "A String", # Title of the document. "uri": "A String", # Cloud Storage or HTTP uri for the document. @@ -438,6 +456,12 @@

Method Details

}, "references": [ # Document References. { # Document reference. + "chunkContents": [ # List of cited chunk contents derived from document content. + { # Chunk content. + "content": "A String", # Chunk textual content. + "pageIdentifier": "A String", # Page identifier. + }, + ], "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`. "title": "A String", # Title of the document. "uri": "A String", # Cloud Storage or HTTP uri for the document. @@ -516,6 +540,12 @@

Method Details

}, "references": [ # Document References. { # Document reference. + "chunkContents": [ # List of cited chunk contents derived from document content. + { # Chunk content. + "content": "A String", # Chunk textual content. + "pageIdentifier": "A String", # Page identifier. + }, + ], "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`. "title": "A String", # Title of the document. "uri": "A String", # Cloud Storage or HTTP uri for the document. @@ -619,6 +649,12 @@

Method Details

}, "references": [ # Document References. { # Document reference. + "chunkContents": [ # List of cited chunk contents derived from document content. + { # Chunk content. + "content": "A String", # Chunk textual content. + "pageIdentifier": "A String", # Page identifier. + }, + ], "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`. "title": "A String", # Title of the document. "uri": "A String", # Cloud Storage or HTTP uri for the document. @@ -710,6 +746,12 @@

Method Details

}, "references": [ # Document References. { # Document reference. + "chunkContents": [ # List of cited chunk contents derived from document content. + { # Chunk content. + "content": "A String", # Chunk textual content. + "pageIdentifier": "A String", # Page identifier. + }, + ], "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`. "title": "A String", # Title of the document. "uri": "A String", # Cloud Storage or HTTP uri for the document. @@ -807,6 +849,12 @@

Method Details

}, "references": [ # Document References. { # Document reference. + "chunkContents": [ # List of cited chunk contents derived from document content. + { # Chunk content. + "content": "A String", # Chunk textual content. + "pageIdentifier": "A String", # Page identifier. + }, + ], "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`. "title": "A String", # Title of the document. "uri": "A String", # Cloud Storage or HTTP uri for the document. @@ -886,6 +934,12 @@

Method Details

}, "references": [ # Document References. { # Document reference. + "chunkContents": [ # List of cited chunk contents derived from document content. + { # Chunk content. + "content": "A String", # Chunk textual content. + "pageIdentifier": "A String", # Page identifier. + }, + ], "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`. "title": "A String", # Title of the document. "uri": "A String", # Cloud Storage or HTTP uri for the document. diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.engines.html b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.engines.html index 6fa1067a83e..6cb1f16e42b 100644 --- a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.engines.html +++ b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.engines.html @@ -110,6 +110,15 @@

Instance Methods

patch(name, body=None, updateMask=None, x__xgafv=None)

Updates an Engine

+

+ pause(name, body=None, x__xgafv=None)

+

Pauses the training of an existing engine. Only applicable if SolutionType is SOLUTION_TYPE_RECOMMENDATION.

+

+ resume(name, body=None, x__xgafv=None)

+

Resumes the training of an existing engine. Only applicable if SolutionType is SOLUTION_TYPE_RECOMMENDATION.

+

+ tune(name, body=None, x__xgafv=None)

+

Tunes an existing engine. Only applicable if SolutionType is SOLUTION_TYPE_RECOMMENDATION.

Method Details

close() @@ -426,4 +435,153 @@

Method Details

}
+
+ pause(name, body=None, x__xgafv=None) +
Pauses the training of an existing engine. Only applicable if SolutionType is SOLUTION_TYPE_RECOMMENDATION.
+
+Args:
+  name: string, Required. The name of the engine to pause. Format: `projects/{project_number}/locations/{location_id}/collections/{collection_id}/engines/{engine_id}` (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request for pausing training of an engine.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Metadata that describes the training and serving parameters of an Engine.
+  "chatEngineConfig": { # Configurations for a Chat Engine. # Configurations for the Chat Engine. Only applicable if solution_type is SOLUTION_TYPE_CHAT.
+    "agentCreationConfig": { # Configurations for generating a Dialogflow agent. Note that these configurations are one-time consumed by and passed to Dialogflow service. It means they cannot be retrieved using EngineService.GetEngine or EngineService.ListEngines API after engine creation. # The configurationt generate the Dialogflow agent that is associated to this Engine. Note that these configurations are one-time consumed by and passed to Dialogflow service. It means they cannot be retrieved using EngineService.GetEngine or EngineService.ListEngines API after engine creation.
+      "business": "A String", # Name of the company, organization or other entity that the agent represents. Used for knowledge connector LLM prompt and for knowledge search.
+      "defaultLanguageCode": "A String", # Required. The default language of the agent as a language tag. See [Language Support](https://cloud.google.com/dialogflow/docs/reference/language) for a list of the currently supported language codes.
+      "location": "A String", # Agent location for Agent creation, supported values: global/us/eu. If not provided, us Engine will create Agent using us-central-1 by default; eu Engine will create Agent using eu-west-1 by default.
+      "timeZone": "A String", # Required. The time zone of the agent from the [time zone database](https://www.iana.org/time-zones), e.g., America/New_York, Europe/Paris.
+    },
+    "dialogflowAgentToLink": "A String", # The resource name of an exist Dialogflow agent to link to this Chat Engine. Customers can either provide `agent_creation_config` to create agent or provide an agent name that links the agent with the Chat engine. Format: `projects//locations//agents/`. Note that the `dialogflow_agent_to_link` are one-time consumed by and passed to Dialogflow service. It means they cannot be retrieved using EngineService.GetEngine or EngineService.ListEngines API after engine creation. Please use ChatEngineMetadata.dialogflow_agent for actual agent association after Engine is created.
+  },
+  "chatEngineMetadata": { # Additional information of a Chat Engine. Fields in this message are output only. # Output only. Additional information of the Chat Engine. Only applicable if solution_type is SOLUTION_TYPE_CHAT.
+    "dialogflowAgent": "A String", # The resource name of a Dialogflow agent, that this Chat Engine refers to. Format: `projects//locations//agents/`.
+  },
+  "commonConfig": { # Common configurations for an Engine. # Common config spec that specifies the metadata of the engine.
+    "companyName": "A String", # Immutable. The name of the company, business or entity that is associated with the engine. Setting this may help improve LLM related features.
+  },
+  "createTime": "A String", # Output only. Timestamp the Recommendation Engine was created at.
+  "dataStoreIds": [ # The data stores associated with this engine. For SOLUTION_TYPE_SEARCH and SOLUTION_TYPE_RECOMMENDATION type of engines, they can only associate with at most one data store. If solution_type is SOLUTION_TYPE_CHAT, multiple DataStores in the same Collection can be associated here. Note that when used in CreateEngineRequest, one DataStore id must be provided as the system will use it for necessary initializations.
+    "A String",
+  ],
+  "displayName": "A String", # Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters.
+  "industryVertical": "A String", # The industry vertical that the engine registers. The restriction of the Engine industry vertical is based on DataStore: If unspecified, default to `GENERIC`. Vertical on Engine has to match vertical of the DataStore liniked to the engine.
+  "name": "A String", # Immutable. The fully qualified resource name of the engine. This field must be a UTF-8 encoded string with a length limit of 1024 characters. Format: `projects/{project_number}/locations/{location}/collections/{collection}/engines/{engine}` engine should be 1-63 characters, and valid characters are /a-z0-9*/. Otherwise, an INVALID_ARGUMENT error is returned.
+  "searchEngineConfig": { # Configurations for a Search Engine. # Configurations for the Search Engine. Only applicable if solution_type is SOLUTION_TYPE_SEARCH.
+    "searchAddOns": [ # The add-on that this search engine enables.
+      "A String",
+    ],
+    "searchTier": "A String", # The search feature tier of this engine. Different tiers might have different pricing. To learn more, please check the pricing documentation. Defaults to SearchTier.SEARCH_TIER_STANDARD if not specified.
+  },
+  "solutionType": "A String", # Required. The solutions of the engine.
+  "updateTime": "A String", # Output only. Timestamp the Recommendation Engine was last updated.
+}
+
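A short usage sketch tying the three new engine-training methods together (recommendation engines only; the engine name is a placeholder, and whether tune() returns a long-running operation is an assumption rather than something shown in this hunk):

    from googleapiclient.discovery import build

    client = build("discoveryengine", "v1beta")
    engine = (
        "projects/123/locations/global/collections/default_collection/"
        "engines/my-engine"
    )
    engines = client.projects().locations().collections().engines()
    paused = engines.pause(name=engine, body={}).execute()    # returns the Engine
    resumed = engines.resume(name=engine, body={}).execute()  # returns the Engine
    operation = engines.tune(name=engine, body={}).execute()  # presumably an LRO
    print(resumed.get("displayName"), operation.get("name"))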
+ +
+ resume(name, body=None, x__xgafv=None) +
Resumes the training of an existing engine. Only applicable if SolutionType is SOLUTION_TYPE_RECOMMENDATION.
+
+Args:
+  name: string, Required. The name of the engine to resume. Format: `projects/{project_number}/locations/{location_id}/collections/{collection_id}/engines/{engine_id}` (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request for resuming training of an engine.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Metadata that describes the training and serving parameters of an Engine.
+  "chatEngineConfig": { # Configurations for a Chat Engine. # Configurations for the Chat Engine. Only applicable if solution_type is SOLUTION_TYPE_CHAT.
+    "agentCreationConfig": { # Configurations for generating a Dialogflow agent. Note that these configurations are one-time consumed by and passed to Dialogflow service. It means they cannot be retrieved using EngineService.GetEngine or EngineService.ListEngines API after engine creation. # The configurationt generate the Dialogflow agent that is associated to this Engine. Note that these configurations are one-time consumed by and passed to Dialogflow service. It means they cannot be retrieved using EngineService.GetEngine or EngineService.ListEngines API after engine creation.
+      "business": "A String", # Name of the company, organization or other entity that the agent represents. Used for knowledge connector LLM prompt and for knowledge search.
+      "defaultLanguageCode": "A String", # Required. The default language of the agent as a language tag. See [Language Support](https://cloud.google.com/dialogflow/docs/reference/language) for a list of the currently supported language codes.
+      "location": "A String", # Agent location for Agent creation, supported values: global/us/eu. If not provided, us Engine will create Agent using us-central-1 by default; eu Engine will create Agent using eu-west-1 by default.
+      "timeZone": "A String", # Required. The time zone of the agent from the [time zone database](https://www.iana.org/time-zones), e.g., America/New_York, Europe/Paris.
+    },
+    "dialogflowAgentToLink": "A String", # The resource name of an exist Dialogflow agent to link to this Chat Engine. Customers can either provide `agent_creation_config` to create agent or provide an agent name that links the agent with the Chat engine. Format: `projects//locations//agents/`. Note that the `dialogflow_agent_to_link` are one-time consumed by and passed to Dialogflow service. It means they cannot be retrieved using EngineService.GetEngine or EngineService.ListEngines API after engine creation. Please use ChatEngineMetadata.dialogflow_agent for actual agent association after Engine is created.
+  },
+  "chatEngineMetadata": { # Additional information of a Chat Engine. Fields in this message are output only. # Output only. Additional information of the Chat Engine. Only applicable if solution_type is SOLUTION_TYPE_CHAT.
+    "dialogflowAgent": "A String", # The resource name of a Dialogflow agent, that this Chat Engine refers to. Format: `projects//locations//agents/`.
+  },
+  "commonConfig": { # Common configurations for an Engine. # Common config spec that specifies the metadata of the engine.
+    "companyName": "A String", # Immutable. The name of the company, business or entity that is associated with the engine. Setting this may help improve LLM related features.
+  },
+  "createTime": "A String", # Output only. Timestamp the Recommendation Engine was created at.
+  "dataStoreIds": [ # The data stores associated with this engine. For SOLUTION_TYPE_SEARCH and SOLUTION_TYPE_RECOMMENDATION type of engines, they can only associate with at most one data store. If solution_type is SOLUTION_TYPE_CHAT, multiple DataStores in the same Collection can be associated here. Note that when used in CreateEngineRequest, one DataStore id must be provided as the system will use it for necessary initializations.
+    "A String",
+  ],
+  "displayName": "A String", # Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters.
+  "industryVertical": "A String", # The industry vertical that the engine registers. The restriction of the Engine industry vertical is based on DataStore: If unspecified, default to `GENERIC`. Vertical on Engine has to match vertical of the DataStore liniked to the engine.
+  "name": "A String", # Immutable. The fully qualified resource name of the engine. This field must be a UTF-8 encoded string with a length limit of 1024 characters. Format: `projects/{project_number}/locations/{location}/collections/{collection}/engines/{engine}` engine should be 1-63 characters, and valid characters are /a-z0-9*/. Otherwise, an INVALID_ARGUMENT error is returned.
+  "searchEngineConfig": { # Configurations for a Search Engine. # Configurations for the Search Engine. Only applicable if solution_type is SOLUTION_TYPE_SEARCH.
+    "searchAddOns": [ # The add-on that this search engine enables.
+      "A String",
+    ],
+    "searchTier": "A String", # The search feature tier of this engine. Different tiers might have different pricing. To learn more, please check the pricing documentation. Defaults to SearchTier.SEARCH_TIER_STANDARD if not specified.
+  },
+  "solutionType": "A String", # Required. The solutions of the engine.
+  "updateTime": "A String", # Output only. Timestamp the Recommendation Engine was last updated.
+}
+
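
A minimal sketch of calling the new resume method through the generated Python client, assuming Application Default Credentials are configured; the project number and engine ID below are hypothetical placeholders:

    from googleapiclient.discovery import build

    # Build the v1beta client (assumes Application Default Credentials).
    client = build("discoveryengine", "v1beta")

    # Hypothetical resource name; substitute your own project, collection,
    # and engine IDs.
    engine_name = (
        "projects/123456789/locations/global/"
        "collections/default_collection/engines/my-recommendation-engine"
    )

    # The resume request body is an empty message; the call returns the
    # Engine resource described above.
    engine = (
        client.projects()
        .locations()
        .collections()
        .engines()
        .resume(name=engine_name, body={})
        .execute()
    )
    print(engine.get("displayName"), engine.get("solutionType"))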
+ +
+ tune(name, body=None, x__xgafv=None) +
Tunes an existing engine. Only applicable if SolutionType is SOLUTION_TYPE_RECOMMENDATION.
+
+Args:
+  name: string, Required. The resource name of the engine to tune. Format: `projects/{project_number}/locations/{location_id}/collections/{collection_id}/engines/{engine_id}` (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request to manually start a tuning process now (instead of waiting for the periodically scheduled tuning to happen).
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
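
Since tune returns a long-running operation rather than the Engine itself, a caller typically polls the operation until `done` is true and then inspects `response` or `error`. A sketch under the same assumptions as above (hypothetical IDs, Application Default Credentials), additionally assuming the engine-scoped operations resource is available for polling:

    import time

    from googleapiclient.discovery import build

    client = build("discoveryengine", "v1beta")
    engine_name = (
        "projects/123456789/locations/global/"
        "collections/default_collection/engines/my-recommendation-engine"
    )

    # Kick off tuning; the response is a google.longrunning.Operation.
    operation = (
        client.projects()
        .locations()
        .collections()
        .engines()
        .tune(name=engine_name, body={})
        .execute()
    )

    # Poll by operation name until `done` is true (assumed operations
    # resource under engines; adjust to wherever the operation lives).
    ops = client.projects().locations().collections().engines().operations()
    while not operation.get("done", False):
        time.sleep(30)
        operation = ops.get(name=operation["name"]).execute()

    if "error" in operation:
        print("Tuning failed:", operation["error"].get("message"))
    else:
        print("Tuning finished:", operation.get("response"))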
+
\ No newline at end of file
diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.engines.servingConfigs.html b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.engines.servingConfigs.html
index 86d27edaf8e..7096727e744 100644
--- a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.engines.servingConfigs.html
+++ b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.engines.servingConfigs.html
@@ -848,6 +848,12 @@

Method Details

}, "references": [ # Document References. { # Document reference. + "chunkContents": [ # List of cited chunk contents derived from document content. + { # Chunk content. + "content": "A String", # Chunk textual content. + "pageIdentifier": "A String", # Page identifier. + }, + ], "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`. "title": "A String", # Title of the document. "uri": "A String", # Cloud Storage or HTTP uri for the document. diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.branches.documents.html b/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.branches.documents.html index ed42eafac3d..667cfd95c03 100644 --- a/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.branches.documents.html +++ b/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.branches.documents.html @@ -229,7 +229,7 @@

Method Details

The object takes the form of: { # Request message for Import methods. - "autoGenerateIds": True or False, # Whether to automatically generate IDs for the documents if absent. If set to `true`, Document.ids are automatically generated based on the hash of the payload, where IDs may not be consistent during multiple imports. In which case ReconciliationMode.FULL is highly recommended to avoid duplicate contents. If unset or set to `false`, Document.ids have to be specified using id_field, otherwise, documents without IDs fail to be imported. Only set this field when using GcsSource or BigQuerySource, and when GcsSource.data_schema or BigQuerySource.data_schema is `custom` or `csv`. Otherwise, an INVALID_ARGUMENT error is thrown. + "autoGenerateIds": True or False, # Whether to automatically generate IDs for the documents if absent. If set to `true`, Document.ids are automatically generated based on the hash of the payload, where IDs may not be consistent during multiple imports. In which case ReconciliationMode.FULL is highly recommended to avoid duplicate contents. If unset or set to `false`, Document.ids have to be specified using id_field, otherwise, documents without IDs fail to be imported. Supported data sources: * GcsSource. GcsSource.data_schema must be `custom` or `csv`. Otherwise, an INVALID_ARGUMENT error is thrown. * BigQuerySource. BigQuerySource.data_schema must be `custom` or `csv`. Otherwise, an INVALID_ARGUMENT error is thrown. * SpannerSource * CloudSqlSource * FirestoreSource * BigtableSource "bigquerySource": { # BigQuery source import data from. # BigQuery input source. "dataSchema": "A String", # The schema to use when parsing the data from the source. Supported values for user event imports: * `user_event` (default): One UserEvent per row. Supported values for document imports: * `document` (default): One Document format per row. Each document must have a valid Document.id and one of Document.json_data or Document.struct_data. * `custom`: One custom data per row in arbitrary format that conforms to the defined Schema of the data store. This can only be used by Gen App Builder. "datasetId": "A String", # Required. The BigQuery data set to copy the data from with a length limit of 1,024 characters. @@ -242,16 +242,57 @@

Method Details

"projectId": "A String", # The project ID (can be project # or ID) that the BigQuery source is in with a length limit of 128 characters. If not specified, inherits the project ID from the parent request. "tableId": "A String", # Required. The BigQuery table to copy the data from with a length limit of 1,024 characters. }, + "bigtableSource": { # The Cloud Bigtable source for importing data # Cloud Bigtable input source. + "bigtableOptions": { # The Bigtable Options object that contains information to support the import. # Required. Bigtable options that contains information needed when parsing data into typed structures. For example, column type annotations. + "families": { # The mapping from family names to an object that contains column families level information for the given column family. If a family is not present in this map it will be ignored. + "a_key": { + "columns": [ # The list of objects that contains column level information for each column. If a column is not present in this list it will be ignored. + { + "encoding": "A String", # Optional. The encoding mode of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. This can be overridden for a specific column by listing that column in 'columns' and specifying an encoding for it. + "fieldName": "A String", # The field name to use for this column in the UCS document. The name has to match a-zA-Z0-9* If not set, we will parse it from the qualifier bytes with best effort. However, field name collisions could happen, where parsing behavior is undefined. + "qualifier": "A String", # Required. Qualifier of the column. If cannot decode with utf-8, store a base-64 encoded string. + "type": "A String", # Optional. The type of values in this column family. The values are expected to be encoded using HBase Bytes.toBytes function when the encoding value is set to BINARY. + }, + ], + "encoding": "A String", # Optional. The encoding mode of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. This can be overridden for a specific column by listing that column in 'columns' and specifying an encoding for it. + "fieldName": "A String", # The field name to use for this column family in the UCS document. The name has to match a-zA-Z0-9* If not set, we will parse it from the family name with best effort. However, due to difference naming pattern, there could be field name collisions, where parsing behavior is undefined. + "type": "A String", # Optional. The type of values in this column family. The values are expected to be encoded using HBase Bytes.toBytes function when the encoding value is set to BINARY. + }, + }, + "keyFieldName": "A String", # The field name used for saving row key value in the UCS document. The name has to match a-zA-Z0-9* + }, + "instanceId": "A String", # Required. The instance ID of the Cloud Bigtable that needs to be exported. + "projectId": "A String", # The project ID (can be project # or ID) that the Bigtable source is in with a length limit of 128 characters. If not specified, inherits the project ID from the parent request. + "tableId": "A String", # Required. The table ID of the Cloud Bigtable that needs to be exported. + }, + "cloudSqlSource": { # Cloud SQL source import data from. 
# Cloud SQL input source. + "databaseId": "A String", # Required. The Cloud SQL database to copy the data from with a length limit of 256 characters. + "gcsStagingDir": "A String", # Optional. Intermediate Cloud Storage directory used for the import with a length limit of 2,000 characters. Can be specified if one wants to have the Cloud SQL export to a specific Cloud Storage directory. Please ensure that the Cloud SQL service account has the necessary GCS Storage Admin permissions to access the specified GCS directory. + "instanceId": "A String", # Required. The Cloud SQL instance to copy the data from with a length limit of 256 characters. + "offload": True or False, # Optional. Option for serverless export. Enabling this option will incur additional cost. More info: https://cloud.google.com/sql/pricing#serverless + "projectId": "A String", # Optional. The project ID (can be project # or ID) that the Cloud SQL source is in with a length limit of 128 characters. If not specified, inherits the project ID from the parent request. + "tableId": "A String", # Required. The Cloud SQL table to copy the data from with a length limit of 256 characters. + }, "errorConfig": { # Configuration of destination for Import related errors. # The desired location of errors incurred during the Import. "gcsPrefix": "A String", # Cloud Storage prefix for import errors. This must be an empty, existing Cloud Storage directory. Import errors are written to sharded files in this directory, one per line, as a JSON-encoded `google.rpc.Status` message. }, + "fhirStoreSource": { # Cloud FhirStore source import data from. # FhirStore input source. + "fhirStore": "A String", # Required. The full resource name of the FHIR store to import data from, in the format of `projects/{project}/locations/{location}/datasets/{dataset}/fhirStores/{fhir_store}`. + "gcsStagingDir": "A String", # Intermediate Cloud Storage directory used for the import with a length limit of 2,000 characters. Can be specified if one wants to have the FhirStore export to a specific Cloud Storage directory. + }, + "firestoreSource": { # Firestore source import data from. # Firestore input source. + "collectionId": "A String", # Required. The Firestore collection to copy the data from with a length limit of 1500 characters. + "databaseId": "A String", # Required. The Firestore database to copy the data from with a length limit of 256 characters. + "gcsStagingDir": "A String", # Optional. Intermediate Cloud Storage directory used for the import with a length limit of 2,000 characters. Can be specified if one wants to have the Firestore export to a specific Cloud Storage directory. Please ensure that the Firestore service account has the necessary GCS Storage Admin permissions to access the specified GCS directory. + "projectId": "A String", # Optional. The project ID (can be project # or ID) that the Cloud SQL source is in with a length limit of 128 characters. If not specified, inherits the project ID from the parent request. + }, "gcsSource": { # Cloud Storage location for input content. # Cloud Storage location for the input content. "dataSchema": "A String", # The schema to use when parsing the data from the source. Supported values for document imports: * `document` (default): One JSON Document per line. Each document must have a valid Document.id. * `content`: Unstructured data (e.g. PDF, HTML). Each file matched by `input_uris` becomes a document, with the ID set to the first 128 bits of SHA256(URI) encoded as a hex string. 
* `custom`: One custom data JSON per row in arbitrary format that conforms to the defined Schema of the data store. This can only be used by Gen App Builder. * `csv`: A CSV file with header conforming to the defined Schema of the data store. Each entry after the header is imported as a Document. This can only be used by Gen App Builder. Supported values for user even imports: * `user_event` (default): One JSON UserEvent per line. "inputUris": [ # Required. Cloud Storage URIs to input files. URI can be up to 2000 characters long. URIs can match the full object path (for example, `gs://bucket/directory/object.json`) or a pattern matching one or more files, such as `gs://bucket/directory/*.json`. A request can contain at most 100 files (or 100,000 files if `data_schema` is `content`). Each file can be up to 2 GB (or 100 MB if `data_schema` is `content`). "A String", ], }, - "idField": "A String", # The field in the Cloud Storage and BigQuery sources that indicates the unique IDs of the documents. For GcsSource it is the key of the JSON field. For instance, `my_id` for JSON `{"my_id": "some_uuid"}`. For BigQuerySource it is the column name of the BigQuery table where the unique ids are stored. The values of the JSON field or the BigQuery column are used as the Document.ids. The JSON field or the BigQuery column must be of string type, and the values must be set as valid strings conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) with 1-63 characters. Otherwise, documents without valid IDs fail to be imported. Only set this field when using GcsSource or BigQuerySource, and when GcsSource.data_schema or BigQuerySource.data_schema is `custom`. And only set this field when auto_generate_ids is unset or set as `false`. Otherwise, an INVALID_ARGUMENT error is thrown. If it is unset, a default value `_id` is used when importing from the allowed data sources. + "idField": "A String", # The field indicates the ID field or column to be used as unique IDs of the documents. For GcsSource it is the key of the JSON field. For instance, `my_id` for JSON `{"my_id": "some_uuid"}`. For others, it may be the column name of the table where the unique ids are stored. The values of the JSON field or the table column are used as the Document.ids. The JSON field or the table column must be of string type, and the values must be set as valid strings conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) with 1-63 characters. Otherwise, documents without valid IDs fail to be imported. Only set this field when auto_generate_ids is unset or set as `false`. Otherwise, an INVALID_ARGUMENT error is thrown. If it is unset, a default value `_id` is used when importing from the allowed data sources. Supported data sources: * GcsSource. GcsSource.data_schema must be `custom` or `csv`. Otherwise, an INVALID_ARGUMENT error is thrown. * BigQuerySource. BigQuerySource.data_schema must be `custom` or `csv`. Otherwise, an INVALID_ARGUMENT error is thrown. * SpannerSource * CloudSqlSource * FirestoreSource * BigtableSource "inlineSource": { # The inline source for the input config for ImportDocuments method. # The Inline source for the input content for documents. "documents": [ # Required. A list of documents to update/create. Each document must have a valid Document.id. Recommended max of 100 items. { # Document captures all raw metadata information of items to be recommended or searched. @@ -276,6 +317,13 @@

Method Details

], }, "reconciliationMode": "A String", # The mode of reconciliation between existing documents and the documents to be imported. Defaults to ReconciliationMode.INCREMENTAL. + "spannerSource": { # The Spanner source for importing data # Spanner input source. + "databaseId": "A String", # Required. The database ID of the source Spanner table. + "enableDataBoost": True or False, # Optional. Whether to apply data boost on Spanner export. Enabling this option will incur additional cost. More info: https://cloud.google.com/spanner/docs/databoost/databoost-overview#billing_and_quotas + "instanceId": "A String", # Required. The instance ID of the source Spanner table. + "projectId": "A String", # The project ID that the Spanner source is in with a length limit of 128 characters. If not specified, inherits the project ID from the parent request. + "tableId": "A String", # Required. The table name of the Spanner database that needs to be imported. + }, } x__xgafv: string, V1 error format. diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.conversations.html b/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.conversations.html index 3bb4ea3b64e..5a132e56bde 100644 --- a/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.conversations.html +++ b/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.conversations.html @@ -166,6 +166,12 @@

Method Details

}, "references": [ # Document References. { # Document reference. + "chunkContents": [ # List of cited chunk contents derived from document content. + { # Chunk content. + "content": "A String", # Chunk textual content. + "pageIdentifier": "A String", # Page identifier. + }, + ], "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`. "title": "A String", # Title of the document. "uri": "A String", # Cloud Storage or HTTP uri for the document. @@ -275,6 +281,12 @@

Method Details

}, "references": [ # Document References. { # Document reference. + "chunkContents": [ # List of cited chunk contents derived from document content. + { # Chunk content. + "content": "A String", # Chunk textual content. + "pageIdentifier": "A String", # Page identifier. + }, + ], "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`. "title": "A String", # Title of the document. "uri": "A String", # Cloud Storage or HTTP uri for the document. @@ -342,6 +354,12 @@

Method Details

}, "references": [ # Document References. { # Document reference. + "chunkContents": [ # List of cited chunk contents derived from document content. + { # Chunk content. + "content": "A String", # Chunk textual content. + "pageIdentifier": "A String", # Page identifier. + }, + ], "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`. "title": "A String", # Title of the document. "uri": "A String", # Cloud Storage or HTTP uri for the document. @@ -438,6 +456,12 @@

Method Details

}, "references": [ # Document References. { # Document reference. + "chunkContents": [ # List of cited chunk contents derived from document content. + { # Chunk content. + "content": "A String", # Chunk textual content. + "pageIdentifier": "A String", # Page identifier. + }, + ], "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`. "title": "A String", # Title of the document. "uri": "A String", # Cloud Storage or HTTP uri for the document. @@ -516,6 +540,12 @@

Method Details

}, "references": [ # Document References. { # Document reference. + "chunkContents": [ # List of cited chunk contents derived from document content. + { # Chunk content. + "content": "A String", # Chunk textual content. + "pageIdentifier": "A String", # Page identifier. + }, + ], "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`. "title": "A String", # Title of the document. "uri": "A String", # Cloud Storage or HTTP uri for the document. @@ -619,6 +649,12 @@

Method Details

}, "references": [ # Document References. { # Document reference. + "chunkContents": [ # List of cited chunk contents derived from document content. + { # Chunk content. + "content": "A String", # Chunk textual content. + "pageIdentifier": "A String", # Page identifier. + }, + ], "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`. "title": "A String", # Title of the document. "uri": "A String", # Cloud Storage or HTTP uri for the document. @@ -710,6 +746,12 @@

Method Details

}, "references": [ # Document References. { # Document reference. + "chunkContents": [ # List of cited chunk contents derived from document content. + { # Chunk content. + "content": "A String", # Chunk textual content. + "pageIdentifier": "A String", # Page identifier. + }, + ], "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`. "title": "A String", # Title of the document. "uri": "A String", # Cloud Storage or HTTP uri for the document. @@ -807,6 +849,12 @@

Method Details

}, "references": [ # Document References. { # Document reference. + "chunkContents": [ # List of cited chunk contents derived from document content. + { # Chunk content. + "content": "A String", # Chunk textual content. + "pageIdentifier": "A String", # Page identifier. + }, + ], "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`. "title": "A String", # Title of the document. "uri": "A String", # Cloud Storage or HTTP uri for the document. @@ -886,6 +934,12 @@

Method Details

}, "references": [ # Document References. { # Document reference. + "chunkContents": [ # List of cited chunk contents derived from document content. + { # Chunk content. + "content": "A String", # Chunk textual content. + "pageIdentifier": "A String", # Page identifier. + }, + ], "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`. "title": "A String", # Title of the document. "uri": "A String", # Cloud Storage or HTTP uri for the document. diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.servingConfigs.html b/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.servingConfigs.html index ab0ede5db93..ceed7cca943 100644 --- a/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.servingConfigs.html +++ b/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.servingConfigs.html @@ -848,6 +848,12 @@

Method Details

}, "references": [ # Document References. { # Document reference. + "chunkContents": [ # List of cited chunk contents derived from document content. + { # Chunk content. + "content": "A String", # Chunk textual content. + "pageIdentifier": "A String", # Page identifier. + }, + ], "document": "A String", # Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`. "title": "A String", # Title of the document. "uri": "A String", # Cloud Storage or HTTP uri for the document. diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.html b/docs/dyn/discoveryengine_v1beta.projects.locations.html index 6513c6242b2..692facc6ea9 100644 --- a/docs/dyn/discoveryengine_v1beta.projects.locations.html +++ b/docs/dyn/discoveryengine_v1beta.projects.locations.html @@ -89,6 +89,11 @@

Instance Methods

Returns the operations Resource.

+

+ rankingConfigs() +

+

Returns the rankingConfigs Resource.

+

close()

Close httplib2 connections.

diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.rankingConfigs.html b/docs/dyn/discoveryengine_v1beta.projects.locations.rankingConfigs.html
new file mode 100644
index 00000000000..fa2a2b018d1
--- /dev/null
+++ b/docs/dyn/discoveryengine_v1beta.projects.locations.rankingConfigs.html
@@ -0,0 +1,133 @@
+
+
+

Discovery Engine API . projects . locations . rankingConfigs

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ rank(rankingConfig, body=None, x__xgafv=None)

+

Ranks a list of text records based on the given input query.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ rank(rankingConfig, body=None, x__xgafv=None) +
Ranks a list of text records based on the given input query.
+
+Args:
+  rankingConfig: string, Required. The resource name of the rank service config, such as `projects/{project_num}/locations/{location_id}/rankingConfigs/default_ranking_config`. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request message for RankService.Rank method.
+  "ignoreRecordDetailsInResponse": True or False, # If true, the response will contain only record ID and score. By default, it is false, the response will contain record details.
+  "model": "A String", # The identifier of the model to use. It is one of: * `semantic-ranker-512@latest`: Semantic ranking model with maxiumn input token size 512. It is set to `semantic-ranker-512@latest` by default if unspecified.
+  "query": "A String", # The query to use.
+  "records": [ # Required. A list of records to rank.
+    { # Record message for RankService.Rank method.
+      "content": "A String", # The content of the record. Empty by default. At least one of title or content should be set otherwise an INVALID_ARGUMENT error is thrown.
+      "id": "A String", # The unique ID to represent the record.
+      "score": 3.14, # The score of this record based on the given query and selected model.
+      "title": "A String", # The title of the record. Empty by default. At least one of title or content should be set otherwise an INVALID_ARGUMENT error is thrown.
+    },
+  ],
+  "topN": 42, # The number of results to return. If this is unset or no bigger than zero, returns all results.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response message for RankService.Rank method.
+  "records": [ # A list of records sorted by descending score.
+    { # Record message for RankService.Rank method.
+      "content": "A String", # The content of the record. Empty by default. At least one of title or content should be set otherwise an INVALID_ARGUMENT error is thrown.
+      "id": "A String", # The unique ID to represent the record.
+      "score": 3.14, # The score of this record based on the given query and selected model.
+      "title": "A String", # The title of the record. Empty by default. At least one of title or content should be set otherwise an INVALID_ARGUMENT error is thrown.
+    },
+  ],
+}
+
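
Putting the request and response shapes together, a minimal sketch of calling rank with the generated Python client; the project number is a hypothetical placeholder, and `default_ranking_config` follows the resource-name example above:

    from googleapiclient.discovery import build

    client = build("discoveryengine", "v1beta")

    ranking_config = (
        "projects/123456789/locations/global/"
        "rankingConfigs/default_ranking_config"
    )

    # Each record needs at least a title or content; ids are caller-chosen.
    body = {
        "model": "semantic-ranker-512@latest",
        "query": "ocean temperature trends",
        "topN": 2,
        "records": [
            {"id": "1", "title": "Sea surface temperature", "content": "..."},
            {"id": "2", "title": "Tidal energy", "content": "..."},
            {"id": "3", "title": "Deep sea exploration", "content": "..."},
        ],
    }

    response = (
        client.projects()
        .locations()
        .rankingConfigs()
        .rank(rankingConfig=ranking_config, body=body)
        .execute()
    )

    # Records come back sorted by descending score.
    for record in response.get("records", []):
        print(record["id"], record["score"])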
+ + \ No newline at end of file diff --git a/googleapiclient/discovery_cache/documents/discoveryengine.v1alpha.json b/googleapiclient/discovery_cache/documents/discoveryengine.v1alpha.json index 28add5e02d1..736fb29fe39 100644 --- a/googleapiclient/discovery_cache/documents/discoveryengine.v1alpha.json +++ b/googleapiclient/discovery_cache/documents/discoveryengine.v1alpha.json @@ -5110,6 +5110,38 @@ ] } } +}, +"rankingConfigs": { +"methods": { +"rank": { +"description": "Ranks a list of text records based on the given input query.", +"flatPath": "v1alpha/projects/{projectsId}/locations/{locationsId}/rankingConfigs/{rankingConfigsId}:rank", +"httpMethod": "POST", +"id": "discoveryengine.projects.locations.rankingConfigs.rank", +"parameterOrder": [ +"rankingConfig" +], +"parameters": { +"rankingConfig": { +"description": "Required. The resource name of the rank service config, such as `projects/{project_num}/locations/{location_id}/rankingConfigs/default_ranking_config`.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/rankingConfigs/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1alpha/{+rankingConfig}:rank", +"request": { +"$ref": "GoogleCloudDiscoveryengineV1alphaRankRequest" +}, +"response": { +"$ref": "GoogleCloudDiscoveryengineV1alphaRankResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +} +} } } }, @@ -5186,7 +5218,7 @@ } } }, -"revision": "20240318", +"revision": "20240329", "rootUrl": "https://discoveryengine.googleapis.com/", "schemas": { "GoogleApiHttpBody": { @@ -5478,12 +5510,14 @@ "enum": [ "INDUSTRY_VERTICAL_UNSPECIFIED", "GENERIC", -"MEDIA" +"MEDIA", +"HEALTHCARE_FHIR" ], "enumDescriptions": [ "Value used when unset.", "The generic vertical for documents that are not specific to any industry vertical.", -"The media industry vertical." +"The media industry vertical.", +"The healthcare FHIR vertical." ], "type": "string" }, @@ -5498,13 +5532,15 @@ "SOLUTION_TYPE_UNSPECIFIED", "SOLUTION_TYPE_RECOMMENDATION", "SOLUTION_TYPE_SEARCH", -"SOLUTION_TYPE_CHAT" +"SOLUTION_TYPE_CHAT", +"SOLUTION_TYPE_GENERATIVE_CHAT" ], "enumDescriptions": [ "Default value.", "Used for Recommendations AI.", "Used for Discovery Search.", -"Used for use cases related to the Generative AI agent." +"Used for use cases related to the Generative AI agent.", +"Used for use cases related to the Generative Chat agent. It's used for Generative chat engine only, the associated data stores must enrolled with `SOLUTION_TYPE_CHAT` solution." ], "type": "string" }, @@ -5731,12 +5767,14 @@ "enum": [ "INDUSTRY_VERTICAL_UNSPECIFIED", "GENERIC", -"MEDIA" +"MEDIA", +"HEALTHCARE_FHIR" ], "enumDescriptions": [ "Value used when unset.", "The generic vertical for documents that are not specific to any industry vertical.", -"The media industry vertical." +"The media industry vertical.", +"The healthcare FHIR vertical." ], "type": "string" }, @@ -5754,13 +5792,15 @@ "SOLUTION_TYPE_UNSPECIFIED", "SOLUTION_TYPE_RECOMMENDATION", "SOLUTION_TYPE_SEARCH", -"SOLUTION_TYPE_CHAT" +"SOLUTION_TYPE_CHAT", +"SOLUTION_TYPE_GENERATIVE_CHAT" ], "enumDescriptions": [ "Default value.", "Used for Recommendations AI.", "Used for Discovery Search.", -"Used for use cases related to the Generative AI agent." +"Used for use cases related to the Generative AI agent.", +"Used for use cases related to the Generative Chat agent. It's used for Generative chat engine only, the associated data stores must enrolled with `SOLUTION_TYPE_CHAT` solution." 
], "type": "string" }, @@ -6398,10 +6438,164 @@ }, "type": "object" }, +"GoogleCloudDiscoveryengineV1alphaBigtableOptions": { +"description": "The Bigtable Options object that contains information to support the import.", +"id": "GoogleCloudDiscoveryengineV1alphaBigtableOptions", +"properties": { +"families": { +"additionalProperties": { +"$ref": "GoogleCloudDiscoveryengineV1alphaBigtableOptionsBigtableColumnFamily" +}, +"description": "The mapping from family names to an object that contains column families level information for the given column family. If a family is not present in this map it will be ignored.", +"type": "object" +}, +"keyFieldName": { +"description": "The field name used for saving row key value in the UCS document. The name has to match a-zA-Z0-9*", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudDiscoveryengineV1alphaBigtableOptionsBigtableColumn": { +"id": "GoogleCloudDiscoveryengineV1alphaBigtableOptionsBigtableColumn", +"properties": { +"encoding": { +"description": "Optional. The encoding mode of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. This can be overridden for a specific column by listing that column in 'columns' and specifying an encoding for it.", +"enum": [ +"ENCODING_UNSPECIFIED", +"TEXT", +"BINARY" +], +"enumDescriptions": [ +"", +"", +"" +], +"type": "string" +}, +"fieldName": { +"description": "The field name to use for this column in the UCS document. The name has to match a-zA-Z0-9* If not set, we will parse it from the qualifier bytes with best effort. However, field name collisions could happen, where parsing behavior is undefined.", +"type": "string" +}, +"qualifier": { +"description": "Required. Qualifier of the column. If cannot decode with utf-8, store a base-64 encoded string.", +"format": "byte", +"type": "string" +}, +"type": { +"description": "Optional. The type of values in this column family. The values are expected to be encoded using HBase Bytes.toBytes function when the encoding value is set to BINARY.", +"enum": [ +"TYPE_UNSPECIFIED", +"STRING", +"NUMBER", +"INTEGER", +"VAR_INTEGER", +"BIG_NUMERIC", +"BOOLEAN", +"JSON" +], +"enumDescriptions": [ +"", +"", +"", +"", +"", +"", +"", +"" +], +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudDiscoveryengineV1alphaBigtableOptionsBigtableColumnFamily": { +"id": "GoogleCloudDiscoveryengineV1alphaBigtableOptionsBigtableColumnFamily", +"properties": { +"columns": { +"description": "The list of objects that contains column level information for each column. If a column is not present in this list it will be ignored.", +"items": { +"$ref": "GoogleCloudDiscoveryengineV1alphaBigtableOptionsBigtableColumn" +}, +"type": "array" +}, +"encoding": { +"description": "Optional. The encoding mode of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. This can be overridden for a specific column by listing that column in 'columns' and specifying an encoding for it.", +"enum": [ +"ENCODING_UNSPECIFIED", +"TEXT", +"BINARY" +], +"enumDescriptions": [ +"", +"", +"" +], +"type": "string" +}, +"fieldName": { +"description": "The field name to use for this column family in the UCS document. 
The name has to match a-zA-Z0-9* If not set, we will parse it from the family name with best effort. However, due to difference naming pattern, there could be field name collisions, where parsing behavior is undefined.", +"type": "string" +}, +"type": { +"description": "Optional. The type of values in this column family. The values are expected to be encoded using HBase Bytes.toBytes function when the encoding value is set to BINARY.", +"enum": [ +"TYPE_UNSPECIFIED", +"STRING", +"NUMBER", +"INTEGER", +"VAR_INTEGER", +"BIG_NUMERIC", +"BOOLEAN", +"JSON" +], +"enumDescriptions": [ +"", +"", +"", +"", +"", +"", +"", +"" +], +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudDiscoveryengineV1alphaBigtableSource": { +"description": "The Cloud Bigtable source for importing data", +"id": "GoogleCloudDiscoveryengineV1alphaBigtableSource", +"properties": { +"bigtableOptions": { +"$ref": "GoogleCloudDiscoveryengineV1alphaBigtableOptions", +"description": "Required. Bigtable options that contains information needed when parsing data into typed structures. For example, column type annotations." +}, +"instanceId": { +"description": "Required. The instance ID of the Cloud Bigtable that needs to be exported.", +"type": "string" +}, +"projectId": { +"description": "The project ID (can be project # or ID) that the Bigtable source is in with a length limit of 128 characters. If not specified, inherits the project ID from the parent request.", +"type": "string" +}, +"tableId": { +"description": "Required. The table ID of the Cloud Bigtable that needs to be exported.", +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudDiscoveryengineV1alphaChunk": { "description": "Chunk captures all raw metadata information of items to be recommended or searched in the chunk mode.", "id": "GoogleCloudDiscoveryengineV1alphaChunk", "properties": { +"chunkMetadata": { +"$ref": "GoogleCloudDiscoveryengineV1alphaChunkChunkMetadata", +"description": "Output only. Metadata of the current chunk.", +"readOnly": true +}, "content": { "description": "Content is a string from a document (parsed content).", "type": "string" @@ -6426,6 +6620,31 @@ "name": { "description": "The full resource name of the chunk. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/branches/{branch}/documents/{document_id}/chunks/{chunk_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters.", "type": "string" +}, +"pageSpan": { +"$ref": "GoogleCloudDiscoveryengineV1alphaChunkPageSpan", +"description": "Page span of the chunk." +} +}, +"type": "object" +}, +"GoogleCloudDiscoveryengineV1alphaChunkChunkMetadata": { +"description": "Metadata of the current chunk. This field is only populated on SearchService.Search API.", +"id": "GoogleCloudDiscoveryengineV1alphaChunkChunkMetadata", +"properties": { +"nextChunks": { +"description": "The next chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_next_chunks. This field is only populated on SearchService.Search API.", +"items": { +"$ref": "GoogleCloudDiscoveryengineV1alphaChunk" +}, +"type": "array" +}, +"previousChunks": { +"description": "The previous chunks of the current chunk. The number is controlled by SearchRequest.ContentSearchSpec.ChunkSpec.num_previous_chunks. 
This field is only populated on SearchService.Search API.", +"items": { +"$ref": "GoogleCloudDiscoveryengineV1alphaChunk" +}, +"type": "array" } }, "type": "object" @@ -6445,6 +6664,54 @@ }, "type": "object" }, +"GoogleCloudDiscoveryengineV1alphaChunkPageSpan": { +"description": "Page span of the chunk.", +"id": "GoogleCloudDiscoveryengineV1alphaChunkPageSpan", +"properties": { +"pageEnd": { +"description": "The end page of the chunk.", +"format": "int32", +"type": "integer" +}, +"pageStart": { +"description": "The start page of the chunk.", +"format": "int32", +"type": "integer" +} +}, +"type": "object" +}, +"GoogleCloudDiscoveryengineV1alphaCloudSqlSource": { +"description": "Cloud SQL source import data from.", +"id": "GoogleCloudDiscoveryengineV1alphaCloudSqlSource", +"properties": { +"databaseId": { +"description": "Required. The Cloud SQL database to copy the data from with a length limit of 256 characters.", +"type": "string" +}, +"gcsStagingDir": { +"description": "Optional. Intermediate Cloud Storage directory used for the import with a length limit of 2,000 characters. Can be specified if one wants to have the Cloud SQL export to a specific Cloud Storage directory. Please ensure that the Cloud SQL service account has the necessary GCS Storage Admin permissions to access the specified GCS directory.", +"type": "string" +}, +"instanceId": { +"description": "Required. The Cloud SQL instance to copy the data from with a length limit of 256 characters.", +"type": "string" +}, +"offload": { +"description": "Optional. Option for serverless export. Enabling this option will incur additional cost. More info: https://cloud.google.com/sql/pricing#serverless", +"type": "boolean" +}, +"projectId": { +"description": "Optional. The project ID (can be project # or ID) that the Cloud SQL source is in with a length limit of 128 characters. If not specified, inherits the project ID from the parent request.", +"type": "string" +}, +"tableId": { +"description": "Required. The Cloud SQL table to copy the data from with a length limit of 256 characters.", +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudDiscoveryengineV1alphaCompleteQueryRequest": { "description": "Request message for CompletionService.CompleteQuery method.", "id": "GoogleCloudDiscoveryengineV1alphaCompleteQueryRequest", @@ -6855,12 +7122,14 @@ "enum": [ "INDUSTRY_VERTICAL_UNSPECIFIED", "GENERIC", -"MEDIA" +"MEDIA", +"HEALTHCARE_FHIR" ], "enumDescriptions": [ "Value used when unset.", "The generic vertical for documents that are not specific to any industry vertical.", -"The media industry vertical." +"The media industry vertical.", +"The healthcare FHIR vertical." ], "type": "string" }, @@ -6875,13 +7144,15 @@ "SOLUTION_TYPE_UNSPECIFIED", "SOLUTION_TYPE_RECOMMENDATION", "SOLUTION_TYPE_SEARCH", -"SOLUTION_TYPE_CHAT" +"SOLUTION_TYPE_CHAT", +"SOLUTION_TYPE_GENERATIVE_CHAT" ], "enumDescriptions": [ "Default value.", "Used for Recommendations AI.", "Used for Discovery Search.", -"Used for use cases related to the Generative AI agent." +"Used for use cases related to the Generative AI agent.", +"Used for use cases related to the Generative Chat agent. It's used for Generative chat engine only, the associated data stores must enrolled with `SOLUTION_TYPE_CHAT` solution." 
], "type": "string" }, @@ -7324,12 +7595,14 @@ "enum": [ "INDUSTRY_VERTICAL_UNSPECIFIED", "GENERIC", -"MEDIA" +"MEDIA", +"HEALTHCARE_FHIR" ], "enumDescriptions": [ "Value used when unset.", "The generic vertical for documents that are not specific to any industry vertical.", -"The media industry vertical." +"The media industry vertical.", +"The healthcare FHIR vertical." ], "type": "string" }, @@ -7360,13 +7633,15 @@ "SOLUTION_TYPE_UNSPECIFIED", "SOLUTION_TYPE_RECOMMENDATION", "SOLUTION_TYPE_SEARCH", -"SOLUTION_TYPE_CHAT" +"SOLUTION_TYPE_CHAT", +"SOLUTION_TYPE_GENERATIVE_CHAT" ], "enumDescriptions": [ "Default value.", "Used for Recommendations AI.", "Used for Discovery Search.", -"Used for use cases related to the Generative AI agent." +"Used for use cases related to the Generative AI agent.", +"Used for use cases related to the Generative Chat agent. It's used for Generative chat engine only, the associated data stores must enrolled with `SOLUTION_TYPE_CHAT` solution." ], "type": "string" }, @@ -7695,6 +7970,21 @@ }, "type": "object" }, +"GoogleCloudDiscoveryengineV1alphaFhirStoreSource": { +"description": "Cloud FhirStore source import data from.", +"id": "GoogleCloudDiscoveryengineV1alphaFhirStoreSource", +"properties": { +"fhirStore": { +"description": "Required. The full resource name of the FHIR store to import data from, in the format of `projects/{project}/locations/{location}/datasets/{dataset}/fhirStores/{fhir_store}`.", +"type": "string" +}, +"gcsStagingDir": { +"description": "Intermediate Cloud Storage directory used for the import with a length limit of 2,000 characters. Can be specified if one wants to have the FhirStore export to a specific Cloud Storage directory.", +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudDiscoveryengineV1alphaFieldConfig": { "description": "Configurations for fields of a schema. For example, configuring a field is indexable, or searchable.", "id": "GoogleCloudDiscoveryengineV1alphaFieldConfig", @@ -7820,6 +8110,29 @@ }, "type": "object" }, +"GoogleCloudDiscoveryengineV1alphaFirestoreSource": { +"description": "Firestore source import data from.", +"id": "GoogleCloudDiscoveryengineV1alphaFirestoreSource", +"properties": { +"collectionId": { +"description": "Required. The Firestore collection to copy the data from with a length limit of 1500 characters.", +"type": "string" +}, +"databaseId": { +"description": "Required. The Firestore database to copy the data from with a length limit of 256 characters.", +"type": "string" +}, +"gcsStagingDir": { +"description": "Optional. Intermediate Cloud Storage directory used for the import with a length limit of 2,000 characters. Can be specified if one wants to have the Firestore export to a specific Cloud Storage directory. Please ensure that the Firestore service account has the necessary GCS Storage Admin permissions to access the specified GCS directory.", +"type": "string" +}, +"projectId": { +"description": "Optional. The project ID (can be project # or ID) that the Cloud SQL source is in with a length limit of 128 characters. 
If not specified, inherits the project ID from the parent request.", +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudDiscoveryengineV1alphaGcsSource": { "description": "Cloud Storage location for input content.", "id": "GoogleCloudDiscoveryengineV1alphaGcsSource", @@ -7838,6 +8151,17 @@ }, "type": "object" }, +"GoogleCloudDiscoveryengineV1alphaGroundingConfig": { +"description": "Grounding configuration.", +"id": "GoogleCloudDiscoveryengineV1alphaGroundingConfig", +"properties": { +"name": { +"description": "Required. Name of the GroundingConfig, of the form `projects/{project}/locations/{location}/groundingConfig`.", +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudDiscoveryengineV1alphaGuidedSearchSpec": { "description": "Defines guided search spec.", "id": "GoogleCloudDiscoveryengineV1alphaGuidedSearchSpec", @@ -7894,6 +8218,47 @@ }, "type": "object" }, +"GoogleCloudDiscoveryengineV1alphaImportCompletionSuggestionsMetadata": { +"description": "Metadata related to the progress of the ImportCompletionSuggestions operation. This will be returned by the google.longrunning.Operation.metadata field.", +"id": "GoogleCloudDiscoveryengineV1alphaImportCompletionSuggestionsMetadata", +"properties": { +"createTime": { +"description": "Operation create time.", +"format": "google-datetime", +"type": "string" +}, +"updateTime": { +"description": "Operation last update time. If the operation is done, this is also the finish time.", +"format": "google-datetime", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudDiscoveryengineV1alphaImportCompletionSuggestionsResponse": { +"description": "Response of the CompletionService.ImportCompletionSuggestions method. If the long running operation is done, this message is returned by the google.longrunning.Operations.response field if the operation is successful.", +"id": "GoogleCloudDiscoveryengineV1alphaImportCompletionSuggestionsResponse", +"properties": { +"errorSamples": { +"description": "A sample of errors encountered while processing the request.", +"items": { +"$ref": "GoogleRpcStatus" +}, +"type": "array" +}, +"failureCount": { +"description": "Count of CompletionSuggestions that failed to be imported.", +"format": "int64", +"type": "string" +}, +"successCount": { +"description": "Count of CompletionSuggestions successfully imported.", +"format": "int64", +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudDiscoveryengineV1alphaImportDocumentsMetadata": { "description": "Metadata related to the progress of the ImportDocuments operation. This is returned by the google.longrunning.Operation.metadata field.", "id": "GoogleCloudDiscoveryengineV1alphaImportDocumentsMetadata", @@ -7926,23 +8291,39 @@ "id": "GoogleCloudDiscoveryengineV1alphaImportDocumentsRequest", "properties": { "autoGenerateIds": { -"description": "Whether to automatically generate IDs for the documents if absent. If set to `true`, Document.ids are automatically generated based on the hash of the payload, where IDs may not be consistent during multiple imports. In which case ReconciliationMode.FULL is highly recommended to avoid duplicate contents. If unset or set to `false`, Document.ids have to be specified using id_field, otherwise, documents without IDs fail to be imported. Only set this field when using GcsSource or BigQuerySource, and when GcsSource.data_schema or BigQuerySource.data_schema is `custom` or `csv`. Otherwise, an INVALID_ARGUMENT error is thrown.", +"description": "Whether to automatically generate IDs for the documents if absent. 
If set to `true`, Document.ids are automatically generated based on the hash of the payload, where IDs may not be consistent during multiple imports. In which case ReconciliationMode.FULL is highly recommended to avoid duplicate contents. If unset or set to `false`, Document.ids have to be specified using id_field, otherwise, documents without IDs fail to be imported. Supported data sources: * GcsSource. GcsSource.data_schema must be `custom` or `csv`. Otherwise, an INVALID_ARGUMENT error is thrown. * BigQuerySource. BigQuerySource.data_schema must be `custom` or `csv`. Otherwise, an INVALID_ARGUMENT error is thrown. * SpannerSource * CloudSqlSource * FirestoreSource * BigtableSource", "type": "boolean" }, "bigquerySource": { "$ref": "GoogleCloudDiscoveryengineV1alphaBigQuerySource", "description": "BigQuery input source." }, +"bigtableSource": { +"$ref": "GoogleCloudDiscoveryengineV1alphaBigtableSource", +"description": "Cloud Bigtable input source." +}, +"cloudSqlSource": { +"$ref": "GoogleCloudDiscoveryengineV1alphaCloudSqlSource", +"description": "Cloud SQL input source." +}, "errorConfig": { "$ref": "GoogleCloudDiscoveryengineV1alphaImportErrorConfig", "description": "The desired location of errors incurred during the Import." }, +"fhirStoreSource": { +"$ref": "GoogleCloudDiscoveryengineV1alphaFhirStoreSource", +"description": "FhirStore input source." +}, +"firestoreSource": { +"$ref": "GoogleCloudDiscoveryengineV1alphaFirestoreSource", +"description": "Firestore input source." +}, "gcsSource": { "$ref": "GoogleCloudDiscoveryengineV1alphaGcsSource", "description": "Cloud Storage location for the input content." }, "idField": { -"description": "The field in the Cloud Storage and BigQuery sources that indicates the unique IDs of the documents. For GcsSource it is the key of the JSON field. For instance, `my_id` for JSON `{\"my_id\": \"some_uuid\"}`. For BigQuerySource it is the column name of the BigQuery table where the unique ids are stored. The values of the JSON field or the BigQuery column are used as the Document.ids. The JSON field or the BigQuery column must be of string type, and the values must be set as valid strings conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) with 1-63 characters. Otherwise, documents without valid IDs fail to be imported. Only set this field when using GcsSource or BigQuerySource, and when GcsSource.data_schema or BigQuerySource.data_schema is `custom`. And only set this field when auto_generate_ids is unset or set as `false`. Otherwise, an INVALID_ARGUMENT error is thrown. If it is unset, a default value `_id` is used when importing from the allowed data sources.", +"description": "The field indicates the ID field or column to be used as unique IDs of the documents. For GcsSource it is the key of the JSON field. For instance, `my_id` for JSON `{\"my_id\": \"some_uuid\"}`. For others, it may be the column name of the table where the unique ids are stored. The values of the JSON field or the table column are used as the Document.ids. The JSON field or the table column must be of string type, and the values must be set as valid strings conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) with 1-63 characters. Otherwise, documents without valid IDs fail to be imported. Only set this field when auto_generate_ids is unset or set as `false`. Otherwise, an INVALID_ARGUMENT error is thrown. If it is unset, a default value `_id` is used when importing from the allowed data sources. Supported data sources: * GcsSource. 
@@ -8651,6 +9036,75 @@ }, "type": "object" }, +"GoogleCloudDiscoveryengineV1alphaRankRequest": { +"description": "Request message for RankService.Rank method.", +"id": "GoogleCloudDiscoveryengineV1alphaRankRequest", +"properties": { +"ignoreRecordDetailsInResponse": { +"description": "If true, the response will contain only record ID and score. By default, it is false and the response will contain record details.", +"type": "boolean" +}, +"model": { +"description": "The identifier of the model to use. It is one of: * `semantic-ranker-512@latest`: Semantic ranking model with a maximum input token size of 512. It is set to `semantic-ranker-512@latest` by default if unspecified.", +"type": "string" +}, +"query": { +"description": "The query to use.", +"type": "string" +}, +"records": { +"description": "Required. A list of records to rank.", +"items": { +"$ref": "GoogleCloudDiscoveryengineV1alphaRankingRecord" +}, +"type": "array" +}, +"topN": { +"description": "The number of results to return. If this is unset or not greater than zero, all results are returned.", +"format": "int32", +"type": "integer" +} +}, +"type": "object" +}, +"GoogleCloudDiscoveryengineV1alphaRankResponse": { +"description": "Response message for RankService.Rank method.", +"id": "GoogleCloudDiscoveryengineV1alphaRankResponse", +"properties": { +"records": { +"description": "A list of records sorted by descending score.", +"items": { +"$ref": "GoogleCloudDiscoveryengineV1alphaRankingRecord" +}, +"type": "array" +} +}, +"type": "object" +}, +"GoogleCloudDiscoveryengineV1alphaRankingRecord": { +"description": "Record message for RankService.Rank method.", +"id": "GoogleCloudDiscoveryengineV1alphaRankingRecord", +"properties": { +"content": { +"description": "The content of the record. Empty by default. At least one of title or content should be set, otherwise an INVALID_ARGUMENT error is thrown.", +"type": "string" +}, +"id": { +"description": "The unique ID to represent the record.", +"type": "string" +}, +"score": { +"description": "The score of this record based on the given query and selected model.", +"format": "float", +"type": "number" +}, +"title": { +"description": "The title of the record. Empty by default. At least one of title or content should be set, otherwise an INVALID_ARGUMENT error is thrown.", +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudDiscoveryengineV1alphaRecommendRequest": { "description": "Request message for Recommend method.", "id": "GoogleCloudDiscoveryengineV1alphaRecommendRequest",
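A hedged sketch of calling the new rank method through the generated Python client; the ranking config path, query, and records below are illustrative only:

    from googleapiclient.discovery import build

    service = build("discoveryengine", "v1alpha")

    # The default ranking config path; project and location are placeholders.
    ranking_config = (
        "projects/my-project/locations/global/rankingConfigs/default_ranking_config"
    )

    body = {
        "model": "semantic-ranker-512@latest",
        "query": "how do I renew a passport",
        "topN": 3,
        "records": [
            # Each record carries an id plus at least one of title or content.
            {"id": "1", "title": "Passport renewal", "content": "To renew..."},
            {"id": "2", "title": "Visa applications", "content": "To apply..."},
        ],
    }

    response = (
        service.projects().locations().rankingConfigs()
        .rank(rankingConfig=ranking_config, body=body).execute()
    )

    # Records come back sorted by descending score.
    for record in response.get("records", []):
        print(record["id"], record.get("score"))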
@@ -9120,6 +9574,10 @@ "description": "A specification for configuring the behavior of content search.", "id": "GoogleCloudDiscoveryengineV1alphaSearchRequestContentSearchSpec", "properties": { +"chunkSpec": { +"$ref": "GoogleCloudDiscoveryengineV1alphaSearchRequestContentSearchSpecChunkSpec", +"description": "Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS." +}, "extractiveContentSpec": { "$ref": "GoogleCloudDiscoveryengineV1alphaSearchRequestContentSearchSpecExtractiveContentSpec", "description": "If there is no extractive_content_spec provided, there will be no extractive answer in the search response." @@ -9149,6 +9607,23 @@ }, "type": "object" }, +"GoogleCloudDiscoveryengineV1alphaSearchRequestContentSearchSpecChunkSpec": { +"description": "Specifies the chunk spec to be returned from the search response. Only available if the SearchRequest.ContentSearchSpec.search_result_mode is set to CHUNKS.", +"id": "GoogleCloudDiscoveryengineV1alphaSearchRequestContentSearchSpecChunkSpec", +"properties": { +"numNextChunks": { +"description": "The number of next chunks to be returned for the current chunk. The maximum allowed value is 3. If not specified, no next chunks will be returned.", +"format": "int32", +"type": "integer" +}, +"numPreviousChunks": { +"description": "The number of previous chunks to be returned for the current chunk. The maximum allowed value is 3. If not specified, no previous chunks will be returned.", +"format": "int32", +"type": "integer" +} +}, +"type": "object" +}, "GoogleCloudDiscoveryengineV1alphaSearchRequestContentSearchSpecExtractiveContentSpec": { "description": "A specification for configuring the extractive content in a search response.", "id": "GoogleCloudDiscoveryengineV1alphaSearchRequestContentSearchSpecExtractiveContentSpec",
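A sketch of how `chunkSpec` might be attached to a search request; note that the camel-case `searchResultMode` field name and the serving config path are assumptions inferred from the `search_result_mode` reference in the description, not confirmed by this diff:

    from googleapiclient.discovery import build

    service = build("discoveryengine", "v1alpha")

    # Hypothetical serving config of a data store search app.
    serving_config = (
        "projects/my-project/locations/global/collections/default_collection"
        "/dataStores/my-data-store/servingConfigs/default_search"
    )

    body = {
        "query": "what maternity benefits are covered",
        "contentSearchSpec": {
            # chunkSpec only takes effect when results are returned as chunks.
            "searchResultMode": "CHUNKS",  # assumed spelling of search_result_mode
            "chunkSpec": {
                "numPreviousChunks": 1,  # at most 3
                "numNextChunks": 1,      # at most 3
            },
        },
    }

    response = (
        service.projects().locations().collections().dataStores().servingConfigs()
        .search(servingConfig=serving_config, body=body).execute()
    )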
@@ -9737,6 +10212,13 @@ "description": "Document reference.", "id": "GoogleCloudDiscoveryengineV1alphaSearchResponseSummaryReference", "properties": { +"chunkContents": { +"description": "List of cited chunk contents derived from document content.", +"items": { +"$ref": "GoogleCloudDiscoveryengineV1alphaSearchResponseSummaryReferenceChunkContent" +}, +"type": "array" +}, "document": { "description": "Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`.", "type": "string" @@ -9752,6 +10234,21 @@ }, "type": "object" }, +"GoogleCloudDiscoveryengineV1alphaSearchResponseSummaryReferenceChunkContent": { +"description": "Chunk content.", +"id": "GoogleCloudDiscoveryengineV1alphaSearchResponseSummaryReferenceChunkContent", +"properties": { +"content": { +"description": "Chunk textual content.", +"type": "string" +}, +"pageIdentifier": { +"description": "Page identifier.", +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudDiscoveryengineV1alphaSearchResponseSummarySafetyAttributes": { "description": "Safety Attribute categories and their associated confidence scores.", "id": "GoogleCloudDiscoveryengineV1alphaSearchResponseSummarySafetyAttributes", @@ -9901,13 +10398,15 @@ "SOLUTION_TYPE_UNSPECIFIED", "SOLUTION_TYPE_RECOMMENDATION", "SOLUTION_TYPE_SEARCH", -"SOLUTION_TYPE_CHAT" +"SOLUTION_TYPE_CHAT", +"SOLUTION_TYPE_GENERATIVE_CHAT" ], "enumDescriptions": [ "Default value.", "Used for Recommendations AI.", "Used for Discovery Search.", -"Used for use cases related to the Generative AI agent." +"Used for use cases related to the Generative AI agent.", +"Used for use cases related to the Generative Chat agent. It's used for the Generative chat engine only; the associated data stores must be enrolled with the `SOLUTION_TYPE_CHAT` solution." ], "type": "string" }, @@ -10003,6 +10502,33 @@ }, "type": "object" }, +"GoogleCloudDiscoveryengineV1alphaSpannerSource": { +"description": "The Spanner source for importing data.", +"id": "GoogleCloudDiscoveryengineV1alphaSpannerSource", +"properties": { +"databaseId": { +"description": "Required. The database ID of the source Spanner table.", +"type": "string" +}, +"enableDataBoost": { +"description": "Optional. Whether to apply data boost on Spanner export. Enabling this option will incur additional cost. More info: https://cloud.google.com/spanner/docs/databoost/databoost-overview#billing_and_quotas", +"type": "boolean" +}, +"instanceId": { +"description": "Required. The instance ID of the source Spanner table.", +"type": "string" +}, +"projectId": { +"description": "The project ID that the Spanner source is in with a length limit of 128 characters. If not specified, inherits the project ID from the parent request.", +"type": "string" +}, +"tableId": { +"description": "Required. The table name of the Spanner database that needs to be imported.", +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudDiscoveryengineV1alphaSuggestionDenyListEntry": { "description": "Suggestion deny list entry identifying the phrase to block from suggestions and the applied operation for the phrase.", "id": "GoogleCloudDiscoveryengineV1alphaSuggestionDenyListEntry", @@ -10214,6 +10740,14 @@ }, "type": "array" }, +"metrics": { +"additionalProperties": { +"format": "double", +"type": "number" +}, +"description": "The metrics of the trained model.", +"type": "object" +}, "modelStatus": { "description": "The trained model status. Possible values are: * **bad-data**: The training data quality is bad. * **no-improvement**: Tuning didn't improve performance. Won't deploy. * **in-progress**: Model training is in progress. * **ready**: The model is ready for serving.", "type": "string" }, @@ -10556,6 +11090,10 @@ "description": "Whether to enable safe search.", "type": "boolean" }, +"enableSearchAsYouType": { +"description": "Whether to enable search-as-you-type behavior for the search widget.", +"type": "boolean" +}, "enableSnippetResultSummary": { "description": "Turn on or off summary for each snippets result.", "type": "boolean" }, @@ -10587,12 +11125,14 @@ "enum": [ "INDUSTRY_VERTICAL_UNSPECIFIED", "GENERIC", -"MEDIA" +"MEDIA", +"HEALTHCARE_FHIR" ], "enumDescriptions": [ "Value used when unset.", "The generic vertical for documents that are not specific to any industry vertical.", -"The media industry vertical." +"The media industry vertical.", +"The healthcare FHIR vertical." ], "readOnly": true, "type": "string" }, @@ -10631,13 +11171,15 @@ "SOLUTION_TYPE_UNSPECIFIED", "SOLUTION_TYPE_RECOMMENDATION", "SOLUTION_TYPE_SEARCH", -"SOLUTION_TYPE_CHAT" +"SOLUTION_TYPE_CHAT", +"SOLUTION_TYPE_GENERATIVE_CHAT" ], "enumDescriptions": [ "Default value.", "Used for Recommendations AI.", "Used for Discovery Search.", -"Used for use cases related to the Generative AI agent." +"Used for use cases related to the Generative AI agent.", +"Used for use cases related to the Generative Chat agent. It's used for the Generative chat engine only; the associated data stores must be enrolled with the `SOLUTION_TYPE_CHAT` solution." ], "type": "string" }, @@ -10671,7 +11213,7 @@ "type": "string" }, "name": { -"description": "The name of the collection. It should be collection resource name. Format: `projects/{project_number}/locations/{location}/collections/{collection_id}`. 
For widget service usage, such look up widget config, returned name should be skipped.", +"description": "The name of the collection. It should be collection resource name. Format: `projects/{project_number}/locations/{location}/collections/{collection_id}`. For APIs under WidgetService, such as LookUpWidgetConfig, the project number and location part is erased in this field.", "type": "string" } }, @@ -10691,7 +11233,7 @@ "type": "string" }, "name": { -"description": "The name of the data store. It should be data store resource name Format: `projects/{project_number}/locations/{location}/collections/{collection_id}/dataStores/{data_store_id}`. For widget service usage, such look up widget config, returned name should be skipped.", +"description": "The name of the data store. It should be data store resource name Format: `projects/{project_number}/locations/{location}/collections/{collection_id}/dataStores/{data_store_id}`. For APIs under WidgetService, such as LookUpWidgetConfig, the project number and location part is erased in this field.", "type": "string" } }, @@ -10721,7 +11263,7 @@ "type": "string" }, "name": { -"description": "The name of the data store. It should be data store resource name Format: `projects/{project_number}/locations/{location}/collections/{collection_id}/dataStores/{data_store_id}`. For widget service usage, such look up widget config, returned name should be skipped.", +"description": "The name of the data store. It should be data store resource name Format: `projects/{project_number}/locations/{location}/collections/{collection_id}/dataStores/{data_store_id}`. For APIs under WidgetService, such as LookUpWidgetConfig, the project number and location part is erased in this field.", "type": "string" } }, @@ -10993,12 +11535,14 @@ "enum": [ "INDUSTRY_VERTICAL_UNSPECIFIED", "GENERIC", -"MEDIA" +"MEDIA", +"HEALTHCARE_FHIR" ], "enumDescriptions": [ "Value used when unset.", "The generic vertical for documents that are not specific to any industry vertical.", -"The media industry vertical." +"The media industry vertical.", +"The healthcare FHIR vertical." ], "type": "string" }, @@ -11013,13 +11557,15 @@ "SOLUTION_TYPE_UNSPECIFIED", "SOLUTION_TYPE_RECOMMENDATION", "SOLUTION_TYPE_SEARCH", -"SOLUTION_TYPE_CHAT" +"SOLUTION_TYPE_CHAT", +"SOLUTION_TYPE_GENERATIVE_CHAT" ], "enumDescriptions": [ "Default value.", "Used for Recommendations AI.", "Used for Discovery Search.", -"Used for use cases related to the Generative AI agent." +"Used for use cases related to the Generative AI agent.", +"Used for use cases related to the Generative Chat agent. It's used for Generative chat engine only, the associated data stores must enrolled with `SOLUTION_TYPE_CHAT` solution." ], "type": "string" }, @@ -11246,12 +11792,14 @@ "enum": [ "INDUSTRY_VERTICAL_UNSPECIFIED", "GENERIC", -"MEDIA" +"MEDIA", +"HEALTHCARE_FHIR" ], "enumDescriptions": [ "Value used when unset.", "The generic vertical for documents that are not specific to any industry vertical.", -"The media industry vertical." +"The media industry vertical.", +"The healthcare FHIR vertical." ], "type": "string" }, @@ -11269,13 +11817,15 @@ "SOLUTION_TYPE_UNSPECIFIED", "SOLUTION_TYPE_RECOMMENDATION", "SOLUTION_TYPE_SEARCH", -"SOLUTION_TYPE_CHAT" +"SOLUTION_TYPE_CHAT", +"SOLUTION_TYPE_GENERATIVE_CHAT" ], "enumDescriptions": [ "Default value.", "Used for Recommendations AI.", "Used for Discovery Search.", -"Used for use cases related to the Generative AI agent." 
+"Used for use cases related to the Generative AI agent.", +"Used for use cases related to the Generative Chat agent. It's used for Generative chat engine only, the associated data stores must enrolled with `SOLUTION_TYPE_CHAT` solution." ], "type": "string" }, @@ -11384,6 +11934,17 @@ }, "type": "object" }, +"GoogleCloudDiscoveryengineV1betaGroundingConfig": { +"description": "Grounding configuration.", +"id": "GoogleCloudDiscoveryengineV1betaGroundingConfig", +"properties": { +"name": { +"description": "Required. Name of the GroundingConfig, of the form `projects/{project}/locations/{location}/groundingConfig`.", +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudDiscoveryengineV1betaImportDocumentsMetadata": { "description": "Metadata related to the progress of the ImportDocuments operation. This is returned by the google.longrunning.Operation.metadata field.", "id": "GoogleCloudDiscoveryengineV1betaImportDocumentsMetadata", @@ -11803,6 +12364,14 @@ }, "type": "array" }, +"metrics": { +"additionalProperties": { +"format": "double", +"type": "number" +}, +"description": "The metrics of the trained model.", +"type": "object" +}, "modelStatus": { "description": "The trained model status. Possible values are: * **bad-data**: The training data quality is bad. * **no-improvement**: Tuning didn't improve performance. Won't deploy. * **in-progress**: Model training is in progress. * **ready**: The model is ready for serving.", "type": "string" @@ -11810,6 +12379,17 @@ }, "type": "object" }, +"GoogleCloudDiscoveryengineV1betaTuneEngineMetadata": { +"description": "Metadata associated with a tune operation.", +"id": "GoogleCloudDiscoveryengineV1betaTuneEngineMetadata", +"properties": { +"engine": { +"description": "Required. The resource name of the engine that this tune applies to. Format: `projects/{project_number}/locations/{location_id}/collections/{collection_id}/engines/{engine_id}`", +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudDiscoveryengineV1betaUpdateSchemaMetadata": { "description": "Metadata for UpdateSchema LRO.", "id": "GoogleCloudDiscoveryengineV1betaUpdateSchemaMetadata", diff --git a/googleapiclient/discovery_cache/documents/discoveryengine.v1beta.json b/googleapiclient/discovery_cache/documents/discoveryengine.v1beta.json index f98e24386c6..8b978be741b 100644 --- a/googleapiclient/discovery_cache/documents/discoveryengine.v1beta.json +++ b/googleapiclient/discovery_cache/documents/discoveryengine.v1beta.json @@ -2245,6 +2245,90 @@ "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] +}, +"pause": { +"description": "Pauses the training of an existing engine. Only applicable if SolutionType is SOLUTION_TYPE_RECOMMENDATION.", +"flatPath": "v1beta/projects/{projectsId}/locations/{locationsId}/collections/{collectionsId}/engines/{enginesId}:pause", +"httpMethod": "POST", +"id": "discoveryengine.projects.locations.collections.engines.pause", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. The name of the engine to pause. 
Format: `projects/{project_number}/locations/{location_id}/collections/{collection_id}/engines/{engine_id}`", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/collections/[^/]+/engines/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1beta/{+name}:pause", +"request": { +"$ref": "GoogleCloudDiscoveryengineV1betaPauseEngineRequest" +}, +"response": { +"$ref": "GoogleCloudDiscoveryengineV1betaEngine" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"resume": { +"description": "Resumes the training of an existing engine. Only applicable if SolutionType is SOLUTION_TYPE_RECOMMENDATION.", +"flatPath": "v1beta/projects/{projectsId}/locations/{locationsId}/collections/{collectionsId}/engines/{enginesId}:resume", +"httpMethod": "POST", +"id": "discoveryengine.projects.locations.collections.engines.resume", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. The name of the engine to resume. Format: `projects/{project_number}/locations/{location_id}/collections/{collection_id}/engines/{engine_id}`", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/collections/[^/]+/engines/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1beta/{+name}:resume", +"request": { +"$ref": "GoogleCloudDiscoveryengineV1betaResumeEngineRequest" +}, +"response": { +"$ref": "GoogleCloudDiscoveryengineV1betaEngine" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"tune": { +"description": "Tunes an existing engine. Only applicable if SolutionType is SOLUTION_TYPE_RECOMMENDATION.", +"flatPath": "v1beta/projects/{projectsId}/locations/{locationsId}/collections/{collectionsId}/engines/{enginesId}:tune", +"httpMethod": "POST", +"id": "discoveryengine.projects.locations.collections.engines.tune", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. The resource name of the engine to tune. Format: `projects/{project_number}/locations/{location_id}/collections/{collection_id}/engines/{engine_id}`", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/collections/[^/]+/engines/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1beta/{+name}:tune", +"request": { +"$ref": "GoogleCloudDiscoveryengineV1betaTuneEngineRequest" +}, +"response": { +"$ref": "GoogleLongrunningOperation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] } }, "resources": { @@ -4401,6 +4485,38 @@ ] } } +}, +"rankingConfigs": { +"methods": { +"rank": { +"description": "Ranks a list of text records based on the given input query.", +"flatPath": "v1beta/projects/{projectsId}/locations/{locationsId}/rankingConfigs/{rankingConfigsId}:rank", +"httpMethod": "POST", +"id": "discoveryengine.projects.locations.rankingConfigs.rank", +"parameterOrder": [ +"rankingConfig" +], +"parameters": { +"rankingConfig": { +"description": "Required. 
The resource name of the rank service config, such as `projects/{project_num}/locations/{location_id}/rankingConfigs/default_ranking_config`.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/rankingConfigs/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1beta/{+rankingConfig}:rank", +"request": { +"$ref": "GoogleCloudDiscoveryengineV1betaRankRequest" +}, +"response": { +"$ref": "GoogleCloudDiscoveryengineV1betaRankResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +} +} } } },
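For reference, a minimal sketch of driving the new engine methods from the generated Python client; the engine resource name is a hypothetical placeholder. Per the definitions above, tune returns a long-running operation while pause and resume return the engine resource directly:

    from googleapiclient.discovery import build

    service = build("discoveryengine", "v1beta")

    # Hypothetical recommendation engine resource name.
    engine = (
        "projects/my-project/locations/global/collections/default_collection"
        "/engines/my-engine"
    )

    engines = service.projects().locations().collections().engines()

    # tune starts a long-running operation whose metadata is a
    # GoogleCloudDiscoveryengineV1betaTuneEngineMetadata message.
    operation = engines.tune(name=engine, body={}).execute()
    print(operation["name"])

    # pause and resume take empty request bodies and return the engine.
    engines.pause(name=engine, body={}).execute()
    engines.resume(name=engine, body={}).execute()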
@@ -4477,7 +4593,7 @@ } } }, -"revision": "20240318", +"revision": "20240329", "rootUrl": "https://discoveryengine.googleapis.com/", "schemas": { "GoogleApiHttpBody": { @@ -4769,12 +4885,14 @@ "enum": [ "INDUSTRY_VERTICAL_UNSPECIFIED", "GENERIC", -"MEDIA" +"MEDIA", +"HEALTHCARE_FHIR" ], "enumDescriptions": [ "Value used when unset.", "The generic vertical for documents that are not specific to any industry vertical.", -"The media industry vertical." +"The media industry vertical.", +"The healthcare FHIR vertical." ], "type": "string" }, @@ -4789,13 +4907,15 @@ "SOLUTION_TYPE_UNSPECIFIED", "SOLUTION_TYPE_RECOMMENDATION", "SOLUTION_TYPE_SEARCH", -"SOLUTION_TYPE_CHAT" +"SOLUTION_TYPE_CHAT", +"SOLUTION_TYPE_GENERATIVE_CHAT" ], "enumDescriptions": [ "Default value.", "Used for Recommendations AI.", "Used for Discovery Search.", -"Used for use cases related to the Generative AI agent." +"Used for use cases related to the Generative AI agent.", +"Used for use cases related to the Generative Chat agent. It's used for the Generative chat engine only; the associated data stores must be enrolled with the `SOLUTION_TYPE_CHAT` solution." ], "type": "string" }, @@ -5022,12 +5142,14 @@ "enum": [ "INDUSTRY_VERTICAL_UNSPECIFIED", "GENERIC", -"MEDIA" +"MEDIA", +"HEALTHCARE_FHIR" ], "enumDescriptions": [ "Value used when unset.", "The generic vertical for documents that are not specific to any industry vertical.", -"The media industry vertical." +"The media industry vertical.", +"The healthcare FHIR vertical." ], "type": "string" }, @@ -5045,13 +5167,15 @@ "SOLUTION_TYPE_UNSPECIFIED", "SOLUTION_TYPE_RECOMMENDATION", "SOLUTION_TYPE_SEARCH", -"SOLUTION_TYPE_CHAT" +"SOLUTION_TYPE_CHAT", +"SOLUTION_TYPE_GENERATIVE_CHAT" ], "enumDescriptions": [ "Default value.", "Used for Recommendations AI.", "Used for Discovery Search.", -"Used for use cases related to the Generative AI agent." +"Used for use cases related to the Generative AI agent.", +"Used for use cases related to the Generative Chat agent. It's used for the Generative chat engine only; the associated data stores must be enrolled with the `SOLUTION_TYPE_CHAT` solution." ], "type": "string" }, @@ -5748,12 +5872,14 @@ "enum": [ "INDUSTRY_VERTICAL_UNSPECIFIED", "GENERIC", -"MEDIA" +"MEDIA", +"HEALTHCARE_FHIR" ], "enumDescriptions": [ "Value used when unset.", "The generic vertical for documents that are not specific to any industry vertical.", -"The media industry vertical." +"The media industry vertical.", +"The healthcare FHIR vertical." ], "type": "string" }, @@ -5768,13 +5894,15 @@ "SOLUTION_TYPE_UNSPECIFIED", "SOLUTION_TYPE_RECOMMENDATION", "SOLUTION_TYPE_SEARCH", -"SOLUTION_TYPE_CHAT" +"SOLUTION_TYPE_CHAT", +"SOLUTION_TYPE_GENERATIVE_CHAT" ], "enumDescriptions": [ "Default value.", "Used for Recommendations AI.", "Used for Discovery Search.", -"Used for use cases related to the Generative AI agent." +"Used for use cases related to the Generative AI agent.", +"Used for use cases related to the Generative Chat agent. It's used for the Generative chat engine only; the associated data stores must be enrolled with the `SOLUTION_TYPE_CHAT` solution." ], "type": "string" }, @@ -6042,12 +6170,14 @@ "enum": [ "INDUSTRY_VERTICAL_UNSPECIFIED", "GENERIC", -"MEDIA" +"MEDIA", +"HEALTHCARE_FHIR" ], "enumDescriptions": [ "Value used when unset.", "The generic vertical for documents that are not specific to any industry vertical.", -"The media industry vertical." +"The media industry vertical.", +"The healthcare FHIR vertical." ], "type": "string" }, @@ -6078,13 +6208,15 @@ "SOLUTION_TYPE_UNSPECIFIED", "SOLUTION_TYPE_RECOMMENDATION", "SOLUTION_TYPE_SEARCH", -"SOLUTION_TYPE_CHAT" +"SOLUTION_TYPE_CHAT", +"SOLUTION_TYPE_GENERATIVE_CHAT" ], "enumDescriptions": [ "Default value.", "Used for Recommendations AI.", "Used for Discovery Search.", -"Used for use cases related to the Generative AI agent." +"Used for use cases related to the Generative AI agent.", +"Used for use cases related to the Generative Chat agent. It's used for the Generative chat engine only; the associated data stores must be enrolled with the `SOLUTION_TYPE_CHAT` solution." ], "type": "string" }, @@ -6452,6 +6584,17 @@ }, "type": "object" }, +"GoogleCloudDiscoveryengineV1alphaGroundingConfig": { +"description": "Grounding configuration.", +"id": "GoogleCloudDiscoveryengineV1alphaGroundingConfig", +"properties": { +"name": { +"description": "Required. Name of the GroundingConfig, of the form `projects/{project}/locations/{location}/groundingConfig`.", +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudDiscoveryengineV1alphaIdpConfig": { "description": "Identity Provider Config.", "id": "GoogleCloudDiscoveryengineV1alphaIdpConfig", @@ -6488,6 +6631,47 @@ }, "type": "object" }, +"GoogleCloudDiscoveryengineV1alphaImportCompletionSuggestionsMetadata": { +"description": "Metadata related to the progress of the ImportCompletionSuggestions operation. This will be returned by the google.longrunning.Operation.metadata field.", +"id": "GoogleCloudDiscoveryengineV1alphaImportCompletionSuggestionsMetadata", +"properties": { +"createTime": { +"description": "Operation create time.", +"format": "google-datetime", +"type": "string" +}, +"updateTime": { +"description": "Operation last update time. If the operation is done, this is also the finish time.", +"format": "google-datetime", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudDiscoveryengineV1alphaImportCompletionSuggestionsResponse": { +"description": "Response of the CompletionService.ImportCompletionSuggestions method. If the long running operation is done, this message is returned by the google.longrunning.Operations.response field if the operation is successful.", +"id": "GoogleCloudDiscoveryengineV1alphaImportCompletionSuggestionsResponse", +"properties": { +"errorSamples": { +"description": "A sample of errors encountered while processing the request.", +"items": { +"$ref": "GoogleRpcStatus" +}, +"type": "array" +}, +"failureCount": { +"description": "Count of CompletionSuggestions that failed to be imported.", +"format": "int64", +"type": "string" +}, +"successCount": { +"description": "Count of CompletionSuggestions successfully imported.", +"format": "int64", +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudDiscoveryengineV1alphaImportDocumentsMetadata": { "description": "Metadata related to the progress of the ImportDocuments operation. 
This is returned by the google.longrunning.Operation.metadata field.", "id": "GoogleCloudDiscoveryengineV1alphaImportDocumentsMetadata", @@ -7062,6 +7246,14 @@ }, "type": "array" }, +"metrics": { +"additionalProperties": { +"format": "double", +"type": "number" +}, +"description": "The metrics of the trained model.", +"type": "object" +}, "modelStatus": { "description": "The trained model status. Possible values are: * **bad-data**: The training data quality is bad. * **no-improvement**: Tuning didn't improve performance. Won't deploy. * **in-progress**: Model training is in progress. * **ready**: The model is ready for serving.", "type": "string" }, @@ -7202,6 +7394,186 @@ }, "type": "object" }, +"GoogleCloudDiscoveryengineV1betaBigtableOptions": { +"description": "The Bigtable Options object that contains information to support the import.", +"id": "GoogleCloudDiscoveryengineV1betaBigtableOptions", +"properties": { +"families": { +"additionalProperties": { +"$ref": "GoogleCloudDiscoveryengineV1betaBigtableOptionsBigtableColumnFamily" +}, +"description": "The mapping from family names to an object that contains column family level information for the given column family. If a family is not present in this map, it will be ignored.", +"type": "object" +}, +"keyFieldName": { +"description": "The field name used for saving the row key value in the UCS document. The name has to match `a-zA-Z0-9*`.", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudDiscoveryengineV1betaBigtableOptionsBigtableColumn": { +"id": "GoogleCloudDiscoveryengineV1betaBigtableOptionsBigtableColumn", +"properties": { +"encoding": { +"description": "Optional. The encoding mode of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using the HBase Bytes.toBytes family of functions. This can be overridden for a specific column by listing that column in 'columns' and specifying an encoding for it.", +"enum": [ +"ENCODING_UNSPECIFIED", +"TEXT", +"BINARY" +], +"enumDescriptions": [ +"", +"", +"" +], +"type": "string" +}, +"fieldName": { +"description": "The field name to use for this column in the UCS document. The name has to match `a-zA-Z0-9*`. If not set, we will parse it from the qualifier bytes with best effort. However, field name collisions could happen, where parsing behavior is undefined.", +"type": "string" +}, +"qualifier": { +"description": "Required. Qualifier of the column. If it cannot be decoded with UTF-8, store a base-64 encoded string.", +"format": "byte", +"type": "string" +}, +"type": { +"description": "Optional. The type of values in this column family. The values are expected to be encoded using the HBase Bytes.toBytes function when the encoding value is set to BINARY.", +"enum": [ +"TYPE_UNSPECIFIED", +"STRING", +"NUMBER", +"INTEGER", +"VAR_INTEGER", +"BIG_NUMERIC", +"BOOLEAN", +"JSON" +], +"enumDescriptions": [ +"", +"", +"", +"", +"", +"", +"", +"" +], +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudDiscoveryengineV1betaBigtableOptionsBigtableColumnFamily": { +"id": "GoogleCloudDiscoveryengineV1betaBigtableOptionsBigtableColumnFamily", +"properties": { +"columns": { +"description": "The list of objects that contain column level information for each column. If a column is not present in this list, it will be ignored.", +"items": { +"$ref": "GoogleCloudDiscoveryengineV1betaBigtableOptionsBigtableColumn" +}, +"type": "array" +}, +"encoding": { +"description": "Optional. 
The encoding mode of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using the HBase Bytes.toBytes family of functions. This can be overridden for a specific column by listing that column in 'columns' and specifying an encoding for it.", +"enum": [ +"ENCODING_UNSPECIFIED", +"TEXT", +"BINARY" +], +"enumDescriptions": [ +"", +"", +"" +], +"type": "string" +}, +"fieldName": { +"description": "The field name to use for this column family in the UCS document. The name has to match `a-zA-Z0-9*`. If not set, we will parse it from the family name with best effort. However, due to different naming patterns, there could be field name collisions, where parsing behavior is undefined.", +"type": "string" +}, +"type": { +"description": "Optional. The type of values in this column family. The values are expected to be encoded using the HBase Bytes.toBytes function when the encoding value is set to BINARY.", +"enum": [ +"TYPE_UNSPECIFIED", +"STRING", +"NUMBER", +"INTEGER", +"VAR_INTEGER", +"BIG_NUMERIC", +"BOOLEAN", +"JSON" +], +"enumDescriptions": [ +"", +"", +"", +"", +"", +"", +"", +"" +], +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudDiscoveryengineV1betaBigtableSource": { +"description": "The Cloud Bigtable source for importing data.", +"id": "GoogleCloudDiscoveryengineV1betaBigtableSource", +"properties": { +"bigtableOptions": { +"$ref": "GoogleCloudDiscoveryengineV1betaBigtableOptions", +"description": "Required. Bigtable options that contain information needed when parsing data into typed structures. For example, column type annotations." +}, +"instanceId": { +"description": "Required. The instance ID of the Cloud Bigtable that needs to be exported.", +"type": "string" +}, +"projectId": { +"description": "The project ID (can be project # or ID) that the Bigtable source is in with a length limit of 128 characters. If not specified, inherits the project ID from the parent request.", +"type": "string" +}, +"tableId": { +"description": "Required. The table ID of the Cloud Bigtable that needs to be exported.", +"type": "string" +} +}, +"type": "object" +},
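A hypothetical `bigtableSource` payload illustrating how `bigtableOptions` maps column families and columns into document fields; every family, column, and identifier below is a placeholder:

    # A sketch of a bigtableSource for an ImportDocumentsRequest body.
    bigtable_source = {
        "projectId": "my-project",
        "instanceId": "my-bigtable-instance",
        "tableId": "my-table",
        "bigtableOptions": {
            # Where the row key value lands in the imported document.
            "keyFieldName": "doc_id",
            "families": {
                # Families missing from this map are ignored on import.
                "info": {
                    "fieldName": "info",
                    "encoding": "TEXT",
                    "type": "STRING",
                    "columns": [
                        {
                            # Column qualifiers are base64-encoded bytes;
                            # "dGl0bGU=" decodes to "title".
                            "qualifier": "dGl0bGU=",
                            "fieldName": "title",
                            "type": "STRING",
                            "encoding": "TEXT",
                        }
                    ],
                }
            },
        },
    }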
+"GoogleCloudDiscoveryengineV1betaCloudSqlSource": { +"description": "Cloud SQL source to import data from.", +"id": "GoogleCloudDiscoveryengineV1betaCloudSqlSource", +"properties": { +"databaseId": { +"description": "Required. The Cloud SQL database to copy the data from with a length limit of 256 characters.", +"type": "string" +}, +"gcsStagingDir": { +"description": "Optional. Intermediate Cloud Storage directory used for the import with a length limit of 2,000 characters. Can be specified if one wants to have the Cloud SQL export to a specific Cloud Storage directory. Please ensure that the Cloud SQL service account has the necessary GCS Storage Admin permissions to access the specified GCS directory.", +"type": "string" +}, +"instanceId": { +"description": "Required. The Cloud SQL instance to copy the data from with a length limit of 256 characters.", +"type": "string" +}, +"offload": { +"description": "Optional. Option for serverless export. Enabling this option will incur additional cost. More info: https://cloud.google.com/sql/pricing#serverless", +"type": "boolean" +}, +"projectId": { +"description": "Optional. The project ID (can be project # or ID) that the Cloud SQL source is in with a length limit of 128 characters. If not specified, inherits the project ID from the parent request.", +"type": "string" +}, +"tableId": { +"description": "Required. The Cloud SQL table to copy the data from with a length limit of 256 characters.", +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudDiscoveryengineV1betaCompleteQueryResponse": { "description": "Response message for CompletionService.CompleteQuery method.", "id": "GoogleCloudDiscoveryengineV1betaCompleteQueryResponse", @@ -7561,12 +7933,14 @@ "enum": [ "INDUSTRY_VERTICAL_UNSPECIFIED", "GENERIC", -"MEDIA" +"MEDIA", +"HEALTHCARE_FHIR" ], "enumDescriptions": [ "Value used when unset.", "The generic vertical for documents that are not specific to any industry vertical.", -"The media industry vertical." +"The media industry vertical.", +"The healthcare FHIR vertical." ], "type": "string" }, @@ -7581,13 +7955,15 @@ "SOLUTION_TYPE_UNSPECIFIED", "SOLUTION_TYPE_RECOMMENDATION", "SOLUTION_TYPE_SEARCH", -"SOLUTION_TYPE_CHAT" +"SOLUTION_TYPE_CHAT", +"SOLUTION_TYPE_GENERATIVE_CHAT" ], "enumDescriptions": [ "Default value.", "Used for Recommendations AI.", "Used for Discovery Search.", -"Used for use cases related to the Generative AI agent." +"Used for use cases related to the Generative AI agent.", +"Used for use cases related to the Generative Chat agent. It's used for the Generative chat engine only; the associated data stores must be enrolled with the `SOLUTION_TYPE_CHAT` solution." ], "type": "string" }, @@ -7957,12 +8333,14 @@ "enum": [ "INDUSTRY_VERTICAL_UNSPECIFIED", "GENERIC", -"MEDIA" +"MEDIA", +"HEALTHCARE_FHIR" ], "enumDescriptions": [ "Value used when unset.", "The generic vertical for documents that are not specific to any industry vertical.", -"The media industry vertical." +"The media industry vertical.", +"The healthcare FHIR vertical." ], "type": "string" }, @@ -7980,13 +8358,15 @@ "SOLUTION_TYPE_UNSPECIFIED", "SOLUTION_TYPE_RECOMMENDATION", "SOLUTION_TYPE_SEARCH", -"SOLUTION_TYPE_CHAT" +"SOLUTION_TYPE_CHAT", +"SOLUTION_TYPE_GENERATIVE_CHAT" ], "enumDescriptions": [ "Default value.", "Used for Recommendations AI.", "Used for Discovery Search.", -"Used for use cases related to the Generative AI agent." +"Used for use cases related to the Generative AI agent.", +"Used for use cases related to the Generative Chat agent. It's used for the Generative chat engine only; the associated data stores must be enrolled with the `SOLUTION_TYPE_CHAT` solution." ], "type": "string" }, @@ -8118,6 +8498,44 @@ }, "type": "object" }, +"GoogleCloudDiscoveryengineV1betaFhirStoreSource": { +"description": "Cloud FhirStore source to import data from.", +"id": "GoogleCloudDiscoveryengineV1betaFhirStoreSource", +"properties": { +"fhirStore": { +"description": "Required. The full resource name of the FHIR store to import data from, in the format of `projects/{project}/locations/{location}/datasets/{dataset}/fhirStores/{fhir_store}`.", +"type": "string" +}, +"gcsStagingDir": { +"description": "Intermediate Cloud Storage directory used for the import with a length limit of 2,000 characters. Can be specified if one wants to have the FhirStore export to a specific Cloud Storage directory.", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudDiscoveryengineV1betaFirestoreSource": { +"description": "Firestore source to import data from.", +"id": "GoogleCloudDiscoveryengineV1betaFirestoreSource", +"properties": { +"collectionId": { +"description": "Required. 
The Firestore collection to copy the data from with a length limit of 1500 characters.", +"type": "string" +}, +"databaseId": { +"description": "Required. The Firestore database to copy the data from with a length limit of 256 characters.", +"type": "string" +}, +"gcsStagingDir": { +"description": "Optional. Intermediate Cloud Storage directory used for the import with a length limit of 2,000 characters. Can be specified if one wants to have the Firestore export to a specific Cloud Storage directory. Please ensure that the Firestore service account has the necessary GCS Storage Admin permissions to access the specified GCS directory.", +"type": "string" +}, +"projectId": { +"description": "Optional. The project ID (can be project # or ID) that the Firestore source is in with a length limit of 128 characters. If not specified, inherits the project ID from the parent request.", +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudDiscoveryengineV1betaGcsSource": { "description": "Cloud Storage location for input content.", "id": "GoogleCloudDiscoveryengineV1betaGcsSource", @@ -8136,6 +8554,17 @@ }, "type": "object" }, +"GoogleCloudDiscoveryengineV1betaGroundingConfig": { +"description": "Grounding configuration.", +"id": "GoogleCloudDiscoveryengineV1betaGroundingConfig", +"properties": { +"name": { +"description": "Required. Name of the GroundingConfig, of the form `projects/{project}/locations/{location}/groundingConfig`.", +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudDiscoveryengineV1betaImportDocumentsMetadata": { "description": "Metadata related to the progress of the ImportDocuments operation. This is returned by the google.longrunning.Operation.metadata field.", "id": "GoogleCloudDiscoveryengineV1betaImportDocumentsMetadata", @@ -8168,23 +8597,39 @@ "id": "GoogleCloudDiscoveryengineV1betaImportDocumentsRequest", "properties": { "autoGenerateIds": { -"description": "Whether to automatically generate IDs for the documents if absent. If set to `true`, Document.ids are automatically generated based on the hash of the payload, where IDs may not be consistent during multiple imports. In which case ReconciliationMode.FULL is highly recommended to avoid duplicate contents. If unset or set to `false`, Document.ids have to be specified using id_field, otherwise, documents without IDs fail to be imported. Only set this field when using GcsSource or BigQuerySource, and when GcsSource.data_schema or BigQuerySource.data_schema is `custom` or `csv`. Otherwise, an INVALID_ARGUMENT error is thrown.", +"description": "Whether to automatically generate IDs for the documents if absent. If set to `true`, Document.ids are automatically generated based on the hash of the payload, where IDs may not be consistent during multiple imports. In that case, ReconciliationMode.FULL is highly recommended to avoid duplicate contents. If unset or set to `false`, Document.ids have to be specified using id_field; otherwise, documents without IDs fail to be imported. Supported data sources: * GcsSource. GcsSource.data_schema must be `custom` or `csv`. Otherwise, an INVALID_ARGUMENT error is thrown. * BigQuerySource. BigQuerySource.data_schema must be `custom` or `csv`. Otherwise, an INVALID_ARGUMENT error is thrown. * SpannerSource * CloudSqlSource * FirestoreSource * BigtableSource", "type": "boolean" }, "bigquerySource": { "$ref": "GoogleCloudDiscoveryengineV1betaBigQuerySource", "description": "BigQuery input source."
}, +"bigtableSource": { +"$ref": "GoogleCloudDiscoveryengineV1betaBigtableSource", +"description": "Cloud Bigtable input source." +}, +"cloudSqlSource": { +"$ref": "GoogleCloudDiscoveryengineV1betaCloudSqlSource", +"description": "Cloud SQL input source." +}, "errorConfig": { "$ref": "GoogleCloudDiscoveryengineV1betaImportErrorConfig", "description": "The desired location of errors incurred during the Import." }, +"fhirStoreSource": { +"$ref": "GoogleCloudDiscoveryengineV1betaFhirStoreSource", +"description": "FhirStore input source." +}, +"firestoreSource": { +"$ref": "GoogleCloudDiscoveryengineV1betaFirestoreSource", +"description": "Firestore input source." +}, "gcsSource": { "$ref": "GoogleCloudDiscoveryengineV1betaGcsSource", "description": "Cloud Storage location for the input content." }, "idField": { -"description": "The field in the Cloud Storage and BigQuery sources that indicates the unique IDs of the documents. For GcsSource it is the key of the JSON field. For instance, `my_id` for JSON `{\"my_id\": \"some_uuid\"}`. For BigQuerySource it is the column name of the BigQuery table where the unique ids are stored. The values of the JSON field or the BigQuery column are used as the Document.ids. The JSON field or the BigQuery column must be of string type, and the values must be set as valid strings conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) with 1-63 characters. Otherwise, documents without valid IDs fail to be imported. Only set this field when using GcsSource or BigQuerySource, and when GcsSource.data_schema or BigQuerySource.data_schema is `custom`. And only set this field when auto_generate_ids is unset or set as `false`. Otherwise, an INVALID_ARGUMENT error is thrown. If it is unset, a default value `_id` is used when importing from the allowed data sources.", +"description": "Indicates the ID field or column to be used as the unique IDs of the documents. For GcsSource it is the key of the JSON field. For instance, `my_id` for JSON `{\"my_id\": \"some_uuid\"}`. For others, it may be the column name of the table where the unique IDs are stored. The values of the JSON field or the table column are used as the Document.ids. The JSON field or the table column must be of string type, and the values must be set as valid strings that conform to [RFC-1034](https://tools.ietf.org/html/rfc1034) with 1-63 characters. Otherwise, documents without valid IDs fail to be imported. Only set this field when auto_generate_ids is unset or set as `false`. Otherwise, an INVALID_ARGUMENT error is thrown. If it is unset, a default value `_id` is used when importing from the allowed data sources. Supported data sources: * GcsSource. GcsSource.data_schema must be `custom` or `csv`. Otherwise, an INVALID_ARGUMENT error is thrown. * BigQuerySource. BigQuerySource.data_schema must be `custom` or `csv`. Otherwise, an INVALID_ARGUMENT error is thrown. * SpannerSource * CloudSqlSource * FirestoreSource * BigtableSource", "type": "string" }, "inlineSource": { @@ -8204,6 +8649,10 @@ "Calculates diff and replaces the entire document dataset. Existing documents may be deleted if they are not present in the source location." ], "type": "string" +}, +"spannerSource": { +"$ref": "GoogleCloudDiscoveryengineV1betaSpannerSource", +"description": "Spanner input source." } } }, "type": "object"
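And a hedged sketch of the corresponding v1beta import call using the new `cloudSqlSource`, with `idField` supplied instead of auto-generated IDs; all identifiers are placeholders:

    from googleapiclient.discovery import build

    service = build("discoveryengine", "v1beta")

    parent = (
        "projects/my-project/locations/global/collections/default_collection"
        "/dataStores/my-data-store/branches/default_branch"
    )

    body = {
        "cloudSqlSource": {
            "projectId": "my-project",
            "instanceId": "my-sql-instance",
            "databaseId": "my-database",
            "tableId": "documents",
            "gcsStagingDir": "gs://my-bucket/staging",  # optional staging location
            "offload": True,  # optional serverless export; extra cost applies
        },
        # idField requires autoGenerateIds to be unset or false.
        "idField": "my_id",
        "reconciliationMode": "FULL",
    }

    operation = (
        service.projects().locations().collections().dataStores().branches()
        .documents().import_(parent=parent, body=body).execute()
    )
    print(operation["name"])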
@@ -8636,6 +9085,12 @@ }, "type": "object" }, +"GoogleCloudDiscoveryengineV1betaPauseEngineRequest": { +"description": "Request for pausing training of an engine.", +"id": "GoogleCloudDiscoveryengineV1betaPauseEngineRequest", +"properties": {}, +"type": "object" +}, "GoogleCloudDiscoveryengineV1betaPurgeDocumentsMetadata": { "description": "Metadata related to the progress of the PurgeDocuments operation. This will be returned by the google.longrunning.Operation.metadata field.", "id": "GoogleCloudDiscoveryengineV1betaPurgeDocumentsMetadata", @@ -8744,6 +9199,75 @@ }, "type": "object" }, +"GoogleCloudDiscoveryengineV1betaRankRequest": { +"description": "Request message for RankService.Rank method.", +"id": "GoogleCloudDiscoveryengineV1betaRankRequest", +"properties": { +"ignoreRecordDetailsInResponse": { +"description": "If true, the response will contain only record ID and score. By default, it is false and the response will contain record details.", +"type": "boolean" +}, +"model": { +"description": "The identifier of the model to use. It is one of: * `semantic-ranker-512@latest`: Semantic ranking model with a maximum input token size of 512. It is set to `semantic-ranker-512@latest` by default if unspecified.", +"type": "string" +}, +"query": { +"description": "The query to use.", +"type": "string" +}, +"records": { +"description": "Required. A list of records to rank.", +"items": { +"$ref": "GoogleCloudDiscoveryengineV1betaRankingRecord" +}, +"type": "array" +}, +"topN": { +"description": "The number of results to return. If this is unset or not greater than zero, all results are returned.", +"format": "int32", +"type": "integer" +} +}, +"type": "object" +}, +"GoogleCloudDiscoveryengineV1betaRankResponse": { +"description": "Response message for RankService.Rank method.", +"id": "GoogleCloudDiscoveryengineV1betaRankResponse", +"properties": { +"records": { +"description": "A list of records sorted by descending score.", +"items": { +"$ref": "GoogleCloudDiscoveryengineV1betaRankingRecord" +}, +"type": "array" +} +}, +"type": "object" +}, +"GoogleCloudDiscoveryengineV1betaRankingRecord": { +"description": "Record message for RankService.Rank method.", +"id": "GoogleCloudDiscoveryengineV1betaRankingRecord", +"properties": { +"content": { +"description": "The content of the record. Empty by default. At least one of title or content should be set, otherwise an INVALID_ARGUMENT error is thrown.", +"type": "string" +}, +"id": { +"description": "The unique ID to represent the record.", +"type": "string" +}, +"score": { +"description": "The score of this record based on the given query and selected model.", +"format": "float", +"type": "number" +}, +"title": { +"description": "The title of the record. Empty by default. 
At least one of title or content should be set, otherwise an INVALID_ARGUMENT error is thrown.", +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudDiscoveryengineV1betaRecommendRequest": { "description": "Request message for Recommend method.", "id": "GoogleCloudDiscoveryengineV1betaRecommendRequest", @@ -8897,6 +9421,12 @@ }, "type": "object" }, +"GoogleCloudDiscoveryengineV1betaResumeEngineRequest": { +"description": "Request for resuming training of an engine.", +"id": "GoogleCloudDiscoveryengineV1betaResumeEngineRequest", +"properties": {}, +"type": "object" +}, "GoogleCloudDiscoveryengineV1betaSchema": { "description": "Defines the structure and layout of a type of document data.", "id": "GoogleCloudDiscoveryengineV1betaSchema", @@ -9682,6 +10212,13 @@ "description": "Document reference.", "id": "GoogleCloudDiscoveryengineV1betaSearchResponseSummaryReference", "properties": { +"chunkContents": { +"description": "List of cited chunk contents derived from document content.", +"items": { +"$ref": "GoogleCloudDiscoveryengineV1betaSearchResponseSummaryReferenceChunkContent" +}, +"type": "array" +}, "document": { "description": "Required. Document.name of the document. Full resource name of the referenced document, in the format `projects/*/locations/*/collections/*/dataStores/*/branches/*/documents/*`.", "type": "string" @@ -9697,6 +10234,21 @@ }, "type": "object" }, +"GoogleCloudDiscoveryengineV1betaSearchResponseSummaryReferenceChunkContent": { +"description": "Chunk content.", +"id": "GoogleCloudDiscoveryengineV1betaSearchResponseSummaryReferenceChunkContent", +"properties": { +"content": { +"description": "Chunk textual content.", +"type": "string" +}, +"pageIdentifier": { +"description": "Page identifier.", +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudDiscoveryengineV1betaSearchResponseSummarySafetyAttributes": { "description": "Safety Attribute categories and their associated confidence scores.", "id": "GoogleCloudDiscoveryengineV1betaSearchResponseSummarySafetyAttributes", @@ -9838,13 +10390,15 @@ "SOLUTION_TYPE_UNSPECIFIED", "SOLUTION_TYPE_RECOMMENDATION", "SOLUTION_TYPE_SEARCH", -"SOLUTION_TYPE_CHAT" +"SOLUTION_TYPE_CHAT", +"SOLUTION_TYPE_GENERATIVE_CHAT" ], "enumDescriptions": [ "Default value.", "Used for Recommendations AI.", "Used for Discovery Search.", -"Used for use cases related to the Generative AI agent." +"Used for use cases related to the Generative AI agent.", +"Used for use cases related to the Generative Chat agent. It's used for the Generative chat engine only; the associated data stores must be enrolled with the `SOLUTION_TYPE_CHAT` solution." ], "type": "string" }, @@ -9940,6 +10494,33 @@ }, "type": "object" }, +"GoogleCloudDiscoveryengineV1betaSpannerSource": { +"description": "The Spanner source for importing data.", +"id": "GoogleCloudDiscoveryengineV1betaSpannerSource", +"properties": { +"databaseId": { +"description": "Required. The database ID of the source Spanner table.", +"type": "string" +}, +"enableDataBoost": { +"description": "Optional. Whether to apply data boost on Spanner export. Enabling this option will incur additional cost. More info: https://cloud.google.com/spanner/docs/databoost/databoost-overview#billing_and_quotas", +"type": "boolean" +}, +"instanceId": { +"description": "Required. The instance ID of the source Spanner table.", +"type": "string" +}, +"projectId": { +"description": "The project ID that the Spanner source is in with a length limit of 128 characters. 
If not specified, inherits the project ID from the parent request.", +"type": "string" +}, +"tableId": { +"description": "Required. The table name of the Spanner database that needs to be imported.", +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudDiscoveryengineV1betaSuggestionDenyListEntry": { "description": "Suggestion deny list entry identifying the phrase to block from suggestions and the applied operation for the phrase.", "id": "GoogleCloudDiscoveryengineV1betaSuggestionDenyListEntry", @@ -10151,6 +10732,14 @@ }, "type": "array" }, +"metrics": { +"additionalProperties": { +"format": "double", +"type": "number" +}, +"description": "The metrics of the trained model.", +"type": "object" +}, "modelStatus": { "description": "The trained model status. Possible values are: * **bad-data**: The training data quality is bad. * **no-improvement**: Tuning didn't improve performance. Won't deploy. * **in-progress**: Model training is in progress. * **ready**: The model is ready for serving.", "type": "string" @@ -10193,6 +10782,23 @@ }, "type": "object" }, +"GoogleCloudDiscoveryengineV1betaTuneEngineMetadata": { +"description": "Metadata associated with a tune operation.", +"id": "GoogleCloudDiscoveryengineV1betaTuneEngineMetadata", +"properties": { +"engine": { +"description": "Required. The resource name of the engine that this tune applies to. Format: `projects/{project_number}/locations/{location_id}/collections/{collection_id}/engines/{engine_id}`", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudDiscoveryengineV1betaTuneEngineRequest": { +"description": "Request to manually start a tuning process now (instead of waiting for the periodically scheduled tuning to happen).", +"id": "GoogleCloudDiscoveryengineV1betaTuneEngineRequest", +"properties": {}, +"type": "object" +}, "GoogleCloudDiscoveryengineV1betaUpdateSchemaMetadata": { "description": "Metadata for UpdateSchema LRO.", "id": "GoogleCloudDiscoveryengineV1betaUpdateSchemaMetadata",