diff --git a/docs/dyn/bigquery_v2.datasets.html b/docs/dyn/bigquery_v2.datasets.html index 6a597721cf9..5a57aa2c8fc 100644 --- a/docs/dyn/bigquery_v2.datasets.html +++ b/docs/dyn/bigquery_v2.datasets.html @@ -143,9 +143,9 @@

Method Details

"iamMember": "A String", # [Pick one] Some other type of member that appears in the IAM Policy but isn't a user, group, domain, or special group. "role": "A String", # [Required] An IAM role ID that should be granted to the user, group, or domain specified in this access entry. The following legacy mappings will be applied: OWNER roles/bigquery.dataOwner WRITER roles/bigquery.dataEditor READER roles/bigquery.dataViewer This field will accept any of the above formats, but will return only the legacy format. For example, if you set this field to "roles/bigquery.dataOwner", it will be returned back as "OWNER". "routine": { # [Pick one] A routine from a different dataset to grant access to. Queries executed against that routine will have read access to views/tables/routines in this dataset. Only UDF is supported for now. The role field is not required when this field is set. If that routine is updated by any user, access to the routine needs to be granted again via an update operation. - "datasetId": "A String", # [Required] The ID of the dataset containing this routine. - "projectId": "A String", # [Required] The ID of the project containing this routine. - "routineId": "A String", # [Required] The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. + "datasetId": "A String", # Required. The ID of the dataset containing this routine. + "projectId": "A String", # Required. The ID of the project containing this routine. + "routineId": "A String", # Required. The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. }, "specialGroup": "A String", # [Pick one] A special group to grant access to. Possible values include: projectOwners: Owners of the enclosing project. projectReaders: Readers of the enclosing project. projectWriters: Writers of the enclosing project. 
allAuthenticatedUsers: All authenticated BigQuery users. Maps to similarly-named IAM members. "userByEmail": "A String", # [Pick one] An email address of a user to grant access to. For example: fred@example.com. Maps to IAM policy member "user:EMAIL" or "serviceAccount:EMAIL". @@ -163,7 +163,7 @@

Method Details

}, "defaultCollation": "A String", # [Output-only] The default collation of the dataset. "defaultEncryptionConfiguration": { - "kmsKeyName": "A String", # [Optional] Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. + "kmsKeyName": "A String", # Optional. Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. }, "defaultPartitionExpirationMs": "A String", # [Optional] The default partition expiration for all partitioned tables in the dataset, in milliseconds. Once this property is set, all newly-created partitioned tables in the dataset will have an expirationMs property in the timePartitioning settings set to this value, and changing the value will only affect new tables, not existing ones. The storage in a partition will have an expiration time of its partition time plus this value. Setting this property overrides the use of defaultTableExpirationMs for partitioned tables: only one of defaultTableExpirationMs and defaultPartitionExpirationMs will be used for any new partitioned table. If you provide an explicit timePartitioning.expirationMs when creating or updating a partitioned table, that value takes precedence over the default partition expiration time indicated by this property. "defaultRoundingMode": "A String", # [Output-only] The default rounding mode of the dataset. @@ -218,9 +218,9 @@

Method Details

"iamMember": "A String", # [Pick one] Some other type of member that appears in the IAM Policy but isn't a user, group, domain, or special group. "role": "A String", # [Required] An IAM role ID that should be granted to the user, group, or domain specified in this access entry. The following legacy mappings will be applied: OWNER roles/bigquery.dataOwner WRITER roles/bigquery.dataEditor READER roles/bigquery.dataViewer This field will accept any of the above formats, but will return only the legacy format. For example, if you set this field to "roles/bigquery.dataOwner", it will be returned back as "OWNER". "routine": { # [Pick one] A routine from a different dataset to grant access to. Queries executed against that routine will have read access to views/tables/routines in this dataset. Only UDF is supported for now. The role field is not required when this field is set. If that routine is updated by any user, access to the routine needs to be granted again via an update operation. - "datasetId": "A String", # [Required] The ID of the dataset containing this routine. - "projectId": "A String", # [Required] The ID of the project containing this routine. - "routineId": "A String", # [Required] The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. + "datasetId": "A String", # Required. The ID of the dataset containing this routine. + "projectId": "A String", # Required. The ID of the project containing this routine. + "routineId": "A String", # Required. The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. }, "specialGroup": "A String", # [Pick one] A special group to grant access to. Possible values include: projectOwners: Owners of the enclosing project. projectReaders: Readers of the enclosing project. projectWriters: Writers of the enclosing project. 
allAuthenticatedUsers: All authenticated BigQuery users. Maps to similarly-named IAM members. "userByEmail": "A String", # [Pick one] An email address of a user to grant access to. For example: fred@example.com. Maps to IAM policy member "user:EMAIL" or "serviceAccount:EMAIL". @@ -238,7 +238,7 @@

Method Details

}, "defaultCollation": "A String", # [Output-only] The default collation of the dataset. "defaultEncryptionConfiguration": { - "kmsKeyName": "A String", # [Optional] Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. + "kmsKeyName": "A String", # Optional. Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. }, "defaultPartitionExpirationMs": "A String", # [Optional] The default partition expiration for all partitioned tables in the dataset, in milliseconds. Once this property is set, all newly-created partitioned tables in the dataset will have an expirationMs property in the timePartitioning settings set to this value, and changing the value will only affect new tables, not existing ones. The storage in a partition will have an expiration time of its partition time plus this value. Setting this property overrides the use of defaultTableExpirationMs for partitioned tables: only one of defaultTableExpirationMs and defaultPartitionExpirationMs will be used for any new partitioned table. If you provide an explicit timePartitioning.expirationMs when creating or updating a partitioned table, that value takes precedence over the default partition expiration time indicated by this property. "defaultRoundingMode": "A String", # [Output-only] The default rounding mode of the dataset. @@ -287,9 +287,9 @@

Method Details

"iamMember": "A String", # [Pick one] Some other type of member that appears in the IAM Policy but isn't a user, group, domain, or special group. "role": "A String", # [Required] An IAM role ID that should be granted to the user, group, or domain specified in this access entry. The following legacy mappings will be applied: OWNER roles/bigquery.dataOwner WRITER roles/bigquery.dataEditor READER roles/bigquery.dataViewer This field will accept any of the above formats, but will return only the legacy format. For example, if you set this field to "roles/bigquery.dataOwner", it will be returned back as "OWNER". "routine": { # [Pick one] A routine from a different dataset to grant access to. Queries executed against that routine will have read access to views/tables/routines in this dataset. Only UDF is supported for now. The role field is not required when this field is set. If that routine is updated by any user, access to the routine needs to be granted again via an update operation. - "datasetId": "A String", # [Required] The ID of the dataset containing this routine. - "projectId": "A String", # [Required] The ID of the project containing this routine. - "routineId": "A String", # [Required] The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. + "datasetId": "A String", # Required. The ID of the dataset containing this routine. + "projectId": "A String", # Required. The ID of the project containing this routine. + "routineId": "A String", # Required. The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. }, "specialGroup": "A String", # [Pick one] A special group to grant access to. Possible values include: projectOwners: Owners of the enclosing project. projectReaders: Readers of the enclosing project. projectWriters: Writers of the enclosing project. 
allAuthenticatedUsers: All authenticated BigQuery users. Maps to similarly-named IAM members. "userByEmail": "A String", # [Pick one] An email address of a user to grant access to. For example: fred@example.com. Maps to IAM policy member "user:EMAIL" or "serviceAccount:EMAIL". @@ -307,7 +307,7 @@

Method Details

}, "defaultCollation": "A String", # [Output-only] The default collation of the dataset. "defaultEncryptionConfiguration": { - "kmsKeyName": "A String", # [Optional] Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. + "kmsKeyName": "A String", # Optional. Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. }, "defaultPartitionExpirationMs": "A String", # [Optional] The default partition expiration for all partitioned tables in the dataset, in milliseconds. Once this property is set, all newly-created partitioned tables in the dataset will have an expirationMs property in the timePartitioning settings set to this value, and changing the value will only affect new tables, not existing ones. The storage in a partition will have an expiration time of its partition time plus this value. Setting this property overrides the use of defaultTableExpirationMs for partitioned tables: only one of defaultTableExpirationMs and defaultPartitionExpirationMs will be used for any new partitioned table. If you provide an explicit timePartitioning.expirationMs when creating or updating a partitioned table, that value takes precedence over the default partition expiration time indicated by this property. "defaultRoundingMode": "A String", # [Output-only] The default rounding mode of the dataset. @@ -413,9 +413,9 @@

Method Details

"iamMember": "A String", # [Pick one] Some other type of member that appears in the IAM Policy but isn't a user, group, domain, or special group. "role": "A String", # [Required] An IAM role ID that should be granted to the user, group, or domain specified in this access entry. The following legacy mappings will be applied: OWNER roles/bigquery.dataOwner WRITER roles/bigquery.dataEditor READER roles/bigquery.dataViewer This field will accept any of the above formats, but will return only the legacy format. For example, if you set this field to "roles/bigquery.dataOwner", it will be returned back as "OWNER". "routine": { # [Pick one] A routine from a different dataset to grant access to. Queries executed against that routine will have read access to views/tables/routines in this dataset. Only UDF is supported for now. The role field is not required when this field is set. If that routine is updated by any user, access to the routine needs to be granted again via an update operation. - "datasetId": "A String", # [Required] The ID of the dataset containing this routine. - "projectId": "A String", # [Required] The ID of the project containing this routine. - "routineId": "A String", # [Required] The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. + "datasetId": "A String", # Required. The ID of the dataset containing this routine. + "projectId": "A String", # Required. The ID of the project containing this routine. + "routineId": "A String", # Required. The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. }, "specialGroup": "A String", # [Pick one] A special group to grant access to. Possible values include: projectOwners: Owners of the enclosing project. projectReaders: Readers of the enclosing project. projectWriters: Writers of the enclosing project. 
allAuthenticatedUsers: All authenticated BigQuery users. Maps to similarly-named IAM members. "userByEmail": "A String", # [Pick one] An email address of a user to grant access to. For example: fred@example.com. Maps to IAM policy member "user:EMAIL" or "serviceAccount:EMAIL". @@ -433,7 +433,7 @@

Method Details

}, "defaultCollation": "A String", # [Output-only] The default collation of the dataset. "defaultEncryptionConfiguration": { - "kmsKeyName": "A String", # [Optional] Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. + "kmsKeyName": "A String", # Optional. Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. }, "defaultPartitionExpirationMs": "A String", # [Optional] The default partition expiration for all partitioned tables in the dataset, in milliseconds. Once this property is set, all newly-created partitioned tables in the dataset will have an expirationMs property in the timePartitioning settings set to this value, and changing the value will only affect new tables, not existing ones. The storage in a partition will have an expiration time of its partition time plus this value. Setting this property overrides the use of defaultTableExpirationMs for partitioned tables: only one of defaultTableExpirationMs and defaultPartitionExpirationMs will be used for any new partitioned table. If you provide an explicit timePartitioning.expirationMs when creating or updating a partitioned table, that value takes precedence over the default partition expiration time indicated by this property. "defaultRoundingMode": "A String", # [Output-only] The default rounding mode of the dataset. @@ -482,9 +482,9 @@

Method Details

"iamMember": "A String", # [Pick one] Some other type of member that appears in the IAM Policy but isn't a user, group, domain, or special group. "role": "A String", # [Required] An IAM role ID that should be granted to the user, group, or domain specified in this access entry. The following legacy mappings will be applied: OWNER roles/bigquery.dataOwner WRITER roles/bigquery.dataEditor READER roles/bigquery.dataViewer This field will accept any of the above formats, but will return only the legacy format. For example, if you set this field to "roles/bigquery.dataOwner", it will be returned back as "OWNER". "routine": { # [Pick one] A routine from a different dataset to grant access to. Queries executed against that routine will have read access to views/tables/routines in this dataset. Only UDF is supported for now. The role field is not required when this field is set. If that routine is updated by any user, access to the routine needs to be granted again via an update operation. - "datasetId": "A String", # [Required] The ID of the dataset containing this routine. - "projectId": "A String", # [Required] The ID of the project containing this routine. - "routineId": "A String", # [Required] The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. + "datasetId": "A String", # Required. The ID of the dataset containing this routine. + "projectId": "A String", # Required. The ID of the project containing this routine. + "routineId": "A String", # Required. The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. }, "specialGroup": "A String", # [Pick one] A special group to grant access to. Possible values include: projectOwners: Owners of the enclosing project. projectReaders: Readers of the enclosing project. projectWriters: Writers of the enclosing project. 
allAuthenticatedUsers: All authenticated BigQuery users. Maps to similarly-named IAM members. "userByEmail": "A String", # [Pick one] An email address of a user to grant access to. For example: fred@example.com. Maps to IAM policy member "user:EMAIL" or "serviceAccount:EMAIL". @@ -502,7 +502,7 @@

Method Details

}, "defaultCollation": "A String", # [Output-only] The default collation of the dataset. "defaultEncryptionConfiguration": { - "kmsKeyName": "A String", # [Optional] Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. + "kmsKeyName": "A String", # Optional. Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. }, "defaultPartitionExpirationMs": "A String", # [Optional] The default partition expiration for all partitioned tables in the dataset, in milliseconds. Once this property is set, all newly-created partitioned tables in the dataset will have an expirationMs property in the timePartitioning settings set to this value, and changing the value will only affect new tables, not existing ones. The storage in a partition will have an expiration time of its partition time plus this value. Setting this property overrides the use of defaultTableExpirationMs for partitioned tables: only one of defaultTableExpirationMs and defaultPartitionExpirationMs will be used for any new partitioned table. If you provide an explicit timePartitioning.expirationMs when creating or updating a partitioned table, that value takes precedence over the default partition expiration time indicated by this property. "defaultRoundingMode": "A String", # [Output-only] The default rounding mode of the dataset. @@ -558,9 +558,9 @@

Method Details

"iamMember": "A String", # [Pick one] Some other type of member that appears in the IAM Policy but isn't a user, group, domain, or special group. "role": "A String", # [Required] An IAM role ID that should be granted to the user, group, or domain specified in this access entry. The following legacy mappings will be applied: OWNER roles/bigquery.dataOwner WRITER roles/bigquery.dataEditor READER roles/bigquery.dataViewer This field will accept any of the above formats, but will return only the legacy format. For example, if you set this field to "roles/bigquery.dataOwner", it will be returned back as "OWNER". "routine": { # [Pick one] A routine from a different dataset to grant access to. Queries executed against that routine will have read access to views/tables/routines in this dataset. Only UDF is supported for now. The role field is not required when this field is set. If that routine is updated by any user, access to the routine needs to be granted again via an update operation. - "datasetId": "A String", # [Required] The ID of the dataset containing this routine. - "projectId": "A String", # [Required] The ID of the project containing this routine. - "routineId": "A String", # [Required] The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. + "datasetId": "A String", # Required. The ID of the dataset containing this routine. + "projectId": "A String", # Required. The ID of the project containing this routine. + "routineId": "A String", # Required. The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. }, "specialGroup": "A String", # [Pick one] A special group to grant access to. Possible values include: projectOwners: Owners of the enclosing project. projectReaders: Readers of the enclosing project. projectWriters: Writers of the enclosing project. 
allAuthenticatedUsers: All authenticated BigQuery users. Maps to similarly-named IAM members. "userByEmail": "A String", # [Pick one] An email address of a user to grant access to. For example: fred@example.com. Maps to IAM policy member "user:EMAIL" or "serviceAccount:EMAIL". @@ -578,7 +578,7 @@

Method Details

}, "defaultCollation": "A String", # [Output-only] The default collation of the dataset. "defaultEncryptionConfiguration": { - "kmsKeyName": "A String", # [Optional] Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. + "kmsKeyName": "A String", # Optional. Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. }, "defaultPartitionExpirationMs": "A String", # [Optional] The default partition expiration for all partitioned tables in the dataset, in milliseconds. Once this property is set, all newly-created partitioned tables in the dataset will have an expirationMs property in the timePartitioning settings set to this value, and changing the value will only affect new tables, not existing ones. The storage in a partition will have an expiration time of its partition time plus this value. Setting this property overrides the use of defaultTableExpirationMs for partitioned tables: only one of defaultTableExpirationMs and defaultPartitionExpirationMs will be used for any new partitioned table. If you provide an explicit timePartitioning.expirationMs when creating or updating a partitioned table, that value takes precedence over the default partition expiration time indicated by this property. "defaultRoundingMode": "A String", # [Output-only] The default rounding mode of the dataset. @@ -627,9 +627,9 @@

Method Details

"iamMember": "A String", # [Pick one] Some other type of member that appears in the IAM Policy but isn't a user, group, domain, or special group. "role": "A String", # [Required] An IAM role ID that should be granted to the user, group, or domain specified in this access entry. The following legacy mappings will be applied: OWNER roles/bigquery.dataOwner WRITER roles/bigquery.dataEditor READER roles/bigquery.dataViewer This field will accept any of the above formats, but will return only the legacy format. For example, if you set this field to "roles/bigquery.dataOwner", it will be returned back as "OWNER". "routine": { # [Pick one] A routine from a different dataset to grant access to. Queries executed against that routine will have read access to views/tables/routines in this dataset. Only UDF is supported for now. The role field is not required when this field is set. If that routine is updated by any user, access to the routine needs to be granted again via an update operation. - "datasetId": "A String", # [Required] The ID of the dataset containing this routine. - "projectId": "A String", # [Required] The ID of the project containing this routine. - "routineId": "A String", # [Required] The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. + "datasetId": "A String", # Required. The ID of the dataset containing this routine. + "projectId": "A String", # Required. The ID of the project containing this routine. + "routineId": "A String", # Required. The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. }, "specialGroup": "A String", # [Pick one] A special group to grant access to. Possible values include: projectOwners: Owners of the enclosing project. projectReaders: Readers of the enclosing project. projectWriters: Writers of the enclosing project. 
allAuthenticatedUsers: All authenticated BigQuery users. Maps to similarly-named IAM members. "userByEmail": "A String", # [Pick one] An email address of a user to grant access to. For example: fred@example.com. Maps to IAM policy member "user:EMAIL" or "serviceAccount:EMAIL". @@ -647,7 +647,7 @@

Method Details

}, "defaultCollation": "A String", # [Output-only] The default collation of the dataset. "defaultEncryptionConfiguration": { - "kmsKeyName": "A String", # [Optional] Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. + "kmsKeyName": "A String", # Optional. Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. }, "defaultPartitionExpirationMs": "A String", # [Optional] The default partition expiration for all partitioned tables in the dataset, in milliseconds. Once this property is set, all newly-created partitioned tables in the dataset will have an expirationMs property in the timePartitioning settings set to this value, and changing the value will only affect new tables, not existing ones. The storage in a partition will have an expiration time of its partition time plus this value. Setting this property overrides the use of defaultTableExpirationMs for partitioned tables: only one of defaultTableExpirationMs and defaultPartitionExpirationMs will be used for any new partitioned table. If you provide an explicit timePartitioning.expirationMs when creating or updating a partitioned table, that value takes precedence over the default partition expiration time indicated by this property. "defaultRoundingMode": "A String", # [Output-only] The default rounding mode of the dataset. diff --git a/docs/dyn/bigquery_v2.jobs.html b/docs/dyn/bigquery_v2.jobs.html index fa19c582a64..4d3e47168bb 100644 --- a/docs/dyn/bigquery_v2.jobs.html +++ b/docs/dyn/bigquery_v2.jobs.html @@ -123,7 +123,7 @@

Method Details

"copy": { # [Pick one] Copies a table. "createDisposition": "A String", # [Optional] Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. The default value is CREATE_IF_NEEDED. Creation, truncation and append actions occur as one atomic update upon job completion. "destinationEncryptionConfiguration": { # Custom encryption configuration (e.g., Cloud KMS keys). - "kmsKeyName": "A String", # [Optional] Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. + "kmsKeyName": "A String", # Optional. Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. }, "destinationExpirationTime": "", # [Optional] The time when the destination table expires. Expired tables will be deleted and their storage reclaimed. "destinationTable": { # [Required] The destination table @@ -157,9 +157,9 @@

Method Details

"fieldDelimiter": "A String", # [Optional] Delimiter to use between fields in the exported data. Default is ','. Not applicable when extracting models. "printHeader": true, # [Optional] Whether to print out a header row in the results. Default is true. Not applicable when extracting models. "sourceModel": { # A reference to the model being exported. - "datasetId": "A String", # [Required] The ID of the dataset containing this model. - "modelId": "A String", # [Required] The ID of the model. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters. - "projectId": "A String", # [Required] The ID of the project containing this model. + "datasetId": "A String", # Required. The ID of the dataset containing this model. + "modelId": "A String", # Required. The ID of the model. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters. + "projectId": "A String", # Required. The ID of the project containing this model. }, "sourceTable": { # A reference to the table being exported. "datasetId": "A String", # [Required] The ID of the dataset containing this table. @@ -194,7 +194,7 @@

Method Details

"A String", ], "destinationEncryptionConfiguration": { # Custom encryption configuration (e.g., Cloud KMS keys). - "kmsKeyName": "A String", # [Optional] Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. + "kmsKeyName": "A String", # Optional. Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. }, "destinationTable": { # [Required] The destination table to load the data into. "datasetId": "A String", # [Required] The ID of the dataset containing this table. @@ -212,6 +212,9 @@

Method Details

"encoding": "A String", # [Optional] The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties. "fieldDelimiter": "A String", # [Optional] The separator for fields in a CSV file. The separator can be any ISO-8859-1 single-byte character. To use a character in the range 128-255, you must encode the character as UTF8. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence "\t" to specify a tab separator. The default value is a comma (','). "hivePartitioningOptions": { # [Optional] Options to configure hive partitioning support. + "fields": [ # [Output-only] For permanent external tables, this field is populated with the hive partition keys in the order they were inferred. The types of the partition keys can be deduced by checking the table schema (which will include the partition keys). Not every API will populate this field in the output. For example, Tables.Get will populate it, but Tables.List will not contain this field. + "A String", + ], "mode": "A String", # [Optional] When set, what mode of hive partitioning to use when reading data. The following modes are supported. (1) AUTO: automatically infer partition key name(s) and type(s). (2) STRINGS: automatically infer partition key name(s). All types are interpreted as strings. (3) CUSTOM: partition key schema is encoded in the source URI prefix. Not all storage formats support hive partitioning. Requesting hive partitioning on an unsupported format will lead to an error. Currently supported types include: AVRO, CSV, JSON, ORC and Parquet. 
"requirePartitionFilter": True or False, # [Optional] If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified. Note that this field should only be true when creating a permanent external table or querying a temporary external table. Hive-partitioned loads with requirePartitionFilter explicitly set to true will fail. "sourceUriPrefix": "A String", # [Optional] When hive partition detection is requested, a common prefix for all source uris should be supplied. The prefix must end immediately before the partition key encoding begins. For example, consider files following this data layout. gs://bucket/path_to_table/dt=2019-01-01/country=BR/id=7/file.avro gs://bucket/path_to_table/dt=2018-12-31/country=CA/id=3/file.avro When hive partitioning is requested with either AUTO or STRINGS detection, the common prefix can be either of gs://bucket/path_to_table or gs://bucket/path_to_table/ (trailing slash does not matter). @@ -307,7 +310,7 @@

Method Details

"projectId": "A String", # [Optional] The ID of the project containing this dataset. }, "destinationEncryptionConfiguration": { # Custom encryption configuration (e.g., Cloud KMS keys). - "kmsKeyName": "A String", # [Optional] Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. + "kmsKeyName": "A String", # Optional. Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. }, "destinationTable": { # [Optional] Describes the table where the query results should be stored. If not present, a new table will be created to store the results. This property must be set for large results that exceed the maximum response size. "datasetId": "A String", # [Required] The ID of the dataset containing this table. @@ -405,11 +408,17 @@

Method Details

"skipLeadingRows": "A String", # [Optional] The number of rows at the top of a sheet that BigQuery will skip when reading the data. The default value is 0. This property is useful if you have header rows that should be skipped. When autodetect is on, behavior is the following: * skipLeadingRows unspecified - Autodetect tries to detect headers in the first row. If they are not detected, the row is read as data. Otherwise data is read starting from the second row. * skipLeadingRows is 0 - Instructs autodetect that there are no headers and data should be read starting from the first row. * skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect headers in row N. If headers are not detected, row N is just skipped. Otherwise row N is used to extract column names for the detected schema. }, "hivePartitioningOptions": { # [Optional] Options to configure hive partitioning support. + "fields": [ # [Output-only] For permanent external tables, this field is populated with the hive partition keys in the order they were inferred. The types of the partition keys can be deduced by checking the table schema (which will include the partition keys). Not every API will populate this field in the output. For example, Tables.Get will populate it, but Tables.List will not contain this field. + "A String", + ], "mode": "A String", # [Optional] When set, what mode of hive partitioning to use when reading data. The following modes are supported. (1) AUTO: automatically infer partition key name(s) and type(s). (2) STRINGS: automatically infer partition key name(s). All types are interpreted as strings. (3) CUSTOM: partition key schema is encoded in the source URI prefix. Not all storage formats support hive partitioning. Requesting hive partitioning on an unsupported format will lead to an error. Currently supported types include: AVRO, CSV, JSON, ORC and Parquet. 
"requirePartitionFilter": True or False, # [Optional] If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified. Note that this field should only be true when creating a permanent external table or querying a temporary external table. Hive-partitioned loads with requirePartitionFilter explicitly set to true will fail. "sourceUriPrefix": "A String", # [Optional] When hive partition detection is requested, a common prefix for all source uris should be supplied. The prefix must end immediately before the partition key encoding begins. For example, consider files following this data layout. gs://bucket/path_to_table/dt=2019-01-01/country=BR/id=7/file.avro gs://bucket/path_to_table/dt=2018-12-31/country=CA/id=3/file.avro When hive partitioning is requested with either AUTO or STRINGS detection, the common prefix can be either of gs://bucket/path_to_table or gs://bucket/path_to_table/ (trailing slash does not matter). }, "ignoreUnknownValues": True or False, # [Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don't match any column names Google Cloud Bigtable: This setting is ignored. Google Cloud Datastore backups: This setting is ignored. Avro: This setting is ignored. + "jsonOptions": { # Additional properties to set if `sourceFormat` is set to `NEWLINE_DELIMITED_JSON`. + "encoding": "A String", # [Optional] The character encoding of the data. The supported values are UTF-8, UTF-16BE, UTF-16LE, UTF-32BE, and UTF-32LE. The default value is UTF-8. 
+ }, "maxBadRecords": 42, # [Optional] The maximum number of bad records that BigQuery can ignore when reading data. If the number of bad records exceeds this value, an invalid error is returned in the job result. This is only valid for CSV, JSON, and Google Sheets. The default value is 0, which requires that all records are valid. This setting is ignored for Google Cloud Bigtable, Google Cloud Datastore backups and Avro formats. "metadataCacheMode": "A String", # [Optional] Metadata Cache Mode for the table. Set this to enable caching of metadata from external data source. "objectMetadata": "A String", # ObjectMetadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the source_uris. If ObjectMetadata is set, source_format should be omitted. Currently SIMPLE is the only supported Object Metadata type. @@ -530,15 +539,15 @@

Method Details

"projectId": "A String", # [Optional] The ID of the project containing this dataset. }, "ddlTargetRoutine": { # The DDL target routine. Present only for CREATE/DROP FUNCTION/PROCEDURE queries. - "datasetId": "A String", # [Required] The ID of the dataset containing this routine. - "projectId": "A String", # [Required] The ID of the project containing this routine. - "routineId": "A String", # [Required] The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. + "datasetId": "A String", # Required. The ID of the dataset containing this routine. + "projectId": "A String", # Required. The ID of the project containing this routine. + "routineId": "A String", # Required. The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. }, "ddlTargetRowAccessPolicy": { # [Output only] [Preview] The DDL target row access policy. Present only for CREATE/DROP ROW ACCESS POLICY queries. - "datasetId": "A String", # [Required] The ID of the dataset containing this row access policy. - "policyId": "A String", # [Required] The ID of the row access policy. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. - "projectId": "A String", # [Required] The ID of the project containing this row access policy. - "tableId": "A String", # [Required] The ID of the table containing this row access policy. + "datasetId": "A String", # Required. The ID of the dataset containing this row access policy. + "policyId": "A String", # Required. The ID of the row access policy. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. + "projectId": "A String", # Required. The ID of the project containing this row access policy. + "tableId": "A String", # Required. The ID of the table containing this row access policy. 
}, "ddlTargetTable": { # [Output only] The DDL target table. Present only for CREATE/DROP TABLE/VIEW and DROP ALL ROW ACCESS POLICIES queries. "datasetId": "A String", # [Required] The ID of the dataset containing this table. @@ -615,9 +624,9 @@

Method Details

], "referencedRoutines": [ # [Output only] Referenced routines (persistent user-defined functions and stored procedures) for the job. { - "datasetId": "A String", # [Required] The ID of the dataset containing this routine. - "projectId": "A String", # [Required] The ID of the project containing this routine. - "routineId": "A String", # [Required] The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. + "datasetId": "A String", # Required. The ID of the dataset containing this routine. + "projectId": "A String", # Required. The ID of the project containing this routine. + "routineId": "A String", # Required. The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. }, ], "referencedTables": [ # [Output only] Referenced tables for the job. Queries that reference more than 50 tables will not have a complete list. @@ -823,7 +832,7 @@

Method Details

"copy": { # [Pick one] Copies a table. "createDisposition": "A String", # [Optional] Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. The default value is CREATE_IF_NEEDED. Creation, truncation and append actions occur as one atomic update upon job completion. "destinationEncryptionConfiguration": { # Custom encryption configuration (e.g., Cloud KMS keys). - "kmsKeyName": "A String", # [Optional] Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. + "kmsKeyName": "A String", # Optional. Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. }, "destinationExpirationTime": "", # [Optional] The time when the destination table expires. Expired tables will be deleted and their storage reclaimed. "destinationTable": { # [Required] The destination table @@ -857,9 +866,9 @@

Method Details

"fieldDelimiter": "A String", # [Optional] Delimiter to use between fields in the exported data. Default is ','. Not applicable when extracting models. "printHeader": true, # [Optional] Whether to print out a header row in the results. Default is true. Not applicable when extracting models. "sourceModel": { # A reference to the model being exported. - "datasetId": "A String", # [Required] The ID of the dataset containing this model. - "modelId": "A String", # [Required] The ID of the model. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters. - "projectId": "A String", # [Required] The ID of the project containing this model. + "datasetId": "A String", # Required. The ID of the dataset containing this model. + "modelId": "A String", # Required. The ID of the model. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters. + "projectId": "A String", # Required. The ID of the project containing this model. }, "sourceTable": { # A reference to the table being exported. "datasetId": "A String", # [Required] The ID of the dataset containing this table. @@ -894,7 +903,7 @@

Method Details

"A String", ], "destinationEncryptionConfiguration": { # Custom encryption configuration (e.g., Cloud KMS keys). - "kmsKeyName": "A String", # [Optional] Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. + "kmsKeyName": "A String", # Optional. Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. }, "destinationTable": { # [Required] The destination table to load the data into. "datasetId": "A String", # [Required] The ID of the dataset containing this table. @@ -912,6 +921,9 @@

Method Details

"encoding": "A String", # [Optional] The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties. "fieldDelimiter": "A String", # [Optional] The separator for fields in a CSV file. The separator can be any ISO-8859-1 single-byte character. To use a character in the range 128-255, you must encode the character as UTF8. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence "\t" to specify a tab separator. The default value is a comma (','). "hivePartitioningOptions": { # [Optional] Options to configure hive partitioning support. + "fields": [ # [Output-only] For permanent external tables, this field is populated with the hive partition keys in the order they were inferred. The types of the partition keys can be deduced by checking the table schema (which will include the partition keys). Not every API will populate this field in the output. For example, Tables.Get will populate it, but Tables.List will not contain this field. + "A String", + ], "mode": "A String", # [Optional] When set, what mode of hive partitioning to use when reading data. The following modes are supported. (1) AUTO: automatically infer partition key name(s) and type(s). (2) STRINGS: automatically infer partition key name(s). All types are interpreted as strings. (3) CUSTOM: partition key schema is encoded in the source URI prefix. Not all storage formats support hive partitioning. Requesting hive partitioning on an unsupported format will lead to an error. Currently supported types include: AVRO, CSV, JSON, ORC and Parquet. 
"requirePartitionFilter": True or False, # [Optional] If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified. Note that this field should only be true when creating a permanent external table or querying a temporary external table. Hive-partitioned loads with requirePartitionFilter explicitly set to true will fail. "sourceUriPrefix": "A String", # [Optional] When hive partition detection is requested, a common prefix for all source uris should be supplied. The prefix must end immediately before the partition key encoding begins. For example, consider files following this data layout. gs://bucket/path_to_table/dt=2019-01-01/country=BR/id=7/file.avro gs://bucket/path_to_table/dt=2018-12-31/country=CA/id=3/file.avro When hive partitioning is requested with either AUTO or STRINGS detection, the common prefix can be either of gs://bucket/path_to_table or gs://bucket/path_to_table/ (trailing slash does not matter). @@ -1007,7 +1019,7 @@

Method Details

"projectId": "A String", # [Optional] The ID of the project containing this dataset. }, "destinationEncryptionConfiguration": { # Custom encryption configuration (e.g., Cloud KMS keys). - "kmsKeyName": "A String", # [Optional] Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. + "kmsKeyName": "A String", # Optional. Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. }, "destinationTable": { # [Optional] Describes the table where the query results should be stored. If not present, a new table will be created to store the results. This property must be set for large results that exceed the maximum response size. "datasetId": "A String", # [Required] The ID of the dataset containing this table. @@ -1105,11 +1117,17 @@

Method Details

"skipLeadingRows": "A String", # [Optional] The number of rows at the top of a sheet that BigQuery will skip when reading the data. The default value is 0. This property is useful if you have header rows that should be skipped. When autodetect is on, behavior is the following: * skipLeadingRows unspecified - Autodetect tries to detect headers in the first row. If they are not detected, the row is read as data. Otherwise data is read starting from the second row. * skipLeadingRows is 0 - Instructs autodetect that there are no headers and data should be read starting from the first row. * skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect headers in row N. If headers are not detected, row N is just skipped. Otherwise row N is used to extract column names for the detected schema. }, "hivePartitioningOptions": { # [Optional] Options to configure hive partitioning support. + "fields": [ # [Output-only] For permanent external tables, this field is populated with the hive partition keys in the order they were inferred. The types of the partition keys can be deduced by checking the table schema (which will include the partition keys). Not every API will populate this field in the output. For example, Tables.Get will populate it, but Tables.List will not contain this field. + "A String", + ], "mode": "A String", # [Optional] When set, what mode of hive partitioning to use when reading data. The following modes are supported. (1) AUTO: automatically infer partition key name(s) and type(s). (2) STRINGS: automatically infer partition key name(s). All types are interpreted as strings. (3) CUSTOM: partition key schema is encoded in the source URI prefix. Not all storage formats support hive partitioning. Requesting hive partitioning on an unsupported format will lead to an error. Currently supported types include: AVRO, CSV, JSON, ORC and Parquet. 
"requirePartitionFilter": True or False, # [Optional] If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified. Note that this field should only be true when creating a permanent external table or querying a temporary external table. Hive-partitioned loads with requirePartitionFilter explicitly set to true will fail. "sourceUriPrefix": "A String", # [Optional] When hive partition detection is requested, a common prefix for all source uris should be supplied. The prefix must end immediately before the partition key encoding begins. For example, consider files following this data layout. gs://bucket/path_to_table/dt=2019-01-01/country=BR/id=7/file.avro gs://bucket/path_to_table/dt=2018-12-31/country=CA/id=3/file.avro When hive partitioning is requested with either AUTO or STRINGS detection, the common prefix can be either of gs://bucket/path_to_table or gs://bucket/path_to_table/ (trailing slash does not matter). }, "ignoreUnknownValues": True or False, # [Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don't match any column names Google Cloud Bigtable: This setting is ignored. Google Cloud Datastore backups: This setting is ignored. Avro: This setting is ignored. + "jsonOptions": { # Additional properties to set if `sourceFormat` is set to `NEWLINE_DELIMITED_JSON`. + "encoding": "A String", # [Optional] The character encoding of the data. The supported values are UTF-8, UTF-16BE, UTF-16LE, UTF-32BE, and UTF-32LE. The default value is UTF-8. 
+ }, "maxBadRecords": 42, # [Optional] The maximum number of bad records that BigQuery can ignore when reading data. If the number of bad records exceeds this value, an invalid error is returned in the job result. This is only valid for CSV, JSON, and Google Sheets. The default value is 0, which requires that all records are valid. This setting is ignored for Google Cloud Bigtable, Google Cloud Datastore backups and Avro formats. "metadataCacheMode": "A String", # [Optional] Metadata Cache Mode for the table. Set this to enable caching of metadata from external data source. "objectMetadata": "A String", # ObjectMetadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the source_uris. If ObjectMetadata is set, source_format should be omitted. Currently SIMPLE is the only supported Object Metadata type. @@ -1230,15 +1248,15 @@

Method Details

"projectId": "A String", # [Optional] The ID of the project containing this dataset. }, "ddlTargetRoutine": { # The DDL target routine. Present only for CREATE/DROP FUNCTION/PROCEDURE queries. - "datasetId": "A String", # [Required] The ID of the dataset containing this routine. - "projectId": "A String", # [Required] The ID of the project containing this routine. - "routineId": "A String", # [Required] The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. + "datasetId": "A String", # Required. The ID of the dataset containing this routine. + "projectId": "A String", # Required. The ID of the project containing this routine. + "routineId": "A String", # Required. The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. }, "ddlTargetRowAccessPolicy": { # [Output only] [Preview] The DDL target row access policy. Present only for CREATE/DROP ROW ACCESS POLICY queries. - "datasetId": "A String", # [Required] The ID of the dataset containing this row access policy. - "policyId": "A String", # [Required] The ID of the row access policy. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. - "projectId": "A String", # [Required] The ID of the project containing this row access policy. - "tableId": "A String", # [Required] The ID of the table containing this row access policy. + "datasetId": "A String", # Required. The ID of the dataset containing this row access policy. + "policyId": "A String", # Required. The ID of the row access policy. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. + "projectId": "A String", # Required. The ID of the project containing this row access policy. + "tableId": "A String", # Required. The ID of the table containing this row access policy. 
}, "ddlTargetTable": { # [Output only] The DDL target table. Present only for CREATE/DROP TABLE/VIEW and DROP ALL ROW ACCESS POLICIES queries. "datasetId": "A String", # [Required] The ID of the dataset containing this table. @@ -1315,9 +1333,9 @@

Method Details

], "referencedRoutines": [ # [Output only] Referenced routines (persistent user-defined functions and stored procedures) for the job. { - "datasetId": "A String", # [Required] The ID of the dataset containing this routine. - "projectId": "A String", # [Required] The ID of the project containing this routine. - "routineId": "A String", # [Required] The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. + "datasetId": "A String", # Required. The ID of the dataset containing this routine. + "projectId": "A String", # Required. The ID of the project containing this routine. + "routineId": "A String", # Required. The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. }, ], "referencedTables": [ # [Output only] Referenced tables for the job. Queries that reference more than 50 tables will not have a complete list. @@ -1595,7 +1613,7 @@

Method Details

"copy": { # [Pick one] Copies a table. "createDisposition": "A String", # [Optional] Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. The default value is CREATE_IF_NEEDED. Creation, truncation and append actions occur as one atomic update upon job completion. "destinationEncryptionConfiguration": { # Custom encryption configuration (e.g., Cloud KMS keys). - "kmsKeyName": "A String", # [Optional] Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. + "kmsKeyName": "A String", # Optional. Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. }, "destinationExpirationTime": "", # [Optional] The time when the destination table expires. Expired tables will be deleted and their storage reclaimed. "destinationTable": { # [Required] The destination table @@ -1629,9 +1647,9 @@

Method Details

"fieldDelimiter": "A String", # [Optional] Delimiter to use between fields in the exported data. Default is ','. Not applicable when extracting models. "printHeader": true, # [Optional] Whether to print out a header row in the results. Default is true. Not applicable when extracting models. "sourceModel": { # A reference to the model being exported. - "datasetId": "A String", # [Required] The ID of the dataset containing this model. - "modelId": "A String", # [Required] The ID of the model. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters. - "projectId": "A String", # [Required] The ID of the project containing this model. + "datasetId": "A String", # Required. The ID of the dataset containing this model. + "modelId": "A String", # Required. The ID of the model. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters. + "projectId": "A String", # Required. The ID of the project containing this model. }, "sourceTable": { # A reference to the table being exported. "datasetId": "A String", # [Required] The ID of the dataset containing this table. @@ -1666,7 +1684,7 @@

Method Details

"A String", ], "destinationEncryptionConfiguration": { # Custom encryption configuration (e.g., Cloud KMS keys). - "kmsKeyName": "A String", # [Optional] Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. + "kmsKeyName": "A String", # Optional. Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. }, "destinationTable": { # [Required] The destination table to load the data into. "datasetId": "A String", # [Required] The ID of the dataset containing this table. @@ -1684,6 +1702,9 @@

Method Details

"encoding": "A String", # [Optional] The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties. "fieldDelimiter": "A String", # [Optional] The separator for fields in a CSV file. The separator can be any ISO-8859-1 single-byte character. To use a character in the range 128-255, you must encode the character as UTF8. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence "\t" to specify a tab separator. The default value is a comma (','). "hivePartitioningOptions": { # [Optional] Options to configure hive partitioning support. + "fields": [ # [Output-only] For permanent external tables, this field is populated with the hive partition keys in the order they were inferred. The types of the partition keys can be deduced by checking the table schema (which will include the partition keys). Not every API will populate this field in the output. For example, Tables.Get will populate it, but Tables.List will not contain this field. + "A String", + ], "mode": "A String", # [Optional] When set, what mode of hive partitioning to use when reading data. The following modes are supported. (1) AUTO: automatically infer partition key name(s) and type(s). (2) STRINGS: automatically infer partition key name(s). All types are interpreted as strings. (3) CUSTOM: partition key schema is encoded in the source URI prefix. Not all storage formats support hive partitioning. Requesting hive partitioning on an unsupported format will lead to an error. Currently supported types include: AVRO, CSV, JSON, ORC and Parquet. 
"requirePartitionFilter": True or False, # [Optional] If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified. Note that this field should only be true when creating a permanent external table or querying a temporary external table. Hive-partitioned loads with requirePartitionFilter explicitly set to true will fail. "sourceUriPrefix": "A String", # [Optional] When hive partition detection is requested, a common prefix for all source uris should be supplied. The prefix must end immediately before the partition key encoding begins. For example, consider files following this data layout. gs://bucket/path_to_table/dt=2019-01-01/country=BR/id=7/file.avro gs://bucket/path_to_table/dt=2018-12-31/country=CA/id=3/file.avro When hive partitioning is requested with either AUTO or STRINGS detection, the common prefix can be either of gs://bucket/path_to_table or gs://bucket/path_to_table/ (trailing slash does not matter). @@ -1779,7 +1800,7 @@

Method Details

"projectId": "A String", # [Optional] The ID of the project containing this dataset. }, "destinationEncryptionConfiguration": { # Custom encryption configuration (e.g., Cloud KMS keys). - "kmsKeyName": "A String", # [Optional] Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. + "kmsKeyName": "A String", # Optional. Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. }, "destinationTable": { # [Optional] Describes the table where the query results should be stored. If not present, a new table will be created to store the results. This property must be set for large results that exceed the maximum response size. "datasetId": "A String", # [Required] The ID of the dataset containing this table. @@ -1877,11 +1898,17 @@

Method Details

"skipLeadingRows": "A String", # [Optional] The number of rows at the top of a sheet that BigQuery will skip when reading the data. The default value is 0. This property is useful if you have header rows that should be skipped. When autodetect is on, behavior is the following: * skipLeadingRows unspecified - Autodetect tries to detect headers in the first row. If they are not detected, the row is read as data. Otherwise data is read starting from the second row. * skipLeadingRows is 0 - Instructs autodetect that there are no headers and data should be read starting from the first row. * skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect headers in row N. If headers are not detected, row N is just skipped. Otherwise row N is used to extract column names for the detected schema. }, "hivePartitioningOptions": { # [Optional] Options to configure hive partitioning support. + "fields": [ # [Output-only] For permanent external tables, this field is populated with the hive partition keys in the order they were inferred. The types of the partition keys can be deduced by checking the table schema (which will include the partition keys). Not every API will populate this field in the output. For example, Tables.Get will populate it, but Tables.List will not contain this field. + "A String", + ], "mode": "A String", # [Optional] When set, what mode of hive partitioning to use when reading data. The following modes are supported. (1) AUTO: automatically infer partition key name(s) and type(s). (2) STRINGS: automatically infer partition key name(s). All types are interpreted as strings. (3) CUSTOM: partition key schema is encoded in the source URI prefix. Not all storage formats support hive partitioning. Requesting hive partitioning on an unsupported format will lead to an error. Currently supported types include: AVRO, CSV, JSON, ORC and Parquet. 
"requirePartitionFilter": True or False, # [Optional] If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified. Note that this field should only be true when creating a permanent external table or querying a temporary external table. Hive-partitioned loads with requirePartitionFilter explicitly set to true will fail. "sourceUriPrefix": "A String", # [Optional] When hive partition detection is requested, a common prefix for all source uris should be supplied. The prefix must end immediately before the partition key encoding begins. For example, consider files following this data layout. gs://bucket/path_to_table/dt=2019-01-01/country=BR/id=7/file.avro gs://bucket/path_to_table/dt=2018-12-31/country=CA/id=3/file.avro When hive partitioning is requested with either AUTO or STRINGS detection, the common prefix can be either of gs://bucket/path_to_table or gs://bucket/path_to_table/ (trailing slash does not matter). }, "ignoreUnknownValues": True or False, # [Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don't match any column names Google Cloud Bigtable: This setting is ignored. Google Cloud Datastore backups: This setting is ignored. Avro: This setting is ignored. + "jsonOptions": { # Additional properties to set if `sourceFormat` is set to `NEWLINE_DELIMITED_JSON`. + "encoding": "A String", # [Optional] The character encoding of the data. The supported values are UTF-8, UTF-16BE, UTF-16LE, UTF-32BE, and UTF-32LE. The default value is UTF-8. 
+ }, "maxBadRecords": 42, # [Optional] The maximum number of bad records that BigQuery can ignore when reading data. If the number of bad records exceeds this value, an invalid error is returned in the job result. This is only valid for CSV, JSON, and Google Sheets. The default value is 0, which requires that all records are valid. This setting is ignored for Google Cloud Bigtable, Google Cloud Datastore backups and Avro formats. "metadataCacheMode": "A String", # [Optional] Metadata Cache Mode for the table. Set this to enable caching of metadata from external data source. "objectMetadata": "A String", # ObjectMetadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the source_uris. If ObjectMetadata is set, source_format should be omitted. Currently SIMPLE is the only supported Object Metadata type. @@ -2002,15 +2029,15 @@

Method Details

"projectId": "A String", # [Optional] The ID of the project containing this dataset. }, "ddlTargetRoutine": { # The DDL target routine. Present only for CREATE/DROP FUNCTION/PROCEDURE queries. - "datasetId": "A String", # [Required] The ID of the dataset containing this routine. - "projectId": "A String", # [Required] The ID of the project containing this routine. - "routineId": "A String", # [Required] The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. + "datasetId": "A String", # Required. The ID of the dataset containing this routine. + "projectId": "A String", # Required. The ID of the project containing this routine. + "routineId": "A String", # Required. The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. }, "ddlTargetRowAccessPolicy": { # [Output only] [Preview] The DDL target row access policy. Present only for CREATE/DROP ROW ACCESS POLICY queries. - "datasetId": "A String", # [Required] The ID of the dataset containing this row access policy. - "policyId": "A String", # [Required] The ID of the row access policy. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. - "projectId": "A String", # [Required] The ID of the project containing this row access policy. - "tableId": "A String", # [Required] The ID of the table containing this row access policy. + "datasetId": "A String", # Required. The ID of the dataset containing this row access policy. + "policyId": "A String", # Required. The ID of the row access policy. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. + "projectId": "A String", # Required. The ID of the project containing this row access policy. + "tableId": "A String", # Required. The ID of the table containing this row access policy. 
}, "ddlTargetTable": { # [Output only] The DDL target table. Present only for CREATE/DROP TABLE/VIEW and DROP ALL ROW ACCESS POLICIES queries. "datasetId": "A String", # [Required] The ID of the dataset containing this table. @@ -2087,9 +2114,9 @@

Method Details

], "referencedRoutines": [ # [Output only] Referenced routines (persistent user-defined functions and stored procedures) for the job. { - "datasetId": "A String", # [Required] The ID of the dataset containing this routine. - "projectId": "A String", # [Required] The ID of the project containing this routine. - "routineId": "A String", # [Required] The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. + "datasetId": "A String", # Required. The ID of the dataset containing this routine. + "projectId": "A String", # Required. The ID of the project containing this routine. + "routineId": "A String", # Required. The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. }, ], "referencedTables": [ # [Output only] Referenced tables for the job. Queries that reference more than 50 tables will not have a complete list. @@ -2270,7 +2297,7 @@

Method Details

"copy": { # [Pick one] Copies a table. "createDisposition": "A String", # [Optional] Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. The default value is CREATE_IF_NEEDED. Creation, truncation and append actions occur as one atomic update upon job completion. "destinationEncryptionConfiguration": { # Custom encryption configuration (e.g., Cloud KMS keys). - "kmsKeyName": "A String", # [Optional] Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. + "kmsKeyName": "A String", # Optional. Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. }, "destinationExpirationTime": "", # [Optional] The time when the destination table expires. Expired tables will be deleted and their storage reclaimed. "destinationTable": { # [Required] The destination table @@ -2304,9 +2331,9 @@

Method Details

"fieldDelimiter": "A String", # [Optional] Delimiter to use between fields in the exported data. Default is ','. Not applicable when extracting models. "printHeader": true, # [Optional] Whether to print out a header row in the results. Default is true. Not applicable when extracting models. "sourceModel": { # A reference to the model being exported. - "datasetId": "A String", # [Required] The ID of the dataset containing this model. - "modelId": "A String", # [Required] The ID of the model. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters. - "projectId": "A String", # [Required] The ID of the project containing this model. + "datasetId": "A String", # Required. The ID of the dataset containing this model. + "modelId": "A String", # Required. The ID of the model. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters. + "projectId": "A String", # Required. The ID of the project containing this model. }, "sourceTable": { # A reference to the table being exported. "datasetId": "A String", # [Required] The ID of the dataset containing this table. @@ -2341,7 +2368,7 @@

Method Details

"A String", ], "destinationEncryptionConfiguration": { # Custom encryption configuration (e.g., Cloud KMS keys). - "kmsKeyName": "A String", # [Optional] Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. + "kmsKeyName": "A String", # Optional. Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. }, "destinationTable": { # [Required] The destination table to load the data into. "datasetId": "A String", # [Required] The ID of the dataset containing this table. @@ -2359,6 +2386,9 @@

Method Details

"encoding": "A String", # [Optional] The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties. "fieldDelimiter": "A String", # [Optional] The separator for fields in a CSV file. The separator can be any ISO-8859-1 single-byte character. To use a character in the range 128-255, you must encode the character as UTF8. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence "\t" to specify a tab separator. The default value is a comma (','). "hivePartitioningOptions": { # [Optional] Options to configure hive partitioning support. + "fields": [ # [Output-only] For permanent external tables, this field is populated with the hive partition keys in the order they were inferred. The types of the partition keys can be deduced by checking the table schema (which will include the partition keys). Not every API will populate this field in the output. For example, Tables.Get will populate it, but Tables.List will not contain this field. + "A String", + ], "mode": "A String", # [Optional] When set, what mode of hive partitioning to use when reading data. The following modes are supported. (1) AUTO: automatically infer partition key name(s) and type(s). (2) STRINGS: automatically infer partition key name(s). All types are interpreted as strings. (3) CUSTOM: partition key schema is encoded in the source URI prefix. Not all storage formats support hive partitioning. Requesting hive partitioning on an unsupported format will lead to an error. Currently supported types include: AVRO, CSV, JSON, ORC and Parquet. 
"requirePartitionFilter": True or False, # [Optional] If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified. Note that this field should only be true when creating a permanent external table or querying a temporary external table. Hive-partitioned loads with requirePartitionFilter explicitly set to true will fail. "sourceUriPrefix": "A String", # [Optional] When hive partition detection is requested, a common prefix for all source uris should be supplied. The prefix must end immediately before the partition key encoding begins. For example, consider files following this data layout. gs://bucket/path_to_table/dt=2019-01-01/country=BR/id=7/file.avro gs://bucket/path_to_table/dt=2018-12-31/country=CA/id=3/file.avro When hive partitioning is requested with either AUTO or STRINGS detection, the common prefix can be either of gs://bucket/path_to_table or gs://bucket/path_to_table/ (trailing slash does not matter). @@ -2454,7 +2484,7 @@

Method Details

"projectId": "A String", # [Optional] The ID of the project containing this dataset. }, "destinationEncryptionConfiguration": { # Custom encryption configuration (e.g., Cloud KMS keys). - "kmsKeyName": "A String", # [Optional] Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. + "kmsKeyName": "A String", # Optional. Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. }, "destinationTable": { # [Optional] Describes the table where the query results should be stored. If not present, a new table will be created to store the results. This property must be set for large results that exceed the maximum response size. "datasetId": "A String", # [Required] The ID of the dataset containing this table. @@ -2552,11 +2582,17 @@

Method Details

"skipLeadingRows": "A String", # [Optional] The number of rows at the top of a sheet that BigQuery will skip when reading the data. The default value is 0. This property is useful if you have header rows that should be skipped. When autodetect is on, behavior is the following: * skipLeadingRows unspecified - Autodetect tries to detect headers in the first row. If they are not detected, the row is read as data. Otherwise data is read starting from the second row. * skipLeadingRows is 0 - Instructs autodetect that there are no headers and data should be read starting from the first row. * skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect headers in row N. If headers are not detected, row N is just skipped. Otherwise row N is used to extract column names for the detected schema. }, "hivePartitioningOptions": { # [Optional] Options to configure hive partitioning support. + "fields": [ # [Output-only] For permanent external tables, this field is populated with the hive partition keys in the order they were inferred. The types of the partition keys can be deduced by checking the table schema (which will include the partition keys). Not every API will populate this field in the output. For example, Tables.Get will populate it, but Tables.List will not contain this field. + "A String", + ], "mode": "A String", # [Optional] When set, what mode of hive partitioning to use when reading data. The following modes are supported. (1) AUTO: automatically infer partition key name(s) and type(s). (2) STRINGS: automatically infer partition key name(s). All types are interpreted as strings. (3) CUSTOM: partition key schema is encoded in the source URI prefix. Not all storage formats support hive partitioning. Requesting hive partitioning on an unsupported format will lead to an error. Currently supported types include: AVRO, CSV, JSON, ORC and Parquet. 
"requirePartitionFilter": True or False, # [Optional] If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified. Note that this field should only be true when creating a permanent external table or querying a temporary external table. Hive-partitioned loads with requirePartitionFilter explicitly set to true will fail. "sourceUriPrefix": "A String", # [Optional] When hive partition detection is requested, a common prefix for all source uris should be supplied. The prefix must end immediately before the partition key encoding begins. For example, consider files following this data layout. gs://bucket/path_to_table/dt=2019-01-01/country=BR/id=7/file.avro gs://bucket/path_to_table/dt=2018-12-31/country=CA/id=3/file.avro When hive partitioning is requested with either AUTO or STRINGS detection, the common prefix can be either of gs://bucket/path_to_table or gs://bucket/path_to_table/ (trailing slash does not matter). }, "ignoreUnknownValues": True or False, # [Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don't match any column names Google Cloud Bigtable: This setting is ignored. Google Cloud Datastore backups: This setting is ignored. Avro: This setting is ignored. + "jsonOptions": { # Additional properties to set if `sourceFormat` is set to `NEWLINE_DELIMITED_JSON`. + "encoding": "A String", # [Optional] The character encoding of the data. The supported values are UTF-8, UTF-16BE, UTF-16LE, UTF-32BE, and UTF-32LE. The default value is UTF-8. 
+ }, "maxBadRecords": 42, # [Optional] The maximum number of bad records that BigQuery can ignore when reading data. If the number of bad records exceeds this value, an invalid error is returned in the job result. This is only valid for CSV, JSON, and Google Sheets. The default value is 0, which requires that all records are valid. This setting is ignored for Google Cloud Bigtable, Google Cloud Datastore backups and Avro formats. "metadataCacheMode": "A String", # [Optional] Metadata Cache Mode for the table. Set this to enable caching of metadata from external data source. "objectMetadata": "A String", # ObjectMetadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the source_uris. If ObjectMetadata is set, source_format should be omitted. Currently SIMPLE is the only supported Object Metadata type. @@ -2677,15 +2713,15 @@

Method Details

"projectId": "A String", # [Optional] The ID of the project containing this dataset. }, "ddlTargetRoutine": { # The DDL target routine. Present only for CREATE/DROP FUNCTION/PROCEDURE queries. - "datasetId": "A String", # [Required] The ID of the dataset containing this routine. - "projectId": "A String", # [Required] The ID of the project containing this routine. - "routineId": "A String", # [Required] The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. + "datasetId": "A String", # Required. The ID of the dataset containing this routine. + "projectId": "A String", # Required. The ID of the project containing this routine. + "routineId": "A String", # Required. The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. }, "ddlTargetRowAccessPolicy": { # [Output only] [Preview] The DDL target row access policy. Present only for CREATE/DROP ROW ACCESS POLICY queries. - "datasetId": "A String", # [Required] The ID of the dataset containing this row access policy. - "policyId": "A String", # [Required] The ID of the row access policy. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. - "projectId": "A String", # [Required] The ID of the project containing this row access policy. - "tableId": "A String", # [Required] The ID of the table containing this row access policy. + "datasetId": "A String", # Required. The ID of the dataset containing this row access policy. + "policyId": "A String", # Required. The ID of the row access policy. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. + "projectId": "A String", # Required. The ID of the project containing this row access policy. + "tableId": "A String", # Required. The ID of the table containing this row access policy. 
}, "ddlTargetTable": { # [Output only] The DDL target table. Present only for CREATE/DROP TABLE/VIEW and DROP ALL ROW ACCESS POLICIES queries. "datasetId": "A String", # [Required] The ID of the dataset containing this table. @@ -2762,9 +2798,9 @@

Method Details

], "referencedRoutines": [ # [Output only] Referenced routines (persistent user-defined functions and stored procedures) for the job. { - "datasetId": "A String", # [Required] The ID of the dataset containing this routine. - "projectId": "A String", # [Required] The ID of the project containing this routine. - "routineId": "A String", # [Required] The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. + "datasetId": "A String", # Required. The ID of the dataset containing this routine. + "projectId": "A String", # Required. The ID of the project containing this routine. + "routineId": "A String", # Required. The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. }, ], "referencedTables": [ # [Output only] Referenced tables for the job. Queries that reference more than 50 tables will not have a complete list. @@ -2968,7 +3004,7 @@

Method Details

"copy": { # [Pick one] Copies a table. "createDisposition": "A String", # [Optional] Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. The default value is CREATE_IF_NEEDED. Creation, truncation and append actions occur as one atomic update upon job completion. "destinationEncryptionConfiguration": { # Custom encryption configuration (e.g., Cloud KMS keys). - "kmsKeyName": "A String", # [Optional] Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. + "kmsKeyName": "A String", # Optional. Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. }, "destinationExpirationTime": "", # [Optional] The time when the destination table expires. Expired tables will be deleted and their storage reclaimed. "destinationTable": { # [Required] The destination table @@ -3002,9 +3038,9 @@

Method Details

"fieldDelimiter": "A String", # [Optional] Delimiter to use between fields in the exported data. Default is ','. Not applicable when extracting models. "printHeader": true, # [Optional] Whether to print out a header row in the results. Default is true. Not applicable when extracting models. "sourceModel": { # A reference to the model being exported. - "datasetId": "A String", # [Required] The ID of the dataset containing this model. - "modelId": "A String", # [Required] The ID of the model. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters. - "projectId": "A String", # [Required] The ID of the project containing this model. + "datasetId": "A String", # Required. The ID of the dataset containing this model. + "modelId": "A String", # Required. The ID of the model. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters. + "projectId": "A String", # Required. The ID of the project containing this model. }, "sourceTable": { # A reference to the table being exported. "datasetId": "A String", # [Required] The ID of the dataset containing this table. @@ -3039,7 +3075,7 @@

Method Details

"A String", ], "destinationEncryptionConfiguration": { # Custom encryption configuration (e.g., Cloud KMS keys). - "kmsKeyName": "A String", # [Optional] Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. + "kmsKeyName": "A String", # Optional. Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. }, "destinationTable": { # [Required] The destination table to load the data into. "datasetId": "A String", # [Required] The ID of the dataset containing this table. @@ -3057,6 +3093,9 @@

Method Details

"encoding": "A String", # [Optional] The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties. "fieldDelimiter": "A String", # [Optional] The separator for fields in a CSV file. The separator can be any ISO-8859-1 single-byte character. To use a character in the range 128-255, you must encode the character as UTF8. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence "\t" to specify a tab separator. The default value is a comma (','). "hivePartitioningOptions": { # [Optional] Options to configure hive partitioning support. + "fields": [ # [Output-only] For permanent external tables, this field is populated with the hive partition keys in the order they were inferred. The types of the partition keys can be deduced by checking the table schema (which will include the partition keys). Not every API will populate this field in the output. For example, Tables.Get will populate it, but Tables.List will not contain this field. + "A String", + ], "mode": "A String", # [Optional] When set, what mode of hive partitioning to use when reading data. The following modes are supported. (1) AUTO: automatically infer partition key name(s) and type(s). (2) STRINGS: automatically infer partition key name(s). All types are interpreted as strings. (3) CUSTOM: partition key schema is encoded in the source URI prefix. Not all storage formats support hive partitioning. Requesting hive partitioning on an unsupported format will lead to an error. Currently supported types include: AVRO, CSV, JSON, ORC and Parquet. 
"requirePartitionFilter": True or False, # [Optional] If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified. Note that this field should only be true when creating a permanent external table or querying a temporary external table. Hive-partitioned loads with requirePartitionFilter explicitly set to true will fail. "sourceUriPrefix": "A String", # [Optional] When hive partition detection is requested, a common prefix for all source uris should be supplied. The prefix must end immediately before the partition key encoding begins. For example, consider files following this data layout. gs://bucket/path_to_table/dt=2019-01-01/country=BR/id=7/file.avro gs://bucket/path_to_table/dt=2018-12-31/country=CA/id=3/file.avro When hive partitioning is requested with either AUTO or STRINGS detection, the common prefix can be either of gs://bucket/path_to_table or gs://bucket/path_to_table/ (trailing slash does not matter). @@ -3152,7 +3191,7 @@

Method Details

"projectId": "A String", # [Optional] The ID of the project containing this dataset. }, "destinationEncryptionConfiguration": { # Custom encryption configuration (e.g., Cloud KMS keys). - "kmsKeyName": "A String", # [Optional] Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. + "kmsKeyName": "A String", # Optional. Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. }, "destinationTable": { # [Optional] Describes the table where the query results should be stored. If not present, a new table will be created to store the results. This property must be set for large results that exceed the maximum response size. "datasetId": "A String", # [Required] The ID of the dataset containing this table. @@ -3250,11 +3289,17 @@

Method Details

"skipLeadingRows": "A String", # [Optional] The number of rows at the top of a sheet that BigQuery will skip when reading the data. The default value is 0. This property is useful if you have header rows that should be skipped. When autodetect is on, behavior is the following: * skipLeadingRows unspecified - Autodetect tries to detect headers in the first row. If they are not detected, the row is read as data. Otherwise data is read starting from the second row. * skipLeadingRows is 0 - Instructs autodetect that there are no headers and data should be read starting from the first row. * skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect headers in row N. If headers are not detected, row N is just skipped. Otherwise row N is used to extract column names for the detected schema. }, "hivePartitioningOptions": { # [Optional] Options to configure hive partitioning support. + "fields": [ # [Output-only] For permanent external tables, this field is populated with the hive partition keys in the order they were inferred. The types of the partition keys can be deduced by checking the table schema (which will include the partition keys). Not every API will populate this field in the output. For example, Tables.Get will populate it, but Tables.List will not contain this field. + "A String", + ], "mode": "A String", # [Optional] When set, what mode of hive partitioning to use when reading data. The following modes are supported. (1) AUTO: automatically infer partition key name(s) and type(s). (2) STRINGS: automatically infer partition key name(s). All types are interpreted as strings. (3) CUSTOM: partition key schema is encoded in the source URI prefix. Not all storage formats support hive partitioning. Requesting hive partitioning on an unsupported format will lead to an error. Currently supported types include: AVRO, CSV, JSON, ORC and Parquet. 
"requirePartitionFilter": True or False, # [Optional] If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified. Note that this field should only be true when creating a permanent external table or querying a temporary external table. Hive-partitioned loads with requirePartitionFilter explicitly set to true will fail. "sourceUriPrefix": "A String", # [Optional] When hive partition detection is requested, a common prefix for all source uris should be supplied. The prefix must end immediately before the partition key encoding begins. For example, consider files following this data layout. gs://bucket/path_to_table/dt=2019-01-01/country=BR/id=7/file.avro gs://bucket/path_to_table/dt=2018-12-31/country=CA/id=3/file.avro When hive partitioning is requested with either AUTO or STRINGS detection, the common prefix can be either of gs://bucket/path_to_table or gs://bucket/path_to_table/ (trailing slash does not matter). }, "ignoreUnknownValues": True or False, # [Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don't match any column names Google Cloud Bigtable: This setting is ignored. Google Cloud Datastore backups: This setting is ignored. Avro: This setting is ignored. + "jsonOptions": { # Additional properties to set if `sourceFormat` is set to `NEWLINE_DELIMITED_JSON`. + "encoding": "A String", # [Optional] The character encoding of the data. The supported values are UTF-8, UTF-16BE, UTF-16LE, UTF-32BE, and UTF-32LE. The default value is UTF-8. 
+ }, "maxBadRecords": 42, # [Optional] The maximum number of bad records that BigQuery can ignore when reading data. If the number of bad records exceeds this value, an invalid error is returned in the job result. This is only valid for CSV, JSON, and Google Sheets. The default value is 0, which requires that all records are valid. This setting is ignored for Google Cloud Bigtable, Google Cloud Datastore backups and Avro formats. "metadataCacheMode": "A String", # [Optional] Metadata Cache Mode for the table. Set this to enable caching of metadata from external data source. "objectMetadata": "A String", # ObjectMetadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the source_uris. If ObjectMetadata is set, source_format should be omitted. Currently SIMPLE is the only supported Object Metadata type. @@ -3380,15 +3425,15 @@

Method Details

"projectId": "A String", # [Optional] The ID of the project containing this dataset. }, "ddlTargetRoutine": { # The DDL target routine. Present only for CREATE/DROP FUNCTION/PROCEDURE queries. - "datasetId": "A String", # [Required] The ID of the dataset containing this routine. - "projectId": "A String", # [Required] The ID of the project containing this routine. - "routineId": "A String", # [Required] The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. + "datasetId": "A String", # Required. The ID of the dataset containing this routine. + "projectId": "A String", # Required. The ID of the project containing this routine. + "routineId": "A String", # Required. The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. }, "ddlTargetRowAccessPolicy": { # [Output only] [Preview] The DDL target row access policy. Present only for CREATE/DROP ROW ACCESS POLICY queries. - "datasetId": "A String", # [Required] The ID of the dataset containing this row access policy. - "policyId": "A String", # [Required] The ID of the row access policy. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. - "projectId": "A String", # [Required] The ID of the project containing this row access policy. - "tableId": "A String", # [Required] The ID of the table containing this row access policy. + "datasetId": "A String", # Required. The ID of the dataset containing this row access policy. + "policyId": "A String", # Required. The ID of the row access policy. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. + "projectId": "A String", # Required. The ID of the project containing this row access policy. + "tableId": "A String", # Required. The ID of the table containing this row access policy. 
}, "ddlTargetTable": { # [Output only] The DDL target table. Present only for CREATE/DROP TABLE/VIEW and DROP ALL ROW ACCESS POLICIES queries. "datasetId": "A String", # [Required] The ID of the dataset containing this table. @@ -3465,9 +3510,9 @@

Method Details

], "referencedRoutines": [ # [Output only] Referenced routines (persistent user-defined functions and stored procedures) for the job. { - "datasetId": "A String", # [Required] The ID of the dataset containing this routine. - "projectId": "A String", # [Required] The ID of the project containing this routine. - "routineId": "A String", # [Required] The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. + "datasetId": "A String", # Required. The ID of the dataset containing this routine. + "projectId": "A String", # Required. The ID of the project containing this routine. + "routineId": "A String", # Required. The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. }, ], "referencedTables": [ # [Output only] Referenced tables for the job. Queries that reference more than 50 tables will not have a complete list. diff --git a/docs/dyn/bigquery_v2.models.html b/docs/dyn/bigquery_v2.models.html index 13f09ec3bf3..328fba97b37 100644 --- a/docs/dyn/bigquery_v2.models.html +++ b/docs/dyn/bigquery_v2.models.html @@ -127,7 +127,7 @@

Method Details

"defaultTrialId": "A String", # Output only. The default trial_id to use in TVFs when the trial_id is not passed in. For single-objective [hyperparameter tuning](/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview) models, this is the best trial ID. For multi-objective [hyperparameter tuning](/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview) models, this is the smallest trial ID among all Pareto optimal trials. "description": "A String", # Optional. A user-friendly description of this model. "encryptionConfiguration": { # Custom encryption configuration (e.g., Cloud KMS keys). This shows the encryption configuration of the model data while stored in BigQuery storage. This field can be used with PatchModel to update encryption key for an already encrypted model. - "kmsKeyName": "A String", # [Optional] Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. + "kmsKeyName": "A String", # Optional. Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. }, "etag": "A String", # Output only. A hash of this resource. "expirationTime": "A String", # Optional. The time when this model expires, in milliseconds since the epoch. If not present, the model will persist indefinitely. Expired models will be deleted and their storage reclaimed. The defaultTableExpirationMs property of the encapsulating dataset can be used to set a default expirationTime on newly created models. @@ -655,6 +655,7 @@

Method Details

}, "hparams": { # Options used in model training. # The hyperprameters selected for this trial. "adjustStepChanges": True or False, # If true, detect step changes and make data adjustment in the input time series. + "approxGlobalFeatureContrib": True or False, # Whether to use approximate feature contribution method in XGBoost model explanation for global explain. "autoArima": True or False, # Whether to enable auto ARIMA or not. "autoArimaMaxOrder": "A String", # The max value of the sum of non-seasonal p and q. "autoArimaMinOrder": "A String", # The min value of the sum of non-seasonal p and q. @@ -690,6 +691,7 @@

Method Details

"inputLabelColumns": [ # Name of input label columns in training data. "A String", ], + "instanceWeightColumn": "A String", # Name of the instance weight column for training data. This column isn't be used as a feature. "integratedGradientsNumSteps": "A String", # Number of integral steps for the integrated gradients explain method. "itemColumn": "A String", # Item column specified for matrix factorization models. "kmeansInitializationColumn": "A String", # The column used to provide the initial centroids for kmeans algorithm when kmeans_initialization_method is CUSTOM. @@ -724,6 +726,7 @@

Method Details

"preserveInputStructs": True or False, # Whether to preserve the input structs in output feature names. Suppose there is a struct A with field b. When false (default), the output feature name is A_b. When true, the output feature name is A.b. "sampledShapleyNumPaths": "A String", # Number of paths for the sampled Shapley explain method. "subsample": 3.14, # Subsample fraction of the training data to grow tree to prevent overfitting for boosted tree models. + "tfVersion": "A String", # Based on the selected TF version, the corresponding docker image is used to train external models. "timeSeriesDataColumn": "A String", # Column to be designated as time series data for ARIMA model. "timeSeriesIdColumn": "A String", # The time series id column that was used during ARIMA model training. "timeSeriesIdColumns": [ # The time series id columns that were used during ARIMA model training. @@ -736,6 +739,7 @@

Method Details

"userColumn": "A String", # User column specified for matrix factorization models. "walsAlpha": 3.14, # Hyperparameter for matrix factoration when implicit feedback type is specified. "warmStart": True or False, # Whether to train a model from the last checkpoint. + "xgboostVersion": "A String", # User-selected XGBoost versions for training of XGBoost models. }, "startTimeMs": "A String", # Starting time of the trial. "status": "A String", # The status of the trial. @@ -763,14 +767,20 @@

Method Details

"lastModifiedTime": "A String", # Output only. The time when this model was last modified, in millisecs since the epoch. "location": "A String", # Output only. The geographic location where the model resides. This value is inherited from the dataset. "modelReference": { # Required. Unique identifier for this model. - "datasetId": "A String", # [Required] The ID of the dataset containing this model. - "modelId": "A String", # [Required] The ID of the model. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters. - "projectId": "A String", # [Required] The ID of the project containing this model. + "datasetId": "A String", # Required. The ID of the dataset containing this model. + "modelId": "A String", # Required. The ID of the model. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters. + "projectId": "A String", # Required. The ID of the project containing this model. }, "modelType": "A String", # Output only. Type of the model resource. "optimalTrialIds": [ # Output only. For single-objective [hyperparameter tuning](/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview) models, it only contains the best trial. For multi-objective [hyperparameter tuning](/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview) models, it contains all Pareto optimal trials sorted by trial_id. "A String", ], + "remoteModelInfo": { # Remote Model Info # Output only. Remote model info + "connection": "A String", # Output only. Fully qualified name of the user-provided connection object of the remote model. Format: ```"projects/{project_id}/locations/{location_id}/connections/{connection_id}"``` + "endpoint": "A String", # Output only. The endpoint for remote model. + "maxBatchingRows": "A String", # Output only. Max number of rows in each batch sent to the remote service. 
If unset, the number of rows in each batch is set dynamically. + "remoteServiceType": "A String", # Output only. The remote service type for remote model. + }, "trainingRuns": [ # Information for all training runs in increasing order of start_time. { # Information about a single training query run for the model. "classLevelGlobalExplanations": [ # Output only. Global explanation contains the explanation of top features on the class level. Applies to classification models only. @@ -967,6 +977,7 @@

Method Details

"startTime": "A String", # Output only. The start time of this training run. "trainingOptions": { # Options used in model training. # Output only. Options that were used for this training run, includes user specified and default options that were used. "adjustStepChanges": True or False, # If true, detect step changes and make data adjustment in the input time series. + "approxGlobalFeatureContrib": True or False, # Whether to use approximate feature contribution method in XGBoost model explanation for global explain. "autoArima": True or False, # Whether to enable auto ARIMA or not. "autoArimaMaxOrder": "A String", # The max value of the sum of non-seasonal p and q. "autoArimaMinOrder": "A String", # The min value of the sum of non-seasonal p and q. @@ -1002,6 +1013,7 @@

Method Details

"inputLabelColumns": [ # Name of input label columns in training data. "A String", ], + "instanceWeightColumn": "A String", # Name of the instance weight column for training data. This column isn't be used as a feature. "integratedGradientsNumSteps": "A String", # Number of integral steps for the integrated gradients explain method. "itemColumn": "A String", # Item column specified for matrix factorization models. "kmeansInitializationColumn": "A String", # The column used to provide the initial centroids for kmeans algorithm when kmeans_initialization_method is CUSTOM. @@ -1036,6 +1048,7 @@

Method Details

"preserveInputStructs": True or False, # Whether to preserve the input structs in output feature names. Suppose there is a struct A with field b. When false (default), the output feature name is A_b. When true, the output feature name is A.b. "sampledShapleyNumPaths": "A String", # Number of paths for the sampled Shapley explain method. "subsample": 3.14, # Subsample fraction of the training data to grow tree to prevent overfitting for boosted tree models. + "tfVersion": "A String", # Based on the selected TF version, the corresponding docker image is used to train external models. "timeSeriesDataColumn": "A String", # Column to be designated as time series data for ARIMA model. "timeSeriesIdColumn": "A String", # The time series id column that was used during ARIMA model training. "timeSeriesIdColumns": [ # The time series id columns that were used during ARIMA model training. @@ -1048,6 +1061,7 @@

Method Details

"userColumn": "A String", # User column specified for matrix factorization models. "walsAlpha": 3.14, # Hyperparameter for matrix factoration when implicit feedback type is specified. "warmStart": True or False, # Whether to train a model from the last checkpoint. + "xgboostVersion": "A String", # User-selected XGBoost versions for training of XGBoost models. }, "trainingStartTime": "A String", # Output only. The start time of this training run, in milliseconds since epoch. "vertexAiModelId": "A String", # The model id in the [Vertex AI Model Registry](https://cloud.google.com/vertex-ai/docs/model-registry/introduction) for this training run. @@ -1078,7 +1092,7 @@

Method Details

"defaultTrialId": "A String", # Output only. The default trial_id to use in TVFs when the trial_id is not passed in. For single-objective [hyperparameter tuning](/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview) models, this is the best trial ID. For multi-objective [hyperparameter tuning](/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview) models, this is the smallest trial ID among all Pareto optimal trials. "description": "A String", # Optional. A user-friendly description of this model. "encryptionConfiguration": { # Custom encryption configuration (e.g., Cloud KMS keys). This shows the encryption configuration of the model data while stored in BigQuery storage. This field can be used with PatchModel to update encryption key for an already encrypted model. - "kmsKeyName": "A String", # [Optional] Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. + "kmsKeyName": "A String", # Optional. Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. }, "etag": "A String", # Output only. A hash of this resource. "expirationTime": "A String", # Optional. The time when this model expires, in milliseconds since the epoch. If not present, the model will persist indefinitely. Expired models will be deleted and their storage reclaimed. The defaultTableExpirationMs property of the encapsulating dataset can be used to set a default expirationTime on newly created models. @@ -1606,6 +1620,7 @@

Method Details

}, "hparams": { # Options used in model training. # The hyperprameters selected for this trial. "adjustStepChanges": True or False, # If true, detect step changes and make data adjustment in the input time series. + "approxGlobalFeatureContrib": True or False, # Whether to use approximate feature contribution method in XGBoost model explanation for global explain. "autoArima": True or False, # Whether to enable auto ARIMA or not. "autoArimaMaxOrder": "A String", # The max value of the sum of non-seasonal p and q. "autoArimaMinOrder": "A String", # The min value of the sum of non-seasonal p and q. @@ -1641,6 +1656,7 @@

Method Details

"inputLabelColumns": [ # Name of input label columns in training data. "A String", ], + "instanceWeightColumn": "A String", # Name of the instance weight column for training data. This column isn't be used as a feature. "integratedGradientsNumSteps": "A String", # Number of integral steps for the integrated gradients explain method. "itemColumn": "A String", # Item column specified for matrix factorization models. "kmeansInitializationColumn": "A String", # The column used to provide the initial centroids for kmeans algorithm when kmeans_initialization_method is CUSTOM. @@ -1675,6 +1691,7 @@

Method Details

"preserveInputStructs": True or False, # Whether to preserve the input structs in output feature names. Suppose there is a struct A with field b. When false (default), the output feature name is A_b. When true, the output feature name is A.b. "sampledShapleyNumPaths": "A String", # Number of paths for the sampled Shapley explain method. "subsample": 3.14, # Subsample fraction of the training data to grow tree to prevent overfitting for boosted tree models. + "tfVersion": "A String", # Based on the selected TF version, the corresponding docker image is used to train external models. "timeSeriesDataColumn": "A String", # Column to be designated as time series data for ARIMA model. "timeSeriesIdColumn": "A String", # The time series id column that was used during ARIMA model training. "timeSeriesIdColumns": [ # The time series id columns that were used during ARIMA model training. @@ -1687,6 +1704,7 @@

Method Details

"userColumn": "A String", # User column specified for matrix factorization models. "walsAlpha": 3.14, # Hyperparameter for matrix factoration when implicit feedback type is specified. "warmStart": True or False, # Whether to train a model from the last checkpoint. + "xgboostVersion": "A String", # User-selected XGBoost versions for training of XGBoost models. }, "startTimeMs": "A String", # Starting time of the trial. "status": "A String", # The status of the trial. @@ -1714,14 +1732,20 @@

Method Details

"lastModifiedTime": "A String", # Output only. The time when this model was last modified, in millisecs since the epoch. "location": "A String", # Output only. The geographic location where the model resides. This value is inherited from the dataset. "modelReference": { # Required. Unique identifier for this model. - "datasetId": "A String", # [Required] The ID of the dataset containing this model. - "modelId": "A String", # [Required] The ID of the model. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters. - "projectId": "A String", # [Required] The ID of the project containing this model. + "datasetId": "A String", # Required. The ID of the dataset containing this model. + "modelId": "A String", # Required. The ID of the model. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters. + "projectId": "A String", # Required. The ID of the project containing this model. }, "modelType": "A String", # Output only. Type of the model resource. "optimalTrialIds": [ # Output only. For single-objective [hyperparameter tuning](/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview) models, it only contains the best trial. For multi-objective [hyperparameter tuning](/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview) models, it contains all Pareto optimal trials sorted by trial_id. "A String", ], + "remoteModelInfo": { # Remote Model Info # Output only. Remote model info + "connection": "A String", # Output only. Fully qualified name of the user-provided connection object of the remote model. Format: ```"projects/{project_id}/locations/{location_id}/connections/{connection_id}"``` + "endpoint": "A String", # Output only. The endpoint for remote model. + "maxBatchingRows": "A String", # Output only. Max number of rows in each batch sent to the remote service. 
If unset, the number of rows in each batch is set dynamically. + "remoteServiceType": "A String", # Output only. The remote service type for remote model. + }, "trainingRuns": [ # Information for all training runs in increasing order of start_time. { # Information about a single training query run for the model. "classLevelGlobalExplanations": [ # Output only. Global explanation contains the explanation of top features on the class level. Applies to classification models only. @@ -1918,6 +1942,7 @@

Method Details

"startTime": "A String", # Output only. The start time of this training run. "trainingOptions": { # Options used in model training. # Output only. Options that were used for this training run, includes user specified and default options that were used. "adjustStepChanges": True or False, # If true, detect step changes and make data adjustment in the input time series. + "approxGlobalFeatureContrib": True or False, # Whether to use approximate feature contribution method in XGBoost model explanation for global explain. "autoArima": True or False, # Whether to enable auto ARIMA or not. "autoArimaMaxOrder": "A String", # The max value of the sum of non-seasonal p and q. "autoArimaMinOrder": "A String", # The min value of the sum of non-seasonal p and q. @@ -1953,6 +1978,7 @@

Method Details

"inputLabelColumns": [ # Name of input label columns in training data. "A String", ], + "instanceWeightColumn": "A String", # Name of the instance weight column for training data. This column isn't be used as a feature. "integratedGradientsNumSteps": "A String", # Number of integral steps for the integrated gradients explain method. "itemColumn": "A String", # Item column specified for matrix factorization models. "kmeansInitializationColumn": "A String", # The column used to provide the initial centroids for kmeans algorithm when kmeans_initialization_method is CUSTOM. @@ -1987,6 +2013,7 @@

Method Details

"preserveInputStructs": True or False, # Whether to preserve the input structs in output feature names. Suppose there is a struct A with field b. When false (default), the output feature name is A_b. When true, the output feature name is A.b. "sampledShapleyNumPaths": "A String", # Number of paths for the sampled Shapley explain method. "subsample": 3.14, # Subsample fraction of the training data to grow tree to prevent overfitting for boosted tree models. + "tfVersion": "A String", # Based on the selected TF version, the corresponding docker image is used to train external models. "timeSeriesDataColumn": "A String", # Column to be designated as time series data for ARIMA model. "timeSeriesIdColumn": "A String", # The time series id column that was used during ARIMA model training. "timeSeriesIdColumns": [ # The time series id columns that were used during ARIMA model training. @@ -1999,6 +2026,7 @@

Method Details

"userColumn": "A String", # User column specified for matrix factorization models. "walsAlpha": 3.14, # Hyperparameter for matrix factoration when implicit feedback type is specified. "warmStart": True or False, # Whether to train a model from the last checkpoint. + "xgboostVersion": "A String", # User-selected XGBoost versions for training of XGBoost models. }, "trainingStartTime": "A String", # Output only. The start time of this training run, in milliseconds since epoch. "vertexAiModelId": "A String", # The model id in the [Vertex AI Model Registry](https://cloud.google.com/vertex-ai/docs/model-registry/introduction) for this training run. @@ -2042,7 +2070,7 @@

Method Details

"defaultTrialId": "A String", # Output only. The default trial_id to use in TVFs when the trial_id is not passed in. For single-objective [hyperparameter tuning](/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview) models, this is the best trial ID. For multi-objective [hyperparameter tuning](/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview) models, this is the smallest trial ID among all Pareto optimal trials. "description": "A String", # Optional. A user-friendly description of this model. "encryptionConfiguration": { # Custom encryption configuration (e.g., Cloud KMS keys). This shows the encryption configuration of the model data while stored in BigQuery storage. This field can be used with PatchModel to update encryption key for an already encrypted model. - "kmsKeyName": "A String", # [Optional] Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. + "kmsKeyName": "A String", # Optional. Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. }, "etag": "A String", # Output only. A hash of this resource. "expirationTime": "A String", # Optional. The time when this model expires, in milliseconds since the epoch. If not present, the model will persist indefinitely. Expired models will be deleted and their storage reclaimed. The defaultTableExpirationMs property of the encapsulating dataset can be used to set a default expirationTime on newly created models. @@ -2570,6 +2598,7 @@

Method Details

}, "hparams": { # Options used in model training. # The hyperprameters selected for this trial. "adjustStepChanges": True or False, # If true, detect step changes and make data adjustment in the input time series. + "approxGlobalFeatureContrib": True or False, # Whether to use approximate feature contribution method in XGBoost model explanation for global explain. "autoArima": True or False, # Whether to enable auto ARIMA or not. "autoArimaMaxOrder": "A String", # The max value of the sum of non-seasonal p and q. "autoArimaMinOrder": "A String", # The min value of the sum of non-seasonal p and q. @@ -2605,6 +2634,7 @@

Method Details

"inputLabelColumns": [ # Name of input label columns in training data. "A String", ], + "instanceWeightColumn": "A String", # Name of the instance weight column for training data. This column isn't be used as a feature. "integratedGradientsNumSteps": "A String", # Number of integral steps for the integrated gradients explain method. "itemColumn": "A String", # Item column specified for matrix factorization models. "kmeansInitializationColumn": "A String", # The column used to provide the initial centroids for kmeans algorithm when kmeans_initialization_method is CUSTOM. @@ -2639,6 +2669,7 @@

Method Details

"preserveInputStructs": True or False, # Whether to preserve the input structs in output feature names. Suppose there is a struct A with field b. When false (default), the output feature name is A_b. When true, the output feature name is A.b. "sampledShapleyNumPaths": "A String", # Number of paths for the sampled Shapley explain method. "subsample": 3.14, # Subsample fraction of the training data to grow tree to prevent overfitting for boosted tree models. + "tfVersion": "A String", # Based on the selected TF version, the corresponding docker image is used to train external models. "timeSeriesDataColumn": "A String", # Column to be designated as time series data for ARIMA model. "timeSeriesIdColumn": "A String", # The time series id column that was used during ARIMA model training. "timeSeriesIdColumns": [ # The time series id columns that were used during ARIMA model training. @@ -2651,6 +2682,7 @@

Method Details

"userColumn": "A String", # User column specified for matrix factorization models. "walsAlpha": 3.14, # Hyperparameter for matrix factoration when implicit feedback type is specified. "warmStart": True or False, # Whether to train a model from the last checkpoint. + "xgboostVersion": "A String", # User-selected XGBoost versions for training of XGBoost models. }, "startTimeMs": "A String", # Starting time of the trial. "status": "A String", # The status of the trial. @@ -2678,14 +2710,20 @@

Method Details

"lastModifiedTime": "A String", # Output only. The time when this model was last modified, in millisecs since the epoch. "location": "A String", # Output only. The geographic location where the model resides. This value is inherited from the dataset. "modelReference": { # Required. Unique identifier for this model. - "datasetId": "A String", # [Required] The ID of the dataset containing this model. - "modelId": "A String", # [Required] The ID of the model. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters. - "projectId": "A String", # [Required] The ID of the project containing this model. + "datasetId": "A String", # Required. The ID of the dataset containing this model. + "modelId": "A String", # Required. The ID of the model. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters. + "projectId": "A String", # Required. The ID of the project containing this model. }, "modelType": "A String", # Output only. Type of the model resource. "optimalTrialIds": [ # Output only. For single-objective [hyperparameter tuning](/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview) models, it only contains the best trial. For multi-objective [hyperparameter tuning](/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview) models, it contains all Pareto optimal trials sorted by trial_id. "A String", ], + "remoteModelInfo": { # Remote Model Info # Output only. Remote model info + "connection": "A String", # Output only. Fully qualified name of the user-provided connection object of the remote model. Format: ```"projects/{project_id}/locations/{location_id}/connections/{connection_id}"``` + "endpoint": "A String", # Output only. The endpoint for remote model. + "maxBatchingRows": "A String", # Output only. Max number of rows in each batch sent to the remote service. 
If unset, the number of rows in each batch is set dynamically. + "remoteServiceType": "A String", # Output only. The remote service type for remote model. + }, "trainingRuns": [ # Information for all training runs in increasing order of start_time. { # Information about a single training query run for the model. "classLevelGlobalExplanations": [ # Output only. Global explanation contains the explanation of top features on the class level. Applies to classification models only. @@ -2882,6 +2920,7 @@

Method Details

"startTime": "A String", # Output only. The start time of this training run. "trainingOptions": { # Options used in model training. # Output only. Options that were used for this training run, includes user specified and default options that were used. "adjustStepChanges": True or False, # If true, detect step changes and make data adjustment in the input time series. + "approxGlobalFeatureContrib": True or False, # Whether to use approximate feature contribution method in XGBoost model explanation for global explain. "autoArima": True or False, # Whether to enable auto ARIMA or not. "autoArimaMaxOrder": "A String", # The max value of the sum of non-seasonal p and q. "autoArimaMinOrder": "A String", # The min value of the sum of non-seasonal p and q. @@ -2917,6 +2956,7 @@

Method Details

"inputLabelColumns": [ # Name of input label columns in training data. "A String", ], + "instanceWeightColumn": "A String", # Name of the instance weight column for training data. This column isn't used as a feature. "integratedGradientsNumSteps": "A String", # Number of integral steps for the integrated gradients explain method. "itemColumn": "A String", # Item column specified for matrix factorization models. "kmeansInitializationColumn": "A String", # The column used to provide the initial centroids for kmeans algorithm when kmeans_initialization_method is CUSTOM. @@ -2951,6 +2991,7 @@

Method Details

"preserveInputStructs": True or False, # Whether to preserve the input structs in output feature names. Suppose there is a struct A with field b. When false (default), the output feature name is A_b. When true, the output feature name is A.b. "sampledShapleyNumPaths": "A String", # Number of paths for the sampled Shapley explain method. "subsample": 3.14, # Subsample fraction of the training data to grow tree to prevent overfitting for boosted tree models. + "tfVersion": "A String", # Based on the selected TF version, the corresponding docker image is used to train external models. "timeSeriesDataColumn": "A String", # Column to be designated as time series data for ARIMA model. "timeSeriesIdColumn": "A String", # The time series id column that was used during ARIMA model training. "timeSeriesIdColumns": [ # The time series id columns that were used during ARIMA model training. @@ -2963,6 +3004,7 @@

Method Details

"userColumn": "A String", # User column specified for matrix factorization models. "walsAlpha": 3.14, # Hyperparameter for matrix factoration when implicit feedback type is specified. "warmStart": True or False, # Whether to train a model from the last checkpoint. + "xgboostVersion": "A String", # User-selected XGBoost versions for training of XGBoost models. }, "trainingStartTime": "A String", # Output only. The start time of this training run, in milliseconds since epoch. "vertexAiModelId": "A String", # The model id in the [Vertex AI Model Registry](https://cloud.google.com/vertex-ai/docs/model-registry/introduction) for this training run. @@ -2981,7 +3023,7 @@

Method Details

"defaultTrialId": "A String", # Output only. The default trial_id to use in TVFs when the trial_id is not passed in. For single-objective [hyperparameter tuning](/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview) models, this is the best trial ID. For multi-objective [hyperparameter tuning](/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview) models, this is the smallest trial ID among all Pareto optimal trials. "description": "A String", # Optional. A user-friendly description of this model. "encryptionConfiguration": { # Custom encryption configuration (e.g., Cloud KMS keys). This shows the encryption configuration of the model data while stored in BigQuery storage. This field can be used with PatchModel to update encryption key for an already encrypted model. - "kmsKeyName": "A String", # [Optional] Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. + "kmsKeyName": "A String", # Optional. Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. }, "etag": "A String", # Output only. A hash of this resource. "expirationTime": "A String", # Optional. The time when this model expires, in milliseconds since the epoch. If not present, the model will persist indefinitely. Expired models will be deleted and their storage reclaimed. The defaultTableExpirationMs property of the encapsulating dataset can be used to set a default expirationTime on newly created models. @@ -3509,6 +3551,7 @@

Method Details

}, "hparams": { # Options used in model training. # The hyperprameters selected for this trial. "adjustStepChanges": True or False, # If true, detect step changes and make data adjustment in the input time series. + "approxGlobalFeatureContrib": True or False, # Whether to use approximate feature contribution method in XGBoost model explanation for global explain. "autoArima": True or False, # Whether to enable auto ARIMA or not. "autoArimaMaxOrder": "A String", # The max value of the sum of non-seasonal p and q. "autoArimaMinOrder": "A String", # The min value of the sum of non-seasonal p and q. @@ -3544,6 +3587,7 @@

Method Details

"inputLabelColumns": [ # Name of input label columns in training data. "A String", ], + "instanceWeightColumn": "A String", # Name of the instance weight column for training data. This column isn't used as a feature. "integratedGradientsNumSteps": "A String", # Number of integral steps for the integrated gradients explain method. "itemColumn": "A String", # Item column specified for matrix factorization models. "kmeansInitializationColumn": "A String", # The column used to provide the initial centroids for kmeans algorithm when kmeans_initialization_method is CUSTOM. @@ -3578,6 +3622,7 @@

Method Details

"preserveInputStructs": True or False, # Whether to preserve the input structs in output feature names. Suppose there is a struct A with field b. When false (default), the output feature name is A_b. When true, the output feature name is A.b. "sampledShapleyNumPaths": "A String", # Number of paths for the sampled Shapley explain method. "subsample": 3.14, # Subsample fraction of the training data to grow tree to prevent overfitting for boosted tree models. + "tfVersion": "A String", # Based on the selected TF version, the corresponding docker image is used to train external models. "timeSeriesDataColumn": "A String", # Column to be designated as time series data for ARIMA model. "timeSeriesIdColumn": "A String", # The time series id column that was used during ARIMA model training. "timeSeriesIdColumns": [ # The time series id columns that were used during ARIMA model training. @@ -3590,6 +3635,7 @@

Method Details

"userColumn": "A String", # User column specified for matrix factorization models. "walsAlpha": 3.14, # Hyperparameter for matrix factoration when implicit feedback type is specified. "warmStart": True or False, # Whether to train a model from the last checkpoint. + "xgboostVersion": "A String", # User-selected XGBoost versions for training of XGBoost models. }, "startTimeMs": "A String", # Starting time of the trial. "status": "A String", # The status of the trial. @@ -3617,14 +3663,20 @@

Method Details

"lastModifiedTime": "A String", # Output only. The time when this model was last modified, in millisecs since the epoch. "location": "A String", # Output only. The geographic location where the model resides. This value is inherited from the dataset. "modelReference": { # Required. Unique identifier for this model. - "datasetId": "A String", # [Required] The ID of the dataset containing this model. - "modelId": "A String", # [Required] The ID of the model. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters. - "projectId": "A String", # [Required] The ID of the project containing this model. + "datasetId": "A String", # Required. The ID of the dataset containing this model. + "modelId": "A String", # Required. The ID of the model. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters. + "projectId": "A String", # Required. The ID of the project containing this model. }, "modelType": "A String", # Output only. Type of the model resource. "optimalTrialIds": [ # Output only. For single-objective [hyperparameter tuning](/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview) models, it only contains the best trial. For multi-objective [hyperparameter tuning](/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview) models, it contains all Pareto optimal trials sorted by trial_id. "A String", ], + "remoteModelInfo": { # Remote Model Info # Output only. Remote model info + "connection": "A String", # Output only. Fully qualified name of the user-provided connection object of the remote model. Format: ```"projects/{project_id}/locations/{location_id}/connections/{connection_id}"``` + "endpoint": "A String", # Output only. The endpoint for remote model. + "maxBatchingRows": "A String", # Output only. Max number of rows in each batch sent to the remote service. 
If unset, the number of rows in each batch is set dynamically. + "remoteServiceType": "A String", # Output only. The remote service type for remote model. + }, "trainingRuns": [ # Information for all training runs in increasing order of start_time. { # Information about a single training query run for the model. "classLevelGlobalExplanations": [ # Output only. Global explanation contains the explanation of top features on the class level. Applies to classification models only. @@ -3821,6 +3873,7 @@

Method Details

"startTime": "A String", # Output only. The start time of this training run. "trainingOptions": { # Options used in model training. # Output only. Options that were used for this training run, includes user specified and default options that were used. "adjustStepChanges": True or False, # If true, detect step changes and make data adjustment in the input time series. + "approxGlobalFeatureContrib": True or False, # Whether to use approximate feature contribution method in XGBoost model explanation for global explain. "autoArima": True or False, # Whether to enable auto ARIMA or not. "autoArimaMaxOrder": "A String", # The max value of the sum of non-seasonal p and q. "autoArimaMinOrder": "A String", # The min value of the sum of non-seasonal p and q. @@ -3856,6 +3909,7 @@

Method Details

"inputLabelColumns": [ # Name of input label columns in training data. "A String", ], + "instanceWeightColumn": "A String", # Name of the instance weight column for training data. This column isn't used as a feature. "integratedGradientsNumSteps": "A String", # Number of integral steps for the integrated gradients explain method. "itemColumn": "A String", # Item column specified for matrix factorization models. "kmeansInitializationColumn": "A String", # The column used to provide the initial centroids for kmeans algorithm when kmeans_initialization_method is CUSTOM. @@ -3890,6 +3944,7 @@

Method Details

"preserveInputStructs": True or False, # Whether to preserve the input structs in output feature names. Suppose there is a struct A with field b. When false (default), the output feature name is A_b. When true, the output feature name is A.b. "sampledShapleyNumPaths": "A String", # Number of paths for the sampled Shapley explain method. "subsample": 3.14, # Subsample fraction of the training data to grow tree to prevent overfitting for boosted tree models. + "tfVersion": "A String", # Based on the selected TF version, the corresponding docker image is used to train external models. "timeSeriesDataColumn": "A String", # Column to be designated as time series data for ARIMA model. "timeSeriesIdColumn": "A String", # The time series id column that was used during ARIMA model training. "timeSeriesIdColumns": [ # The time series id columns that were used during ARIMA model training. @@ -3902,6 +3957,7 @@

Method Details

"userColumn": "A String", # User column specified for matrix factorization models. "walsAlpha": 3.14, # Hyperparameter for matrix factoration when implicit feedback type is specified. "warmStart": True or False, # Whether to train a model from the last checkpoint. + "xgboostVersion": "A String", # User-selected XGBoost versions for training of XGBoost models. }, "trainingStartTime": "A String", # Output only. The start time of this training run, in milliseconds since epoch. "vertexAiModelId": "A String", # The model id in the [Vertex AI Model Registry](https://cloud.google.com/vertex-ai/docs/model-registry/introduction) for this training run. diff --git a/docs/dyn/bigquery_v2.routines.html b/docs/dyn/bigquery_v2.routines.html index 9af1c46ae66..aae1690c2bd 100644 --- a/docs/dyn/bigquery_v2.routines.html +++ b/docs/dyn/bigquery_v2.routines.html @@ -186,9 +186,9 @@

Method Details

"typeKind": "A String", # Required. The top level type of this field. Can be any GoogleSQL data type (e.g., "INT64", "DATE", "ARRAY"). }, "routineReference": { # Required. Reference describing the ID of this routine. - "datasetId": "A String", # [Required] The ID of the dataset containing this routine. - "projectId": "A String", # [Required] The ID of the project containing this routine. - "routineId": "A String", # [Required] The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. + "datasetId": "A String", # Required. The ID of the dataset containing this routine. + "projectId": "A String", # Required. The ID of the project containing this routine. + "routineId": "A String", # Required. The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. }, "routineType": "A String", # Required. The type of routine. "sparkOptions": { # Options for a user-defined Spark routine. # Optional. Spark specific options. @@ -288,9 +288,9 @@

Method Details

"typeKind": "A String", # Required. The top level type of this field. Can be any GoogleSQL data type (e.g., "INT64", "DATE", "ARRAY"). }, "routineReference": { # Required. Reference describing the ID of this routine. - "datasetId": "A String", # [Required] The ID of the dataset containing this routine. - "projectId": "A String", # [Required] The ID of the project containing this routine. - "routineId": "A String", # [Required] The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. + "datasetId": "A String", # Required. The ID of the dataset containing this routine. + "projectId": "A String", # Required. The ID of the project containing this routine. + "routineId": "A String", # Required. The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. }, "routineType": "A String", # Required. The type of routine. "sparkOptions": { # Options for a user-defined Spark routine. # Optional. Spark specific options. @@ -383,9 +383,9 @@

Method Details

"typeKind": "A String", # Required. The top level type of this field. Can be any GoogleSQL data type (e.g., "INT64", "DATE", "ARRAY"). }, "routineReference": { # Required. Reference describing the ID of this routine. - "datasetId": "A String", # [Required] The ID of the dataset containing this routine. - "projectId": "A String", # [Required] The ID of the project containing this routine. - "routineId": "A String", # [Required] The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. + "datasetId": "A String", # Required. The ID of the dataset containing this routine. + "projectId": "A String", # Required. The ID of the project containing this routine. + "routineId": "A String", # Required. The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. }, "routineType": "A String", # Required. The type of routine. "sparkOptions": { # Options for a user-defined Spark routine. # Optional. Spark specific options. @@ -421,7 +421,7 @@

Method Details

Args: projectId: string, Required. Project ID of the routines to list (required) datasetId: string, Required. Dataset ID of the routines to list (required) - filter: string, If set, then only the Routines matching this filter are returned. The current supported form is either "routine_type:" or "routineType:", where is a RoutineType enum. Example: "routineType:SCALAR_FUNCTION". + filter: string, If set, then only the Routines matching this filter are returned. The supported format is `routineType:{RoutineType}`, where `{RoutineType}` is a RoutineType enum. For example: `routineType:SCALAR_FUNCTION`. maxResults: integer, The maximum number of results to return in a single response page. Leverage the page tokens to iterate through the entire collection. pageToken: string, Page token, returned by a previous call, to request the next page of results readMask: string, If set, then only the Routine fields in the field mask, as well as project_id, dataset_id and routine_id, are returned in the response. If unset, then the following Routine fields are returned: etag, project_id, dataset_id, routine_id, routine_type, creation_time, last_modified_time, and language. @@ -493,9 +493,9 @@

Method Details

"typeKind": "A String", # Required. The top level type of this field. Can be any GoogleSQL data type (e.g., "INT64", "DATE", "ARRAY"). }, "routineReference": { # Required. Reference describing the ID of this routine. - "datasetId": "A String", # [Required] The ID of the dataset containing this routine. - "projectId": "A String", # [Required] The ID of the project containing this routine. - "routineId": "A String", # [Required] The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. + "datasetId": "A String", # Required. The ID of the dataset containing this routine. + "projectId": "A String", # Required. The ID of the project containing this routine. + "routineId": "A String", # Required. The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. }, "routineType": "A String", # Required. The type of routine. "sparkOptions": { # Options for a user-defined Spark routine. # Optional. Spark specific options. @@ -612,9 +612,9 @@

Method Details

"typeKind": "A String", # Required. The top level type of this field. Can be any GoogleSQL data type (e.g., "INT64", "DATE", "ARRAY"). }, "routineReference": { # Required. Reference describing the ID of this routine. - "datasetId": "A String", # [Required] The ID of the dataset containing this routine. - "projectId": "A String", # [Required] The ID of the project containing this routine. - "routineId": "A String", # [Required] The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. + "datasetId": "A String", # Required. The ID of the dataset containing this routine. + "projectId": "A String", # Required. The ID of the project containing this routine. + "routineId": "A String", # Required. The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. }, "routineType": "A String", # Required. The type of routine. "sparkOptions": { # Options for a user-defined Spark routine. # Optional. Spark specific options. @@ -707,9 +707,9 @@

Method Details

"typeKind": "A String", # Required. The top level type of this field. Can be any GoogleSQL data type (e.g., "INT64", "DATE", "ARRAY"). }, "routineReference": { # Required. Reference describing the ID of this routine. - "datasetId": "A String", # [Required] The ID of the dataset containing this routine. - "projectId": "A String", # [Required] The ID of the project containing this routine. - "routineId": "A String", # [Required] The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. + "datasetId": "A String", # Required. The ID of the dataset containing this routine. + "projectId": "A String", # Required. The ID of the project containing this routine. + "routineId": "A String", # Required. The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. }, "routineType": "A String", # Required. The type of routine. "sparkOptions": { # Options for a user-defined Spark routine. # Optional. Spark specific options. diff --git a/docs/dyn/bigquery_v2.rowAccessPolicies.html b/docs/dyn/bigquery_v2.rowAccessPolicies.html index e9b051adc4f..3b82494542a 100644 --- a/docs/dyn/bigquery_v2.rowAccessPolicies.html +++ b/docs/dyn/bigquery_v2.rowAccessPolicies.html @@ -173,10 +173,10 @@

Method Details

"filterPredicate": "A String", # Required. A SQL boolean expression that represents the rows defined by this row access policy, similar to the boolean expression in a WHERE clause of a SELECT query on a table. References to other tables, routines, and temporary functions are not supported. Examples: region="EU" date_field = CAST('2019-9-27' as DATE) nullable_field is not NULL numeric_field BETWEEN 1.0 AND 5.0 "lastModifiedTime": "A String", # Output only. The time when this row access policy was last modified, in milliseconds since the epoch. "rowAccessPolicyReference": { # Required. Reference describing the ID of this row access policy. - "datasetId": "A String", # [Required] The ID of the dataset containing this row access policy. - "policyId": "A String", # [Required] The ID of the row access policy. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. - "projectId": "A String", # [Required] The ID of the project containing this row access policy. - "tableId": "A String", # [Required] The ID of the table containing this row access policy. + "datasetId": "A String", # Required. The ID of the dataset containing this row access policy. + "policyId": "A String", # Required. The ID of the row access policy. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters. + "projectId": "A String", # Required. The ID of the project containing this row access policy. + "tableId": "A String", # Required. The ID of the table containing this row access policy. }, }, ], diff --git a/docs/dyn/bigquery_v2.tables.html b/docs/dyn/bigquery_v2.tables.html index 179142b4187..0a80c9358a2 100644 --- a/docs/dyn/bigquery_v2.tables.html +++ b/docs/dyn/bigquery_v2.tables.html @@ -162,7 +162,7 @@

Method Details

"defaultRoundingMode": "A String", # [Output-only] The default rounding mode of the table. "description": "A String", # [Optional] A user-friendly description of this table. "encryptionConfiguration": { # Custom encryption configuration (e.g., Cloud KMS keys). - "kmsKeyName": "A String", # [Optional] Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. + "kmsKeyName": "A String", # Optional. Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. }, "etag": "A String", # [Output-only] A hash of the table metadata. Used to ensure there were no concurrent modifications to the resource when attempting an update. Not guaranteed to change when the table contents or the fields numRows, numBytes, numLongTermBytes or lastModifiedTime change. "expirationTime": "A String", # [Optional] The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed. The defaultTableExpirationMs property of the encapsulating dataset can be used to set a default expirationTime on newly created tables. @@ -213,11 +213,17 @@

Method Details

"skipLeadingRows": "A String", # [Optional] The number of rows at the top of a sheet that BigQuery will skip when reading the data. The default value is 0. This property is useful if you have header rows that should be skipped. When autodetect is on, behavior is the following: * skipLeadingRows unspecified - Autodetect tries to detect headers in the first row. If they are not detected, the row is read as data. Otherwise data is read starting from the second row. * skipLeadingRows is 0 - Instructs autodetect that there are no headers and data should be read starting from the first row. * skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect headers in row N. If headers are not detected, row N is just skipped. Otherwise row N is used to extract column names for the detected schema. }, "hivePartitioningOptions": { # [Optional] Options to configure hive partitioning support. + "fields": [ # [Output-only] For permanent external tables, this field is populated with the hive partition keys in the order they were inferred. The types of the partition keys can be deduced by checking the table schema (which will include the partition keys). Not every API will populate this field in the output. For example, Tables.Get will populate it, but Tables.List will not contain this field. + "A String", + ], "mode": "A String", # [Optional] When set, what mode of hive partitioning to use when reading data. The following modes are supported. (1) AUTO: automatically infer partition key name(s) and type(s). (2) STRINGS: automatically infer partition key name(s). All types are interpreted as strings. (3) CUSTOM: partition key schema is encoded in the source URI prefix. Not all storage formats support hive partitioning. Requesting hive partitioning on an unsupported format will lead to an error. Currently supported types include: AVRO, CSV, JSON, ORC and Parquet. 
"requirePartitionFilter": True or False, # [Optional] If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified. Note that this field should only be true when creating a permanent external table or querying a temporary external table. Hive-partitioned loads with requirePartitionFilter explicitly set to true will fail. "sourceUriPrefix": "A String", # [Optional] When hive partition detection is requested, a common prefix for all source uris should be supplied. The prefix must end immediately before the partition key encoding begins. For example, consider files following this data layout. gs://bucket/path_to_table/dt=2019-01-01/country=BR/id=7/file.avro gs://bucket/path_to_table/dt=2018-12-31/country=CA/id=3/file.avro When hive partitioning is requested with either AUTO or STRINGS detection, the common prefix can be either of gs://bucket/path_to_table or gs://bucket/path_to_table/ (trailing slash does not matter). }, "ignoreUnknownValues": True or False, # [Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don't match any column names Google Cloud Bigtable: This setting is ignored. Google Cloud Datastore backups: This setting is ignored. Avro: This setting is ignored. + "jsonOptions": { # Additional properties to set if `sourceFormat` is set to `NEWLINE_DELIMITED_JSON`. + "encoding": "A String", # [Optional] The character encoding of the data. The supported values are UTF-8, UTF-16BE, UTF-16LE, UTF-32BE, and UTF-32LE. The default value is UTF-8. 
+ }, "maxBadRecords": 42, # [Optional] The maximum number of bad records that BigQuery can ignore when reading data. If the number of bad records exceeds this value, an invalid error is returned in the job result. This is only valid for CSV, JSON, and Google Sheets. The default value is 0, which requires that all records are valid. This setting is ignored for Google Cloud Bigtable, Google Cloud Datastore backups and Avro formats. "metadataCacheMode": "A String", # [Optional] Metadata Cache Mode for the table. Set this to enable caching of metadata from external data source. "objectMetadata": "A String", # ObjectMetadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the source_uris. If ObjectMetadata is set, source_format should be omitted. Currently SIMPLE is the only supported Object Metadata type. @@ -312,18 +318,18 @@

Method Details

}, ], }, + "numActiveLogicalBytes": "A String", # [Output-only] Number of logical bytes that are less than 90 days old. + "numActivePhysicalBytes": "A String", # [Output-only] Number of physical bytes less than 90 days old. This data is not kept in real time, and might be delayed by a few seconds to a few minutes. "numBytes": "A String", # [Output-only] The size of this table in bytes, excluding any data in the streaming buffer. "numLongTermBytes": "A String", # [Output-only] The number of bytes in the table that are considered "long-term storage". + "numLongTermLogicalBytes": "A String", # [Output-only] Number of logical bytes that are more than 90 days old. + "numLongTermPhysicalBytes": "A String", # [Output-only] Number of physical bytes more than 90 days old. This data is not kept in real time, and might be delayed by a few seconds to a few minutes. + "numPartitions": "A String", # [Output-only] The number of partitions present in the table or materialized view. This data is not kept in real time, and might be delayed by a few seconds to a few minutes. "numPhysicalBytes": "A String", # [Output-only] [TrustedTester] The physical size of this table in bytes, excluding any data in the streaming buffer. This includes compression and storage used for time travel. "numRows": "A String", # [Output-only] The number of rows of data in this table, excluding any data in the streaming buffer. - "num_active_logical_bytes": "A String", # [Output-only] Number of logical bytes that are less than 90 days old. - "num_active_physical_bytes": "A String", # [Output-only] Number of physical bytes less than 90 days old. This data is not kept in real time, and might be delayed by a few seconds to a few minutes. - "num_long_term_logical_bytes": "A String", # [Output-only] Number of logical bytes that are more than 90 days old. - "num_long_term_physical_bytes": "A String", # [Output-only] Number of physical bytes more than 90 days old. 
This data is not kept in real time, and might be delayed by a few seconds to a few minutes. - "num_partitions": "A String", # [Output-only] The number of partitions present in the table or materialized view. This data is not kept in real time, and might be delayed by a few seconds to a few minutes. - "num_time_travel_physical_bytes": "A String", # [Output-only] Number of physical bytes used by time travel storage (deleted or changed data). This data is not kept in real time, and might be delayed by a few seconds to a few minutes. - "num_total_logical_bytes": "A String", # [Output-only] Total number of logical bytes in the table or materialized view. - "num_total_physical_bytes": "A String", # [Output-only] The physical size of this table in bytes. This also includes storage used for time travel. This data is not kept in real time, and might be delayed by a few seconds to a few minutes. + "numTimeTravelPhysicalBytes": "A String", # [Output-only] Number of physical bytes used by time travel storage (deleted or changed data). This data is not kept in real time, and might be delayed by a few seconds to a few minutes. + "numTotalLogicalBytes": "A String", # [Output-only] Total number of logical bytes in the table or materialized view. + "numTotalPhysicalBytes": "A String", # [Output-only] The physical size of this table in bytes. This also includes storage used for time travel. This data is not kept in real time, and might be delayed by a few seconds to a few minutes. "rangePartitioning": { # [TrustedTester] Range partitioning specification for this table. Only one of timePartitioning and rangePartitioning should be specified. "field": "A String", # [TrustedTester] [Required] The table is partitioned by this field. The field must be a top-level NULLABLE/REQUIRED field. The only supported type is INTEGER/INT64. "range": { # [TrustedTester] [Required] Defines the ranges for range partitioning. @@ -376,6 +382,29 @@

Method Details

"estimatedRows": "A String", # [Output-only] A lower-bound estimate of the number of rows currently in the streaming buffer. "oldestEntryTime": "A String", # [Output-only] Contains the timestamp of the oldest entry in the streaming buffer, in milliseconds since the epoch, if the streaming buffer is available. }, + "tableConstraints": { # [Optional] The table constraints on the table. + "foreignKeys": [ # [Optional] The foreign keys of the tables. + { + "columnReferences": [ + { + "referencedColumn": "A String", + "referencingColumn": "A String", + }, + ], + "name": "A String", + "referencedTable": { + "datasetId": "A String", + "projectId": "A String", + "tableId": "A String", + }, + }, + ], + "primaryKey": { # [Optional] The primary key of the table. + "columns": [ + "A String", + ], + }, + }, "tableReference": { # [Required] Reference describing the ID of this table. "datasetId": "A String", # [Required] The ID of the dataset containing this table. "projectId": "A String", # [Required] The ID of the project containing this table. @@ -483,7 +512,7 @@

Method Details

"defaultRoundingMode": "A String", # [Output-only] The default rounding mode of the table. "description": "A String", # [Optional] A user-friendly description of this table. "encryptionConfiguration": { # Custom encryption configuration (e.g., Cloud KMS keys). - "kmsKeyName": "A String", # [Optional] Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. + "kmsKeyName": "A String", # Optional. Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. }, "etag": "A String", # [Output-only] A hash of the table metadata. Used to ensure there were no concurrent modifications to the resource when attempting an update. Not guaranteed to change when the table contents or the fields numRows, numBytes, numLongTermBytes or lastModifiedTime change. "expirationTime": "A String", # [Optional] The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed. The defaultTableExpirationMs property of the encapsulating dataset can be used to set a default expirationTime on newly created tables. @@ -534,11 +563,17 @@

Method Details

"skipLeadingRows": "A String", # [Optional] The number of rows at the top of a sheet that BigQuery will skip when reading the data. The default value is 0. This property is useful if you have header rows that should be skipped. When autodetect is on, behavior is the following: * skipLeadingRows unspecified - Autodetect tries to detect headers in the first row. If they are not detected, the row is read as data. Otherwise data is read starting from the second row. * skipLeadingRows is 0 - Instructs autodetect that there are no headers and data should be read starting from the first row. * skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect headers in row N. If headers are not detected, row N is just skipped. Otherwise row N is used to extract column names for the detected schema. }, "hivePartitioningOptions": { # [Optional] Options to configure hive partitioning support. + "fields": [ # [Output-only] For permanent external tables, this field is populated with the hive partition keys in the order they were inferred. The types of the partition keys can be deduced by checking the table schema (which will include the partition keys). Not every API will populate this field in the output. For example, Tables.Get will populate it, but Tables.List will not contain this field. + "A String", + ], "mode": "A String", # [Optional] When set, what mode of hive partitioning to use when reading data. The following modes are supported. (1) AUTO: automatically infer partition key name(s) and type(s). (2) STRINGS: automatically infer partition key name(s). All types are interpreted as strings. (3) CUSTOM: partition key schema is encoded in the source URI prefix. Not all storage formats support hive partitioning. Requesting hive partitioning on an unsupported format will lead to an error. Currently supported types include: AVRO, CSV, JSON, ORC and Parquet. 
"requirePartitionFilter": True or False, # [Optional] If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified. Note that this field should only be true when creating a permanent external table or querying a temporary external table. Hive-partitioned loads with requirePartitionFilter explicitly set to true will fail. "sourceUriPrefix": "A String", # [Optional] When hive partition detection is requested, a common prefix for all source uris should be supplied. The prefix must end immediately before the partition key encoding begins. For example, consider files following this data layout. gs://bucket/path_to_table/dt=2019-01-01/country=BR/id=7/file.avro gs://bucket/path_to_table/dt=2018-12-31/country=CA/id=3/file.avro When hive partitioning is requested with either AUTO or STRINGS detection, the common prefix can be either of gs://bucket/path_to_table or gs://bucket/path_to_table/ (trailing slash does not matter). }, "ignoreUnknownValues": True or False, # [Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don't match any column names Google Cloud Bigtable: This setting is ignored. Google Cloud Datastore backups: This setting is ignored. Avro: This setting is ignored. + "jsonOptions": { # Additional properties to set if `sourceFormat` is set to `NEWLINE_DELIMITED_JSON`. + "encoding": "A String", # [Optional] The character encoding of the data. The supported values are UTF-8, UTF-16BE, UTF-16LE, UTF-32BE, and UTF-32LE. The default value is UTF-8. 
+ }, "maxBadRecords": 42, # [Optional] The maximum number of bad records that BigQuery can ignore when reading data. If the number of bad records exceeds this value, an invalid error is returned in the job result. This is only valid for CSV, JSON, and Google Sheets. The default value is 0, which requires that all records are valid. This setting is ignored for Google Cloud Bigtable, Google Cloud Datastore backups and Avro formats. "metadataCacheMode": "A String", # [Optional] Metadata Cache Mode for the table. Set this to enable caching of metadata from external data source. "objectMetadata": "A String", # ObjectMetadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the source_uris. If ObjectMetadata is set, source_format should be omitted. Currently SIMPLE is the only supported Object Metadata type. @@ -633,18 +668,18 @@

Method Details

}, ], }, + "numActiveLogicalBytes": "A String", # [Output-only] Number of logical bytes that are less than 90 days old. + "numActivePhysicalBytes": "A String", # [Output-only] Number of physical bytes less than 90 days old. This data is not kept in real time, and might be delayed by a few seconds to a few minutes. "numBytes": "A String", # [Output-only] The size of this table in bytes, excluding any data in the streaming buffer. "numLongTermBytes": "A String", # [Output-only] The number of bytes in the table that are considered "long-term storage". + "numLongTermLogicalBytes": "A String", # [Output-only] Number of logical bytes that are more than 90 days old. + "numLongTermPhysicalBytes": "A String", # [Output-only] Number of physical bytes more than 90 days old. This data is not kept in real time, and might be delayed by a few seconds to a few minutes. + "numPartitions": "A String", # [Output-only] The number of partitions present in the table or materialized view. This data is not kept in real time, and might be delayed by a few seconds to a few minutes. "numPhysicalBytes": "A String", # [Output-only] [TrustedTester] The physical size of this table in bytes, excluding any data in the streaming buffer. This includes compression and storage used for time travel. "numRows": "A String", # [Output-only] The number of rows of data in this table, excluding any data in the streaming buffer. - "num_active_logical_bytes": "A String", # [Output-only] Number of logical bytes that are less than 90 days old. - "num_active_physical_bytes": "A String", # [Output-only] Number of physical bytes less than 90 days old. This data is not kept in real time, and might be delayed by a few seconds to a few minutes. - "num_long_term_logical_bytes": "A String", # [Output-only] Number of logical bytes that are more than 90 days old. - "num_long_term_physical_bytes": "A String", # [Output-only] Number of physical bytes more than 90 days old. 
This data is not kept in real time, and might be delayed by a few seconds to a few minutes. - "num_partitions": "A String", # [Output-only] The number of partitions present in the table or materialized view. This data is not kept in real time, and might be delayed by a few seconds to a few minutes. - "num_time_travel_physical_bytes": "A String", # [Output-only] Number of physical bytes used by time travel storage (deleted or changed data). This data is not kept in real time, and might be delayed by a few seconds to a few minutes. - "num_total_logical_bytes": "A String", # [Output-only] Total number of logical bytes in the table or materialized view. - "num_total_physical_bytes": "A String", # [Output-only] The physical size of this table in bytes. This also includes storage used for time travel. This data is not kept in real time, and might be delayed by a few seconds to a few minutes. + "numTimeTravelPhysicalBytes": "A String", # [Output-only] Number of physical bytes used by time travel storage (deleted or changed data). This data is not kept in real time, and might be delayed by a few seconds to a few minutes. + "numTotalLogicalBytes": "A String", # [Output-only] Total number of logical bytes in the table or materialized view. + "numTotalPhysicalBytes": "A String", # [Output-only] The physical size of this table in bytes. This also includes storage used for time travel. This data is not kept in real time, and might be delayed by a few seconds to a few minutes. "rangePartitioning": { # [TrustedTester] Range partitioning specification for this table. Only one of timePartitioning and rangePartitioning should be specified. "field": "A String", # [TrustedTester] [Required] The table is partitioned by this field. The field must be a top-level NULLABLE/REQUIRED field. The only supported type is INTEGER/INT64. "range": { # [TrustedTester] [Required] Defines the ranges for range partitioning. @@ -697,6 +732,29 @@

Method Details

"estimatedRows": "A String", # [Output-only] A lower-bound estimate of the number of rows currently in the streaming buffer. "oldestEntryTime": "A String", # [Output-only] Contains the timestamp of the oldest entry in the streaming buffer, in milliseconds since the epoch, if the streaming buffer is available. }, + "tableConstraints": { # [Optional] The table constraints on the table. + "foreignKeys": [ # [Optional] The foreign keys of the tables. + { + "columnReferences": [ + { + "referencedColumn": "A String", + "referencingColumn": "A String", + }, + ], + "name": "A String", + "referencedTable": { + "datasetId": "A String", + "projectId": "A String", + "tableId": "A String", + }, + }, + ], + "primaryKey": { # [Optional] The primary key of the table. + "columns": [ + "A String", + ], + }, + }, "tableReference": { # [Required] Reference describing the ID of this table. "datasetId": "A String", # [Required] The ID of the dataset containing this table. "projectId": "A String", # [Required] The ID of the project containing this table. @@ -745,7 +803,7 @@

Method Details

"defaultRoundingMode": "A String", # [Output-only] The default rounding mode of the table. "description": "A String", # [Optional] A user-friendly description of this table. "encryptionConfiguration": { # Custom encryption configuration (e.g., Cloud KMS keys). - "kmsKeyName": "A String", # [Optional] Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. + "kmsKeyName": "A String", # Optional. Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. }, "etag": "A String", # [Output-only] A hash of the table metadata. Used to ensure there were no concurrent modifications to the resource when attempting an update. Not guaranteed to change when the table contents or the fields numRows, numBytes, numLongTermBytes or lastModifiedTime change. "expirationTime": "A String", # [Optional] The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed. The defaultTableExpirationMs property of the encapsulating dataset can be used to set a default expirationTime on newly created tables. @@ -796,11 +854,17 @@

Method Details

"skipLeadingRows": "A String", # [Optional] The number of rows at the top of a sheet that BigQuery will skip when reading the data. The default value is 0. This property is useful if you have header rows that should be skipped. When autodetect is on, behavior is the following: * skipLeadingRows unspecified - Autodetect tries to detect headers in the first row. If they are not detected, the row is read as data. Otherwise data is read starting from the second row. * skipLeadingRows is 0 - Instructs autodetect that there are no headers and data should be read starting from the first row. * skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect headers in row N. If headers are not detected, row N is just skipped. Otherwise row N is used to extract column names for the detected schema. }, "hivePartitioningOptions": { # [Optional] Options to configure hive partitioning support. + "fields": [ # [Output-only] For permanent external tables, this field is populated with the hive partition keys in the order they were inferred. The types of the partition keys can be deduced by checking the table schema (which will include the partition keys). Not every API will populate this field in the output. For example, Tables.Get will populate it, but Tables.List will not contain this field. + "A String", + ], "mode": "A String", # [Optional] When set, what mode of hive partitioning to use when reading data. The following modes are supported. (1) AUTO: automatically infer partition key name(s) and type(s). (2) STRINGS: automatically infer partition key name(s). All types are interpreted as strings. (3) CUSTOM: partition key schema is encoded in the source URI prefix. Not all storage formats support hive partitioning. Requesting hive partitioning on an unsupported format will lead to an error. Currently supported types include: AVRO, CSV, JSON, ORC and Parquet. 
"requirePartitionFilter": True or False, # [Optional] If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified. Note that this field should only be true when creating a permanent external table or querying a temporary external table. Hive-partitioned loads with requirePartitionFilter explicitly set to true will fail. "sourceUriPrefix": "A String", # [Optional] When hive partition detection is requested, a common prefix for all source uris should be supplied. The prefix must end immediately before the partition key encoding begins. For example, consider files following this data layout. gs://bucket/path_to_table/dt=2019-01-01/country=BR/id=7/file.avro gs://bucket/path_to_table/dt=2018-12-31/country=CA/id=3/file.avro When hive partitioning is requested with either AUTO or STRINGS detection, the common prefix can be either of gs://bucket/path_to_table or gs://bucket/path_to_table/ (trailing slash does not matter). }, "ignoreUnknownValues": True or False, # [Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don't match any column names Google Cloud Bigtable: This setting is ignored. Google Cloud Datastore backups: This setting is ignored. Avro: This setting is ignored. + "jsonOptions": { # Additional properties to set if `sourceFormat` is set to `NEWLINE_DELIMITED_JSON`. + "encoding": "A String", # [Optional] The character encoding of the data. The supported values are UTF-8, UTF-16BE, UTF-16LE, UTF-32BE, and UTF-32LE. The default value is UTF-8. 
+ }, "maxBadRecords": 42, # [Optional] The maximum number of bad records that BigQuery can ignore when reading data. If the number of bad records exceeds this value, an invalid error is returned in the job result. This is only valid for CSV, JSON, and Google Sheets. The default value is 0, which requires that all records are valid. This setting is ignored for Google Cloud Bigtable, Google Cloud Datastore backups and Avro formats. "metadataCacheMode": "A String", # [Optional] Metadata Cache Mode for the table. Set this to enable caching of metadata from external data source. "objectMetadata": "A String", # ObjectMetadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the source_uris. If ObjectMetadata is set, source_format should be omitted. Currently SIMPLE is the only supported Object Metadata type. @@ -895,18 +959,18 @@

Method Details

}, ], }, + "numActiveLogicalBytes": "A String", # [Output-only] Number of logical bytes that are less than 90 days old. + "numActivePhysicalBytes": "A String", # [Output-only] Number of physical bytes less than 90 days old. This data is not kept in real time, and might be delayed by a few seconds to a few minutes. "numBytes": "A String", # [Output-only] The size of this table in bytes, excluding any data in the streaming buffer. "numLongTermBytes": "A String", # [Output-only] The number of bytes in the table that are considered "long-term storage". + "numLongTermLogicalBytes": "A String", # [Output-only] Number of logical bytes that are more than 90 days old. + "numLongTermPhysicalBytes": "A String", # [Output-only] Number of physical bytes more than 90 days old. This data is not kept in real time, and might be delayed by a few seconds to a few minutes. + "numPartitions": "A String", # [Output-only] The number of partitions present in the table or materialized view. This data is not kept in real time, and might be delayed by a few seconds to a few minutes. "numPhysicalBytes": "A String", # [Output-only] [TrustedTester] The physical size of this table in bytes, excluding any data in the streaming buffer. This includes compression and storage used for time travel. "numRows": "A String", # [Output-only] The number of rows of data in this table, excluding any data in the streaming buffer. - "num_active_logical_bytes": "A String", # [Output-only] Number of logical bytes that are less than 90 days old. - "num_active_physical_bytes": "A String", # [Output-only] Number of physical bytes less than 90 days old. This data is not kept in real time, and might be delayed by a few seconds to a few minutes. - "num_long_term_logical_bytes": "A String", # [Output-only] Number of logical bytes that are more than 90 days old. - "num_long_term_physical_bytes": "A String", # [Output-only] Number of physical bytes more than 90 days old. 
This data is not kept in real time, and might be delayed by a few seconds to a few minutes. - "num_partitions": "A String", # [Output-only] The number of partitions present in the table or materialized view. This data is not kept in real time, and might be delayed by a few seconds to a few minutes. - "num_time_travel_physical_bytes": "A String", # [Output-only] Number of physical bytes used by time travel storage (deleted or changed data). This data is not kept in real time, and might be delayed by a few seconds to a few minutes. - "num_total_logical_bytes": "A String", # [Output-only] Total number of logical bytes in the table or materialized view. - "num_total_physical_bytes": "A String", # [Output-only] The physical size of this table in bytes. This also includes storage used for time travel. This data is not kept in real time, and might be delayed by a few seconds to a few minutes. + "numTimeTravelPhysicalBytes": "A String", # [Output-only] Number of physical bytes used by time travel storage (deleted or changed data). This data is not kept in real time, and might be delayed by a few seconds to a few minutes. + "numTotalLogicalBytes": "A String", # [Output-only] Total number of logical bytes in the table or materialized view. + "numTotalPhysicalBytes": "A String", # [Output-only] The physical size of this table in bytes. This also includes storage used for time travel. This data is not kept in real time, and might be delayed by a few seconds to a few minutes. "rangePartitioning": { # [TrustedTester] Range partitioning specification for this table. Only one of timePartitioning and rangePartitioning should be specified. "field": "A String", # [TrustedTester] [Required] The table is partitioned by this field. The field must be a top-level NULLABLE/REQUIRED field. The only supported type is INTEGER/INT64. "range": { # [TrustedTester] [Required] Defines the ranges for range partitioning. @@ -959,6 +1023,29 @@

Method Details

"estimatedRows": "A String", # [Output-only] A lower-bound estimate of the number of rows currently in the streaming buffer. "oldestEntryTime": "A String", # [Output-only] Contains the timestamp of the oldest entry in the streaming buffer, in milliseconds since the epoch, if the streaming buffer is available. }, + "tableConstraints": { # [Optional] The table constraints on the table. + "foreignKeys": [ # [Optional] The foreign keys of the tables. + { + "columnReferences": [ + { + "referencedColumn": "A String", + "referencingColumn": "A String", + }, + ], + "name": "A String", + "referencedTable": { + "datasetId": "A String", + "projectId": "A String", + "tableId": "A String", + }, + }, + ], + "primaryKey": { # [Optional] The primary key of the table. + "columns": [ + "A String", + ], + }, + }, "tableReference": { # [Required] Reference describing the ID of this table. "datasetId": "A String", # [Required] The ID of the dataset containing this table. "projectId": "A String", # [Required] The ID of the project containing this table. @@ -1090,7 +1177,7 @@

Method Details

"defaultRoundingMode": "A String", # [Output-only] The default rounding mode of the table. "description": "A String", # [Optional] A user-friendly description of this table. "encryptionConfiguration": { # Custom encryption configuration (e.g., Cloud KMS keys). - "kmsKeyName": "A String", # [Optional] Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. + "kmsKeyName": "A String", # Optional. Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. }, "etag": "A String", # [Output-only] A hash of the table metadata. Used to ensure there were no concurrent modifications to the resource when attempting an update. Not guaranteed to change when the table contents or the fields numRows, numBytes, numLongTermBytes or lastModifiedTime change. "expirationTime": "A String", # [Optional] The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed. The defaultTableExpirationMs property of the encapsulating dataset can be used to set a default expirationTime on newly created tables. @@ -1141,11 +1228,17 @@

Method Details

"skipLeadingRows": "A String", # [Optional] The number of rows at the top of a sheet that BigQuery will skip when reading the data. The default value is 0. This property is useful if you have header rows that should be skipped. When autodetect is on, behavior is the following: * skipLeadingRows unspecified - Autodetect tries to detect headers in the first row. If they are not detected, the row is read as data. Otherwise data is read starting from the second row. * skipLeadingRows is 0 - Instructs autodetect that there are no headers and data should be read starting from the first row. * skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect headers in row N. If headers are not detected, row N is just skipped. Otherwise row N is used to extract column names for the detected schema. }, "hivePartitioningOptions": { # [Optional] Options to configure hive partitioning support. + "fields": [ # [Output-only] For permanent external tables, this field is populated with the hive partition keys in the order they were inferred. The types of the partition keys can be deduced by checking the table schema (which will include the partition keys). Not every API will populate this field in the output. For example, Tables.Get will populate it, but Tables.List will not contain this field. + "A String", + ], "mode": "A String", # [Optional] When set, what mode of hive partitioning to use when reading data. The following modes are supported. (1) AUTO: automatically infer partition key name(s) and type(s). (2) STRINGS: automatically infer partition key name(s). All types are interpreted as strings. (3) CUSTOM: partition key schema is encoded in the source URI prefix. Not all storage formats support hive partitioning. Requesting hive partitioning on an unsupported format will lead to an error. Currently supported types include: AVRO, CSV, JSON, ORC and Parquet. 
"requirePartitionFilter": True or False, # [Optional] If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified. Note that this field should only be true when creating a permanent external table or querying a temporary external table. Hive-partitioned loads with requirePartitionFilter explicitly set to true will fail. "sourceUriPrefix": "A String", # [Optional] When hive partition detection is requested, a common prefix for all source uris should be supplied. The prefix must end immediately before the partition key encoding begins. For example, consider files following this data layout. gs://bucket/path_to_table/dt=2019-01-01/country=BR/id=7/file.avro gs://bucket/path_to_table/dt=2018-12-31/country=CA/id=3/file.avro When hive partitioning is requested with either AUTO or STRINGS detection, the common prefix can be either of gs://bucket/path_to_table or gs://bucket/path_to_table/ (trailing slash does not matter). }, "ignoreUnknownValues": True or False, # [Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don't match any column names Google Cloud Bigtable: This setting is ignored. Google Cloud Datastore backups: This setting is ignored. Avro: This setting is ignored. + "jsonOptions": { # Additional properties to set if `sourceFormat` is set to `NEWLINE_DELIMITED_JSON`. + "encoding": "A String", # [Optional] The character encoding of the data. The supported values are UTF-8, UTF-16BE, UTF-16LE, UTF-32BE, and UTF-32LE. The default value is UTF-8. 
+ }, "maxBadRecords": 42, # [Optional] The maximum number of bad records that BigQuery can ignore when reading data. If the number of bad records exceeds this value, an invalid error is returned in the job result. This is only valid for CSV, JSON, and Google Sheets. The default value is 0, which requires that all records are valid. This setting is ignored for Google Cloud Bigtable, Google Cloud Datastore backups and Avro formats. "metadataCacheMode": "A String", # [Optional] Metadata Cache Mode for the table. Set this to enable caching of metadata from external data source. "objectMetadata": "A String", # ObjectMetadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the source_uris. If ObjectMetadata is set, source_format should be omitted. Currently SIMPLE is the only supported Object Metadata type. @@ -1240,18 +1333,18 @@

Method Details

}, ], }, + "numActiveLogicalBytes": "A String", # [Output-only] Number of logical bytes that are less than 90 days old. + "numActivePhysicalBytes": "A String", # [Output-only] Number of physical bytes less than 90 days old. This data is not kept in real time, and might be delayed by a few seconds to a few minutes. "numBytes": "A String", # [Output-only] The size of this table in bytes, excluding any data in the streaming buffer. "numLongTermBytes": "A String", # [Output-only] The number of bytes in the table that are considered "long-term storage". + "numLongTermLogicalBytes": "A String", # [Output-only] Number of logical bytes that are more than 90 days old. + "numLongTermPhysicalBytes": "A String", # [Output-only] Number of physical bytes more than 90 days old. This data is not kept in real time, and might be delayed by a few seconds to a few minutes. + "numPartitions": "A String", # [Output-only] The number of partitions present in the table or materialized view. This data is not kept in real time, and might be delayed by a few seconds to a few minutes. "numPhysicalBytes": "A String", # [Output-only] [TrustedTester] The physical size of this table in bytes, excluding any data in the streaming buffer. This includes compression and storage used for time travel. "numRows": "A String", # [Output-only] The number of rows of data in this table, excluding any data in the streaming buffer. - "num_active_logical_bytes": "A String", # [Output-only] Number of logical bytes that are less than 90 days old. - "num_active_physical_bytes": "A String", # [Output-only] Number of physical bytes less than 90 days old. This data is not kept in real time, and might be delayed by a few seconds to a few minutes. - "num_long_term_logical_bytes": "A String", # [Output-only] Number of logical bytes that are more than 90 days old. - "num_long_term_physical_bytes": "A String", # [Output-only] Number of physical bytes more than 90 days old. 
This data is not kept in real time, and might be delayed by a few seconds to a few minutes. - "num_partitions": "A String", # [Output-only] The number of partitions present in the table or materialized view. This data is not kept in real time, and might be delayed by a few seconds to a few minutes. - "num_time_travel_physical_bytes": "A String", # [Output-only] Number of physical bytes used by time travel storage (deleted or changed data). This data is not kept in real time, and might be delayed by a few seconds to a few minutes. - "num_total_logical_bytes": "A String", # [Output-only] Total number of logical bytes in the table or materialized view. - "num_total_physical_bytes": "A String", # [Output-only] The physical size of this table in bytes. This also includes storage used for time travel. This data is not kept in real time, and might be delayed by a few seconds to a few minutes. + "numTimeTravelPhysicalBytes": "A String", # [Output-only] Number of physical bytes used by time travel storage (deleted or changed data). This data is not kept in real time, and might be delayed by a few seconds to a few minutes. + "numTotalLogicalBytes": "A String", # [Output-only] Total number of logical bytes in the table or materialized view. + "numTotalPhysicalBytes": "A String", # [Output-only] The physical size of this table in bytes. This also includes storage used for time travel. This data is not kept in real time, and might be delayed by a few seconds to a few minutes. "rangePartitioning": { # [TrustedTester] Range partitioning specification for this table. Only one of timePartitioning and rangePartitioning should be specified. "field": "A String", # [TrustedTester] [Required] The table is partitioned by this field. The field must be a top-level NULLABLE/REQUIRED field. The only supported type is INTEGER/INT64. "range": { # [TrustedTester] [Required] Defines the ranges for range partitioning. @@ -1304,6 +1397,29 @@

Method Details

"estimatedRows": "A String", # [Output-only] A lower-bound estimate of the number of rows currently in the streaming buffer. "oldestEntryTime": "A String", # [Output-only] Contains the timestamp of the oldest entry in the streaming buffer, in milliseconds since the epoch, if the streaming buffer is available. }, + "tableConstraints": { # [Optional] The table constraints on the table. + "foreignKeys": [ # [Optional] The foreign keys of the tables. + { + "columnReferences": [ + { + "referencedColumn": "A String", + "referencingColumn": "A String", + }, + ], + "name": "A String", + "referencedTable": { + "datasetId": "A String", + "projectId": "A String", + "tableId": "A String", + }, + }, + ], + "primaryKey": { # [Optional] The primary key of the table. + "columns": [ + "A String", + ], + }, + }, "tableReference": { # [Required] Reference describing the ID of this table. "datasetId": "A String", # [Required] The ID of the dataset containing this table. "projectId": "A String", # [Required] The ID of the project containing this table. @@ -1353,7 +1469,7 @@

Method Details

"defaultRoundingMode": "A String", # [Output-only] The default rounding mode of the table. "description": "A String", # [Optional] A user-friendly description of this table. "encryptionConfiguration": { # Custom encryption configuration (e.g., Cloud KMS keys). - "kmsKeyName": "A String", # [Optional] Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. + "kmsKeyName": "A String", # Optional. Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. }, "etag": "A String", # [Output-only] A hash of the table metadata. Used to ensure there were no concurrent modifications to the resource when attempting an update. Not guaranteed to change when the table contents or the fields numRows, numBytes, numLongTermBytes or lastModifiedTime change. "expirationTime": "A String", # [Optional] The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed. The defaultTableExpirationMs property of the encapsulating dataset can be used to set a default expirationTime on newly created tables. @@ -1404,11 +1520,17 @@

Method Details

"skipLeadingRows": "A String", # [Optional] The number of rows at the top of a sheet that BigQuery will skip when reading the data. The default value is 0. This property is useful if you have header rows that should be skipped. When autodetect is on, behavior is the following: * skipLeadingRows unspecified - Autodetect tries to detect headers in the first row. If they are not detected, the row is read as data. Otherwise data is read starting from the second row. * skipLeadingRows is 0 - Instructs autodetect that there are no headers and data should be read starting from the first row. * skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect headers in row N. If headers are not detected, row N is just skipped. Otherwise row N is used to extract column names for the detected schema. }, "hivePartitioningOptions": { # [Optional] Options to configure hive partitioning support. + "fields": [ # [Output-only] For permanent external tables, this field is populated with the hive partition keys in the order they were inferred. The types of the partition keys can be deduced by checking the table schema (which will include the partition keys). Not every API will populate this field in the output. For example, Tables.Get will populate it, but Tables.List will not contain this field. + "A String", + ], "mode": "A String", # [Optional] When set, what mode of hive partitioning to use when reading data. The following modes are supported. (1) AUTO: automatically infer partition key name(s) and type(s). (2) STRINGS: automatically infer partition key name(s). All types are interpreted as strings. (3) CUSTOM: partition key schema is encoded in the source URI prefix. Not all storage formats support hive partitioning. Requesting hive partitioning on an unsupported format will lead to an error. Currently supported types include: AVRO, CSV, JSON, ORC and Parquet. 
"requirePartitionFilter": True or False, # [Optional] If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified. Note that this field should only be true when creating a permanent external table or querying a temporary external table. Hive-partitioned loads with requirePartitionFilter explicitly set to true will fail. "sourceUriPrefix": "A String", # [Optional] When hive partition detection is requested, a common prefix for all source uris should be supplied. The prefix must end immediately before the partition key encoding begins. For example, consider files following this data layout. gs://bucket/path_to_table/dt=2019-01-01/country=BR/id=7/file.avro gs://bucket/path_to_table/dt=2018-12-31/country=CA/id=3/file.avro When hive partitioning is requested with either AUTO or STRINGS detection, the common prefix can be either of gs://bucket/path_to_table or gs://bucket/path_to_table/ (trailing slash does not matter). }, "ignoreUnknownValues": True or False, # [Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don't match any column names Google Cloud Bigtable: This setting is ignored. Google Cloud Datastore backups: This setting is ignored. Avro: This setting is ignored. + "jsonOptions": { # Additional properties to set if `sourceFormat` is set to `NEWLINE_DELIMITED_JSON`. + "encoding": "A String", # [Optional] The character encoding of the data. The supported values are UTF-8, UTF-16BE, UTF-16LE, UTF-32BE, and UTF-32LE. The default value is UTF-8. 
+ }, "maxBadRecords": 42, # [Optional] The maximum number of bad records that BigQuery can ignore when reading data. If the number of bad records exceeds this value, an invalid error is returned in the job result. This is only valid for CSV, JSON, and Google Sheets. The default value is 0, which requires that all records are valid. This setting is ignored for Google Cloud Bigtable, Google Cloud Datastore backups and Avro formats. "metadataCacheMode": "A String", # [Optional] Metadata Cache Mode for the table. Set this to enable caching of metadata from external data source. "objectMetadata": "A String", # ObjectMetadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the source_uris. If ObjectMetadata is set, source_format should be omitted. Currently SIMPLE is the only supported Object Metadata type. @@ -1503,18 +1625,18 @@

Method Details

}, ], }, + "numActiveLogicalBytes": "A String", # [Output-only] Number of logical bytes that are less than 90 days old. + "numActivePhysicalBytes": "A String", # [Output-only] Number of physical bytes less than 90 days old. This data is not kept in real time, and might be delayed by a few seconds to a few minutes. "numBytes": "A String", # [Output-only] The size of this table in bytes, excluding any data in the streaming buffer. "numLongTermBytes": "A String", # [Output-only] The number of bytes in the table that are considered "long-term storage". + "numLongTermLogicalBytes": "A String", # [Output-only] Number of logical bytes that are more than 90 days old. + "numLongTermPhysicalBytes": "A String", # [Output-only] Number of physical bytes more than 90 days old. This data is not kept in real time, and might be delayed by a few seconds to a few minutes. + "numPartitions": "A String", # [Output-only] The number of partitions present in the table or materialized view. This data is not kept in real time, and might be delayed by a few seconds to a few minutes. "numPhysicalBytes": "A String", # [Output-only] [TrustedTester] The physical size of this table in bytes, excluding any data in the streaming buffer. This includes compression and storage used for time travel. "numRows": "A String", # [Output-only] The number of rows of data in this table, excluding any data in the streaming buffer. - "num_active_logical_bytes": "A String", # [Output-only] Number of logical bytes that are less than 90 days old. - "num_active_physical_bytes": "A String", # [Output-only] Number of physical bytes less than 90 days old. This data is not kept in real time, and might be delayed by a few seconds to a few minutes. - "num_long_term_logical_bytes": "A String", # [Output-only] Number of logical bytes that are more than 90 days old. - "num_long_term_physical_bytes": "A String", # [Output-only] Number of physical bytes more than 90 days old. 
This data is not kept in real time, and might be delayed by a few seconds to a few minutes. - "num_partitions": "A String", # [Output-only] The number of partitions present in the table or materialized view. This data is not kept in real time, and might be delayed by a few seconds to a few minutes. - "num_time_travel_physical_bytes": "A String", # [Output-only] Number of physical bytes used by time travel storage (deleted or changed data). This data is not kept in real time, and might be delayed by a few seconds to a few minutes. - "num_total_logical_bytes": "A String", # [Output-only] Total number of logical bytes in the table or materialized view. - "num_total_physical_bytes": "A String", # [Output-only] The physical size of this table in bytes. This also includes storage used for time travel. This data is not kept in real time, and might be delayed by a few seconds to a few minutes. + "numTimeTravelPhysicalBytes": "A String", # [Output-only] Number of physical bytes used by time travel storage (deleted or changed data). This data is not kept in real time, and might be delayed by a few seconds to a few minutes. + "numTotalLogicalBytes": "A String", # [Output-only] Total number of logical bytes in the table or materialized view. + "numTotalPhysicalBytes": "A String", # [Output-only] The physical size of this table in bytes. This also includes storage used for time travel. This data is not kept in real time, and might be delayed by a few seconds to a few minutes. "rangePartitioning": { # [TrustedTester] Range partitioning specification for this table. Only one of timePartitioning and rangePartitioning should be specified. "field": "A String", # [TrustedTester] [Required] The table is partitioned by this field. The field must be a top-level NULLABLE/REQUIRED field. The only supported type is INTEGER/INT64. "range": { # [TrustedTester] [Required] Defines the ranges for range partitioning. @@ -1567,6 +1689,29 @@

Method Details

"estimatedRows": "A String", # [Output-only] A lower-bound estimate of the number of rows currently in the streaming buffer. "oldestEntryTime": "A String", # [Output-only] Contains the timestamp of the oldest entry in the streaming buffer, in milliseconds since the epoch, if the streaming buffer is available. }, + "tableConstraints": { # [Optional] The table constraints on the table. + "foreignKeys": [ # [Optional] The foreign keys of the tables. + { + "columnReferences": [ + { + "referencedColumn": "A String", + "referencingColumn": "A String", + }, + ], + "name": "A String", + "referencedTable": { + "datasetId": "A String", + "projectId": "A String", + "tableId": "A String", + }, + }, + ], + "primaryKey": { # [Optional] The primary key of the table. + "columns": [ + "A String", + ], + }, + }, "tableReference": { # [Required] Reference describing the ID of this table. "datasetId": "A String", # [Required] The ID of the dataset containing this table. "projectId": "A String", # [Required] The ID of the project containing this table. @@ -1730,7 +1875,7 @@

Method Details

"defaultRoundingMode": "A String", # [Output-only] The default rounding mode of the table. "description": "A String", # [Optional] A user-friendly description of this table. "encryptionConfiguration": { # Custom encryption configuration (e.g., Cloud KMS keys). - "kmsKeyName": "A String", # [Optional] Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. + "kmsKeyName": "A String", # Optional. Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. }, "etag": "A String", # [Output-only] A hash of the table metadata. Used to ensure there were no concurrent modifications to the resource when attempting an update. Not guaranteed to change when the table contents or the fields numRows, numBytes, numLongTermBytes or lastModifiedTime change. "expirationTime": "A String", # [Optional] The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed. The defaultTableExpirationMs property of the encapsulating dataset can be used to set a default expirationTime on newly created tables. @@ -1781,11 +1926,17 @@

Method Details

"skipLeadingRows": "A String", # [Optional] The number of rows at the top of a sheet that BigQuery will skip when reading the data. The default value is 0. This property is useful if you have header rows that should be skipped. When autodetect is on, behavior is the following: * skipLeadingRows unspecified - Autodetect tries to detect headers in the first row. If they are not detected, the row is read as data. Otherwise data is read starting from the second row. * skipLeadingRows is 0 - Instructs autodetect that there are no headers and data should be read starting from the first row. * skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect headers in row N. If headers are not detected, row N is just skipped. Otherwise row N is used to extract column names for the detected schema. }, "hivePartitioningOptions": { # [Optional] Options to configure hive partitioning support. + "fields": [ # [Output-only] For permanent external tables, this field is populated with the hive partition keys in the order they were inferred. The types of the partition keys can be deduced by checking the table schema (which will include the partition keys). Not every API will populate this field in the output. For example, Tables.Get will populate it, but Tables.List will not contain this field. + "A String", + ], "mode": "A String", # [Optional] When set, what mode of hive partitioning to use when reading data. The following modes are supported. (1) AUTO: automatically infer partition key name(s) and type(s). (2) STRINGS: automatically infer partition key name(s). All types are interpreted as strings. (3) CUSTOM: partition key schema is encoded in the source URI prefix. Not all storage formats support hive partitioning. Requesting hive partitioning on an unsupported format will lead to an error. Currently supported types include: AVRO, CSV, JSON, ORC and Parquet. 
"requirePartitionFilter": True or False, # [Optional] If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified. Note that this field should only be true when creating a permanent external table or querying a temporary external table. Hive-partitioned loads with requirePartitionFilter explicitly set to true will fail. "sourceUriPrefix": "A String", # [Optional] When hive partition detection is requested, a common prefix for all source uris should be supplied. The prefix must end immediately before the partition key encoding begins. For example, consider files following this data layout. gs://bucket/path_to_table/dt=2019-01-01/country=BR/id=7/file.avro gs://bucket/path_to_table/dt=2018-12-31/country=CA/id=3/file.avro When hive partitioning is requested with either AUTO or STRINGS detection, the common prefix can be either of gs://bucket/path_to_table or gs://bucket/path_to_table/ (trailing slash does not matter). }, "ignoreUnknownValues": True or False, # [Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don't match any column names Google Cloud Bigtable: This setting is ignored. Google Cloud Datastore backups: This setting is ignored. Avro: This setting is ignored. + "jsonOptions": { # Additional properties to set if `sourceFormat` is set to `NEWLINE_DELIMITED_JSON`. + "encoding": "A String", # [Optional] The character encoding of the data. The supported values are UTF-8, UTF-16BE, UTF-16LE, UTF-32BE, and UTF-32LE. The default value is UTF-8. 
+ }, "maxBadRecords": 42, # [Optional] The maximum number of bad records that BigQuery can ignore when reading data. If the number of bad records exceeds this value, an invalid error is returned in the job result. This is only valid for CSV, JSON, and Google Sheets. The default value is 0, which requires that all records are valid. This setting is ignored for Google Cloud Bigtable, Google Cloud Datastore backups and Avro formats. "metadataCacheMode": "A String", # [Optional] Metadata Cache Mode for the table. Set this to enable caching of metadata from external data source. "objectMetadata": "A String", # ObjectMetadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the source_uris. If ObjectMetadata is set, source_format should be omitted. Currently SIMPLE is the only supported Object Metadata type. @@ -1880,18 +2031,18 @@

Method Details

}, ], }, + "numActiveLogicalBytes": "A String", # [Output-only] Number of logical bytes that are less than 90 days old. + "numActivePhysicalBytes": "A String", # [Output-only] Number of physical bytes less than 90 days old. This data is not kept in real time, and might be delayed by a few seconds to a few minutes. "numBytes": "A String", # [Output-only] The size of this table in bytes, excluding any data in the streaming buffer. "numLongTermBytes": "A String", # [Output-only] The number of bytes in the table that are considered "long-term storage". + "numLongTermLogicalBytes": "A String", # [Output-only] Number of logical bytes that are more than 90 days old. + "numLongTermPhysicalBytes": "A String", # [Output-only] Number of physical bytes more than 90 days old. This data is not kept in real time, and might be delayed by a few seconds to a few minutes. + "numPartitions": "A String", # [Output-only] The number of partitions present in the table or materialized view. This data is not kept in real time, and might be delayed by a few seconds to a few minutes. "numPhysicalBytes": "A String", # [Output-only] [TrustedTester] The physical size of this table in bytes, excluding any data in the streaming buffer. This includes compression and storage used for time travel. "numRows": "A String", # [Output-only] The number of rows of data in this table, excluding any data in the streaming buffer. - "num_active_logical_bytes": "A String", # [Output-only] Number of logical bytes that are less than 90 days old. - "num_active_physical_bytes": "A String", # [Output-only] Number of physical bytes less than 90 days old. This data is not kept in real time, and might be delayed by a few seconds to a few minutes. - "num_long_term_logical_bytes": "A String", # [Output-only] Number of logical bytes that are more than 90 days old. - "num_long_term_physical_bytes": "A String", # [Output-only] Number of physical bytes more than 90 days old. 
This data is not kept in real time, and might be delayed by a few seconds to a few minutes. - "num_partitions": "A String", # [Output-only] The number of partitions present in the table or materialized view. This data is not kept in real time, and might be delayed by a few seconds to a few minutes. - "num_time_travel_physical_bytes": "A String", # [Output-only] Number of physical bytes used by time travel storage (deleted or changed data). This data is not kept in real time, and might be delayed by a few seconds to a few minutes. - "num_total_logical_bytes": "A String", # [Output-only] Total number of logical bytes in the table or materialized view. - "num_total_physical_bytes": "A String", # [Output-only] The physical size of this table in bytes. This also includes storage used for time travel. This data is not kept in real time, and might be delayed by a few seconds to a few minutes. + "numTimeTravelPhysicalBytes": "A String", # [Output-only] Number of physical bytes used by time travel storage (deleted or changed data). This data is not kept in real time, and might be delayed by a few seconds to a few minutes. + "numTotalLogicalBytes": "A String", # [Output-only] Total number of logical bytes in the table or materialized view. + "numTotalPhysicalBytes": "A String", # [Output-only] The physical size of this table in bytes. This also includes storage used for time travel. This data is not kept in real time, and might be delayed by a few seconds to a few minutes. "rangePartitioning": { # [TrustedTester] Range partitioning specification for this table. Only one of timePartitioning and rangePartitioning should be specified. "field": "A String", # [TrustedTester] [Required] The table is partitioned by this field. The field must be a top-level NULLABLE/REQUIRED field. The only supported type is INTEGER/INT64. "range": { # [TrustedTester] [Required] Defines the ranges for range partitioning. @@ -1944,6 +2095,29 @@

Method Details

"estimatedRows": "A String", # [Output-only] A lower-bound estimate of the number of rows currently in the streaming buffer. "oldestEntryTime": "A String", # [Output-only] Contains the timestamp of the oldest entry in the streaming buffer, in milliseconds since the epoch, if the streaming buffer is available. }, + "tableConstraints": { # [Optional] The table constraints on the table. + "foreignKeys": [ # [Optional] The foreign keys of the tables. + { + "columnReferences": [ + { + "referencedColumn": "A String", + "referencingColumn": "A String", + }, + ], + "name": "A String", + "referencedTable": { + "datasetId": "A String", + "projectId": "A String", + "tableId": "A String", + }, + }, + ], + "primaryKey": { # [Optional] The primary key of the table. + "columns": [ + "A String", + ], + }, + }, "tableReference": { # [Required] Reference describing the ID of this table. "datasetId": "A String", # [Required] The ID of the dataset containing this table. "projectId": "A String", # [Required] The ID of the project containing this table. @@ -1993,7 +2167,7 @@

Method Details

"defaultRoundingMode": "A String", # [Output-only] The default rounding mode of the table. "description": "A String", # [Optional] A user-friendly description of this table. "encryptionConfiguration": { # Custom encryption configuration (e.g., Cloud KMS keys). - "kmsKeyName": "A String", # [Optional] Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. + "kmsKeyName": "A String", # Optional. Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. }, "etag": "A String", # [Output-only] A hash of the table metadata. Used to ensure there were no concurrent modifications to the resource when attempting an update. Not guaranteed to change when the table contents or the fields numRows, numBytes, numLongTermBytes or lastModifiedTime change. "expirationTime": "A String", # [Optional] The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed. The defaultTableExpirationMs property of the encapsulating dataset can be used to set a default expirationTime on newly created tables. @@ -2044,11 +2218,17 @@

Method Details

"skipLeadingRows": "A String", # [Optional] The number of rows at the top of a sheet that BigQuery will skip when reading the data. The default value is 0. This property is useful if you have header rows that should be skipped. When autodetect is on, behavior is the following: * skipLeadingRows unspecified - Autodetect tries to detect headers in the first row. If they are not detected, the row is read as data. Otherwise data is read starting from the second row. * skipLeadingRows is 0 - Instructs autodetect that there are no headers and data should be read starting from the first row. * skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect headers in row N. If headers are not detected, row N is just skipped. Otherwise row N is used to extract column names for the detected schema. }, "hivePartitioningOptions": { # [Optional] Options to configure hive partitioning support. + "fields": [ # [Output-only] For permanent external tables, this field is populated with the hive partition keys in the order they were inferred. The types of the partition keys can be deduced by checking the table schema (which will include the partition keys). Not every API will populate this field in the output. For example, Tables.Get will populate it, but Tables.List will not contain this field. + "A String", + ], "mode": "A String", # [Optional] When set, what mode of hive partitioning to use when reading data. The following modes are supported. (1) AUTO: automatically infer partition key name(s) and type(s). (2) STRINGS: automatically infer partition key name(s). All types are interpreted as strings. (3) CUSTOM: partition key schema is encoded in the source URI prefix. Not all storage formats support hive partitioning. Requesting hive partitioning on an unsupported format will lead to an error. Currently supported types include: AVRO, CSV, JSON, ORC and Parquet. 
"requirePartitionFilter": True or False, # [Optional] If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified. Note that this field should only be true when creating a permanent external table or querying a temporary external table. Hive-partitioned loads with requirePartitionFilter explicitly set to true will fail. "sourceUriPrefix": "A String", # [Optional] When hive partition detection is requested, a common prefix for all source uris should be supplied. The prefix must end immediately before the partition key encoding begins. For example, consider files following this data layout. gs://bucket/path_to_table/dt=2019-01-01/country=BR/id=7/file.avro gs://bucket/path_to_table/dt=2018-12-31/country=CA/id=3/file.avro When hive partitioning is requested with either AUTO or STRINGS detection, the common prefix can be either of gs://bucket/path_to_table or gs://bucket/path_to_table/ (trailing slash does not matter). }, "ignoreUnknownValues": True or False, # [Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don't match any column names Google Cloud Bigtable: This setting is ignored. Google Cloud Datastore backups: This setting is ignored. Avro: This setting is ignored. + "jsonOptions": { # Additional properties to set if `sourceFormat` is set to `NEWLINE_DELIMITED_JSON`. + "encoding": "A String", # [Optional] The character encoding of the data. The supported values are UTF-8, UTF-16BE, UTF-16LE, UTF-32BE, and UTF-32LE. The default value is UTF-8. 
+ }, "maxBadRecords": 42, # [Optional] The maximum number of bad records that BigQuery can ignore when reading data. If the number of bad records exceeds this value, an invalid error is returned in the job result. This is only valid for CSV, JSON, and Google Sheets. The default value is 0, which requires that all records are valid. This setting is ignored for Google Cloud Bigtable, Google Cloud Datastore backups and Avro formats. "metadataCacheMode": "A String", # [Optional] Metadata Cache Mode for the table. Set this to enable caching of metadata from external data source. "objectMetadata": "A String", # ObjectMetadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the source_uris. If ObjectMetadata is set, source_format should be omitted. Currently SIMPLE is the only supported Object Metadata type. @@ -2143,18 +2323,18 @@

Method Details

}, ], }, + "numActiveLogicalBytes": "A String", # [Output-only] Number of logical bytes that are less than 90 days old. + "numActivePhysicalBytes": "A String", # [Output-only] Number of physical bytes less than 90 days old. This data is not kept in real time, and might be delayed by a few seconds to a few minutes. "numBytes": "A String", # [Output-only] The size of this table in bytes, excluding any data in the streaming buffer. "numLongTermBytes": "A String", # [Output-only] The number of bytes in the table that are considered "long-term storage". + "numLongTermLogicalBytes": "A String", # [Output-only] Number of logical bytes that are more than 90 days old. + "numLongTermPhysicalBytes": "A String", # [Output-only] Number of physical bytes more than 90 days old. This data is not kept in real time, and might be delayed by a few seconds to a few minutes. + "numPartitions": "A String", # [Output-only] The number of partitions present in the table or materialized view. This data is not kept in real time, and might be delayed by a few seconds to a few minutes. "numPhysicalBytes": "A String", # [Output-only] [TrustedTester] The physical size of this table in bytes, excluding any data in the streaming buffer. This includes compression and storage used for time travel. "numRows": "A String", # [Output-only] The number of rows of data in this table, excluding any data in the streaming buffer. - "num_active_logical_bytes": "A String", # [Output-only] Number of logical bytes that are less than 90 days old. - "num_active_physical_bytes": "A String", # [Output-only] Number of physical bytes less than 90 days old. This data is not kept in real time, and might be delayed by a few seconds to a few minutes. - "num_long_term_logical_bytes": "A String", # [Output-only] Number of logical bytes that are more than 90 days old. - "num_long_term_physical_bytes": "A String", # [Output-only] Number of physical bytes more than 90 days old. 
This data is not kept in real time, and might be delayed by a few seconds to a few minutes. - "num_partitions": "A String", # [Output-only] The number of partitions present in the table or materialized view. This data is not kept in real time, and might be delayed by a few seconds to a few minutes. - "num_time_travel_physical_bytes": "A String", # [Output-only] Number of physical bytes used by time travel storage (deleted or changed data). This data is not kept in real time, and might be delayed by a few seconds to a few minutes. - "num_total_logical_bytes": "A String", # [Output-only] Total number of logical bytes in the table or materialized view. - "num_total_physical_bytes": "A String", # [Output-only] The physical size of this table in bytes. This also includes storage used for time travel. This data is not kept in real time, and might be delayed by a few seconds to a few minutes. + "numTimeTravelPhysicalBytes": "A String", # [Output-only] Number of physical bytes used by time travel storage (deleted or changed data). This data is not kept in real time, and might be delayed by a few seconds to a few minutes. + "numTotalLogicalBytes": "A String", # [Output-only] Total number of logical bytes in the table or materialized view. + "numTotalPhysicalBytes": "A String", # [Output-only] The physical size of this table in bytes. This also includes storage used for time travel. This data is not kept in real time, and might be delayed by a few seconds to a few minutes. "rangePartitioning": { # [TrustedTester] Range partitioning specification for this table. Only one of timePartitioning and rangePartitioning should be specified. "field": "A String", # [TrustedTester] [Required] The table is partitioned by this field. The field must be a top-level NULLABLE/REQUIRED field. The only supported type is INTEGER/INT64. "range": { # [TrustedTester] [Required] Defines the ranges for range partitioning. @@ -2207,6 +2387,29 @@

Method Details

"estimatedRows": "A String", # [Output-only] A lower-bound estimate of the number of rows currently in the streaming buffer. "oldestEntryTime": "A String", # [Output-only] Contains the timestamp of the oldest entry in the streaming buffer, in milliseconds since the epoch, if the streaming buffer is available. }, + "tableConstraints": { # [Optional] The table constraints on the table. + "foreignKeys": [ # [Optional] The foreign keys of the tables. + { + "columnReferences": [ + { + "referencedColumn": "A String", + "referencingColumn": "A String", + }, + ], + "name": "A String", + "referencedTable": { + "datasetId": "A String", + "projectId": "A String", + "tableId": "A String", + }, + }, + ], + "primaryKey": { # [Optional] The primary key of the table. + "columns": [ + "A String", + ], + }, + }, "tableReference": { # [Required] Reference describing the ID of this table. "datasetId": "A String", # [Required] The ID of the dataset containing this table. "projectId": "A String", # [Required] The ID of the project containing this table. diff --git a/googleapiclient/discovery_cache/documents/bigquery.v2.json b/googleapiclient/discovery_cache/documents/bigquery.v2.json index f16da2af04e..49cf0cb4386 100644 --- a/googleapiclient/discovery_cache/documents/bigquery.v2.json +++ b/googleapiclient/discovery_cache/documents/bigquery.v2.json @@ -1011,7 +1011,7 @@ "type": "string" }, "filter": { - "description": "If set, then only the Routines matching this filter are returned. The current supported form is either \"routine_type:\" or \"routineType:\", where is a RoutineType enum. Example: \"routineType:SCALAR_FUNCTION\".", + "description": "If set, then only the Routines matching this filter are returned. The supported format is `routineType:{RoutineType}`, where `{RoutineType}` is a RoutineType enum. 
For example: `routineType:SCALAR_FUNCTION`.", "location": "query", "type": "string" }, @@ -1710,7 +1710,7 @@ } } }, - "revision": "20230408", + "revision": "20230520", "rootUrl": "https://bigquery.googleapis.com/", "schemas": { "AggregateClassificationMetrics": { @@ -3105,7 +3105,7 @@ "id": "EncryptionConfiguration", "properties": { "kmsKeyName": { - "description": "[Optional] Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.", + "description": "Optional. Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key.", "type": "string" } }, @@ -3445,6 +3445,10 @@ "description": "[Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don't match any column names Google Cloud Bigtable: This setting is ignored. Google Cloud Datastore backups: This setting is ignored. Avro: This setting is ignored.", "type": "boolean" }, + "jsonOptions": { + "$ref": "JsonOptions", + "description": "Additional properties to set if `sourceFormat` is set to `NEWLINE_DELIMITED_JSON`." + }, "maxBadRecords": { "description": "[Optional] The maximum number of bad records that BigQuery can ignore when reading data. If the number of bad records exceeds this value, an invalid error is returned in the job result. This is only valid for CSV, JSON, and Google Sheets. The default value is 0, which requires that all records are valid. 
This setting is ignored for Google Cloud Bigtable, Google Cloud Datastore backups and Avro formats.", "format": "int32", @@ -3642,6 +3646,13 @@ "HivePartitioningOptions": { "id": "HivePartitioningOptions", "properties": { + "fields": { + "description": "[Output-only] For permanent external tables, this field is populated with the hive partition keys in the order they were inferred. The types of the partition keys can be deduced by checking the table schema (which will include the partition keys). Not every API will populate this field in the output. For example, Tables.Get will populate it, but Tables.List will not contain this field.", + "items": { + "type": "string" + }, + "type": "array" + }, "mode": { "description": "[Optional] When set, what mode of hive partitioning to use when reading data. The following modes are supported. (1) AUTO: automatically infer partition key name(s) and type(s). (2) STRINGS: automatically infer partition key name(s). All types are interpreted as strings. (3) CUSTOM: partition key schema is encoded in the source URI prefix. Not all storage formats support hive partitioning. Requesting hive partitioning on an unsupported format will lead to an error. Currently supported types include: AVRO, CSV, JSON, ORC and Parquet.", "type": "string" @@ -4906,6 +4917,16 @@ "id": "JsonObject", "type": "object" }, + "JsonOptions": { + "id": "JsonOptions", + "properties": { + "encoding": { + "description": "[Optional] The character encoding of the data. The supported values are UTF-8, UTF-16BE, UTF-16LE, UTF-32BE, and UTF-32LE. 
The default value is UTF-8.", + "type": "string" + } + }, + "type": "object" + }, "JsonValue": { "id": "JsonValue", "type": "any" @@ -5128,6 +5149,7 @@ "DNN_CLASSIFIER", "TENSORFLOW", "DNN_REGRESSOR", + "XGBOOST", "BOOSTED_TREE_REGRESSOR", "BOOSTED_TREE_CLASSIFIER", "ARIMA", @@ -5138,8 +5160,11 @@ "DNN_LINEAR_COMBINED_REGRESSOR", "AUTOENCODER", "ARIMA_PLUS", + "ARIMA_PLUS_XREG", "RANDOM_FOREST_REGRESSOR", - "RANDOM_FOREST_CLASSIFIER" + "RANDOM_FOREST_CLASSIFIER", + "TENSORFLOW_LITE", + "ONNX" ], "enumDescriptions": [ "", @@ -5150,6 +5175,7 @@ "DNN classifier model.", "An imported TensorFlow model.", "DNN regressor model.", + "An imported XGBoost model.", "Boosted tree regressor model.", "Boosted tree classifier model.", "ARIMA model.", @@ -5160,8 +5186,11 @@ "Wide-and-deep regressor model.", "Autoencoder model.", "New name for the ARIMA model.", - "Random Forest regressor model.", - "Random Forest classifier model." + "ARIMA with external regressors.", + "Random forest regressor model.", + "Random forest classifier model.", + "An imported TensorFlow Lite model.", + "An imported ONNX model." ], "readOnly": true, "type": "string" @@ -5175,6 +5204,11 @@ "readOnly": true, "type": "array" }, + "remoteModelInfo": { + "$ref": "RemoteModelInfo", + "description": "Output only. Remote model info", + "readOnly": true + }, "trainingRuns": { "description": "Information for all training runs in increasing order of start_time.", "items": { @@ -5220,15 +5254,15 @@ "id": "ModelReference", "properties": { "datasetId": { - "description": "[Required] The ID of the dataset containing this model.", + "description": "Required. The ID of the dataset containing this model.", "type": "string" }, "modelId": { - "description": "[Required] The ID of the model. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.", + "description": "Required. The ID of the model. 
The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.", "type": "string" }, "projectId": { - "description": "[Required] The ID of the project containing this model.", + "description": "Required. The ID of the project containing this model.", "type": "string" } }, @@ -5785,6 +5819,46 @@ }, "type": "object" }, + "RemoteModelInfo": { + "description": "Remote Model Info", + "id": "RemoteModelInfo", + "properties": { + "connection": { + "description": "Output only. Fully qualified name of the user-provided connection object of the remote model. Format: ```\"projects/{project_id}/locations/{location_id}/connections/{connection_id}\"```", + "readOnly": true, + "type": "string" + }, + "endpoint": { + "description": "Output only. The endpoint for remote model.", + "readOnly": true, + "type": "string" + }, + "maxBatchingRows": { + "description": "Output only. Max number of rows in each batch sent to the remote service. If unset, the number of rows in each batch is set dynamically.", + "format": "int64", + "readOnly": true, + "type": "string" + }, + "remoteServiceType": { + "description": "Output only. The remote service type for remote model.", + "enum": [ + "REMOTE_SERVICE_TYPE_UNSPECIFIED", + "CLOUD_AI_TRANSLATE_V3", + "CLOUD_AI_VISION_V1", + "CLOUD_AI_NATURAL_LANGUAGE_V1" + ], + "enumDescriptions": [ + "Unspecified remote service type.", + "V3 Cloud AI Translation API. See more details at [Cloud Translation API] (https://cloud.google.com/translate/docs/reference/rest).", + "V1 Cloud AI Vision API See more details at [Cloud Vision API] (https://cloud.google.com/vision/docs/reference/rest).", + "V1 Cloud AI Natural Language API. See more details at [REST Resource: documents](https://cloud.google.com/natural-language/docs/reference/rest/v1/documents)." 
+ ], + "readOnly": true, + "type": "string" + } + }, + "type": "object" + }, "Routine": { "description": "A user-defined function or a stored procedure.", "id": "Routine", @@ -5884,13 +5958,15 @@ "ROUTINE_TYPE_UNSPECIFIED", "SCALAR_FUNCTION", "PROCEDURE", - "TABLE_VALUED_FUNCTION" + "TABLE_VALUED_FUNCTION", + "AGGREGATE_FUNCTION" ], "enumDescriptions": [ "", - "Non-builtin permanent scalar function.", + "Non-built-in persistent scalar function.", "Stored procedure.", - "Non-builtin permanent TVF." + "Non-built-in persistent TVF.", + "Non-built-in persistent aggregate function." ], "type": "string" }, @@ -5909,15 +5985,15 @@ "id": "RoutineReference", "properties": { "datasetId": { - "description": "[Required] The ID of the dataset containing this routine.", + "description": "Required. The ID of the dataset containing this routine.", "type": "string" }, "projectId": { - "description": "[Required] The ID of the project containing this routine.", + "description": "Required. The ID of the project containing this routine.", "type": "string" }, "routineId": { - "description": "[Required] The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters.", + "description": "Required. The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters.", "type": "string" } }, @@ -5977,19 +6053,19 @@ "id": "RowAccessPolicyReference", "properties": { "datasetId": { - "description": "[Required] The ID of the dataset containing this row access policy.", + "description": "Required. The ID of the dataset containing this row access policy.", "type": "string" }, "policyId": { - "description": "[Required] The ID of the row access policy. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters.", + "description": "Required. The ID of the row access policy. 
The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters.", "type": "string" }, "projectId": { - "description": "[Required] The ID of the project containing this row access policy.", + "description": "Required. The ID of the project containing this row access policy.", "type": "string" }, "tableId": { - "description": "[Required] The ID of the table containing this row access policy.", + "description": "Required. The ID of the table containing this row access policy.", "type": "string" } }, @@ -6435,62 +6511,62 @@ "$ref": "ModelDefinition", "description": "[Output-only, Beta] Present iff this table represents a ML model. Describes the training information for the model, and it is required to run 'PREDICT' queries." }, - "numBytes": { - "description": "[Output-only] The size of this table in bytes, excluding any data in the streaming buffer.", + "numActiveLogicalBytes": { + "description": "[Output-only] Number of logical bytes that are less than 90 days old.", "format": "int64", "type": "string" }, - "numLongTermBytes": { - "description": "[Output-only] The number of bytes in the table that are considered \"long-term storage\".", + "numActivePhysicalBytes": { + "description": "[Output-only] Number of physical bytes less than 90 days old. This data is not kept in real time, and might be delayed by a few seconds to a few minutes.", "format": "int64", "type": "string" }, - "numPhysicalBytes": { - "description": "[Output-only] [TrustedTester] The physical size of this table in bytes, excluding any data in the streaming buffer. 
This includes compression and storage used for time travel.", + "numBytes": { + "description": "[Output-only] The size of this table in bytes, excluding any data in the streaming buffer.", "format": "int64", "type": "string" }, - "numRows": { - "description": "[Output-only] The number of rows of data in this table, excluding any data in the streaming buffer.", - "format": "uint64", + "numLongTermBytes": { + "description": "[Output-only] The number of bytes in the table that are considered \"long-term storage\".", + "format": "int64", "type": "string" }, - "num_active_logical_bytes": { - "description": "[Output-only] Number of logical bytes that are less than 90 days old.", + "numLongTermLogicalBytes": { + "description": "[Output-only] Number of logical bytes that are more than 90 days old.", "format": "int64", "type": "string" }, - "num_active_physical_bytes": { - "description": "[Output-only] Number of physical bytes less than 90 days old. This data is not kept in real time, and might be delayed by a few seconds to a few minutes.", + "numLongTermPhysicalBytes": { + "description": "[Output-only] Number of physical bytes more than 90 days old. This data is not kept in real time, and might be delayed by a few seconds to a few minutes.", "format": "int64", "type": "string" }, - "num_long_term_logical_bytes": { - "description": "[Output-only] Number of logical bytes that are more than 90 days old.", + "numPartitions": { + "description": "[Output-only] The number of partitions present in the table or materialized view. This data is not kept in real time, and might be delayed by a few seconds to a few minutes.", "format": "int64", "type": "string" }, - "num_long_term_physical_bytes": { - "description": "[Output-only] Number of physical bytes more than 90 days old. 
This data is not kept in real time, and might be delayed by a few seconds to a few minutes.", + "numPhysicalBytes": { + "description": "[Output-only] [TrustedTester] The physical size of this table in bytes, excluding any data in the streaming buffer. This includes compression and storage used for time travel.", "format": "int64", "type": "string" }, - "num_partitions": { - "description": "[Output-only] The number of partitions present in the table or materialized view. This data is not kept in real time, and might be delayed by a few seconds to a few minutes.", - "format": "int64", + "numRows": { + "description": "[Output-only] The number of rows of data in this table, excluding any data in the streaming buffer.", + "format": "uint64", "type": "string" }, - "num_time_travel_physical_bytes": { + "numTimeTravelPhysicalBytes": { "description": "[Output-only] Number of physical bytes used by time travel storage (deleted or changed data). This data is not kept in real time, and might be delayed by a few seconds to a few minutes.", "format": "int64", "type": "string" }, - "num_total_logical_bytes": { + "numTotalLogicalBytes": { "description": "[Output-only] Total number of logical bytes in the table or materialized view.", "format": "int64", "type": "string" }, - "num_total_physical_bytes": { + "numTotalPhysicalBytes": { "description": "[Output-only] The physical size of this table in bytes. This also includes storage used for time travel. This data is not kept in real time, and might be delayed by a few seconds to a few minutes.", "format": "int64", "type": "string" @@ -6520,6 +6596,10 @@ "$ref": "Streamingbuffer", "description": "[Output-only] Contains information regarding this table's streaming buffer, if one is present. This field will be absent if the table is not being streamed to or if there is no data in the streaming buffer." }, + "tableConstraints": { + "$ref": "TableConstraints", + "description": "[Optional] The table constraints on the table." 
+ }, "tableReference": { "$ref": "TableReference", "description": "[Required] Reference describing the ID of this table." @@ -6548,6 +6628,64 @@ }, "type": "object" }, + "TableConstraints": { + "id": "TableConstraints", + "properties": { + "foreignKeys": { + "description": "[Optional] The foreign keys of the tables.", + "items": { + "properties": { + "columnReferences": { + "items": { + "properties": { + "referencedColumn": { + "type": "string" + }, + "referencingColumn": { + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "name": { + "type": "string" + }, + "referencedTable": { + "properties": { + "datasetId": { + "type": "string" + }, + "projectId": { + "type": "string" + }, + "tableId": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "type": "array" + }, + "primaryKey": { + "description": "[Optional] The primary key of the table.", + "properties": { + "columns": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, "TableDataInsertAllRequest": { "id": "TableDataInsertAllRequest", "properties": { @@ -6942,6 +7080,10 @@ "description": "If true, detect step changes and make data adjustment in the input time series.", "type": "boolean" }, + "approxGlobalFeatureContrib": { + "description": "Whether to use approximate feature contribution method in XGBoost model explanation for global explain.", + "type": "boolean" + }, "autoArima": { "description": "Whether to enable auto ARIMA or not.", "type": "boolean" @@ -7352,6 +7494,10 @@ }, "type": "array" }, + "instanceWeightColumn": { + "description": "Name of the instance weight column for training data. 
This column isn't be used as a feature.", + "type": "string" + }, "integratedGradientsNumSteps": { "description": "Number of integral steps for the integrated gradients explain method.", "format": "int64", @@ -7528,6 +7674,10 @@ "format": "double", "type": "number" }, + "tfVersion": { + "description": "Based on the selected TF version, the corresponding docker image is used to train external models.", + "type": "string" + }, "timeSeriesDataColumn": { "description": "Column to be designated as time series data for ARIMA model.", "type": "string" @@ -7587,6 +7737,10 @@ "warmStart": { "description": "Whether to train a model from the last checkpoint.", "type": "boolean" + }, + "xgboostVersion": { + "description": "User-selected XGBoost versions for training of XGBoost models.", + "type": "string" } }, "type": "object"