
Commit e7a8f0e

committed Oct 25, 2022
feat(bigquery): update the api
#### bigquery:v2

The following keys were added:
- schemas.Dataset.properties.storageBillingModel.type (Total Keys: 1)
- schemas.ExternalDataConfiguration.properties.metadataCacheMode.type (Total Keys: 1)
- schemas.ExternalDataConfiguration.properties.objectMetadata.type (Total Keys: 1)
1 parent 98ef5fc commit e7a8f0e

File tree

4 files changed (+44, -1 lines)

 

docs/dyn/bigquery_v2.datasets.html (+7)
@@ -181,6 +181,7 @@ <h3>Method Details</h3>
 &quot;maxTimeTravelHours&quot;: &quot;A String&quot;, # [Optional] Number of hours for the max time travel for all tables in the dataset.
 &quot;satisfiesPzs&quot;: True or False, # [Output-only] Reserved for future use.
 &quot;selfLink&quot;: &quot;A String&quot;, # [Output-only] A URL that can be used to access the resource again. You can use this URL in Get or Update requests to the resource.
+&quot;storageBillingModel&quot;: &quot;A String&quot;, # [Optional] Storage billing model to be used for all tables in the dataset. Can be set to PHYSICAL. Default is LOGICAL.
 &quot;tags&quot;: [ # [Optional]The tags associated with this dataset. Tag keys are globally unique.
 {
 &quot;tagKey&quot;: &quot;A String&quot;, # [Required] The namespaced friendly name of the tag key, e.g. &quot;12345/environment&quot; where 12345 is org id.
@@ -254,6 +255,7 @@ <h3>Method Details</h3>
 &quot;maxTimeTravelHours&quot;: &quot;A String&quot;, # [Optional] Number of hours for the max time travel for all tables in the dataset.
 &quot;satisfiesPzs&quot;: True or False, # [Output-only] Reserved for future use.
 &quot;selfLink&quot;: &quot;A String&quot;, # [Output-only] A URL that can be used to access the resource again. You can use this URL in Get or Update requests to the resource.
+&quot;storageBillingModel&quot;: &quot;A String&quot;, # [Optional] Storage billing model to be used for all tables in the dataset. Can be set to PHYSICAL. Default is LOGICAL.
 &quot;tags&quot;: [ # [Optional]The tags associated with this dataset. Tag keys are globally unique.
 {
 &quot;tagKey&quot;: &quot;A String&quot;, # [Required] The namespaced friendly name of the tag key, e.g. &quot;12345/environment&quot; where 12345 is org id.
@@ -321,6 +323,7 @@ <h3>Method Details</h3>
 &quot;maxTimeTravelHours&quot;: &quot;A String&quot;, # [Optional] Number of hours for the max time travel for all tables in the dataset.
 &quot;satisfiesPzs&quot;: True or False, # [Output-only] Reserved for future use.
 &quot;selfLink&quot;: &quot;A String&quot;, # [Output-only] A URL that can be used to access the resource again. You can use this URL in Get or Update requests to the resource.
+&quot;storageBillingModel&quot;: &quot;A String&quot;, # [Optional] Storage billing model to be used for all tables in the dataset. Can be set to PHYSICAL. Default is LOGICAL.
 &quot;tags&quot;: [ # [Optional]The tags associated with this dataset. Tag keys are globally unique.
 {
 &quot;tagKey&quot;: &quot;A String&quot;, # [Required] The namespaced friendly name of the tag key, e.g. &quot;12345/environment&quot; where 12345 is org id.
@@ -445,6 +448,7 @@ <h3>Method Details</h3>
 &quot;maxTimeTravelHours&quot;: &quot;A String&quot;, # [Optional] Number of hours for the max time travel for all tables in the dataset.
 &quot;satisfiesPzs&quot;: True or False, # [Output-only] Reserved for future use.
 &quot;selfLink&quot;: &quot;A String&quot;, # [Output-only] A URL that can be used to access the resource again. You can use this URL in Get or Update requests to the resource.
+&quot;storageBillingModel&quot;: &quot;A String&quot;, # [Optional] Storage billing model to be used for all tables in the dataset. Can be set to PHYSICAL. Default is LOGICAL.
 &quot;tags&quot;: [ # [Optional]The tags associated with this dataset. Tag keys are globally unique.
 {
 &quot;tagKey&quot;: &quot;A String&quot;, # [Required] The namespaced friendly name of the tag key, e.g. &quot;12345/environment&quot; where 12345 is org id.
@@ -512,6 +516,7 @@ <h3>Method Details</h3>
 &quot;maxTimeTravelHours&quot;: &quot;A String&quot;, # [Optional] Number of hours for the max time travel for all tables in the dataset.
 &quot;satisfiesPzs&quot;: True or False, # [Output-only] Reserved for future use.
 &quot;selfLink&quot;: &quot;A String&quot;, # [Output-only] A URL that can be used to access the resource again. You can use this URL in Get or Update requests to the resource.
+&quot;storageBillingModel&quot;: &quot;A String&quot;, # [Optional] Storage billing model to be used for all tables in the dataset. Can be set to PHYSICAL. Default is LOGICAL.
 &quot;tags&quot;: [ # [Optional]The tags associated with this dataset. Tag keys are globally unique.
 {
 &quot;tagKey&quot;: &quot;A String&quot;, # [Required] The namespaced friendly name of the tag key, e.g. &quot;12345/environment&quot; where 12345 is org id.
@@ -586,6 +591,7 @@ <h3>Method Details</h3>
 &quot;maxTimeTravelHours&quot;: &quot;A String&quot;, # [Optional] Number of hours for the max time travel for all tables in the dataset.
 &quot;satisfiesPzs&quot;: True or False, # [Output-only] Reserved for future use.
 &quot;selfLink&quot;: &quot;A String&quot;, # [Output-only] A URL that can be used to access the resource again. You can use this URL in Get or Update requests to the resource.
+&quot;storageBillingModel&quot;: &quot;A String&quot;, # [Optional] Storage billing model to be used for all tables in the dataset. Can be set to PHYSICAL. Default is LOGICAL.
 &quot;tags&quot;: [ # [Optional]The tags associated with this dataset. Tag keys are globally unique.
 {
 &quot;tagKey&quot;: &quot;A String&quot;, # [Required] The namespaced friendly name of the tag key, e.g. &quot;12345/environment&quot; where 12345 is org id.
@@ -653,6 +659,7 @@ <h3>Method Details</h3>
 &quot;maxTimeTravelHours&quot;: &quot;A String&quot;, # [Optional] Number of hours for the max time travel for all tables in the dataset.
 &quot;satisfiesPzs&quot;: True or False, # [Output-only] Reserved for future use.
 &quot;selfLink&quot;: &quot;A String&quot;, # [Output-only] A URL that can be used to access the resource again. You can use this URL in Get or Update requests to the resource.
+&quot;storageBillingModel&quot;: &quot;A String&quot;, # [Optional] Storage billing model to be used for all tables in the dataset. Can be set to PHYSICAL. Default is LOGICAL.
 &quot;tags&quot;: [ # [Optional]The tags associated with this dataset. Tag keys are globally unique.
 {
 &quot;tagKey&quot;: &quot;A String&quot;, # [Required] The namespaced friendly name of the tag key, e.g. &quot;12345/environment&quot; where 12345 is org id.
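The new storageBillingModel field rides on the Dataset resource, so it can be passed straight through datasets().insert or datasets().patch. A minimal sketch with the Python client, assuming application default credentials and placeholder project and dataset IDs:

from googleapiclient.discovery import build

# Build the BigQuery v2 client (credentials are resolved from the environment).
bigquery = build('bigquery', 'v2')

# Placeholder identifiers; substitute real ones.
dataset = {
    'datasetReference': {'projectId': 'my-project', 'datasetId': 'my_dataset'},
    # New in this revision: bill storage by physical bytes.
    # Defaults to LOGICAL when omitted.
    'storageBillingModel': 'PHYSICAL',
}

created = bigquery.datasets().insert(projectId='my-project', body=dataset).execute()
print(created.get('storageBillingModel'))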

docs/dyn/bigquery_v2.jobs.html (+10)
@@ -409,6 +409,8 @@ <h3>Method Details</h3>
 },
 &quot;ignoreUnknownValues&quot;: True or False, # [Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don&#x27;t match any column names Google Cloud Bigtable: This setting is ignored. Google Cloud Datastore backups: This setting is ignored. Avro: This setting is ignored.
 &quot;maxBadRecords&quot;: 42, # [Optional] The maximum number of bad records that BigQuery can ignore when reading data. If the number of bad records exceeds this value, an invalid error is returned in the job result. This is only valid for CSV, JSON, and Google Sheets. The default value is 0, which requires that all records are valid. This setting is ignored for Google Cloud Bigtable, Google Cloud Datastore backups and Avro formats.
+&quot;metadataCacheMode&quot;: &quot;A String&quot;, # [Optional] Metadata Cache Mode for the table. Set this to enable caching of metadata from external data source.
+&quot;objectMetadata&quot;: &quot;A String&quot;, # ObjectMetadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the source_uris. If ObjectMetadata is set, source_format should be omitted. Currently SIMPLE is the only supported Object Metadata type.
 &quot;parquetOptions&quot;: { # Additional properties to set if sourceFormat is set to Parquet.
 &quot;enableListInference&quot;: True or False, # [Optional] Indicates whether to use schema inference specifically for Parquet LIST logical type.
 &quot;enumAsString&quot;: True or False, # [Optional] Indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.
@@ -1102,6 +1104,8 @@ <h3>Method Details</h3>
 },
 &quot;ignoreUnknownValues&quot;: True or False, # [Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don&#x27;t match any column names Google Cloud Bigtable: This setting is ignored. Google Cloud Datastore backups: This setting is ignored. Avro: This setting is ignored.
 &quot;maxBadRecords&quot;: 42, # [Optional] The maximum number of bad records that BigQuery can ignore when reading data. If the number of bad records exceeds this value, an invalid error is returned in the job result. This is only valid for CSV, JSON, and Google Sheets. The default value is 0, which requires that all records are valid. This setting is ignored for Google Cloud Bigtable, Google Cloud Datastore backups and Avro formats.
+&quot;metadataCacheMode&quot;: &quot;A String&quot;, # [Optional] Metadata Cache Mode for the table. Set this to enable caching of metadata from external data source.
+&quot;objectMetadata&quot;: &quot;A String&quot;, # ObjectMetadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the source_uris. If ObjectMetadata is set, source_format should be omitted. Currently SIMPLE is the only supported Object Metadata type.
 &quot;parquetOptions&quot;: { # Additional properties to set if sourceFormat is set to Parquet.
 &quot;enableListInference&quot;: True or False, # [Optional] Indicates whether to use schema inference specifically for Parquet LIST logical type.
 &quot;enumAsString&quot;: True or False, # [Optional] Indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.
@@ -1866,6 +1870,8 @@ <h3>Method Details</h3>
 },
 &quot;ignoreUnknownValues&quot;: True or False, # [Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don&#x27;t match any column names Google Cloud Bigtable: This setting is ignored. Google Cloud Datastore backups: This setting is ignored. Avro: This setting is ignored.
 &quot;maxBadRecords&quot;: 42, # [Optional] The maximum number of bad records that BigQuery can ignore when reading data. If the number of bad records exceeds this value, an invalid error is returned in the job result. This is only valid for CSV, JSON, and Google Sheets. The default value is 0, which requires that all records are valid. This setting is ignored for Google Cloud Bigtable, Google Cloud Datastore backups and Avro formats.
+&quot;metadataCacheMode&quot;: &quot;A String&quot;, # [Optional] Metadata Cache Mode for the table. Set this to enable caching of metadata from external data source.
+&quot;objectMetadata&quot;: &quot;A String&quot;, # ObjectMetadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the source_uris. If ObjectMetadata is set, source_format should be omitted. Currently SIMPLE is the only supported Object Metadata type.
 &quot;parquetOptions&quot;: { # Additional properties to set if sourceFormat is set to Parquet.
 &quot;enableListInference&quot;: True or False, # [Optional] Indicates whether to use schema inference specifically for Parquet LIST logical type.
 &quot;enumAsString&quot;: True or False, # [Optional] Indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.
@@ -2534,6 +2540,8 @@ <h3>Method Details</h3>
 },
 &quot;ignoreUnknownValues&quot;: True or False, # [Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don&#x27;t match any column names Google Cloud Bigtable: This setting is ignored. Google Cloud Datastore backups: This setting is ignored. Avro: This setting is ignored.
 &quot;maxBadRecords&quot;: 42, # [Optional] The maximum number of bad records that BigQuery can ignore when reading data. If the number of bad records exceeds this value, an invalid error is returned in the job result. This is only valid for CSV, JSON, and Google Sheets. The default value is 0, which requires that all records are valid. This setting is ignored for Google Cloud Bigtable, Google Cloud Datastore backups and Avro formats.
+&quot;metadataCacheMode&quot;: &quot;A String&quot;, # [Optional] Metadata Cache Mode for the table. Set this to enable caching of metadata from external data source.
+&quot;objectMetadata&quot;: &quot;A String&quot;, # ObjectMetadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the source_uris. If ObjectMetadata is set, source_format should be omitted. Currently SIMPLE is the only supported Object Metadata type.
 &quot;parquetOptions&quot;: { # Additional properties to set if sourceFormat is set to Parquet.
 &quot;enableListInference&quot;: True or False, # [Optional] Indicates whether to use schema inference specifically for Parquet LIST logical type.
 &quot;enumAsString&quot;: True or False, # [Optional] Indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.
@@ -3225,6 +3233,8 @@ <h3>Method Details</h3>
 },
 &quot;ignoreUnknownValues&quot;: True or False, # [Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don&#x27;t match any column names Google Cloud Bigtable: This setting is ignored. Google Cloud Datastore backups: This setting is ignored. Avro: This setting is ignored.
 &quot;maxBadRecords&quot;: 42, # [Optional] The maximum number of bad records that BigQuery can ignore when reading data. If the number of bad records exceeds this value, an invalid error is returned in the job result. This is only valid for CSV, JSON, and Google Sheets. The default value is 0, which requires that all records are valid. This setting is ignored for Google Cloud Bigtable, Google Cloud Datastore backups and Avro formats.
+&quot;metadataCacheMode&quot;: &quot;A String&quot;, # [Optional] Metadata Cache Mode for the table. Set this to enable caching of metadata from external data source.
+&quot;objectMetadata&quot;: &quot;A String&quot;, # ObjectMetadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the source_uris. If ObjectMetadata is set, source_format should be omitted. Currently SIMPLE is the only supported Object Metadata type.
 &quot;parquetOptions&quot;: { # Additional properties to set if sourceFormat is set to Parquet.
 &quot;enableListInference&quot;: True or False, # [Optional] Indicates whether to use schema inference specifically for Parquet LIST logical type.
 &quot;enumAsString&quot;: True or False, # [Optional] Indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.
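In the job surface these fields live on the ExternalDataConfiguration entries under a query configuration's tableDefinitions. A hedged sketch of passing the new metadataCacheMode key there; the project, bucket, and the AUTOMATIC value are assumptions, since the descriptions above do not enumerate the allowed cache modes:

from googleapiclient.discovery import build

bigquery = build('bigquery', 'v2')

# Query a temporary external table defined inline (placeholder names throughout).
job = {
    'configuration': {
        'query': {
            'query': 'SELECT COUNT(*) FROM external_events',
            'useLegacySql': False,
            'tableDefinitions': {
                'external_events': {
                    'sourceFormat': 'PARQUET',
                    'sourceUris': ['gs://my-bucket/events/*.parquet'],
                    # New key from this revision; value is an assumption.
                    'metadataCacheMode': 'AUTOMATIC',
                },
            },
        },
    },
}

submitted = bigquery.jobs().insert(projectId='my-project', body=job).execute()
print(submitted['jobReference']['jobId'])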

docs/dyn/bigquery_v2.tables.html (+14)
@@ -218,6 +218,8 @@ <h3>Method Details</h3>
 },
 &quot;ignoreUnknownValues&quot;: True or False, # [Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don&#x27;t match any column names Google Cloud Bigtable: This setting is ignored. Google Cloud Datastore backups: This setting is ignored. Avro: This setting is ignored.
 &quot;maxBadRecords&quot;: 42, # [Optional] The maximum number of bad records that BigQuery can ignore when reading data. If the number of bad records exceeds this value, an invalid error is returned in the job result. This is only valid for CSV, JSON, and Google Sheets. The default value is 0, which requires that all records are valid. This setting is ignored for Google Cloud Bigtable, Google Cloud Datastore backups and Avro formats.
+&quot;metadataCacheMode&quot;: &quot;A String&quot;, # [Optional] Metadata Cache Mode for the table. Set this to enable caching of metadata from external data source.
+&quot;objectMetadata&quot;: &quot;A String&quot;, # ObjectMetadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the source_uris. If ObjectMetadata is set, source_format should be omitted. Currently SIMPLE is the only supported Object Metadata type.
 &quot;parquetOptions&quot;: { # Additional properties to set if sourceFormat is set to Parquet.
 &quot;enableListInference&quot;: True or False, # [Optional] Indicates whether to use schema inference specifically for Parquet LIST logical type.
 &quot;enumAsString&quot;: True or False, # [Optional] Indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.
@@ -533,6 +535,8 @@ <h3>Method Details</h3>
 },
 &quot;ignoreUnknownValues&quot;: True or False, # [Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don&#x27;t match any column names Google Cloud Bigtable: This setting is ignored. Google Cloud Datastore backups: This setting is ignored. Avro: This setting is ignored.
 &quot;maxBadRecords&quot;: 42, # [Optional] The maximum number of bad records that BigQuery can ignore when reading data. If the number of bad records exceeds this value, an invalid error is returned in the job result. This is only valid for CSV, JSON, and Google Sheets. The default value is 0, which requires that all records are valid. This setting is ignored for Google Cloud Bigtable, Google Cloud Datastore backups and Avro formats.
+&quot;metadataCacheMode&quot;: &quot;A String&quot;, # [Optional] Metadata Cache Mode for the table. Set this to enable caching of metadata from external data source.
+&quot;objectMetadata&quot;: &quot;A String&quot;, # ObjectMetadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the source_uris. If ObjectMetadata is set, source_format should be omitted. Currently SIMPLE is the only supported Object Metadata type.
 &quot;parquetOptions&quot;: { # Additional properties to set if sourceFormat is set to Parquet.
 &quot;enableListInference&quot;: True or False, # [Optional] Indicates whether to use schema inference specifically for Parquet LIST logical type.
 &quot;enumAsString&quot;: True or False, # [Optional] Indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.
@@ -789,6 +793,8 @@ <h3>Method Details</h3>
 },
 &quot;ignoreUnknownValues&quot;: True or False, # [Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don&#x27;t match any column names Google Cloud Bigtable: This setting is ignored. Google Cloud Datastore backups: This setting is ignored. Avro: This setting is ignored.
 &quot;maxBadRecords&quot;: 42, # [Optional] The maximum number of bad records that BigQuery can ignore when reading data. If the number of bad records exceeds this value, an invalid error is returned in the job result. This is only valid for CSV, JSON, and Google Sheets. The default value is 0, which requires that all records are valid. This setting is ignored for Google Cloud Bigtable, Google Cloud Datastore backups and Avro formats.
+&quot;metadataCacheMode&quot;: &quot;A String&quot;, # [Optional] Metadata Cache Mode for the table. Set this to enable caching of metadata from external data source.
+&quot;objectMetadata&quot;: &quot;A String&quot;, # ObjectMetadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the source_uris. If ObjectMetadata is set, source_format should be omitted. Currently SIMPLE is the only supported Object Metadata type.
 &quot;parquetOptions&quot;: { # Additional properties to set if sourceFormat is set to Parquet.
 &quot;enableListInference&quot;: True or False, # [Optional] Indicates whether to use schema inference specifically for Parquet LIST logical type.
 &quot;enumAsString&quot;: True or False, # [Optional] Indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.
@@ -1128,6 +1134,8 @@ <h3>Method Details</h3>
 },
 &quot;ignoreUnknownValues&quot;: True or False, # [Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don&#x27;t match any column names Google Cloud Bigtable: This setting is ignored. Google Cloud Datastore backups: This setting is ignored. Avro: This setting is ignored.
 &quot;maxBadRecords&quot;: 42, # [Optional] The maximum number of bad records that BigQuery can ignore when reading data. If the number of bad records exceeds this value, an invalid error is returned in the job result. This is only valid for CSV, JSON, and Google Sheets. The default value is 0, which requires that all records are valid. This setting is ignored for Google Cloud Bigtable, Google Cloud Datastore backups and Avro formats.
+&quot;metadataCacheMode&quot;: &quot;A String&quot;, # [Optional] Metadata Cache Mode for the table. Set this to enable caching of metadata from external data source.
+&quot;objectMetadata&quot;: &quot;A String&quot;, # ObjectMetadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the source_uris. If ObjectMetadata is set, source_format should be omitted. Currently SIMPLE is the only supported Object Metadata type.
 &quot;parquetOptions&quot;: { # Additional properties to set if sourceFormat is set to Parquet.
 &quot;enableListInference&quot;: True or False, # [Optional] Indicates whether to use schema inference specifically for Parquet LIST logical type.
 &quot;enumAsString&quot;: True or False, # [Optional] Indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.
@@ -1385,6 +1393,8 @@ <h3>Method Details</h3>
 },
 &quot;ignoreUnknownValues&quot;: True or False, # [Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don&#x27;t match any column names Google Cloud Bigtable: This setting is ignored. Google Cloud Datastore backups: This setting is ignored. Avro: This setting is ignored.
 &quot;maxBadRecords&quot;: 42, # [Optional] The maximum number of bad records that BigQuery can ignore when reading data. If the number of bad records exceeds this value, an invalid error is returned in the job result. This is only valid for CSV, JSON, and Google Sheets. The default value is 0, which requires that all records are valid. This setting is ignored for Google Cloud Bigtable, Google Cloud Datastore backups and Avro formats.
+&quot;metadataCacheMode&quot;: &quot;A String&quot;, # [Optional] Metadata Cache Mode for the table. Set this to enable caching of metadata from external data source.
+&quot;objectMetadata&quot;: &quot;A String&quot;, # ObjectMetadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the source_uris. If ObjectMetadata is set, source_format should be omitted. Currently SIMPLE is the only supported Object Metadata type.
 &quot;parquetOptions&quot;: { # Additional properties to set if sourceFormat is set to Parquet.
 &quot;enableListInference&quot;: True or False, # [Optional] Indicates whether to use schema inference specifically for Parquet LIST logical type.
 &quot;enumAsString&quot;: True or False, # [Optional] Indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.
@@ -1756,6 +1766,8 @@ <h3>Method Details</h3>
 },
 &quot;ignoreUnknownValues&quot;: True or False, # [Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don&#x27;t match any column names Google Cloud Bigtable: This setting is ignored. Google Cloud Datastore backups: This setting is ignored. Avro: This setting is ignored.
 &quot;maxBadRecords&quot;: 42, # [Optional] The maximum number of bad records that BigQuery can ignore when reading data. If the number of bad records exceeds this value, an invalid error is returned in the job result. This is only valid for CSV, JSON, and Google Sheets. The default value is 0, which requires that all records are valid. This setting is ignored for Google Cloud Bigtable, Google Cloud Datastore backups and Avro formats.
+&quot;metadataCacheMode&quot;: &quot;A String&quot;, # [Optional] Metadata Cache Mode for the table. Set this to enable caching of metadata from external data source.
+&quot;objectMetadata&quot;: &quot;A String&quot;, # ObjectMetadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the source_uris. If ObjectMetadata is set, source_format should be omitted. Currently SIMPLE is the only supported Object Metadata type.
 &quot;parquetOptions&quot;: { # Additional properties to set if sourceFormat is set to Parquet.
 &quot;enableListInference&quot;: True or False, # [Optional] Indicates whether to use schema inference specifically for Parquet LIST logical type.
 &quot;enumAsString&quot;: True or False, # [Optional] Indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.
@@ -2013,6 +2025,8 @@ <h3>Method Details</h3>
 },
 &quot;ignoreUnknownValues&quot;: True or False, # [Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don&#x27;t match any column names Google Cloud Bigtable: This setting is ignored. Google Cloud Datastore backups: This setting is ignored. Avro: This setting is ignored.
 &quot;maxBadRecords&quot;: 42, # [Optional] The maximum number of bad records that BigQuery can ignore when reading data. If the number of bad records exceeds this value, an invalid error is returned in the job result. This is only valid for CSV, JSON, and Google Sheets. The default value is 0, which requires that all records are valid. This setting is ignored for Google Cloud Bigtable, Google Cloud Datastore backups and Avro formats.
+&quot;metadataCacheMode&quot;: &quot;A String&quot;, # [Optional] Metadata Cache Mode for the table. Set this to enable caching of metadata from external data source.
+&quot;objectMetadata&quot;: &quot;A String&quot;, # ObjectMetadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the source_uris. If ObjectMetadata is set, source_format should be omitted. Currently SIMPLE is the only supported Object Metadata type.
 &quot;parquetOptions&quot;: { # Additional properties to set if sourceFormat is set to Parquet.
 &quot;enableListInference&quot;: True or False, # [Optional] Indicates whether to use schema inference specifically for Parquet LIST logical type.
 &quot;enumAsString&quot;: True or False, # [Optional] Indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.
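Together, objectMetadata and metadataCacheMode let tables().insert define an object table over Cloud Storage. A minimal sketch, assuming placeholder project, dataset, bucket, and connection IDs; per the field description above, sourceFormat is omitted when objectMetadata is set, and AUTOMATIC is an assumed cache-mode value:

from googleapiclient.discovery import build

bigquery = build('bigquery', 'v2')

table = {
    'tableReference': {
        'projectId': 'my-project',
        'datasetId': 'my_dataset',
        'tableId': 'image_objects',
    },
    'externalDataConfiguration': {
        # SIMPLE is currently the only supported object metadata type.
        'objectMetadata': 'SIMPLE',
        'sourceUris': ['gs://my-bucket/images/*'],
        # New key from this revision; value is an assumption.
        'metadataCacheMode': 'AUTOMATIC',
        # Object tables typically also reference an existing BigQuery
        # connection; shown here with a placeholder ID.
        'connectionId': 'my-project.us.my-connection',
    },
}

created = bigquery.tables().insert(
    projectId='my-project', datasetId='my_dataset', body=table
).execute()
print(created['type'])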

googleapiclient/discovery_cache/documents/bigquery.v2.json (+13, -1)
@@ -1710,7 +1710,7 @@
 }
 }
 },
-"revision": "20220924",
+"revision": "20221015",
 "rootUrl": "https://bigquery.googleapis.com/",
 "schemas": {
 "AggregateClassificationMetrics": {
@@ -2856,6 +2856,10 @@
 "description": "[Output-only] A URL that can be used to access the resource again. You can use this URL in Get or Update requests to the resource.",
 "type": "string"
 },
+"storageBillingModel": {
+  "description": "[Optional] Storage billing model to be used for all tables in the dataset. Can be set to PHYSICAL. Default is LOGICAL.",
+  "type": "string"
+},
 "tags": {
 "description": "[Optional]The tags associated with this dataset. Tag keys are globally unique.",
 "items": {
@@ -3435,6 +3439,14 @@
 "format": "int32",
 "type": "integer"
 },
+"metadataCacheMode": {
+  "description": "[Optional] Metadata Cache Mode for the table. Set this to enable caching of metadata from external data source.",
+  "type": "string"
+},
+"objectMetadata": {
+  "description": "ObjectMetadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the source_uris. If ObjectMetadata is set, source_format should be omitted. Currently SIMPLE is the only supported Object Metadata type.",
+  "type": "string"
+},
 "parquetOptions": {
 "$ref": "ParquetOptions",
 "description": "Additional properties to set if sourceFormat is set to Parquet."

0 commit comments
