Add extra BigQuery options to DLP inspect job trigger (#6749) (#12980)
Signed-off-by: Modular Magician <magic-modules@google.com>
modular-magician committed Nov 8, 2022
1 parent 12d406e commit e623d9f
Showing 4 changed files with 345 additions and 4 deletions.
3 changes: 3 additions & 0 deletions .changelog/6749.txt
@@ -0,0 +1,3 @@
```release-note:enhancement
dlp: added fields `rows_limit`, `rows_limit_percent`, and `sample_method` to `big_query_options` in `google_data_loss_prevention_job_trigger`
```
99 changes: 99 additions & 0 deletions google/resource_data_loss_prevention_job_trigger.go
@@ -225,6 +225,28 @@ Only for use with external storage. Possible values: ["BASIC_COLUMNS", "GCS_COLU
},
},
},
"rows_limit": {
Type: schema.TypeInt,
Optional: true,
Description: `Max number of rows to scan. If the table has more rows than this value, the rest of the rows are omitted.
If not set, or if set to 0, all rows will be scanned. Only one of rowsLimit and rowsLimitPercent can be
specified. Cannot be used in conjunction with TimespanConfig.`,
},
"rows_limit_percent": {
Type: schema.TypeInt,
Optional: true,
Description: `Max percentage of rows to scan. The rest are omitted. The number of rows scanned is rounded down.
Must be between 0 and 100, inclusive. Both 0 and 100 mean no limit. Defaults to 0. Only one of
rowsLimit and rowsLimitPercent can be specified. Cannot be used in conjunction with TimespanConfig.`,
},
"sample_method": {
Type: schema.TypeString,
Optional: true,
ValidateFunc: validateEnum([]string{"TOP", "RANDOM_START", ""}),
Description: `How to sample rows if not all rows are scanned. Meaningful only when used in conjunction with either
rowsLimit or rowsLimitPercent. If not specified, rows are scanned in the order BigQuery reads them. Default value: "TOP" Possible values: ["TOP", "RANDOM_START"]`,
Default: "TOP",
},
},
},
},
@@ -1086,6 +1108,12 @@ func flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptions(v
transformed := make(map[string]interface{})
transformed["table_reference"] =
flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReference(original["tableReference"], d, config)
transformed["rows_limit"] =
flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsRowsLimit(original["rowsLimit"], d, config)
transformed["rows_limit_percent"] =
flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsRowsLimitPercent(original["rowsLimitPercent"], d, config)
transformed["sample_method"] =
flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsSampleMethod(original["sampleMethod"], d, config)
return []interface{}{transformed}
}
func flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReference(v interface{}, d *schema.ResourceData, config *Config) interface{} {
@@ -1117,6 +1145,44 @@ func flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTa
return v
}

func flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsRowsLimit(v interface{}, d *schema.ResourceData, config *Config) interface{} {
// Handles the string fixed64 format
if strVal, ok := v.(string); ok {
if intVal, err := stringToFixed64(strVal); err == nil {
return intVal
}
}

// number values are represented as float64
if floatVal, ok := v.(float64); ok {
intVal := int(floatVal)
return intVal
}

return v // let terraform core handle it otherwise
}

func flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsRowsLimitPercent(v interface{}, d *schema.ResourceData, config *Config) interface{} {
// Handles the string fixed64 format
if strVal, ok := v.(string); ok {
if intVal, err := stringToFixed64(strVal); err == nil {
return intVal
}
}

// number values are represented as float64
if floatVal, ok := v.(float64); ok {
intVal := int(floatVal)
return intVal
}

return v // let terraform core handle it otherwise
}

func flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsSampleMethod(v interface{}, d *schema.ResourceData, config *Config) interface{} {
return v
}
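
The two rows-limit flatteners above handle both JSON number and string representations, delegating the string case to a `stringToFixed64` helper defined elsewhere in the provider. A minimal sketch of what such a helper plausibly looks like, assuming it parses a base-10 decimal string into an int64 (the provider's actual implementation may differ):

```go
package main

import (
	"fmt"
	"strconv"
)

// stringToFixed64 parses the string encoding that the DLP API uses for
// 64-bit integers (e.g. "1000") into an int64.
// Hypothetical sketch; the provider defines its own helper elsewhere.
func stringToFixed64(v string) (int64, error) {
	return strconv.ParseInt(v, 10, 64)
}

func main() {
	n, err := stringToFixed64("1000")
	fmt.Println(n, err) // 1000 <nil>
}
```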

func flattenDataLossPreventionJobTriggerInspectJobActions(v interface{}, d *schema.ResourceData, config *Config) interface{} {
if v == nil {
return v
@@ -1671,6 +1737,27 @@ func expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptions(v
transformed["tableReference"] = transformedTableReference
}

transformedRowsLimit, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsRowsLimit(original["rows_limit"], d, config)
if err != nil {
return nil, err
} else if val := reflect.ValueOf(transformedRowsLimit); val.IsValid() && !isEmptyValue(val) {
transformed["rowsLimit"] = transformedRowsLimit
}

transformedRowsLimitPercent, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsRowsLimitPercent(original["rows_limit_percent"], d, config)
if err != nil {
return nil, err
} else if val := reflect.ValueOf(transformedRowsLimitPercent); val.IsValid() && !isEmptyValue(val) {
transformed["rowsLimitPercent"] = transformedRowsLimitPercent
}

transformedSampleMethod, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsSampleMethod(original["sample_method"], d, config)
if err != nil {
return nil, err
} else if val := reflect.ValueOf(transformedSampleMethod); val.IsValid() && !isEmptyValue(val) {
transformed["sampleMethod"] = transformedSampleMethod
}

return transformed, nil
}
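
Each field expanded above is gated on `isEmptyValue`, so Go zero values (an unset `rows_limit`, an empty `sample_method`) are dropped from the request body rather than sent as explicit zeros. A minimal sketch of that check, modeled on the emptiness test in `encoding/json` and assuming the `reflect` import already present in this file; the provider's actual helper may cover more kinds:

```go
// isEmptyValue reports whether v holds its type's zero value; empty
// fields are omitted from the API request body.
// Hypothetical sketch modeled on encoding/json's emptiness check.
func isEmptyValue(v reflect.Value) bool {
	switch v.Kind() {
	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
		return v.Len() == 0
	case reflect.Bool:
		return !v.Bool()
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return v.Int() == 0
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return v.Uint() == 0
	case reflect.Float32, reflect.Float64:
		return v.Float() == 0
	case reflect.Interface, reflect.Ptr:
		return v.IsNil()
	}
	return false
}
```

Note that because `sample_method` has a schema default of `"TOP"`, it passes this check and is sent even when omitted from the configuration, while an unset `rows_limit` (zero) is dropped.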

@@ -1719,6 +1806,18 @@ func expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTab
return v, nil
}

func expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsRowsLimit(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) {
return v, nil
}

func expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsRowsLimitPercent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) {
return v, nil
}

func expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsSampleMethod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) {
return v, nil
}

func expandDataLossPreventionJobTriggerInspectJobActions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) {
l := v.([]interface{})
req := make([]interface{}, 0, len(l))
140 changes: 138 additions & 2 deletions google/resource_data_loss_prevention_job_trigger_generated_test.go
@@ -68,8 +68,8 @@ resource "google_data_loss_prevention_job_trigger" "basic" {
save_findings {
output_config {
table {
project_id = "asdf"
dataset_id = "asdf"
project_id = "project"
dataset_id = "dataset"
}
}
}
@@ -86,6 +86,142 @@ resource "google_data_loss_prevention_job_trigger" "basic" {
`, context)
}
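
These test helpers render their HCL through `Nprintf`, which substitutes `%{key}` placeholders (here `%{project}` and `%{random_suffix}`) from the context map. A plausible sketch of that substitution, assuming simple string replacement; the provider's real helper may differ in details:

```go
package main

import (
	"fmt"
	"strings"
)

// Nprintf replaces each %{key} placeholder in format with the
// corresponding value from params.
// Sketch for illustration; see the provider's utils for the real helper.
func Nprintf(format string, params map[string]interface{}) string {
	for key, val := range params {
		format = strings.ReplaceAll(format, "%{"+key+"}", fmt.Sprintf("%v", val))
	}
	return format
}

func main() {
	out := Nprintf(`parent = "projects/%{project}"`,
		map[string]interface{}{"project": "my-proj"})
	fmt.Println(out) // parent = "projects/my-proj"
}
```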

func TestAccDataLossPreventionJobTrigger_dlpJobTriggerBigqueryRowLimitExample(t *testing.T) {
t.Parallel()

context := map[string]interface{}{
"project": getTestProjectFromEnv(),
"random_suffix": randString(t, 10),
}

vcrTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckDataLossPreventionJobTriggerDestroyProducer(t),
Steps: []resource.TestStep{
{
Config: testAccDataLossPreventionJobTrigger_dlpJobTriggerBigqueryRowLimitExample(context),
},
{
ResourceName: "google_data_loss_prevention_job_trigger.bigquery_row_limit",
ImportState: true,
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{"parent"},
},
},
})
}

func testAccDataLossPreventionJobTrigger_dlpJobTriggerBigqueryRowLimitExample(context map[string]interface{}) string {
return Nprintf(`
resource "google_data_loss_prevention_job_trigger" "bigquery_row_limit" {
parent = "projects/%{project}"
description = "Description"
display_name = "Displayname"
triggers {
schedule {
recurrence_period_duration = "86400s"
}
}
inspect_job {
inspect_template_name = "fake"
actions {
save_findings {
output_config {
table {
project_id = "project"
dataset_id = "dataset"
}
}
}
}
storage_config {
big_query_options {
table_reference {
project_id = "project"
dataset_id = "dataset"
table_id = "table_to_scan"
}
rows_limit = 1000
sample_method = "RANDOM_START"
}
}
}
}
`, context)
}

func TestAccDataLossPreventionJobTrigger_dlpJobTriggerBigqueryRowLimitPercentageExample(t *testing.T) {
t.Parallel()

context := map[string]interface{}{
"project": getTestProjectFromEnv(),
"random_suffix": randString(t, 10),
}

vcrTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckDataLossPreventionJobTriggerDestroyProducer(t),
Steps: []resource.TestStep{
{
Config: testAccDataLossPreventionJobTrigger_dlpJobTriggerBigqueryRowLimitPercentageExample(context),
},
{
ResourceName: "google_data_loss_prevention_job_trigger.bigquery_row_limit_percentage",
ImportState: true,
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{"parent"},
},
},
})
}

func testAccDataLossPreventionJobTrigger_dlpJobTriggerBigqueryRowLimitPercentageExample(context map[string]interface{}) string {
return Nprintf(`
resource "google_data_loss_prevention_job_trigger" "bigquery_row_limit_percentage" {
parent = "projects/%{project}"
description = "Description"
display_name = "Displayname"
triggers {
schedule {
recurrence_period_duration = "86400s"
}
}
inspect_job {
inspect_template_name = "fake"
actions {
save_findings {
output_config {
table {
project_id = "project"
dataset_id = "dataset"
}
}
}
}
storage_config {
big_query_options {
table_reference {
project_id = "project"
dataset_id = "dataset"
table_id = "table_to_scan"
}
rows_limit_percent = 50
sample_method = "RANDOM_START"
}
}
}
}
`, context)
}

func testAccCheckDataLossPreventionJobTriggerDestroyProducer(t *testing.T) func(s *terraform.State) error {
return func(s *terraform.State) error {
for name, rs := range s.RootModule().Resources {
