diff --git a/.changelog/6742.txt b/.changelog/6742.txt new file mode 100644 index 00000000000..6ac623ea0e4 --- /dev/null +++ b/.changelog/6742.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +`google_filestore_backup` +``` diff --git a/google/provider.go b/google/provider.go index 0228b81dc1d..70d381adbb7 100644 --- a/google/provider.go +++ b/google/provider.go @@ -934,9 +934,9 @@ func Provider() *schema.Provider { return provider } -// Generated resources: 250 +// Generated resources: 251 // Generated IAM resources: 156 -// Total generated resources: 406 +// Total generated resources: 407 func ResourceMap() map[string]*schema.Resource { resourceMap, _ := ResourceMapWithErrors() return resourceMap @@ -1177,6 +1177,7 @@ func ResourceMapWithErrors() (map[string]*schema.Resource, error) { "google_essential_contacts_contact": resourceEssentialContactsContact(), "google_filestore_instance": resourceFilestoreInstance(), "google_filestore_snapshot": resourceFilestoreSnapshot(), + "google_filestore_backup": resourceFilestoreBackup(), "google_firestore_index": resourceFirestoreIndex(), "google_firestore_document": resourceFirestoreDocument(), "google_game_services_realm": resourceGameServicesRealm(), diff --git a/google/resource_filestore_backup.go b/google/resource_filestore_backup.go new file mode 100644 index 00000000000..f6e5b38636c --- /dev/null +++ b/google/resource_filestore_backup.go @@ -0,0 +1,528 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package google + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func resourceFilestoreBackup() *schema.Resource { + return &schema.Resource{ + Create: resourceFilestoreBackupCreate, + Read: resourceFilestoreBackupRead, + Update: resourceFilestoreBackupUpdate, + Delete: resourceFilestoreBackupDelete, + + Importer: &schema.ResourceImporter{ + State: resourceFilestoreBackupImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the location of the instance. This can be a region for ENTERPRISE tier instances.`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The resource name of the backup. The name must be unique within the specified instance. + +The name must be 1-63 characters long, and comply with +RFC1035. Specifically, the name must be 1-63 characters long and match +the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' 
which means the +first character must be a lowercase letter, and all following +characters must be a dash, lowercase letter, or digit, except the last +character, which cannot be a dash.`, + }, + "source_file_share": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Name of the file share in the source Cloud Filestore instance that the backup is created from.`, + }, + "source_instance": { + Type: schema.TypeString, + Required: true, + Description: `The resource name of the source Cloud Filestore instance, in the format projects/{projectId}/locations/{locationId}/instances/{instanceId}, used to create this backup.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `A description of the backup with 2048 characters or less. Requests with longer descriptions will be rejected.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Resource labels to represent user-provided metadata.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "capacity_gb": { + Type: schema.TypeString, + Computed: true, + Description: `The amount of bytes needed to allocate a full copy of the snapshot content.`, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time when the snapshot was created in RFC3339 text format.`, + }, + "download_bytes": { + Type: schema.TypeString, + Computed: true, + Description: `Amount of bytes that will be downloaded if the backup is restored.`, + }, + "kms_key_name": { + Type: schema.TypeString, + Computed: true, + Description: `KMS key name used for data encryption.`, + }, + "source_instance_tier": { + Type: schema.TypeString, + Computed: true, + Description: `The service tier of the source Cloud Filestore instance that this backup is created from.`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `The backup state.`, + }, + "storage_bytes": { + Type: schema.TypeString, + Computed: true, + Description: 
`The size of the storage used by the backup. As backups share storage, this number is expected to change with backup creation/deletion.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceFilestoreBackupCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandFilestoreBackupDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + labelsProp, err := expandFilestoreBackupLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + sourceInstanceProp, err := expandFilestoreBackupSourceInstance(d.Get("source_instance"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("source_instance"); !isEmptyValue(reflect.ValueOf(sourceInstanceProp)) && (ok || !reflect.DeepEqual(v, sourceInstanceProp)) { + obj["sourceInstance"] = sourceInstanceProp + } + sourceFileShareProp, err := expandFilestoreBackupSourceFileShare(d.Get("source_file_share"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("source_file_share"); !isEmptyValue(reflect.ValueOf(sourceFileShareProp)) && (ok || !reflect.DeepEqual(v, sourceFileShareProp)) { + obj["sourceFileShare"] = sourceFileShareProp + } + + lockName, err := replaceVars(d, config, "filestore/{{project}}") + if err != nil { + return err + } + mutexKV.Lock(lockName) + defer mutexKV.Unlock(lockName) + + url, err 
:= replaceVars(d, config, "{{FilestoreBasePath}}projects/{{project}}/locations/{{location}}/backups?backupId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Backup: %#v", obj) + billingProject := "" + + project, err := getProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Backup: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate), isNotFilestoreQuotaError) + if err != nil { + return fmt.Errorf("Error creating Backup: %s", err) + } + + // Store the ID now + id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/backups/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = filestoreOperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating Backup", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create Backup: %s", err) + } + + // This may have caused the ID to update - update it if so. 
+ id, err = replaceVars(d, config, "projects/{{project}}/locations/{{location}}/backups/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Backup %q: %#v", d.Id(), res) + + return resourceFilestoreBackupRead(d, meta) +} + +func resourceFilestoreBackupRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + url, err := replaceVars(d, config, "{{FilestoreBasePath}}projects/{{project}}/locations/{{location}}/backups/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := getProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Backup: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil, isNotFilestoreQuotaError) + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("FilestoreBackup %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) + } + + if err := d.Set("description", flattenFilestoreBackupDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) + } + if err := d.Set("state", flattenFilestoreBackupState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) + } + if err := d.Set("create_time", flattenFilestoreBackupCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) + } + if err := d.Set("labels", flattenFilestoreBackupLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) + } + if err := 
d.Set("capacity_gb", flattenFilestoreBackupCapacityGb(res["capacityGb"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) + } + if err := d.Set("storage_bytes", flattenFilestoreBackupStorageBytes(res["storageBytes"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) + } + if err := d.Set("source_instance", flattenFilestoreBackupSourceInstance(res["sourceInstance"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) + } + if err := d.Set("source_file_share", flattenFilestoreBackupSourceFileShare(res["sourceFileShare"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) + } + if err := d.Set("source_instance_tier", flattenFilestoreBackupSourceInstanceTier(res["sourceInstanceTier"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) + } + if err := d.Set("download_bytes", flattenFilestoreBackupDownloadBytes(res["downloadBytes"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) + } + if err := d.Set("kms_key_name", flattenFilestoreBackupKmsKeyName(res["kmsKeyName"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) + } + + return nil +} + +func resourceFilestoreBackupUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := getProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Backup: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + descriptionProp, err := expandFilestoreBackupDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + 
labelsProp, err := expandFilestoreBackupLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + sourceInstanceProp, err := expandFilestoreBackupSourceInstance(d.Get("source_instance"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("source_instance"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sourceInstanceProp)) { + obj["sourceInstance"] = sourceInstanceProp + } + + lockName, err := replaceVars(d, config, "filestore/{{project}}") + if err != nil { + return err + } + mutexKV.Lock(lockName) + defer mutexKV.Unlock(lockName) + + url, err := replaceVars(d, config, "{{FilestoreBasePath}}projects/{{project}}/locations/{{location}}/backups/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Backup %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("labels") { + updateMask = append(updateMask, "labels") + } + + if d.HasChange("source_instance") { + updateMask = append(updateMask, "sourceInstance") + } + // updateMask is a URL parameter but not present in the schema, so replaceVars + // won't set it + url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate), isNotFilestoreQuotaError) + + if err != nil { + return fmt.Errorf("Error updating Backup %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Backup %q: %#v", d.Id(), res) + } + + err = 
filestoreOperationWaitTime(
+		config, res, project, "Updating Backup", userAgent,
+		d.Timeout(schema.TimeoutUpdate))
+
+	if err != nil {
+		return err
+	}
+
+	return resourceFilestoreBackupRead(d, meta)
+}
+
+func resourceFilestoreBackupDelete(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+	userAgent, err := generateUserAgentString(d, config.userAgent)
+	if err != nil {
+		return err
+	}
+
+	billingProject := ""
+
+	project, err := getProject(d, config)
+	if err != nil {
+		return fmt.Errorf("Error fetching project for Backup: %s", err)
+	}
+	billingProject = project
+
+	lockName, err := replaceVars(d, config, "filestore/{{project}}")
+	if err != nil {
+		return err
+	}
+	mutexKV.Lock(lockName)
+	defer mutexKV.Unlock(lockName)
+
+	url, err := replaceVars(d, config, "{{FilestoreBasePath}}projects/{{project}}/locations/{{location}}/backups/{{name}}")
+	if err != nil {
+		return err
+	}
+
+	var obj map[string]interface{}
+	log.Printf("[DEBUG] Deleting Backup %q", d.Id())
+
+	// err == nil indicates that the billing_project value was found
+	if bp, err := getBillingProject(d, config); err == nil {
+		billingProject = bp
+	}
+
+	res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete), isNotFilestoreQuotaError)
+	if err != nil {
+		return handleNotFoundError(err, d, "Backup")
+	}
+
+	err = filestoreOperationWaitTime(
+		config, res, project, "Deleting Backup", userAgent,
+		d.Timeout(schema.TimeoutDelete))
+
+	if err != nil {
+		return err
+	}
+
+	log.Printf("[DEBUG] Finished deleting Backup %q: %#v", d.Id(), res)
+	return nil
+}
+
+func resourceFilestoreBackupImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
+	config := meta.(*Config)
+	if err := parseImportId([]string{
+		"projects/(?P<project>[^/]+)/locations/(?P<location>[^/]+)/backups/(?P<name>[^/]+)",
+		"(?P<project>[^/]+)/(?P<location>[^/]+)/(?P<name>[^/]+)",
+		"(?P<location>[^/]+)/(?P<name>[^/]+)",
+	}, d, config); err != nil {
+		return nil, err
+	}
+
+	// 
Replace import id for the resource id + id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/backups/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenFilestoreBackupDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenFilestoreBackupState(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenFilestoreBackupCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenFilestoreBackupLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenFilestoreBackupCapacityGb(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenFilestoreBackupStorageBytes(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenFilestoreBackupSourceInstance(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenFilestoreBackupSourceFileShare(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenFilestoreBackupSourceInstanceTier(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenFilestoreBackupDownloadBytes(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenFilestoreBackupKmsKeyName(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func expandFilestoreBackupDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandFilestoreBackupLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := 
make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandFilestoreBackupSourceInstance(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandFilestoreBackupSourceFileShare(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} diff --git a/google/resource_filestore_backup_generated_test.go b/google/resource_filestore_backup_generated_test.go new file mode 100644 index 00000000000..1dd9bda7320 --- /dev/null +++ b/google/resource_filestore_backup_generated_test.go @@ -0,0 +1,117 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package google + +import ( + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func TestAccFilestoreBackup_filestoreBackupBasicExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckFilestoreBackupDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccFilestoreBackup_filestoreBackupBasicExample(context), + }, + { + ResourceName: "google_filestore_backup.backup", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "location"}, + }, + }, + }) +} + +func testAccFilestoreBackup_filestoreBackupBasicExample(context map[string]interface{}) string { + return Nprintf(` + +resource "google_filestore_instance" "instance" { + name = "tf-test-tf-fs-inst%{random_suffix}" + location = "us-central1-b" + tier = "BASIC_SSD" + + file_shares { + capacity_gb = 2560 + name = "share1" + } + + networks { + network = "default" + modes = ["MODE_IPV4"] + connect_mode = "DIRECT_PEERING" + } +} + +resource "google_filestore_backup" "backup" { + name = "tf-test-tf-fs-bkup%{random_suffix}" + location = "us-central1" + source_instance = google_filestore_instance.instance.id + source_file_share = "share1" + + description = "This is a filestore backup for the test instance" + labels = { + "files":"label1", + "other-label": "label2" + } +} +`, context) +} + +func testAccCheckFilestoreBackupDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_filestore_backup" { + continue + } + if strings.HasPrefix(name, "data.") { + continue 
+ } + + config := googleProviderConfig(t) + + url, err := replaceVarsForTest(config, rs, "{{FilestoreBasePath}}projects/{{project}}/locations/{{location}}/backups/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + _, err = sendRequest(config, "GET", billingProject, url, config.userAgent, nil, isNotFilestoreQuotaError) + if err == nil { + return fmt.Errorf("FilestoreBackup still exists at %s", url) + } + } + + return nil + } +} diff --git a/google/resource_filestore_backup_sweeper_test.go b/google/resource_filestore_backup_sweeper_test.go new file mode 100644 index 00000000000..a65f9a06cfe --- /dev/null +++ b/google/resource_filestore_backup_sweeper_test.go @@ -0,0 +1,124 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func init() { + resource.AddTestSweepers("FilestoreBackup", &resource.Sweeper{ + Name: "FilestoreBackup", + F: testSweepFilestoreBackup, + }) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepFilestoreBackup(region string) error { + resourceName := "FilestoreBackup" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := getTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://file.googleapis.com/v1/projects/{{project}}/locations/{{location}}/backups", "?")[0] + listUrl, err := replaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := sendRequest(config, "GET", config.Project, listUrl, config.userAgent, nil) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["backups"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list 
response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !isSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://file.googleapis.com/v1/projects/{{project}}/locations/{{location}}/backups/{{name}}" + deleteUrl, err := replaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = sendRequest(config, "DELETE", config.Project, deleteUrl, config.userAgent, nil) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/google/resource_filestore_backup_test.go b/google/resource_filestore_backup_test.go new file mode 100644 index 00000000000..b560fc5d50b --- /dev/null +++ b/google/resource_filestore_backup_test.go @@ -0,0 +1,115 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccFilestoreBackup_update(t *testing.T) { + t.Parallel() + + instName := fmt.Sprintf("tf-fs-inst-%d", randInt(t)) + bkupName := fmt.Sprintf("tf-fs-bkup-%d", randInt(t)) + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: 
testAccCheckFilestoreBackupDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccFilestoreBackup_create(instName, bkupName), + }, + { + ResourceName: "google_filestore_backup.backup", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location"}, + }, + { + Config: testAccFilestoreBackup_update(instName, bkupName), + }, + { + ResourceName: "google_filestore_backup.backup", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "description", "location"}, + }, + }, + }) +} + +func testAccFilestoreBackup_create(instName string, bkupName string) string { + return fmt.Sprintf(` +resource "google_filestore_instance" "instance" { + name = "%s" + location = "us-central1-b" + tier = "BASIC_SSD" + + file_shares { + capacity_gb = 2560 + name = "share22" + } + + networks { + network = "default" + modes = ["MODE_IPV4"] + connect_mode = "DIRECT_PEERING" + } + description = "An instance created during testing." +} + +resource "google_filestore_backup" "backup" { + name = "%s" + location = "us-central1" + source_instance = google_filestore_instance.instance.id + source_file_share = "share22" + + description = "This is a filestore backup for the test instance" +} + +`, instName, bkupName) +} + +func testAccFilestoreBackup_update(instName string, bkupName string) string { + return fmt.Sprintf(` +resource "google_filestore_instance" "instance" { + name = "%s" + location = "us-central1-b" + tier = "BASIC_SSD" + + file_shares { + capacity_gb = 2560 + name = "share22" + } + + networks { + network = "default" + modes = ["MODE_IPV4"] + connect_mode = "DIRECT_PEERING" + } + + labels = { + "files":"label1", + "other-label": "update" + } + + description = "A modified instance during testing." 
+} + +resource "google_filestore_backup" "backup" { + name = "%s" + location = "us-central1" + source_instance = google_filestore_instance.instance.id + source_file_share = "share22" + + description = "This is an updated filestore backup for the test instance" + labels = { + "files":"label1", + "other-label": "update" + } +} + +`, instName, bkupName) +} diff --git a/google/resource_filestore_instance.go b/google/resource_filestore_instance.go index 3aded692c7a..8aa5958c573 100644 --- a/google/resource_filestore_instance.go +++ b/google/resource_filestore_instance.go @@ -123,6 +123,13 @@ for not allowing root access. The default is NO_ROOT_SQUASH. Default value: "NO_ }, }, }, + "source_backup": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name of the backup, in the format +projects/{projectId}/locations/{locationId}/backups/{backupId}, +that this file share has been restored from.`, + }, }, }, }, @@ -612,6 +619,7 @@ func flattenFilestoreInstanceFileShares(v interface{}, d *schema.ResourceData, c transformed = append(transformed, map[string]interface{}{ "name": flattenFilestoreInstanceFileSharesName(original["name"], d, config), "capacity_gb": flattenFilestoreInstanceFileSharesCapacityGb(original["capacityGb"], d, config), + "source_backup": flattenFilestoreInstanceFileSharesSourceBackup(original["sourceBackup"], d, config), "nfs_export_options": flattenFilestoreInstanceFileSharesNfsExportOptions(original["nfsExportOptions"], d, config), }) } @@ -638,6 +646,10 @@ func flattenFilestoreInstanceFileSharesCapacityGb(v interface{}, d *schema.Resou return v // let terraform core handle it otherwise } +func flattenFilestoreInstanceFileSharesSourceBackup(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + func flattenFilestoreInstanceFileSharesNfsExportOptions(v interface{}, d *schema.ResourceData, config *Config) interface{} { if v == nil { return v @@ -803,6 +815,13 @@ func expandFilestoreInstanceFileShares(v 
interface{}, d TerraformResourceData, c transformed["capacityGb"] = transformedCapacityGb } + transformedSourceBackup, err := expandFilestoreInstanceFileSharesSourceBackup(original["source_backup"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSourceBackup); val.IsValid() && !isEmptyValue(val) { + transformed["sourceBackup"] = transformedSourceBackup + } + transformedNfsExportOptions, err := expandFilestoreInstanceFileSharesNfsExportOptions(original["nfs_export_options"], d, config) if err != nil { return nil, err @@ -823,6 +842,10 @@ func expandFilestoreInstanceFileSharesCapacityGb(v interface{}, d TerraformResou return v, nil } +func expandFilestoreInstanceFileSharesSourceBackup(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + func expandFilestoreInstanceFileSharesNfsExportOptions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) diff --git a/website/docs/r/filestore_backup.html.markdown b/website/docs/r/filestore_backup.html.markdown new file mode 100644 index 00000000000..e9e58d0de2d --- /dev/null +++ b/website/docs/r/filestore_backup.html.markdown @@ -0,0 +1,167 @@ +--- +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file in +# .github/CONTRIBUTING.md. +# +# ---------------------------------------------------------------------------- +subcategory: "Filestore" +page_title: "Google: google_filestore_backup" +description: |- + A Google Cloud Filestore backup. +--- + +# google\_filestore\_backup + +A Google Cloud Filestore backup. 
+ + +To get more information about Backup, see: + +* [API documentation](https://cloud.google.com/filestore/docs/reference/rest/v1/projects.locations.instances.backups) +* How-to Guides + * [Official Documentation](https://cloud.google.com/filestore/docs/backups) + * [Creating Backups](https://cloud.google.com/filestore/docs/create-backups) + + +## Example Usage - Filestore Backup Basic + + +```hcl + +resource "google_filestore_instance" "instance" { + name = "tf-fs-inst" + location = "us-central1-b" + tier = "BASIC_SSD" + + file_shares { + capacity_gb = 2560 + name = "share1" + } + + networks { + network = "default" + modes = ["MODE_IPV4"] + connect_mode = "DIRECT_PEERING" + } +} + +resource "google_filestore_backup" "backup" { + name = "tf-fs-bkup" + location = "us-central1" + source_instance = google_filestore_instance.instance.id + source_file_share = "share1" + + description = "This is a filestore backup for the test instance" + labels = { + "files":"label1", + "other-label": "label2" + } +} +``` + +## Argument Reference + +The following arguments are supported: + + +* `name` - + (Required) + The resource name of the backup. The name must be unique within the specified instance. + The name must be 1-63 characters long, and comply with + RFC1035. Specifically, the name must be 1-63 characters long and match + the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the + first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last + character, which cannot be a dash. + +* `source_instance` - + (Required) + The resource name of the source Cloud Filestore instance, in the format projects/{projectId}/locations/{locationId}/instances/{instanceId}, used to create this backup. + +* `source_file_share` - + (Required) + Name of the file share in the source Cloud Filestore instance that the backup is created from. + +* `location` - + (Required) + The name of the location of the instance. 
This can be a region for ENTERPRISE tier instances.
+
+
+- - -
+
+
+* `description` -
+  (Optional)
+  A description of the backup with 2048 characters or less. Requests with longer descriptions will be rejected.
+
+* `labels` -
+  (Optional)
+  Resource labels to represent user-provided metadata.
+
+* `project` - (Optional) The ID of the project in which the resource belongs.
+    If it is not provided, the provider project is used.
+
+
+## Attributes Reference
+
+In addition to the arguments listed above, the following computed attributes are exported:
+
+* `id` - an identifier for the resource with format `projects/{{project}}/locations/{{location}}/backups/{{name}}`
+
+* `state` -
+  The backup state.
+
+* `create_time` -
+  The time when the backup was created in RFC3339 text format.
+
+* `capacity_gb` -
+  The amount of bytes needed to allocate a full copy of the backup content.
+
+* `storage_bytes` -
+  The size of the storage used by the backup. As backups share storage, this number is expected to change with backup creation/deletion.
+
+* `source_instance_tier` -
+  The service tier of the source Cloud Filestore instance that this backup is created from.
+
+* `download_bytes` -
+  Amount of bytes that will be downloaded if the backup is restored.
+
+* `kms_key_name` -
+  KMS key name used for data encryption.
+
+
+## Timeouts
+
+This resource provides the following
+[Timeouts](/docs/configuration/resources.html#timeouts) configuration options:
+
+- `create` - Default is 20 minutes.
+- `update` - Default is 20 minutes.
+- `delete` - Default is 20 minutes.
+ +## Import + + +Backup can be imported using any of these accepted formats: + +``` +$ terraform import google_filestore_backup.default projects/{{project}}/locations/{{location}}/backups/{{name}} +$ terraform import google_filestore_backup.default {{project}}/{{location}}/{{name}} +$ terraform import google_filestore_backup.default {{location}}/{{name}} +``` + +## User Project Overrides + +This resource supports [User Project Overrides](https://registry.terraform.io/providers/hashicorp/google/latest/docs/guides/provider_reference#user_project_override). diff --git a/website/docs/r/filestore_instance.html.markdown b/website/docs/r/filestore_instance.html.markdown index 406ae785da4..b92610dca29 100644 --- a/website/docs/r/filestore_instance.html.markdown +++ b/website/docs/r/filestore_instance.html.markdown @@ -166,6 +166,11 @@ The following arguments are supported: File share capacity in GiB. This must be at least 1024 GiB for the standard tier, or 2560 GiB for the premium tier. +* `source_backup` - + The resource name of the backup, in the format + projects/{projectId}/locations/{locationId}/backups/{backupId}, + that this file share has been restored from. + * `nfs_export_options` - (Optional) Nfs Export Options. There is a limit of 10 export options per file share.