Skip to content

Commit

Permalink
Support for cloning a persistent disk (#6637) (#12779)
Browse files Browse the repository at this point in the history
Signed-off-by: Modular Magician <magic-modules@google.com>

Signed-off-by: Modular Magician <magic-modules@google.com>
  • Loading branch information
modular-magician committed Oct 12, 2022
1 parent 6eb1805 commit fb85ff2
Show file tree
Hide file tree
Showing 7 changed files with 261 additions and 0 deletions.
3 changes: 3 additions & 0 deletions .changelog/6637.txt
@@ -0,0 +1,3 @@
```release-note:enhancement
compute: added `source_disk` field to `google_compute_disk` and `google_compute_region_disk` resources
```
56 changes: 56 additions & 0 deletions google/resource_compute_disk.go
Expand Up @@ -27,6 +27,16 @@ import (
"google.golang.org/api/googleapi"
)

// sourceDiskDiffSupress suppresses spurious diffs on the source_disk field
// when the API returns a beta-versioned self link while the configuration
// uses the v1 form, or a partial URL that the returned link ends with.
// NOTE: the name keeps the original spelling ("Supress") because the schema
// definitions reference it by this identifier.
func sourceDiskDiffSupress(_, old, new string, _ *schema.ResourceData) bool {
	oldPath := strings.TrimPrefix(old, "https://www.googleapis.com/compute/beta")
	newPath := strings.TrimPrefix(new, "https://www.googleapis.com/compute/v1")
	return strings.HasSuffix(oldPath, newPath)
}

// Is the new disk size smaller than the old one?
func isDiskShrinkage(_ context.Context, old, new, _ interface{}) bool {
// It's okay to remove size entirely.
Expand Down Expand Up @@ -411,6 +421,21 @@ following are valid values:
* 'projects/project/global/snapshots/snapshot'
* 'global/snapshots/snapshot'
* 'snapshot'`,
},
"source_disk": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
DiffSuppressFunc: sourceDiskDiffSupress,
Description: `The source disk used to create this disk. You can provide this as a partial or full URL to the resource.
For example, the following are valid values:
* https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/disks/{disk}
* https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/disks/{disk}
* projects/{project}/zones/{zone}/disks/{disk}
* projects/{project}/regions/{region}/disks/{disk}
* zones/{zone}/disks/{disk}
* regions/{region}/disks/{disk}`,
},
"source_image_encryption_key": {
Type: schema.TypeList,
Expand Down Expand Up @@ -537,6 +562,13 @@ internally during updates.`,
Computed: true,
Description: `Last detach timestamp in RFC3339 text format.`,
},
"source_disk_id": {
Type: schema.TypeString,
Computed: true,
Description: `The ID value of the disk used to create this image. This value may
be used to determine whether the image was taken from the current
or a previous instance of a given disk name.`,
},
"source_image_id": {
Type: schema.TypeString,
Computed: true,
Expand Down Expand Up @@ -625,6 +657,12 @@ func resourceComputeDiskCreate(d *schema.ResourceData, meta interface{}) error {
} else if v, ok := d.GetOkExists("physical_block_size_bytes"); !isEmptyValue(reflect.ValueOf(physicalBlockSizeBytesProp)) && (ok || !reflect.DeepEqual(v, physicalBlockSizeBytesProp)) {
obj["physicalBlockSizeBytes"] = physicalBlockSizeBytesProp
}
sourceDiskProp, err := expandComputeDiskSourceDisk(d.Get("source_disk"), d, config)
if err != nil {
return err
} else if v, ok := d.GetOkExists("source_disk"); !isEmptyValue(reflect.ValueOf(sourceDiskProp)) && (ok || !reflect.DeepEqual(v, sourceDiskProp)) {
obj["sourceDisk"] = sourceDiskProp
}
typeProp, err := expandComputeDiskType(d.Get("type"), d, config)
if err != nil {
return err
Expand Down Expand Up @@ -801,6 +839,12 @@ func resourceComputeDiskRead(d *schema.ResourceData, meta interface{}) error {
if err := d.Set("physical_block_size_bytes", flattenComputeDiskPhysicalBlockSizeBytes(res["physicalBlockSizeBytes"], d, config)); err != nil {
return fmt.Errorf("Error reading Disk: %s", err)
}
if err := d.Set("source_disk", flattenComputeDiskSourceDisk(res["sourceDisk"], d, config)); err != nil {
return fmt.Errorf("Error reading Disk: %s", err)
}
if err := d.Set("source_disk_id", flattenComputeDiskSourceDiskId(res["sourceDiskId"], d, config)); err != nil {
return fmt.Errorf("Error reading Disk: %s", err)
}
if err := d.Set("type", flattenComputeDiskType(res["type"], d, config)); err != nil {
return fmt.Errorf("Error reading Disk: %s", err)
}
Expand Down Expand Up @@ -1124,6 +1168,14 @@ func flattenComputeDiskPhysicalBlockSizeBytes(v interface{}, d *schema.ResourceD
return v // let terraform core handle it otherwise
}

// flattenComputeDiskSourceDisk returns the API's sourceDisk value unchanged;
// beta/v1 self-link format differences are reconciled at diff time (see the
// DiffSuppressFunc on the source_disk schema field), so no normalization is
// done here.
func flattenComputeDiskSourceDisk(v interface{}, d *schema.ResourceData, config *Config) interface{} {
return v
}

// flattenComputeDiskSourceDiskId passes the API's sourceDiskId value straight
// through to state; the field is read-only (Computed) so no transformation is
// required.
func flattenComputeDiskSourceDiskId(v interface{}, d *schema.ResourceData, config *Config) interface{} {
return v
}

func flattenComputeDiskType(v interface{}, d *schema.ResourceData, config *Config) interface{} {
if v == nil {
return v
Expand Down Expand Up @@ -1310,6 +1362,10 @@ func expandComputeDiskPhysicalBlockSizeBytes(v interface{}, d TerraformResourceD
return v, nil
}

// expandComputeDiskSourceDisk forwards the configured source_disk value to the
// API request as-is; the API accepts both partial and full disk URLs, so no
// expansion into a canonical self link is performed here.
func expandComputeDiskSourceDisk(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) {
return v, nil
}

func expandComputeDiskType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) {
f, err := parseZonalFieldValue("diskTypes", v.(string), "project", "zone", d, config, true)
if err != nil {
Expand Down
58 changes: 58 additions & 0 deletions google/resource_compute_disk_test.go
Expand Up @@ -585,6 +585,34 @@ func testAccCheckEncryptionKey(t *testing.T, n string, disk *compute.Disk) resou
}
}

// TestAccComputeDisk_cloneDisk provisions a base zonal disk, clones it through
// the new source_disk field (referencing the base disk's self_link), verifies
// the clone exists, and then checks that it imports cleanly with no state drift.
func TestAccComputeDisk_cloneDisk(t *testing.T) {
t.Parallel()
pid := getTestProjectFromEnv()
diskName := fmt.Sprintf("tf-test-%s", randString(t, 10))

var disk compute.Disk

vcrTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckComputeDiskDestroyProducer(t),
Steps: []resource.TestStep{
{
Config: testAccComputeDisk_diskClone(diskName, "self_link"),
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeDiskExists(
t, "google_compute_disk.disk-clone", pid, &disk),
),
},
{
// Import step: confirms the resource round-trips through import
// with the same attributes it was created with.
ResourceName: "google_compute_disk.disk-clone",
ImportState: true,
ImportStateVerify: true,
},
},
})
}

func testAccComputeDisk_basic(diskName string) string {
return fmt.Sprintf(`
data "google_compute_image" "my_image" {
Expand Down Expand Up @@ -833,3 +861,33 @@ resource "google_compute_disk" "foobar" {
}
`, diskName)
}

// testAccComputeDisk_diskClone renders a test configuration containing a base
// disk named diskName plus a second disk ("<diskName>-clone") created from it.
// refSelector chooses which attribute of the base disk (e.g. "self_link")
// feeds the clone's source_disk argument.
func testAccComputeDisk_diskClone(diskName, refSelector string) string {
	cloneName := diskName + "-clone"
	return fmt.Sprintf(`
data "google_compute_image" "my_image" {
family = "debian-11"
project = "debian-cloud"
}
resource "google_compute_disk" "foobar" {
name = "%s"
image = data.google_compute_image.my_image.self_link
size = 50
type = "pd-ssd"
zone = "us-central1-a"
labels = {
my-label = "my-label-value"
}
}
resource "google_compute_disk" "disk-clone" {
name = "%s"
source_disk = google_compute_disk.foobar.%s
type = "pd-ssd"
zone = "us-central1-a"
labels = {
my-label = "my-label-value"
}
}
`, diskName, cloneName, refSelector)
}
46 changes: 46 additions & 0 deletions google/resource_compute_region_disk.go
Expand Up @@ -171,6 +171,21 @@ valid values:
* 'projects/project/global/snapshots/snapshot'
* 'global/snapshots/snapshot'
* 'snapshot'`,
},
"source_disk": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
DiffSuppressFunc: sourceDiskDiffSupress,
Description: `The source disk used to create this disk. You can provide this as a partial or full URL to the resource.
For example, the following are valid values:
* https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/disks/{disk}
* https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/disks/{disk}
* projects/{project}/zones/{zone}/disks/{disk}
* projects/{project}/regions/{region}/disks/{disk}
* zones/{zone}/disks/{disk}
* regions/{region}/disks/{disk}`,
},
"source_snapshot_encryption_key": {
Type: schema.TypeList,
Expand Down Expand Up @@ -228,6 +243,13 @@ internally during updates.`,
Computed: true,
Description: `Last detach timestamp in RFC3339 text format.`,
},
"source_disk_id": {
Type: schema.TypeString,
Computed: true,
Description: `The ID value of the disk used to create this image. This value may
be used to determine whether the image was taken from the current
or a previous instance of a given disk name.`,
},
"source_snapshot_id": {
Type: schema.TypeString,
Computed: true,
Expand Down Expand Up @@ -319,6 +341,12 @@ func resourceComputeRegionDiskCreate(d *schema.ResourceData, meta interface{}) e
} else if v, ok := d.GetOkExists("type"); !isEmptyValue(reflect.ValueOf(typeProp)) && (ok || !reflect.DeepEqual(v, typeProp)) {
obj["type"] = typeProp
}
sourceDiskProp, err := expandComputeRegionDiskSourceDisk(d.Get("source_disk"), d, config)
if err != nil {
return err
} else if v, ok := d.GetOkExists("source_disk"); !isEmptyValue(reflect.ValueOf(sourceDiskProp)) && (ok || !reflect.DeepEqual(v, sourceDiskProp)) {
obj["sourceDisk"] = sourceDiskProp
}
regionProp, err := expandComputeRegionDiskRegion(d.Get("region"), d, config)
if err != nil {
return err
Expand Down Expand Up @@ -477,6 +505,12 @@ func resourceComputeRegionDiskRead(d *schema.ResourceData, meta interface{}) err
if err := d.Set("type", flattenComputeRegionDiskType(res["type"], d, config)); err != nil {
return fmt.Errorf("Error reading RegionDisk: %s", err)
}
if err := d.Set("source_disk", flattenComputeRegionDiskSourceDisk(res["sourceDisk"], d, config)); err != nil {
return fmt.Errorf("Error reading RegionDisk: %s", err)
}
if err := d.Set("source_disk_id", flattenComputeRegionDiskSourceDiskId(res["sourceDiskId"], d, config)); err != nil {
return fmt.Errorf("Error reading RegionDisk: %s", err)
}
if err := d.Set("region", flattenComputeRegionDiskRegion(res["region"], d, config)); err != nil {
return fmt.Errorf("Error reading RegionDisk: %s", err)
}
Expand Down Expand Up @@ -799,6 +833,14 @@ func flattenComputeRegionDiskType(v interface{}, d *schema.ResourceData, config
return NameFromSelfLinkStateFunc(v)
}

// flattenComputeRegionDiskSourceDisk returns the API's sourceDisk value
// unchanged; beta/v1 self-link format differences are reconciled at diff time
// (see the DiffSuppressFunc on the source_disk schema field), so no
// normalization is done here.
func flattenComputeRegionDiskSourceDisk(v interface{}, d *schema.ResourceData, config *Config) interface{} {
return v
}

// flattenComputeRegionDiskSourceDiskId passes the API's sourceDiskId value
// straight through to state; the field is read-only (Computed) so no
// transformation is required.
func flattenComputeRegionDiskSourceDiskId(v interface{}, d *schema.ResourceData, config *Config) interface{} {
return v
}

func flattenComputeRegionDiskRegion(v interface{}, d *schema.ResourceData, config *Config) interface{} {
if v == nil {
return v
Expand Down Expand Up @@ -924,6 +966,10 @@ func expandComputeRegionDiskType(v interface{}, d TerraformResourceData, config
return f.RelativeLink(), nil
}

// expandComputeRegionDiskSourceDisk forwards the configured source_disk value
// to the API request as-is; the API accepts both partial and full disk URLs,
// so no expansion into a canonical self link is performed here.
func expandComputeRegionDiskSourceDisk(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) {
return v, nil
}

func expandComputeRegionDiskRegion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) {
f, err := parseGlobalFieldValue("regions", v.(string), "project", d, config, true)
if err != nil {
Expand Down
66 changes: 66 additions & 0 deletions google/resource_compute_region_disk_test.go
Expand Up @@ -174,6 +174,34 @@ func TestAccComputeRegionDisk_deleteDetach(t *testing.T) {
})
}

// TestAccComputeRegionDisk_cloneDisk provisions a base regional disk (built
// from a snapshot), clones it through the new source_disk field (referencing
// the base disk's self_link), verifies the clone exists, and then checks that
// it imports cleanly with no state drift.
func TestAccComputeRegionDisk_cloneDisk(t *testing.T) {
t.Parallel()

diskName := fmt.Sprintf("tf-test-%s", randString(t, 10))

var disk compute.Disk

vcrTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckComputeRegionDiskDestroyProducer(t),
Steps: []resource.TestStep{
{
Config: testAccComputeRegionDisk_diskClone(diskName, "self_link"),
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeRegionDiskExists(
t, "google_compute_region_disk.regiondisk-clone", &disk),
),
},
{
// Import step: confirms the resource round-trips through import
// with the same attributes it was created with.
ResourceName: "google_compute_region_disk.regiondisk-clone",
ImportState: true,
ImportStateVerify: true,
},
},
})
}

func testAccCheckComputeRegionDiskExists(t *testing.T, n string, disk *compute.Disk) resource.TestCheckFunc {
return func(s *terraform.State) error {
p := getTestProjectFromEnv()
Expand Down Expand Up @@ -409,3 +437,41 @@ resource "google_compute_instance" "inst" {
}
`, diskName, diskName, regionDiskName, instanceName)
}

// testAccComputeRegionDisk_diskClone renders a test configuration with a base
// regional disk (created from a snapshot of a zonal disk, all sharing
// diskName) plus a regional disk "<diskName>-clone" created from it.
// refSelector chooses which attribute of the base regional disk
// (e.g. "self_link") feeds the clone's source_disk argument.
func testAccComputeRegionDisk_diskClone(diskName, refSelector string) string {
	cloneName := diskName + "-clone"
	return fmt.Sprintf(`
resource "google_compute_region_disk" "regiondisk" {
name = "%s"
snapshot = google_compute_snapshot.snapdisk.id
type = "pd-ssd"
region = "us-central1"
physical_block_size_bytes = 4096
replica_zones = ["us-central1-a", "us-central1-f"]
}
resource "google_compute_disk" "disk" {
name = "%s"
image = "debian-11-bullseye-v20220719"
size = 50
type = "pd-ssd"
zone = "us-central1-a"
}
resource "google_compute_snapshot" "snapdisk" {
name = "%s"
source_disk = google_compute_disk.disk.name
zone = "us-central1-a"
}
resource "google_compute_region_disk" "regiondisk-clone" {
name = "%s"
source_disk = google_compute_region_disk.regiondisk.%s
type = "pd-ssd"
region = "us-central1"
physical_block_size_bytes = 4096
replica_zones = ["us-central1-a", "us-central1-f"]
}
`, diskName, diskName, diskName, cloneName, refSelector)
}
16 changes: 16 additions & 0 deletions website/docs/r/compute_disk.html.markdown
Expand Up @@ -122,6 +122,17 @@ The following arguments are supported:
(Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html))
Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI.

* `source_disk` -
(Optional)
The source disk used to create this disk. You can provide this as a partial or full URL to the resource.
For example, the following are valid values:
* https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/disks/{disk}
* https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/disks/{disk}
* projects/{project}/zones/{zone}/disks/{disk}
* projects/{project}/regions/{region}/disks/{disk}
* zones/{zone}/disks/{disk}
* regions/{region}/disks/{disk}

* `type` -
(Optional)
URL of the disk type resource describing which disk type to use to
Expand Down Expand Up @@ -297,6 +308,11 @@ In addition to the arguments listed above, the following computed attributes are
Links to the users of the disk (attached instances) in form:
project/zones/zone/instances/instance

* `source_disk_id` -
The ID value of the source disk used to create this disk. This value may
be used to determine whether the disk was created from the current
or a previous instance of a given disk name.

* `source_image_id` -
The ID value of the image used to create this disk. This value
identifies the exact image that was used to create this persistent
Expand Down

0 comments on commit fb85ff2

Please sign in to comment.