Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Adding 'management' attribute to the google_container_cluster resource #12987

Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
3 changes: 3 additions & 0 deletions .changelog/6794.txt
@@ -0,0 +1,3 @@
```release-note:enhancement
container: added `management` attribute to the `google_container_cluster` resource
```
99 changes: 99 additions & 0 deletions google/resource_container_cluster.go
Expand Up @@ -517,6 +517,48 @@ func resourceContainerCluster() *schema.Resource {
},
},
},
"management": {
Type: schema.TypeList,
Optional: true,
Computed: true,
MaxItems: 1,
Description: `NodeManagement configuration for this NodePool.`,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"auto_upgrade": {
Type: schema.TypeBool,
Optional: true,
Computed: true,
Description: `Specifies whether node auto-upgrade is enabled for the node pool. If enabled, node auto-upgrade helps keep the nodes in your node pool up to date with the latest release version of Kubernetes.`,
},
"auto_repair": {
Type: schema.TypeBool,
Optional: true,
Computed: true,
Description: `Specifies whether the node auto-repair is enabled for the node pool. If enabled, the nodes in this node pool will be monitored and, if they fail health checks too many times, an automatic repair action will be triggered.`,
},
"upgrade_options": {
Type: schema.TypeList,
Computed: true,
Description: `Specifies the Auto Upgrade knobs for the node pool.`,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"auto_upgrade_start_time": {
Type: schema.TypeString,
Computed: true,
Description: `This field is set when upgrades are about to commence with the approximate start time for the upgrades, in RFC3339 text format.`,
},
"description": {
Type: schema.TypeString,
Computed: true,
Description: `This field is set when upgrades are about to commence with the description of the upgrade.`,
},
},
},
},
},
},
},
},
},
},
Expand Down Expand Up @@ -3301,6 +3343,7 @@ func expandAutoProvisioningDefaults(configured interface{}, d *schema.ResourceDa
ImageType: config["image_type"].(string),
BootDiskKmsKey: config["boot_disk_kms_key"].(string),
UpgradeSettings: expandUpgradeSettings(config["upgrade_settings"]),
Management: expandManagement(config["management"]),
}

if v, ok := config["shielded_instance_config"]; ok && len(v.([]interface{})) > 0 {
Expand All @@ -3321,6 +3364,37 @@ func expandAutoProvisioningDefaults(configured interface{}, d *schema.ResourceDa
return npd
}

// expandManagement converts the Terraform representation of the node pool
// `management` block into the API's NodeManagement object. When the block is
// unset or empty it returns an empty (non-nil) NodeManagement so callers can
// always send the field.
func expandManagement(configured interface{}) *container.NodeManagement {
	l, ok := configured.([]interface{})
	// len() of a nil slice is 0, so one length check covers both the nil and
	// empty-list cases.
	if !ok || len(l) == 0 || l[0] == nil {
		return &container.NodeManagement{}
	}
	config := l[0].(map[string]interface{})

	return &container.NodeManagement{
		AutoUpgrade:    config["auto_upgrade"].(bool),
		AutoRepair:     config["auto_repair"].(bool),
		UpgradeOptions: expandUpgradeOptions(config["upgrade_options"]),
	}
}

// expandUpgradeOptions converts the Terraform representation of the
// `upgrade_options` block into the API's AutoUpgradeOptions object. When the
// block is unset or empty it returns an empty (non-nil) AutoUpgradeOptions.
func expandUpgradeOptions(configured interface{}) *container.AutoUpgradeOptions {
	l, ok := configured.([]interface{})
	// len() of a nil slice is 0, so one length check covers both the nil and
	// empty-list cases.
	if !ok || len(l) == 0 || l[0] == nil {
		return &container.AutoUpgradeOptions{}
	}
	config := l[0].(map[string]interface{})

	return &container.AutoUpgradeOptions{
		AutoUpgradeStartTime: config["auto_upgrade_start_time"].(string),
		Description:          config["description"].(string),
	}
}

func expandUpgradeSettings(configured interface{}) *container.UpgradeSettings {
l, ok := configured.([]interface{})
if !ok || l == nil || len(l) == 0 || l[0] == nil {
Expand Down Expand Up @@ -4133,6 +4207,31 @@ func flattenAutoProvisioningDefaults(a *container.AutoprovisioningNodePoolDefaul
r["boot_disk_kms_key"] = a.BootDiskKmsKey
r["shielded_instance_config"] = flattenShieldedInstanceConfig(a.ShieldedInstanceConfig)
r["upgrade_settings"] = flattenUpgradeSettings(a.UpgradeSettings)
r["management"] = flattenManagement(a.Management)

return []map[string]interface{}{r}
}

// flattenManagement converts an API NodeManagement object into the
// single-element list-of-maps shape Terraform state expects. A nil input
// yields nil so the attribute is omitted from state entirely.
func flattenManagement(a *container.NodeManagement) []map[string]interface{} {
	if a == nil {
		return nil
	}

	return []map[string]interface{}{
		{
			"auto_upgrade":    a.AutoUpgrade,
			"auto_repair":     a.AutoRepair,
			"upgrade_options": flattenUpgradeOptions(a.UpgradeOptions),
		},
	}
}

// flattenUpgradeOptions converts an API AutoUpgradeOptions object into the
// single-element list-of-maps shape Terraform state expects. A nil input
// yields nil so the attribute is omitted from state entirely.
func flattenUpgradeOptions(a *container.AutoUpgradeOptions) []map[string]interface{} {
	if a == nil {
		return nil
	}

	return []map[string]interface{}{
		{
			"auto_upgrade_start_time": a.AutoUpgradeStartTime,
			"description":             a.Description,
		},
	}
}
Expand Down
63 changes: 63 additions & 0 deletions google/resource_container_cluster_test.go
Expand Up @@ -2226,6 +2226,38 @@ func TestAccContainerCluster_nodeAutoprovisioningDefaultsShieldedInstance(t *tes
})
}

// TestAccContainerCluster_autoprovisioningDefaultsManagement exercises the
// cluster_autoscaling.auto_provisioning_defaults.management block: create a
// cluster with auto-upgrade/auto-repair disabled, then flip both to enabled,
// verifying import round-trips after each apply.
func TestAccContainerCluster_autoprovisioningDefaultsManagement(t *testing.T) {
	t.Parallel()

	clusterName := fmt.Sprintf("tf-test-cluster-%s", randString(t, 10))

	// The same import verification follows each config step, so define it once.
	importStep := resource.TestStep{
		ResourceName:            "google_container_cluster.with_autoprovisioning_management",
		ImportState:             true,
		ImportStateVerify:       true,
		ImportStateVerifyIgnore: []string{"min_master_version"},
	}

	vcrTest(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckContainerClusterDestroyProducer(t),
		Steps: []resource.TestStep{
			{Config: testAccContainerCluster_autoprovisioningDefaultsManagement(clusterName, false, false)},
			importStep,
			{Config: testAccContainerCluster_autoprovisioningDefaultsManagement(clusterName, true, true)},
			importStep,
		},
	})
}

func TestAccContainerCluster_autoprovisioningDefaultsUpgradeSettings(t *testing.T) {
t.Parallel()

Expand Down Expand Up @@ -3767,6 +3799,37 @@ resource "google_container_cluster" "with_net_ref_by_name" {
`, network, cluster, cluster)
}

// testAccContainerCluster_autoprovisioningDefaultsManagement renders the HCL
// for a cluster with node auto-provisioning enabled and a management block
// whose auto_upgrade/auto_repair flags are set from the given booleans.
func testAccContainerCluster_autoprovisioningDefaultsManagement(clusterName string, autoUpgrade, autoRepair bool) string {
	return fmt.Sprintf(`
resource "google_container_cluster" "with_autoprovisioning_management" {
  name               = "%s"
  location           = "us-central1-f"
  initial_node_count = 1

  cluster_autoscaling {
    enabled = true

    resource_limits {
      resource_type = "cpu"
      maximum       = 2
    }

    resource_limits {
      resource_type = "memory"
      maximum       = 2048
    }

    auto_provisioning_defaults {
      management {
        auto_upgrade = %t
        auto_repair  = %t
      }
    }
  }
}
`, clusterName, autoUpgrade, autoRepair)
}

func testAccContainerCluster_backendRef(cluster string) string {
return fmt.Sprintf(`
resource "google_compute_backend_service" "my-backend-service" {
Expand Down
12 changes: 12 additions & 0 deletions website/docs/r/container_cluster.html.markdown
Expand Up @@ -519,6 +519,16 @@ as "Intel Haswell" or "Intel Sandy Bridge".

* `shielded_instance_config` - (Optional) Shielded Instance options. Structure is [documented below](#nested_shielded_instance_config).

* `management` - (Optional) NodeManagement configuration for this NodePool. Structure is [documented below](#nested_management).

<a name="nested_management"></a>The `management` block supports:

* `auto_upgrade` - (Optional) Specifies whether node auto-upgrade is enabled for the node pool. If enabled, node auto-upgrade helps keep the nodes in your node pool up to date with the latest release version of Kubernetes.

* `auto_repair` - (Optional) Specifies whether node auto-repair is enabled for the node pool. If enabled, the nodes in this node pool will be monitored and, if they fail health checks too many times, an automatic repair action will be triggered.

This block also contains several computed attributes, documented below.

* `upgrade_settings` - (Optional) Specifies the upgrade settings for NAP created node pools. Structure is [documented below](#nested_upgrade_settings).

<a name="nested_upgrade_settings"></a>The `upgrade_settings` block supports:
Expand Down Expand Up @@ -1158,6 +1168,8 @@ exported:
notation (e.g. `1.2.3.4/29`). Service addresses are typically put in the last
`/16` from the container CIDR.

* `cluster_autoscaling.0.auto_provisioning_defaults.0.management.0.upgrade_options` - Specifies the [Auto Upgrade knobs](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/NodeManagement#AutoUpgradeOptions) for the node pool.

## Timeouts

This resource provides the following
Expand Down