Allow passing node service_account when autopilot enabled (GoogleClou…
JeremyOT authored and googlerjk committed Nov 21, 2022
1 parent 60f2ec1 commit 7d47c7e
Showing 3 changed files with 126 additions and 40 deletions.
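In practice, this change lets a configuration combine `enable_autopilot` with `cluster_autoscaling.auto_provisioning_defaults`, which previously conflicted at the block level. A minimal sketch of the resulting user-facing config, mirroring the test fixture added below (resource names, project, and service account email are placeholders; other required cluster arguments such as networking are omitted):

```hcl
resource "google_container_cluster" "autopilot" {
  name             = "example-autopilot-cluster"
  location         = "us-central1" # Autopilot clusters must be regional
  enable_autopilot = true

  cluster_autoscaling {
    auto_provisioning_defaults {
      # Now accepted alongside enable_autopilot: this service account is used
      # by the node VMs that GKE provisions for the Autopilot cluster.
      service_account = "example-sa@example-project.iam.gserviceaccount.com"
      oauth_scopes    = ["https://www.googleapis.com/auth/cloud-platform"]
    }
  }
}
```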
@@ -78,6 +78,13 @@ var (
forceNewClusterNodeConfigFields = []string{
"workload_metadata_config",
}

suppressDiffForAutopilot = schema.SchemaDiffSuppressFunc(func(k, oldValue, newValue string, d *schema.ResourceData) bool {
if v, _ := d.Get("enable_autopilot").(bool); v {
return true
}
return false
})
)

// This uses the node pool nodeConfig schema but sets
@@ -444,18 +451,21 @@ func resourceContainerCluster() *schema.Resource {
Optional: true,
Computed: true,
Description: `Per-cluster configuration of Node Auto-Provisioning with Cluster Autoscaler to automatically adjust the size of the cluster and create/delete node pools based on the current needs of the cluster's workload. See the guide to using Node Auto-Provisioning for more details.`,
ConflictsWith: []string{"enable_autopilot"},
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"enabled": {
Type: schema.TypeBool,
Required: true,
Type: schema.TypeBool,
Optional: true,
Computed: true,
ConflictsWith: []string{"enable_autopilot"},
Description: `Whether node auto-provisioning is enabled. Resource limits for cpu and memory must be defined to enable node auto-provisioning.`,
},
"resource_limits": {
Type: schema.TypeList,
Optional: true,
Description: `Global constraints for machine resources in the cluster. Configuring the cpu and memory types is required if node auto-provisioning is enabled. These limits will apply to node pool autoscaling in addition to node auto-provisioning.`,
Type: schema.TypeList,
Optional: true,
ConflictsWith: []string{"enable_autopilot"},
DiffSuppressFunc: suppressDiffForAutopilot,
Description: `Global constraints for machine resources in the cluster. Configuring the cpu and memory types is required if node auto-provisioning is enabled. These limits will apply to node pool autoscaling in addition to node auto-provisioning.`,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"resource_type": {
@@ -499,24 +509,27 @@ func resourceContainerCluster() *schema.Resource {
Description: `The Google Cloud Platform Service Account to be used by the node VMs.`,
},
"disk_size": {
Type: schema.TypeInt,
Optional: true,
Default: 100,
Description: `Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.`,
Type: schema.TypeInt,
Optional: true,
Default: 100,
Description: `Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.`,
DiffSuppressFunc: suppressDiffForAutopilot,
ValidateFunc: validation.IntAtLeast(10),
},
"disk_type": {
Type: schema.TypeString,
Optional: true,
Default: "pd-standard",
Description: `Type of the disk attached to each node.`,
Type: schema.TypeString,
Optional: true,
Default: "pd-standard",
Description: `Type of the disk attached to each node.`,
DiffSuppressFunc: suppressDiffForAutopilot,
ValidateFunc: validation.StringInSlice([]string{"pd-standard", "pd-ssd", "pd-balanced"}, false),
},
"image_type": {
Type: schema.TypeString,
Optional: true,
Default: "COS_CONTAINERD",
Description: `The default image type used by NAP once a new node pool is being created.`,
Type: schema.TypeString,
Optional: true,
Default: "COS_CONTAINERD",
Description: `The default image type used by NAP once a new node pool is being created.`,
DiffSuppressFunc: suppressDiffForAutopilot,
ValidateFunc: validation.StringInSlice([]string{"COS_CONTAINERD", "COS", "UBUNTU_CONTAINERD", "UBUNTU"}, false),
},
<% unless version == 'ga' -%>
@@ -610,11 +623,12 @@ func resourceContainerCluster() *schema.Resource {
},
<% unless version == 'ga' -%>
"autoscaling_profile": {
Type: schema.TypeString,
Default: "BALANCED",
Optional: true,
ValidateFunc: validation.StringInSlice([]string{"BALANCED", "OPTIMIZE_UTILIZATION"}, false),
Description: `Configuration options for the Autoscaling profile feature, which lets you choose whether the cluster autoscaler should optimize for resource utilization or resource availability when deciding to remove nodes from a cluster. Can be BALANCED or OPTIMIZE_UTILIZATION. Defaults to BALANCED.`,
Type: schema.TypeString,
Default: "BALANCED",
Optional: true,
DiffSuppressFunc: suppressDiffForAutopilot,
ValidateFunc: validation.StringInSlice([]string{"BALANCED", "OPTIMIZE_UTILIZATION"}, false),
Description: `Configuration options for the Autoscaling profile feature, which lets you choose whether the cluster autoscaler should optimize for resource utilization or resource availability when deciding to remove nodes from a cluster. Can be BALANCED or OPTIMIZE_UTILIZATION. Defaults to BALANCED.`,
},
<% end -%>
},
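The `autoscaling_profile` field above is gated to non-GA versions of the provider (the `<% unless version == 'ga' -%>` guard), so using it requires the google-beta provider. A minimal, hypothetical usage sketch — names are placeholders, and whether additional autoscaling settings are needed depends on the cluster; this only shows where the field sits:

```hcl
resource "google_container_cluster" "tuned" {
  provider           = google-beta
  name               = "example-cluster"
  location           = "us-central1"
  initial_node_count = 1

  cluster_autoscaling {
    # "BALANCED" (default) or "OPTIMIZE_UTILIZATION"
    autoscaling_profile = "OPTIMIZE_UTILIZATION"
  }
}
```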
@@ -3777,8 +3791,12 @@ func expandMaintenancePolicy(d *schema.ResourceData, meta interface{}) *containe

func expandClusterAutoscaling(configured interface{}, d *schema.ResourceData) *container.ClusterAutoscaling {
l, ok := configured.([]interface{})
enableAutopilot := false
if v, ok := d.GetOk("enable_autopilot"); ok && v == true {
enableAutopilot = true
}
if !ok || l == nil || len(l) == 0 || l[0] == nil {
if v, ok := d.GetOk("enable_autopilot"); ok && v == true {
if enableAutopilot {
return nil
}
return &container.ClusterAutoscaling{
@@ -2115,6 +2115,7 @@ func TestAccContainerCluster_withShieldedNodes(t *testing.T) {
func TestAccContainerCluster_withAutopilot(t *testing.T) {
t.Parallel()

pid := getTestProjectFromEnv()
containerNetName := fmt.Sprintf("tf-test-container-net-%s", randString(t, 10))
clusterName := fmt.Sprintf("tf-test-cluster-%s", randString(t, 10))

@@ -2124,12 +2125,47 @@ func TestAccContainerCluster_withAutopilot(t *testing.T) {
CheckDestroy: testAccCheckContainerClusterDestroyProducer(t),
Steps: []resource.TestStep{
{
Config: testAccContainerCluster_withAutopilot(containerNetName, clusterName, "us-central1", true, false),
Config: testAccContainerCluster_withAutopilot(pid, containerNetName, clusterName, "us-central1", true, false, ""),
},
{
ResourceName: "google_container_cluster.with_autopilot",
ImportState: true,
ImportStateVerify: true,
ResourceName: "google_container_cluster.with_autopilot",
ImportState: true,
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{"min_master_version"},
},
},
})
}

func TestAccContainerClusterCustomServiceAccount_withAutopilot(t *testing.T) {
t.Parallel()

pid := getTestProjectFromEnv()
containerNetName := fmt.Sprintf("tf-test-container-net-%s", randString(t, 10))
clusterName := fmt.Sprintf("tf-test-cluster-%s", randString(t, 10))
serviceAccountName := fmt.Sprintf("tf-test-sa-%s", randString(t, 10))

vcrTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckContainerClusterDestroyProducer(t),
Steps: []resource.TestStep{
{
Config: testAccContainerCluster_withAutopilot(pid, containerNetName, clusterName, "us-central1", true, false, serviceAccountName),
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr("google_container_cluster.with_autopilot",
"cluster_autoscaling.0.enabled", "true"),
resource.TestCheckResourceAttr("google_container_cluster.with_autopilot",
"cluster_autoscaling.0.auto_provisioning_defaults.0.service_account",
fmt.Sprintf("%s@%s.iam.gserviceaccount.com", serviceAccountName, pid)),
resource.TestCheckResourceAttr("google_container_cluster.with_autopilot",
"cluster_autoscaling.0.auto_provisioning_defaults.0.oauth_scopes.0", "https://www.googleapis.com/auth/cloud-platform"),
),
},
{
ResourceName: "google_container_cluster.with_autopilot",
ImportState: true,
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{"min_master_version"},
},
},
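The fixture for this test (see testAccContainerCluster_withAutopilot below) first creates the service account and grants it roles/container.nodeServiceAccount before the cluster is created. A rough standalone equivalent of that prerequisite, using an additive google_project_iam_member in place of the test's authoritative google_project_iam_binding (project and account IDs are placeholders):

```hcl
resource "google_service_account" "gke_nodes" {
  project      = "example-project"
  account_id   = "example-gke-nodes"
  display_name = "GKE node service account"
}

# Nodes created by GKE Autopilot/NAP need this project-level role so the
# custom service account can pull images and write logs and metrics.
resource "google_project_iam_member" "gke_nodes" {
  project = "example-project"
  role    = "roles/container.nodeServiceAccount"
  member  = "serviceAccount:${google_service_account.gke_nodes.email}"
}
```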
@@ -2139,6 +2175,7 @@ func TestAccContainerCluster_withAutopilot(t *testing.T) {
func TestAccContainerCluster_errorAutopilotLocation(t *testing.T) {
t.Parallel()

pid := getTestProjectFromEnv()
containerNetName := fmt.Sprintf("tf-test-container-net-%s", randString(t, 10))
clusterName := fmt.Sprintf("tf-test-cluster-%s", randString(t, 10))

@@ -2148,7 +2185,7 @@ func TestAccContainerCluster_errorAutopilotLocation(t *testing.T) {
CheckDestroy: testAccCheckContainerClusterDestroyProducer(t),
Steps: []resource.TestStep{
{
Config: testAccContainerCluster_withAutopilot(containerNetName, clusterName, "us-central1-a", true, false),
Config: testAccContainerCluster_withAutopilot(pid, containerNetName, clusterName, "us-central1-a", true, false, ""),
ExpectError: regexp.MustCompile(`Autopilot clusters must be regional clusters.`),
},
},
@@ -2159,6 +2196,7 @@ func TestAccContainerCluster_errorAutopilotLocation(t *testing.T) {
func TestAccContainerCluster_withAutopilotNetworkTags(t *testing.T) {
t.Parallel()

pid := getTestProjectFromEnv()
containerNetName := fmt.Sprintf("tf-test-container-net-%s", randString(t, 10))
clusterName := fmt.Sprintf("tf-test-cluster-%s", randString(t, 10))

@@ -2168,7 +2206,7 @@ func TestAccContainerCluster_withAutopilotNetworkTags(t *testing.T) {
CheckDestroy: testAccCheckContainerClusterDestroyProducer(t),
Steps: []resource.TestStep{
{
Config: testAccContainerCluster_withAutopilot(containerNetName, clusterName, "us-central1", true, true),
Config: testAccContainerCluster_withAutopilot(pid, containerNetName, clusterName, "us-central1", true, true, ""),
},
{
ResourceName: "google_container_cluster.with_autopilot",
@@ -3275,8 +3313,8 @@ func testAccCheckContainerClusterDestroyProducer(t *testing.T) func(s *terraform
}

attributes := rs.Primary.Attributes
_, err := config.NewContainerClient(config.userAgent).Projects.Zones.Clusters.Get(
config.Project, attributes["location"], attributes["name"]).Do()
_, err := config.NewContainerClient(config.userAgent).Projects.Locations.Clusters.Get(
fmt.Sprintf("projects/%s/locations/%s/clusters/%s", config.Project, attributes["location"], attributes["name"])).Do()
if err == nil {
return fmt.Errorf("Cluster still exists")
}
@@ -6314,8 +6352,36 @@ resource "google_container_cluster" "primary" {
`, name)
}

func testAccContainerCluster_withAutopilot(containerNetName string, clusterName string, location string, enabled bool, withNetworkTag bool) string {
config := fmt.Sprintf(`
func testAccContainerCluster_withAutopilot(projectID string, containerNetName string, clusterName string, location string, enabled bool, withNetworkTag bool, serviceAccount string) string {
config := ""
clusterAutoscaling := ""
if serviceAccount != "" {
config += fmt.Sprintf(`
resource "google_service_account" "service_account" {
account_id = "%[1]s"
project = "%[2]s"
display_name = "Service Account"
}

resource "google_project_iam_binding" "project" {
project = "%[2]s"
role = "roles/container.nodeServiceAccount"
members = [
"serviceAccount:%[1]s@%[2]s.iam.gserviceaccount.com",
]
}`, serviceAccount, projectID)

clusterAutoscaling = fmt.Sprintf(`
cluster_autoscaling {
auto_provisioning_defaults {
service_account = "%s@%s.iam.gserviceaccount.com"
oauth_scopes = ["https://www.googleapis.com/auth/cloud-platform"]
}
}`, serviceAccount, projectID)
}

config += fmt.Sprintf(`

resource "google_compute_network" "container_network" {
name = "%s"
auto_create_subnetworks = false
@@ -6362,9 +6428,10 @@ resource "google_container_cluster" "with_autopilot" {
disabled = false
}
}
%s
vertical_pod_autoscaling {
enabled = true
}`, containerNetName, clusterName, location, enabled)
}`, containerNetName, clusterName, location, enabled, clusterAutoscaling)
if withNetworkTag {
config += `
node_pool_auto_config {
@@ -470,15 +470,16 @@ addons_config {

<a name="nested_cluster_autoscaling"></a>The `cluster_autoscaling` block supports:

* `enabled` - (Required) Whether node auto-provisioning is enabled. Resource
limits for `cpu` and `memory` must be defined to enable node auto-provisioning.
* `enabled` - (Optional) Whether node auto-provisioning is enabled. Must be supplied for GKE Standard clusters; `true` is implied
for Autopilot clusters. Resource limits for `cpu` and `memory` must be defined to enable node auto-provisioning for GKE Standard.

* `resource_limits` - (Optional) Global constraints for machine resources in the
cluster. Configuring the `cpu` and `memory` types is required if node
auto-provisioning is enabled. These limits will apply to node pool autoscaling
in addition to node auto-provisioning. Structure is [documented below](#nested_resource_limits).

* `auto_provisioning_defaults` - (Optional) Contains defaults for a node pool created by NAP.
* `auto_provisioning_defaults` - (Optional) Contains defaults for a node pool created by NAP. A subset of fields also applies to
GKE Autopilot clusters.
Structure is [documented below](#nested_auto_provisioning_defaults).

* `autoscaling_profile` - (Optional, [Beta](https://terraform.io/docs/providers/google/provider_versions.html)) Configuration
@@ -503,11 +504,11 @@ Minimum CPU platform to be used for NAP created node pools. The instance may be
specified or newer CPU platform. Applicable values are the friendly names of CPU platforms, such
as "Intel Haswell" or "Intel Sandy Bridge".

* `oauth_scopes` - (Optional) Scopes that are used by NAP when creating node pools. Use the "https://www.googleapis.com/auth/cloud-platform" scope to grant access to all APIs. It is recommended that you set `service_account` to a non-default service account and grant IAM roles to that service account for only the resources that it needs.
* `oauth_scopes` - (Optional) Scopes that are used by NAP and GKE Autopilot when creating node pools. Use the "https://www.googleapis.com/auth/cloud-platform" scope to grant access to all APIs. It is recommended that you set `service_account` to a non-default service account and grant IAM roles to that service account for only the resources that it needs.

-> `monitoring.write` is always enabled regardless of user input. `monitoring` and `logging.write` may also be enabled depending on the values for `monitoring_service` and `logging_service`.

* `service_account` - (Optional) The Google Cloud Platform Service Account to be used by the node VMs.
* `service_account` - (Optional) The Google Cloud Platform Service Account to be used by the node VMs created by GKE Autopilot or NAP.

* `boot_disk_kms_key` - (Optional) The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: https://cloud.google.com/compute/docs/disks/customer-managed-encryption
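For comparison with the Autopilot example earlier, a sketch of node auto-provisioning on a GKE Standard cluster using the arguments documented above. Values are illustrative, other required cluster arguments are omitted, and the `resource_type`/`minimum`/`maximum` arguments follow the nested `resource_limits` block documented at #nested_resource_limits (not shown in this excerpt):

```hcl
resource "google_container_cluster" "standard" {
  name               = "example-standard-cluster"
  location           = "us-central1"
  initial_node_count = 1

  cluster_autoscaling {
    enabled = true # required for GKE Standard; implied on Autopilot

    resource_limits {
      resource_type = "cpu"
      minimum       = 1
      maximum       = 32
    }
    resource_limits {
      resource_type = "memory"
      minimum       = 4
      maximum       = 128
    }

    auto_provisioning_defaults {
      service_account = "example-sa@example-project.iam.gserviceaccount.com"
      oauth_scopes    = ["https://www.googleapis.com/auth/cloud-platform"]
    }
  }
}
```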

