diff --git a/.changelog/6780.txt b/.changelog/6780.txt new file mode 100644 index 00000000000..9bc6422913f --- /dev/null +++ b/.changelog/6780.txt @@ -0,0 +1,12 @@ +```release-note:enhancement +container: added fields `gcp_public_cidrs_access_enabled` and `private_endpoint_subnetwork` to `google_container_cluster` +``` +```release-note:enhancement +container: added update support for `enable_private_endpoint` and `enable_private_nodes` in `google_container_cluster` +``` +```release-note:enhancement +container: promoted `network_config` in `google_container_node_pool` to GA +``` +```release-note:enhancement +container: added field `enable_private_nodes` in `network_config` to `google_container_node_pool` +``` diff --git a/google/resource_container_cluster.go b/google/resource_container_cluster.go index cb79f3fa716..e3a08929b04 100644 --- a/google/resource_container_cluster.go +++ b/google/resource_container_cluster.go @@ -22,17 +22,23 @@ import ( var ( instanceGroupManagerURL = regexp.MustCompile(fmt.Sprintf("projects/(%s)/zones/([a-z0-9-]*)/instanceGroupManagers/([^/]*)", ProjectRegex)) - networkConfig = &schema.Resource{ + masterAuthorizedNetworksConfig = &schema.Resource{ Schema: map[string]*schema.Schema{ "cidr_blocks": { Type: schema.TypeSet, - // Despite being the only entry in a nested block, this should be kept - // Optional. Expressing the parent with no entries and omitting the + // This should be kept Optional. Expressing the + // parent with no entries and omitting the // parent entirely are semantically different. Optional: true, Elem: cidrBlockConfig, Description: `External networks that can access the Kubernetes cluster master through HTTPS.`, }, + "gcp_public_cidrs_access_enabled": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + Description: `Whether the master is accessible via Google Compute Engine Public IP addresses.`, + }, }, } cidrBlockConfig = &schema.Resource{ @@ -64,6 +70,14 @@ var ( "addons_config.0.gce_persistent_disk_csi_driver_config", } + privateClusterConfigKeys = []string{ + "private_cluster_config.0.enable_private_endpoint", + "private_cluster_config.0.enable_private_nodes", + "private_cluster_config.0.master_ipv4_cidr_block", + "private_cluster_config.0.private_endpoint_subnetwork", + "private_cluster_config.0.master_global_access_config", + } + forceNewClusterNodeConfigFields = []string{ "workload_metadata_config", } @@ -918,8 +932,9 @@ func resourceContainerCluster() *schema.Resource { "master_authorized_networks_config": { Type: schema.TypeList, Optional: true, + Computed: true, MaxItems: 1, - Elem: networkConfig, + Elem: masterAuthorizedNetworksConfig, Description: `The desired configuration options for master authorized networks. Omit the nested cidr_blocks attribute to disallow external access (except the cluster node IPs, which GKE automatically whitelists).`, }, @@ -1114,10 +1129,15 @@ func resourceContainerCluster() *schema.Resource { Description: `Configuration for private clusters, clusters with private nodes.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + // enable_private_endpoint is orthogonal to private_endpoint_subnetwork. + // Users can create a private_cluster_config block without including + // either one of those two fields. Both fields are optional.
+ // At the same time, we use 'AtLeastOneOf' to prevent an empty block + // like 'private_cluster_config{}' "enable_private_endpoint": { Type: schema.TypeBool, - Required: true, - ForceNew: true, + Optional: true, + AtLeastOneOf: privateClusterConfigKeys, DiffSuppressFunc: containerClusterPrivateClusterConfigSuppress, Description: `When true, the cluster's private endpoint is used as the cluster endpoint and access through the public endpoint is disabled. When false, either endpoint can be used. This field only applies to private clusters, when enable_private_nodes is true.`, }, @@ -1125,6 +1145,7 @@ func resourceContainerCluster() *schema.Resource { Type: schema.TypeBool, Optional: true, ForceNew: true, + AtLeastOneOf: privateClusterConfigKeys, DiffSuppressFunc: containerClusterPrivateClusterConfigSuppress, Description: `Enables the private cluster feature, creating a private endpoint on the cluster. In a private cluster, nodes only have RFC 1918 private addresses and communicate with the master's private endpoint via private networking.`, }, @@ -1133,6 +1154,7 @@ func resourceContainerCluster() *schema.Resource { Computed: true, Optional: true, ForceNew: true, + AtLeastOneOf: privateClusterConfigKeys, ValidateFunc: orEmpty(validation.IsCIDRNetwork(28, 28)), Description: `The IP range in CIDR notation to use for the hosted master network. This range will be used for assigning private IP addresses to the cluster master(s) and the ILB VIP. This range must not overlap with any other ranges in use within the cluster's network, and it must be a /28 subnet. See Private Cluster Limitations for more details. This field only applies to private clusters, when enable_private_nodes is true.`, }, @@ -1146,17 +1168,26 @@ func resourceContainerCluster() *schema.Resource { Computed: true, Description: `The internal IP address of this cluster's master endpoint.`, }, + "private_endpoint_subnetwork": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + AtLeastOneOf: privateClusterConfigKeys, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: `Subnetwork in cluster's network where master's endpoint will be provisioned.`, + }, "public_endpoint": { Type: schema.TypeString, Computed: true, Description: `The external IP address of this cluster's master endpoint.`, }, "master_global_access_config": { - Type: schema.TypeList, - MaxItems: 1, - Optional: true, - Computed: true, - Description: "Controls cluster master global access settings.", + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Computed: true, + AtLeastOneOf: privateClusterConfigKeys, + Description: "Controls cluster master global access settings.", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "enabled": { @@ -1535,7 +1566,7 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er Name: clusterName, InitialNodeCount: int64(d.Get("initial_node_count").(int)), MaintenancePolicy: expandMaintenancePolicy(d, meta), - MasterAuthorizedNetworksConfig: expandMasterAuthorizedNetworksConfig(d.Get("master_authorized_networks_config")), + MasterAuthorizedNetworksConfig: expandMasterAuthorizedNetworksConfig(d.Get("master_authorized_networks_config"), d), InitialClusterVersion: d.Get("min_master_version").(string), ClusterIpv4Cidr: d.Get("cluster_ipv4_cidr").(string), Description: d.Get("description").(string), @@ -2098,7 +2129,7 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er c := d.Get("master_authorized_networks_config") req := 
&container.UpdateClusterRequest{ Update: &container.ClusterUpdate{ - DesiredMasterAuthorizedNetworksConfig: expandMasterAuthorizedNetworksConfig(c), + DesiredMasterAuthorizedNetworksConfig: expandMasterAuthorizedNetworksConfig(c, d), }, } @@ -2162,6 +2193,24 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er log.Printf("[INFO] GKE cluster %s's binary authorization has been updated to %v", d.Id(), enabled) } + if d.HasChange("private_cluster_config.0.enable_private_endpoint") { + enabled := d.Get("private_cluster_config.0.enable_private_endpoint").(bool) + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredEnablePrivateEndpoint: enabled, + ForceSendFields: []string{"DesiredEnablePrivateEndpoint"}, + }, + } + + updateF := updateFunc(req, "updating enable private endpoint") + // Call update serially. + if err := lockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s's enable private endpoint has been updated to %v", d.Id(), enabled) + } + if d.HasChange("binary_authorization") { req := &container.UpdateClusterRequest{ Update: &container.ClusterUpdate{ @@ -3537,7 +3586,7 @@ func expandMasterAuth(configured interface{}) *container.MasterAuth { return result } -func expandMasterAuthorizedNetworksConfig(configured interface{}) *container.MasterAuthorizedNetworksConfig { +func expandMasterAuthorizedNetworksConfig(configured interface{}, d *schema.ResourceData) *container.MasterAuthorizedNetworksConfig { l := configured.([]interface{}) if len(l) == 0 { return &container.MasterAuthorizedNetworksConfig{ @@ -3559,6 +3608,10 @@ func expandMasterAuthorizedNetworksConfig(configured interface{}) *container.Mas }) } } + if v, ok := d.GetOkExists("master_authorized_networks_config.0.gcp_public_cidrs_access_enabled"); ok { + result.GcpPublicCidrsAccessEnabled = v.(bool) + result.ForceSendFields = []string{"GcpPublicCidrsAccessEnabled"} + } } return result } @@ -3586,11 +3639,12 @@ func expandPrivateClusterConfig(configured interface{}) *container.PrivateCluste } config := l[0].(map[string]interface{}) return &container.PrivateClusterConfig{ - EnablePrivateEndpoint: config["enable_private_endpoint"].(bool), - EnablePrivateNodes: config["enable_private_nodes"].(bool), - MasterIpv4CidrBlock: config["master_ipv4_cidr_block"].(string), - MasterGlobalAccessConfig: expandPrivateClusterConfigMasterGlobalAccessConfig(config["master_global_access_config"]), - ForceSendFields: []string{"EnablePrivateEndpoint", "EnablePrivateNodes", "MasterIpv4CidrBlock", "MasterGlobalAccessConfig"}, + EnablePrivateEndpoint: config["enable_private_endpoint"].(bool), + EnablePrivateNodes: config["enable_private_nodes"].(bool), + MasterIpv4CidrBlock: config["master_ipv4_cidr_block"].(string), + MasterGlobalAccessConfig: expandPrivateClusterConfigMasterGlobalAccessConfig(config["master_global_access_config"]), + PrivateEndpointSubnetwork: config["private_endpoint_subnetwork"].(string), + ForceSendFields: []string{"EnablePrivateEndpoint", "EnablePrivateNodes", "MasterIpv4CidrBlock", "MasterGlobalAccessConfig"}, } } @@ -4020,6 +4074,7 @@ func flattenPrivateClusterConfig(c *container.PrivateClusterConfig) []map[string "master_global_access_config": flattenPrivateClusterConfigMasterGlobalAccessConfig(c.MasterGlobalAccessConfig), "peering_name": c.PeeringName, "private_endpoint": c.PrivateEndpoint, + "private_endpoint_subnetwork": c.PrivateEndpointSubnetwork, "public_endpoint": c.PublicEndpoint, }, } @@ -4263,16 +4318,15 @@ func 
flattenMasterAuthorizedNetworksConfig(c *container.MasterAuthorizedNetworks return nil } result := make(map[string]interface{}) - if c.Enabled { - cidrBlocks := make([]interface{}, 0, len(c.CidrBlocks)) - for _, v := range c.CidrBlocks { - cidrBlocks = append(cidrBlocks, map[string]interface{}{ - "cidr_block": v.CidrBlock, - "display_name": v.DisplayName, - }) - } - result["cidr_blocks"] = schema.NewSet(schema.HashResource(cidrBlockConfig), cidrBlocks) + cidrBlocks := make([]interface{}, 0, len(c.CidrBlocks)) + for _, v := range c.CidrBlocks { + cidrBlocks = append(cidrBlocks, map[string]interface{}{ + "cidr_block": v.CidrBlock, + "display_name": v.DisplayName, + }) } + result["cidr_blocks"] = schema.NewSet(schema.HashResource(cidrBlockConfig), cidrBlocks) + result["gcp_public_cidrs_access_enabled"] = c.GcpPublicCidrsAccessEnabled return []map[string]interface{}{result} } @@ -4491,12 +4545,15 @@ func containerClusterPrivateClusterConfigSuppress(k, old, new string, d *schema. o, n = d.GetChange("private_cluster_config.0.enable_private_nodes") suppressNodes := !o.(bool) && !n.(bool) + // Do not suppress diffs when private_endpoint_subnetwork is configured + _, hasSubnet := d.GetOk("private_cluster_config.0.private_endpoint_subnetwork") + if k == "private_cluster_config.0.enable_private_endpoint" { - return suppressEndpoint + return suppressEndpoint && !hasSubnet } else if k == "private_cluster_config.0.enable_private_nodes" { - return suppressNodes + return suppressNodes && !hasSubnet } else if k == "private_cluster_config.#" { - return suppressEndpoint && suppressNodes + return suppressEndpoint && suppressNodes && !hasSubnet } return false } diff --git a/google/resource_container_cluster_test.go b/google/resource_container_cluster_test.go index 9437e1357a5..aa65b3efa8e 100644 --- a/google/resource_container_cluster_test.go +++ b/google/resource_container_cluster_test.go @@ -662,6 +662,81 @@ func TestAccContainerCluster_withMasterAuthorizedNetworksConfig(t *testing.T) { }) } +func TestAccContainerCluster_withGcpPublicCidrsAccessEnabledToggle(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", randString(t, 10)) + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withoutGcpPublicCidrsAccessEnabled(clusterName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.with_gcp_public_cidrs_access_enabled", + "master_authorized_networks_config.#", "0"), + ), + }, + { + ResourceName: "google_container_cluster.with_gcp_public_cidrs_access_enabled", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version"}, + }, + { + Config: testAccContainerCluster_withGcpPublicCidrsAccessEnabled(clusterName, "false"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.with_gcp_public_cidrs_access_enabled", + "master_authorized_networks_config.0.gcp_public_cidrs_access_enabled", "false"), + ), + }, + { + ResourceName: "google_container_cluster.with_gcp_public_cidrs_access_enabled", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version"}, + }, + { + Config: testAccContainerCluster_withGcpPublicCidrsAccessEnabled(clusterName, "true"), + Check: resource.ComposeTestCheckFunc( + 
resource.TestCheckResourceAttr("google_container_cluster.with_gcp_public_cidrs_access_enabled", + "master_authorized_networks_config.0.gcp_public_cidrs_access_enabled", "true"), + ), + }, + }, + }) +} + +func testAccContainerCluster_withGcpPublicCidrsAccessEnabled(clusterName string, flag string) string { + + return fmt.Sprintf(` +resource "google_container_cluster" "with_gcp_public_cidrs_access_enabled" { + name = "%s" + location = "us-central1-a" + min_master_version = "1.23" + initial_node_count = 1 + + master_authorized_networks_config { + gcp_public_cidrs_access_enabled = %s + } +} +`, clusterName, flag) +} + +func testAccContainerCluster_withoutGcpPublicCidrsAccessEnabled(clusterName string) string { + + return fmt.Sprintf(` +resource "google_container_cluster" "with_gcp_public_cidrs_access_enabled" { + name = "%s" + location = "us-central1-a" + min_master_version = "1.23" + initial_node_count = 1 +} +`, clusterName) +} + func TestAccContainerCluster_regional(t *testing.T) { t.Parallel() @@ -3473,6 +3548,154 @@ resource "google_container_cluster" "regional" { `, clusterName) } +func TestAccContainerCluster_withPrivateEndpointSubnetwork(t *testing.T) { + t.Parallel() + + r := randString(t, 10) + + subnet1Name := fmt.Sprintf("tf-test-container-subnetwork1-%s", r) + subnet1Cidr := "10.0.36.0/24" + + subnet2Name := fmt.Sprintf("tf-test-container-subnetwork2-%s", r) + subnet2Cidr := "10.9.26.0/24" + + clusterName := fmt.Sprintf("tf-test-cluster-%s", randString(t, 10)) + containerNetName := fmt.Sprintf("tf-test-container-net-%s", r) + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withPrivateEndpointSubnetwork(containerNetName, clusterName, subnet1Name, subnet1Cidr, subnet2Name, subnet2Cidr), + }, + { + ResourceName: "google_container_cluster.with_private_endpoint_subnetwork", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version"}, + }, + }, + }) +} + +func testAccContainerCluster_withPrivateEndpointSubnetwork(containerNetName, clusterName, s1Name, s1Cidr, s2Name, s2Cidr string) string { + return fmt.Sprintf(` +resource "google_compute_network" "container_network" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "container_subnetwork1" { + name = "%s" + network = google_compute_network.container_network.name + ip_cidr_range = "%s" + region = "us-central1" + private_ip_google_access = true +} + +resource "google_compute_subnetwork" "container_subnetwork2" { + name = "%s" + network = google_compute_network.container_network.name + ip_cidr_range = "%s" + region = "us-central1" + private_ip_google_access = true +} + +resource "google_container_cluster" "with_private_endpoint_subnetwork" { + name = "%s" + location = "us-central1-a" + min_master_version = "1.23" + initial_node_count = 1 + + network = google_compute_network.container_network.name + subnetwork = google_compute_subnetwork.container_subnetwork1.name + + private_cluster_config { + private_endpoint_subnetwork = google_compute_subnetwork.container_subnetwork2.name + } +} +`, containerNetName, s1Name, s1Cidr, s2Name, s2Cidr, clusterName) +} + +func TestAccContainerCluster_withEnablePrivateEndpointToggle(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", randString(t, 10)) + + vcrTest(t, resource.TestCase{ + PreCheck: func() 
{ testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withoutEnablePrivateEndpoint(clusterName), + }, + { + ResourceName: "google_container_cluster.with_enable_private_endpoint", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version"}, + }, + { + Config: testAccContainerCluster_withEnablePrivateEndpoint(clusterName, "true"), + }, + { + ResourceName: "google_container_cluster.with_enable_private_endpoint", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version"}, + }, + { + Config: testAccContainerCluster_withEnablePrivateEndpoint(clusterName, "false"), + }, + { + ResourceName: "google_container_cluster.with_enable_private_endpoint", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version"}, + }, + }, + }) +} + +func testAccContainerCluster_withEnablePrivateEndpoint(clusterName string, flag string) string { + + return fmt.Sprintf(` +resource "google_container_cluster" "with_enable_private_endpoint" { + name = "%s" + location = "us-central1-a" + min_master_version = "1.23" + initial_node_count = 1 + + master_authorized_networks_config { + gcp_public_cidrs_access_enabled = false + } + + private_cluster_config { + enable_private_endpoint = %s + } +} +`, clusterName, flag) +} + +func testAccContainerCluster_withoutEnablePrivateEndpoint(clusterName string) string { + + return fmt.Sprintf(` +resource "google_container_cluster" "with_enable_private_endpoint" { + name = "%s" + location = "us-central1-a" + min_master_version = "1.23" + initial_node_count = 1 + + master_authorized_networks_config { + gcp_public_cidrs_access_enabled = false + } +} +`, clusterName) +} + func testAccContainerCluster_regionalWithNodePool(cluster, nodePool string) string { return fmt.Sprintf(` resource "google_container_cluster" "regional" { diff --git a/google/resource_container_node_pool.go b/google/resource_container_node_pool.go index 4ff8e85ad0d..7fbe78e573a 100644 --- a/google/resource_container_node_pool.go +++ b/google/resource_container_node_pool.go @@ -299,6 +299,44 @@ var schemaNodePool = map[string]*schema.Schema{ Computed: true, Description: `The Kubernetes version for the nodes in this pool. Note that if this field and auto_upgrade are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as Terraform will see spurious diffs when fuzzy versions are used. See the google_container_engine_versions data source's version_prefix field to approximate fuzzy versions in a Terraform-compatible way.`, }, + + "network_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Description: `Networking configuration for this NodePool. If specified, it overrides the cluster-level defaults.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "create_pod_range": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Whether to create a new range for pod IPs in this node pool. 
Defaults are provided for pod_range and pod_ipv4_cidr_block if they are not specified.`, + }, + "enable_private_nodes": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + Description: `Whether nodes have internal IP addresses only.`, + }, + "pod_range": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The ID of the secondary range for pod IPs. If create_pod_range is true, this ID is used for the new range. If create_pod_range is false, uses an existing secondary range with this ID.`, + }, + "pod_ipv4_cidr_block": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + ValidateFunc: validateIpCidrRange, + Description: `The IP address range for pod IPs in this node pool. Only applicable if create_pod_range is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use.`, + }, + }, + }, + }, } type NodePoolInformation struct { @@ -770,6 +808,7 @@ func expandNodePool(d *schema.ResourceData, prefix string) (*container.NodePool, Config: expandNodeConfig(d.Get(prefix + "node_config")), Locations: locations, Version: d.Get(prefix + "version").(string), + NetworkConfig: expandNodeNetworkConfig(d.Get(prefix + "network_config")), } if v, ok := d.GetOk(prefix + "autoscaling"); ok { @@ -949,6 +988,7 @@ func flattenNodePool(d *schema.ResourceData, config *Config, np *container.NodeP "instance_group_urls": igmUrls, "managed_instance_group_urls": managedIgmUrls, "version": np.Version, + "network_config": flattenNodeNetworkConfig(np.NetworkConfig, d, prefix), } if np.Autoscaling != nil { @@ -987,6 +1027,50 @@ func flattenNodePool(d *schema.ResourceData, config *Config, np *container.NodeP return nodePool, nil } +func flattenNodeNetworkConfig(c *container.NodeNetworkConfig, d *schema.ResourceData, prefix string) []map[string]interface{} { + result := []map[string]interface{}{} + if c != nil { + result = append(result, map[string]interface{}{ + "create_pod_range": d.Get(prefix + "network_config.0.create_pod_range"), // API doesn't return this value so we set the old one. 
Field is Optional + ForceNew. + "pod_ipv4_cidr_block": c.PodIpv4CidrBlock, + "pod_range": c.PodRange, + "enable_private_nodes": c.EnablePrivateNodes, + }) + } + return result +} + +func expandNodeNetworkConfig(v interface{}) *container.NodeNetworkConfig { + networkNodeConfigs := v.([]interface{}) + + nnc := &container.NodeNetworkConfig{} + + if len(networkNodeConfigs) == 0 { + return nnc + } + + networkNodeConfig := networkNodeConfigs[0].(map[string]interface{}) + + if v, ok := networkNodeConfig["create_pod_range"]; ok { + nnc.CreatePodRange = v.(bool) + } + + if v, ok := networkNodeConfig["pod_range"]; ok { + nnc.PodRange = v.(string) + } + + if v, ok := networkNodeConfig["pod_ipv4_cidr_block"]; ok { + nnc.PodIpv4CidrBlock = v.(string) + } + + if v, ok := networkNodeConfig["enable_private_nodes"]; ok { + nnc.EnablePrivateNodes = v.(bool) + nnc.ForceSendFields = []string{"EnablePrivateNodes"} + } + + return nnc +} + func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *NodePoolInformation, prefix string, timeout time.Duration) error { config := meta.(*Config) name := d.Get(prefix + "name").(string) @@ -1451,6 +1535,40 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node log.Printf("[INFO] Updated upgrade settings in Node Pool %s", name) } + if d.HasChange(prefix + "network_config") { + if d.HasChange(prefix + "network_config.0.enable_private_nodes") { + req := &container.UpdateNodePoolRequest{ + NodePoolId: name, + NodeNetworkConfig: expandNodeNetworkConfig(d.Get(prefix + "network_config")), + } + updateF := func() error { + clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req) + if config.UserProjectOverride { + clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) + } + op, err := clusterNodePoolsUpdateCall.Do() + + if err != nil { + return err + } + + // Wait until it's updated + return containerOperationWait(config, op, + nodePoolInfo.project, + nodePoolInfo.location, + "updating GKE node pool network_config", userAgent, + timeout) + } + + // Call update serially. + if err := lockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] Updated network_config for node pool %s", name) + } + } + return nil }
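Reviewer note (not part of the diff): with the update path above, `network_config.0.enable_private_nodes` can be flipped on an existing node pool in place instead of forcing recreation. A minimal sketch of the configuration this enables follows; the pool name and the `pod` secondary range are illustrative and assume a cluster and subnetwork shaped like the acceptance-test fixtures below.

```hcl
# Sketch only: assumes google_container_cluster.cluster exists and its
# subnetwork defines a secondary range named "pod", as in the tests below.
resource "google_container_node_pool" "private_nodes" {
  name       = "example-np"
  location   = "us-central1-a"
  cluster    = google_container_cluster.cluster.name
  node_count = 1

  network_config {
    create_pod_range     = false
    pod_range            = "pod"
    # Toggling this value should now result in an in-place node pool update.
    enable_private_nodes = true
  }
}
```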
diff --git a/google/resource_container_node_pool_test.go b/google/resource_container_node_pool_test.go index 77eb1b30e67..caaac91f42b 100644 --- a/google/resource_container_node_pool_test.go +++ b/google/resource_container_node_pool_test.go @@ -333,6 +333,129 @@ func TestAccContainerNodePool_withWorkloadIdentityConfig(t *testing.T) { }) } +func TestAccContainerNodePool_withNetworkConfig(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", randString(t, 10)) + np := fmt.Sprintf("tf-test-np-%s", randString(t, 10)) + network := fmt.Sprintf("tf-test-net-%s", randString(t, 10)) + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_withNetworkConfig(cluster, np, network), + }, + { + ResourceName: "google_container_node_pool.with_manual_pod_cidr", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"network_config.0.create_pod_range"}, + }, + { + ResourceName: "google_container_node_pool.with_auto_pod_cidr", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"network_config.0.create_pod_range"}, + }, + }, + }) +} + +func TestAccContainerNodePool_withEnablePrivateNodesToggle(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", randString(t, 10)) + np := fmt.Sprintf("tf-test-np-%s", randString(t, 10)) + network := fmt.Sprintf("tf-test-net-%s", randString(t, 10)) + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_withEnablePrivateNodesToggle(cluster, np, network, "true"), + }, + { + ResourceName: "google_container_node_pool.with_enable_private_nodes", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version"}, + }, + { + Config: testAccContainerNodePool_withEnablePrivateNodesToggle(cluster, np, network, "false"), + }, + { + ResourceName: "google_container_node_pool.with_enable_private_nodes", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version"}, + }, + }, + }) +} + +func testAccContainerNodePool_withEnablePrivateNodesToggle(cluster, np, network, flag string) string { + return fmt.Sprintf(` +resource "google_compute_network" "container_network" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "container_subnetwork" { + name = google_compute_network.container_network.name + network = google_compute_network.container_network.name + ip_cidr_range = "10.0.36.0/24" + region = "us-central1" + private_ip_google_access = true + + secondary_ip_range { + range_name = "pod" + ip_cidr_range = "10.0.0.0/19" + } + + secondary_ip_range { + range_name = "svc" + ip_cidr_range = "10.0.32.0/22" + } +} + +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-a" + min_master_version = "1.23" + initial_node_count = 1 + + network = google_compute_network.container_network.name + subnetwork = google_compute_subnetwork.container_subnetwork.name + ip_allocation_policy { +
cluster_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[0].range_name + services_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[1].range_name + } +} + +resource "google_container_node_pool" "with_enable_private_nodes" { + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + node_count = 1 + network_config { + create_pod_range = false + enable_private_nodes = %s + pod_range = google_compute_subnetwork.container_subnetwork.secondary_ip_range[0].range_name + } + node_config { + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform", + ] + } +} +`, network, cluster, np, flag) +} + func TestAccContainerNodePool_withUpgradeSettings(t *testing.T) { t.Parallel() @@ -1656,6 +1779,95 @@ resource "google_container_node_pool" "with_workload_metadata_config" { `, projectID, cluster, np) } +func testAccContainerNodePool_withNetworkConfig(cluster, np, network string) string { + return fmt.Sprintf(` +resource "google_compute_network" "container_network" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "container_subnetwork" { + name = google_compute_network.container_network.name + network = google_compute_network.container_network.name + ip_cidr_range = "10.0.36.0/24" + region = "us-central1" + private_ip_google_access = true + + secondary_ip_range { + range_name = "pod" + ip_cidr_range = "10.0.0.0/19" + } + + secondary_ip_range { + range_name = "svc" + ip_cidr_range = "10.0.32.0/22" + } + + secondary_ip_range { + range_name = "another-pod" + ip_cidr_range = "10.1.32.0/22" + } + + lifecycle { + ignore_changes = [ + # The auto nodepool creates a secondary range which diffs this resource. 
+ secondary_ip_range, + ] + } +} + +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1" + initial_node_count = 1 + + network = google_compute_network.container_network.name + subnetwork = google_compute_subnetwork.container_subnetwork.name + ip_allocation_policy { + cluster_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[0].range_name + services_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[1].range_name + } + release_channel { + channel = "RAPID" + } +} + +resource "google_container_node_pool" "with_manual_pod_cidr" { + name = "%s-manual" + location = "us-central1" + cluster = google_container_cluster.cluster.name + node_count = 1 + network_config { + create_pod_range = false + pod_range = google_compute_subnetwork.container_subnetwork.secondary_ip_range[2].range_name + } + node_config { + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform", + ] + } +} + +resource "google_container_node_pool" "with_auto_pod_cidr" { + name = "%s-auto" + location = "us-central1" + cluster = google_container_cluster.cluster.name + node_count = 1 + network_config { + create_pod_range = true + pod_range = "auto-pod-range" + pod_ipv4_cidr_block = "10.2.0.0/20" + } + node_config { + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform", + ] + } +} + +`, network, cluster, np, np) +} + func makeUpgradeSettings(maxSurge int, maxUnavailable int, strategy string, nodePoolSoakDuration string, batchNodeCount int, batchPercentage float64, batchSoakDuration string) string { if strategy == "BLUE_GREEN" { return fmt.Sprintf(` diff --git a/website/docs/r/container_cluster.html.markdown b/website/docs/r/container_cluster.html.markdown index 826a739d36c..7332fbd5f22 100644 --- a/website/docs/r/container_cluster.html.markdown +++ b/website/docs/r/container_cluster.html.markdown @@ -255,9 +255,6 @@ region are guaranteed to support the same version. manages the default node pool, which isn't recommended to be used with Terraform. Structure is [documented below](#nested_node_config). -* `network_config` - (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)) Configuration for - [Adding Pod IP address ranges](https://cloud.google.com/kubernetes-engine/docs/how-to/multi-pod-cidr)) to the node pool. Structure is [documented below](#nested_network_config) - * `node_pool` - (Optional) List of node pools associated with this cluster. See [google_container_node_pool](container_node_pool.html) for schema. **Warning:** node pools defined inside a cluster can't be changed (or added/removed) after @@ -681,6 +678,9 @@ This block also contains several computed attributes, documented below. * `cidr_blocks` - (Optional) External networks that can access the Kubernetes cluster master through HTTPS. +* `gcp_public_cidrs_access_enabled` - (Optional) Whether Kubernetes master is + accessible via Google Compute Engine Public IPs. + The `master_authorized_networks_config.cidr_blocks` block supports: * `cidr_block` - (Optional) External network that can access Kubernetes master through HTTPS. @@ -841,14 +841,6 @@ linux_node_config { * `node_group` - (Optional) Setting this field will assign instances of this pool to run on the specified node group. This is useful for running workloads on [sole tenant nodes](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes). 
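Reviewer note (not part of the diff): for readers of the documentation changes, the sketch below pulls together the new cluster-level fields as they are exercised by the acceptance tests earlier in this diff (`testAccContainerCluster_withGcpPublicCidrsAccessEnabled`, `testAccContainerCluster_withEnablePrivateEndpoint`, and `testAccContainerCluster_withPrivateEndpointSubnetwork`). The cluster name, location, and version are illustrative only.

```hcl
# Sketch only: mirrors the acceptance-test configs in this PR; values are illustrative.
resource "google_container_cluster" "example" {
  name               = "example-cluster"
  location           = "us-central1-a"
  min_master_version = "1.23"
  initial_node_count = 1

  master_authorized_networks_config {
    # New field: controls master access from Google Compute Engine public IPs.
    gcp_public_cidrs_access_enabled = false
  }

  private_cluster_config {
    # Can now be toggled in place rather than forcing cluster recreation.
    enable_private_endpoint = true

    # Alternative introduced in this PR: provision the master endpoint in a
    # specific subnetwork (see testAccContainerCluster_withPrivateEndpointSubnetwork).
    # private_endpoint_subnetwork = google_compute_subnetwork.example.name
  }
}
```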
-The `network_config` block supports: - -* `create_pod_range` - (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)) Whether to create a new range for pod IPs in this node pool. Defaults are provided for `pod_range` and `pod_ipv4_cidr_block` if they are not specified. - -* `pod_ipv4_cidr_block` - (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)) The IP address range for pod IPs in this node pool. Only applicable if createPodRange is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use. - -* `pod_range` - (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)) The ID of the secondary range for pod IPs. If `create_pod_range` is true, this ID is used for the new range. If `create_pod_range` is false, uses an existing secondary range with this ID. - The `ephemeral_storage_config` block supports: * `local_ssd_count` (Required) - Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD is 375 GB in size. If zero, it means to disable using local SSDs as ephemeral storage. @@ -978,6 +970,8 @@ In addition, the `private_cluster_config` allows access to the following read-on * `private_endpoint` - The internal IP address of this cluster's master endpoint. +* `private_endpoint_subnetwork` - Subnetwork in cluster's network where master's endpoint will be provisioned. + * `public_endpoint` - The external IP address of this cluster's master endpoint. !> The Google provider is unable to validate certain configurations of diff --git a/website/docs/r/container_node_pool.html.markdown b/website/docs/r/container_node_pool.html.markdown index 3987b62a24b..aec019eb28e 100644 --- a/website/docs/r/container_node_pool.html.markdown +++ b/website/docs/r/container_node_pool.html.markdown @@ -148,8 +148,9 @@ cluster. * `node_config` - (Optional) Parameters used in creating the node pool. See [google_container_cluster](container_cluster.html#nested_node_config) for schema. -* `network_config` - (Optional) The network configuration of the pool. See - [google_container_cluster](container_cluster.html) for schema. +* `network_config` - (Optional) The network configuration of the pool, such as + configuration for [Adding Pod IP address ranges](https://cloud.google.com/kubernetes-engine/docs/how-to/multi-pod-cidr) to the node pool or + enabling private nodes. Structure is [documented below](#nested_network_config). * `node_count` - (Optional) The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside `autoscaling`. @@ -198,6 +199,16 @@ cluster. * `auto_upgrade` - (Optional) Whether the nodes will be automatically upgraded. +The `network_config` block supports: + +* `create_pod_range` - (Optional) Whether to create a new range for pod IPs in this node pool. Defaults are provided for `pod_range` and `pod_ipv4_cidr_block` if they are not specified. + +* `enable_private_nodes` - (Optional) Whether nodes have internal IP addresses only. + +* `pod_ipv4_cidr_block` - (Optional) The IP address range for pod IPs in this node pool. Only applicable if `create_pod_range` is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use. + +* `pod_range` - (Optional) The ID of the secondary range for pod IPs. If `create_pod_range` is true, this ID is used for the new range. If `create_pod_range` is false, uses an existing secondary range with this ID. +
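Reviewer note (not part of the diff): a short usage sketch for the `network_config` arguments documented above, adapted from `testAccContainerNodePool_withNetworkConfig` in this PR. The range name and CIDR are illustrative and assume the cluster's subnetwork can accommodate the new pod range.

```hcl
# Sketch only: adapted from the acceptance tests; names and CIDRs are illustrative.
resource "google_container_node_pool" "with_auto_pod_cidr" {
  name       = "example-np-auto"
  location   = "us-central1"
  cluster    = google_container_cluster.cluster.name
  node_count = 1

  network_config {
    # Create a new secondary range for this pool's pods.
    create_pod_range     = true
    pod_range            = "auto-pod-range"
    pod_ipv4_cidr_block  = "10.2.0.0/20"
    # New GA field; combinable with either pod-range mode.
    enable_private_nodes = true
  }
}
```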
The `upgrade_settings` block supports: * `max_surge` - (Optional) The number of additional nodes that can be added to the node pool during