diff --git a/.changelog/6744.txt b/.changelog/6744.txt
new file mode 100644
index 00000000000..a51cdd76b8b
--- /dev/null
+++ b/.changelog/6744.txt
@@ -0,0 +1,6 @@
+```release-note:enhancement
+container: Added `node_pool_defaults.node_config_defaults.logging_variant`, `node_pool.node_config.logging_variant`, and `node_config.logging_variant` to `google_container_cluster`.
+```
+```release-note:enhancement
+container: Added `node_config.logging_variant` to `google_container_node_pool`.
+```
diff --git a/google/node_config.go b/google/node_config.go
index 1f5b7de37aa..a3491a875e0 100644
--- a/google/node_config.go
+++ b/google/node_config.go
@@ -16,6 +16,16 @@ var defaultOauthScopes = []string{
"https://www.googleapis.com/auth/trace.append",
}
+func schemaLoggingVariant() *schema.Schema {
+ return &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Description: `Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT.`,
+ Default: "DEFAULT",
+ ValidateFunc: validation.StringInSlice([]string{"DEFAULT", "MAX_THROUGHPUT"}, false),
+ }
+}
+
func schemaGcfsConfig(forceNew bool) *schema.Schema {
return &schema.Schema{
Type: schema.TypeList,
@@ -148,6 +158,8 @@ func schemaNodeConfig() *schema.Schema {
Description: `The number of local SSD disks to be attached to the node.`,
},
+ "logging_variant": schemaLoggingVariant(),
+
"gcfs_config": schemaGcfsConfig(true),
"gvnic": {
@@ -369,6 +381,24 @@ func schemaNodeConfig() *schema.Schema {
}
}
+func expandNodeConfigDefaults(configured interface{}) *container.NodeConfigDefaults {
+ configs := configured.([]interface{})
+ if len(configs) == 0 || configs[0] == nil {
+ return nil
+ }
+ config := configs[0].(map[string]interface{})
+
+ nodeConfigDefaults := &container.NodeConfigDefaults{}
+ if variant, ok := config["logging_variant"]; ok {
+ nodeConfigDefaults.LoggingConfig = &container.NodePoolLoggingConfig{
+ VariantConfig: &container.LoggingVariantConfig{
+ Variant: variant.(string),
+ },
+ }
+ }
+ return nodeConfigDefaults
+}
+
func expandNodeConfig(v interface{}) *container.NodeConfig {
nodeConfigs := v.([]interface{})
nc := &container.NodeConfig{
@@ -424,6 +454,14 @@ func expandNodeConfig(v interface{}) *container.NodeConfig {
nc.LocalSsdCount = int64(v.(int))
}
+ if v, ok := nodeConfig["logging_variant"]; ok {
+ nc.LoggingConfig = &container.NodePoolLoggingConfig{
+ VariantConfig: &container.LoggingVariantConfig{
+ Variant: v.(string),
+ },
+ }
+ }
+
if v, ok := nodeConfig["gcfs_config"]; ok && len(v.([]interface{})) > 0 {
conf := v.([]interface{})[0].(map[string]interface{})
nc.GcfsConfig = &container.GcfsConfig{
@@ -565,6 +603,20 @@ func expandWorkloadMetadataConfig(v interface{}) *container.WorkloadMetadataConf
return wmc
}
+func flattenNodeConfigDefaults(c *container.NodeConfigDefaults) []map[string]interface{} {
+ result := make([]map[string]interface{}, 0, 1)
+
+ if c == nil {
+ return result
+ }
+
+ result = append(result, map[string]interface{}{})
+
+ result[0]["logging_variant"] = flattenLoggingVariant(c.LoggingConfig)
+
+ return result
+}
+
func flattenNodeConfig(c *container.NodeConfig) []map[string]interface{} {
config := make([]map[string]interface{}, 0, 1)
@@ -578,6 +630,7 @@ func flattenNodeConfig(c *container.NodeConfig) []map[string]interface{} {
"disk_type": c.DiskType,
"guest_accelerator": flattenContainerGuestAccelerators(c.Accelerators),
"local_ssd_count": c.LocalSsdCount,
+ "logging_variant": flattenLoggingVariant(c.LoggingConfig),
"gcfs_config": flattenGcfsConfig(c.GcfsConfig),
"gvnic": flattenGvnic(c.Gvnic),
"reservation_affinity": flattenGKEReservationAffinity(c.ReservationAffinity),
@@ -635,6 +688,14 @@ func flattenShieldedInstanceConfig(c *container.ShieldedInstanceConfig) []map[st
return result
}
+func flattenLoggingVariant(c *container.NodePoolLoggingConfig) string {
+ variant := "DEFAULT"
+ if c != nil && c.VariantConfig != nil && c.VariantConfig.Variant != "" {
+ variant = c.VariantConfig.Variant
+ }
+ return variant
+}
+
func flattenGcfsConfig(c *container.GcfsConfig) []map[string]interface{} {
result := []map[string]interface{}{}
if c != nil {
diff --git a/google/resource_container_cluster.go b/google/resource_container_cluster.go
index af30533f168..a48d766370b 100644
--- a/google/resource_container_cluster.go
+++ b/google/resource_container_cluster.go
@@ -89,6 +89,33 @@ func clusterSchemaNodeConfig() *schema.Schema {
return nodeConfigSch
}
+// Defines default node pool settings for the entire cluster. These settings are
+// overridden if specified on the specific NodePool object.
+func clusterSchemaNodePoolDefaults() *schema.Schema {
+ return &schema.Schema{
+ Type: schema.TypeList,
+ Optional: true,
+ Computed: true,
+ Description: `The default node pool settings for the entire cluster.`,
+ MaxItems: 1,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "node_config_defaults": {
+ Type: schema.TypeList,
+ Optional: true,
+ Description: `Subset of NodeConfig message that has defaults.`,
+ MaxItems: 1,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "logging_variant": schemaLoggingVariant(),
+ },
+ },
+ },
+ },
+ },
+ }
+}
+
func rfc5545RecurrenceDiffSuppress(k, o, n string, d *schema.ResourceData) bool {
// This diff gets applied in the cloud console if you specify
// "FREQ=DAILY" in your config and add a maintenance exclusion.
@@ -959,6 +986,8 @@ func resourceContainerCluster() *schema.Resource {
ConflictsWith: []string{"enable_autopilot"},
},
+ "node_pool_defaults": clusterSchemaNodePoolDefaults(),
+
"node_version": {
Type: schema.TypeString,
Optional: true,
@@ -1613,6 +1642,10 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er
cluster.NodeConfig = expandNodeConfig([]interface{}{})
}
+ if v, ok := d.GetOk("node_pool_defaults"); ok {
+ cluster.NodePoolDefaults = expandNodePoolDefaults(v)
+ }
+
if v, ok := d.GetOk("node_config"); ok {
cluster.NodeConfig = expandNodeConfig(v)
}
@@ -2008,6 +2041,10 @@ func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) erro
return err
}
+ if err := d.Set("node_pool_defaults", flattenNodePoolDefaults(cluster.NodePoolDefaults)); err != nil {
+ return err
+ }
+
return nil
}
@@ -2909,6 +2946,29 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er
log.Printf("[INFO] GKE cluster %s resource usage export config has been updated", d.Id())
}
+ if d.HasChange("node_pool_defaults") && d.HasChange("node_pool_defaults.0.node_config_defaults.0.logging_variant") {
+ if v, ok := d.GetOk("node_pool_defaults.0.node_config_defaults.0.logging_variant"); ok {
+ loggingVariant := v.(string)
+ req := &container.UpdateClusterRequest{
+ Update: &container.ClusterUpdate{
+ DesiredNodePoolLoggingConfig: &container.NodePoolLoggingConfig{
+ VariantConfig: &container.LoggingVariantConfig{
+ Variant: loggingVariant,
+ },
+ },
+ },
+ }
+
+ updateF := updateFunc(req, "updating GKE cluster desired node pool logging configuration defaults.")
+ // Call update serially.
+ if err := lockedCall(lockKey, updateF); err != nil {
+ return err
+ }
+
+ log.Printf("[INFO] GKE cluster %s node pool logging configuration defaults have been updated", d.Id())
+ }
+ }
+
d.Partial(false)
if _, err := containerClusterAwaitRestingState(config, project, location, clusterName, userAgent, d.Timeout(schema.TimeoutUpdate)); err != nil {
@@ -3749,6 +3809,32 @@ func expandContainerClusterAuthenticatorGroupsConfig(configured interface{}) *co
}
}
+func expandNodePoolDefaults(configured interface{}) *container.NodePoolDefaults {
+ l, ok := configured.([]interface{})
+ if !ok || l == nil || len(l) == 0 || l[0] == nil {
+ return nil
+ }
+ nodePoolDefaults := &container.NodePoolDefaults{}
+ config := l[0].(map[string]interface{})
+ if v, ok := config["node_config_defaults"]; ok && len(v.([]interface{})) > 0 {
+ nodePoolDefaults.NodeConfigDefaults = expandNodeConfigDefaults(v)
+ }
+ return nodePoolDefaults
+}
+
+func flattenNodePoolDefaults(c *container.NodePoolDefaults) []map[string]interface{} {
+ if c == nil {
+ return nil
+ }
+
+ result := make(map[string]interface{})
+ if c.NodeConfigDefaults != nil {
+ result["node_config_defaults"] = flattenNodeConfigDefaults(c.NodeConfigDefaults)
+ }
+
+ return []map[string]interface{}{result}
+}
+
func flattenNotificationConfig(c *container.NotificationConfig) []map[string]interface{} {
if c == nil {
return nil
diff --git a/google/resource_container_cluster_test.go b/google/resource_container_cluster_test.go
index 5ba8cc1132c..9437e1357a5 100644
--- a/google/resource_container_cluster_test.go
+++ b/google/resource_container_cluster_test.go
@@ -923,6 +923,83 @@ func TestAccContainerCluster_withNodeConfig(t *testing.T) {
})
}
+func TestAccContainerCluster_withLoggingVariantInNodeConfig(t *testing.T) {
+ t.Parallel()
+ clusterName := fmt.Sprintf("tf-test-cluster-%s", randString(t, 10))
+ vcrTest(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckContainerClusterDestroyProducer(t),
+ Steps: []resource.TestStep{
+ {
+ Config: testAccContainerCluster_withLoggingVariantInNodeConfig(clusterName, "MAX_THROUGHPUT"),
+ },
+ {
+ ResourceName: "google_container_cluster.with_logging_variant_in_node_config",
+ ImportState: true,
+ ImportStateVerify: true,
+ },
+ },
+ })
+}
+
+func TestAccContainerCluster_withLoggingVariantInNodePool(t *testing.T) {
+ t.Parallel()
+ clusterName := fmt.Sprintf("tf-test-cluster-%s", randString(t, 10))
+ nodePoolName := fmt.Sprintf("tf-test-nodepool-%s", randString(t, 10))
+ vcrTest(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckContainerClusterDestroyProducer(t),
+ Steps: []resource.TestStep{
+ {
+ Config: testAccContainerCluster_withLoggingVariantInNodePool(clusterName, nodePoolName, "MAX_THROUGHPUT"),
+ },
+ {
+ ResourceName: "google_container_cluster.with_logging_variant_in_node_pool",
+ ImportState: true,
+ ImportStateVerify: true,
+ },
+ },
+ })
+}
+
+func TestAccContainerCluster_withLoggingVariantUpdates(t *testing.T) {
+ t.Parallel()
+ clusterName := fmt.Sprintf("tf-test-cluster-%s", randString(t, 10))
+ vcrTest(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckContainerClusterDestroyProducer(t),
+ Steps: []resource.TestStep{
+ {
+ Config: testAccContainerCluster_withLoggingVariantNodePoolDefault(clusterName, "DEFAULT"),
+ },
+ {
+ ResourceName: "google_container_cluster.with_logging_variant_node_pool_default",
+ ImportState: true,
+ ImportStateVerify: true,
+ },
+ {
+ Config: testAccContainerCluster_withLoggingVariantNodePoolDefault(clusterName, "MAX_THROUGHPUT"),
+ },
+ {
+ ResourceName: "google_container_cluster.with_logging_variant_node_pool_default",
+ ImportState: true,
+ ImportStateVerify: true,
+ },
+ {
+ Config: testAccContainerCluster_withLoggingVariantNodePoolDefault(clusterName, "DEFAULT"),
+ },
+ {
+ ResourceName: "google_container_cluster.with_logging_variant_node_pool_default",
+ ImportState: true,
+ ImportStateVerify: true,
+ },
+ },
+ })
+}
+
func TestAccContainerCluster_withNodeConfigScopeAlias(t *testing.T) {
t.Parallel()
@@ -3572,6 +3649,53 @@ resource "google_container_cluster" "with_node_config" {
`, clusterName)
}
+func testAccContainerCluster_withLoggingVariantInNodeConfig(clusterName, loggingVariant string) string {
+ return fmt.Sprintf(`
+resource "google_container_cluster" "with_logging_variant_in_node_config" {
+ name = "%s"
+ location = "us-central1-f"
+ initial_node_count = 1
+
+ node_config {
+ logging_variant = "%s"
+ }
+}
+`, clusterName, loggingVariant)
+}
+
+func testAccContainerCluster_withLoggingVariantInNodePool(clusterName, nodePoolName, loggingVariant string) string {
+ return fmt.Sprintf(`
+resource "google_container_cluster" "with_logging_variant_in_node_pool" {
+ name = "%s"
+ location = "us-central1-f"
+
+ node_pool {
+ name = "%s"
+ initial_node_count = 1
+ node_config {
+ logging_variant = "%s"
+ }
+ }
+}
+`, clusterName, nodePoolName, loggingVariant)
+}
+
+func testAccContainerCluster_withLoggingVariantNodePoolDefault(clusterName, loggingVariant string) string {
+ return fmt.Sprintf(`
+resource "google_container_cluster" "with_logging_variant_node_pool_default" {
+ name = "%s"
+ location = "us-central1-f"
+ initial_node_count = 1
+
+ node_pool_defaults {
+ node_config_defaults {
+ logging_variant = "%s"
+ }
+ }
+}
+`, clusterName, loggingVariant)
+}
+
func testAccContainerCluster_withNodeConfigUpdate(clusterName string) string {
return fmt.Sprintf(`
resource "google_container_cluster" "with_node_config" {
diff --git a/google/resource_container_node_pool.go b/google/resource_container_node_pool.go
index ba5257b3968..6778a8a1dac 100644
--- a/google/resource_container_node_pool.go
+++ b/google/resource_container_node_pool.go
@@ -1050,6 +1050,45 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node
if d.HasChange(prefix + "node_config") {
+ if d.HasChange(prefix + "node_config.0.logging_variant") {
+ if v, ok := d.GetOk(prefix + "node_config.0.logging_variant"); ok {
+ loggingVariant := v.(string)
+ req := &container.UpdateNodePoolRequest{
+ Name: name,
+ LoggingConfig: &container.NodePoolLoggingConfig{
+ VariantConfig: &container.LoggingVariantConfig{
+ Variant: loggingVariant,
+ },
+ },
+ }
+
+ updateF := func() error {
+ clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req)
+ if config.UserProjectOverride {
+ clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project)
+ }
+ op, err := clusterNodePoolsUpdateCall.Do()
+ if err != nil {
+ return err
+ }
+
+ // Wait until it's updated
+ return containerOperationWait(config, op,
+ nodePoolInfo.project,
+ nodePoolInfo.location,
+ "updating GKE node pool logging_variant", userAgent,
+ timeout)
+ }
+
+ // Call update serially.
+ if err := lockedCall(lockKey, updateF); err != nil {
+ return err
+ }
+
+ log.Printf("[INFO] Updated logging_variant for node pool %s", name)
+ }
+ }
+
if d.HasChange(prefix + "node_config.0.tags") {
req := &container.UpdateNodePoolRequest{
Name: name,
diff --git a/google/resource_container_node_pool_test.go b/google/resource_container_node_pool_test.go
index cf4c32720d3..272f0e1c69e 100644
--- a/google/resource_container_node_pool_test.go
+++ b/google/resource_container_node_pool_test.go
@@ -152,6 +152,45 @@ func TestAccContainerNodePool_noName(t *testing.T) {
})
}
+func TestAccContainerNodePool_withLoggingVariantUpdates(t *testing.T) {
+ t.Parallel()
+
+ cluster := fmt.Sprintf("tf-test-cluster-%s", randString(t, 10))
+ nodePool := fmt.Sprintf("tf-test-nodepool-%s", randString(t, 10))
+
+ vcrTest(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t),
+ Steps: []resource.TestStep{
+ {
+ Config: testAccContainerNodePool_withLoggingVariant(cluster, nodePool, "DEFAULT"),
+ },
+ {
+ ResourceName: "google_container_node_pool.with_logging_variant",
+ ImportState: true,
+ ImportStateVerify: true,
+ },
+ {
+ Config: testAccContainerNodePool_withLoggingVariant(cluster, nodePool, "MAX_THROUGHPUT"),
+ },
+ {
+ ResourceName: "google_container_node_pool.with_logging_variant",
+ ImportState: true,
+ ImportStateVerify: true,
+ },
+ {
+ Config: testAccContainerNodePool_withLoggingVariant(cluster, nodePool, "DEFAULT"),
+ },
+ {
+ ResourceName: "google_container_node_pool.with_logging_variant",
+ ImportState: true,
+ ImportStateVerify: true,
+ },
+ },
+ })
+}
+
func TestAccContainerNodePool_withNodeConfig(t *testing.T) {
t.Parallel()
@@ -955,6 +994,26 @@ resource "google_container_node_pool" "np" {
`, cluster, np)
}
+func testAccContainerNodePool_withLoggingVariant(cluster, np, loggingVariant string) string {
+ return fmt.Sprintf(`
+resource "google_container_cluster" "with_logging_variant" {
+ name = "%s"
+ location = "us-central1-a"
+ initial_node_count = 1
+}
+
+resource "google_container_node_pool" "with_logging_variant" {
+ name = "%s"
+ location = "us-central1-a"
+ cluster = google_container_cluster.with_logging_variant.name
+ initial_node_count = 1
+ node_config {
+ logging_variant = "%s"
+ }
+}
+`, cluster, np, loggingVariant)
+}
+
func testAccContainerNodePool_basicWithClusterId(cluster, np string) string {
return fmt.Sprintf(`
provider "google" {
diff --git a/website/docs/r/container_cluster.html.markdown b/website/docs/r/container_cluster.html.markdown
index 939f8a7d6d9..e9167496d12 100644
--- a/website/docs/r/container_cluster.html.markdown
+++ b/website/docs/r/container_cluster.html.markdown
@@ -269,7 +269,7 @@ region are guaranteed to support the same version.
[autopilot](https://cloud.google.com/kubernetes-engine/docs/concepts/autopilot-overview#comparison) clusters and
[node auto-provisioning](https://cloud.google.com/kubernetes-engine/docs/how-to/node-auto-provisioning)-enabled clusters. Structure is [documented below](#nested_node_pool_auto_config).
-* `node_pool_defaults` - (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)) Default NodePool settings for the entire cluster. These settings are overridden if specified on the specific NodePool object. Structure is [documented below](#nested_node_pool_defaults).
+* `node_pool_defaults` - (Optional) Default NodePool settings for the entire cluster. These settings are overridden if specified on the specific NodePool object. Structure is [documented below](#nested_node_pool_defaults).
* `node_version` - (Optional) The Kubernetes version on the nodes. Must either be unset
or set to the same value as `min_master_version` on create. Defaults to the default
@@ -710,6 +710,8 @@ ephemeral_storage_config {
}
```
+* `logging_variant` - (Optional) Parameter for specifying the type of logging agent used in a node pool. This will override any [cluster-wide default value](#nested_node_pool_defaults). Valid values include DEFAULT and MAX_THROUGHPUT. See [Increasing logging agent throughput](https://cloud.google.com/stackdriver/docs/solutions/gke/managing-logs#throughput) for more information.
+
* `gcfs_config` - (Optional) Parameters for the Google Container Filesystem (GCFS).
If unspecified, GCFS will not be enabled on the node pool. When enabling this feature you must specify `image_type = "COS_CONTAINERD"` and `node_version` from GKE versions 1.19 or later to use it.
For GKE versions 1.19, 1.20, and 1.21, the recommended minimum `node_version` would be 1.19.15-gke.1300, 1.20.11-gke.1300, and 1.21.5-gke.1300 respectively.
@@ -901,11 +903,13 @@ node_pool_auto_config {
```
The `node_pool_defaults` block supports:
-* `node_config_defaults` (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)) - Subset of NodeConfig message that has defaults.
+* `node_config_defaults` (Optional) - Subset of NodeConfig message that has defaults.
The `node_config_defaults` block supports:
-* `gcfs_config` (Optional) The default Google Container Filesystem (GCFS) configuration at the cluster level. e.g. enable [image streaming](https://cloud.google.com/kubernetes-engine/docs/how-to/image-streaming) across all the node pools within the cluster. Structure is [documented below](#nested_gcfs_config).
+* `logging_variant` (Optional) The type of logging agent that is deployed by default for newly created node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT. See [Increasing logging agent throughput](https://cloud.google.com/stackdriver/docs/solutions/gke/managing-logs#throughput) for more information.
+
+* `gcfs_config` (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)) The default Google Container Filesystem (GCFS) configuration at the cluster level. e.g. enable [image streaming](https://cloud.google.com/kubernetes-engine/docs/how-to/image-streaming) across all the node pools within the cluster. Structure is [documented below](#nested_gcfs_config).
The `notification_config` block supports: