Add a logging_variant field to GKE node pools and to node pool defaults for GKE clusters. (#6744) (#13049)

This PR implements the feature request from [Add GKE logging variant field for increasing log agent throughput #12667](#12667).

By adding a logging_variant field within node_pool_defaults, GKE users can select a cluster-wide default value for the logging agent of a cluster's node pools. For example, by specifying
```terraform
resource "google_container_cluster" "with_logging_variant_node_pool_default" {
  name               = "example-cluster"
  location           = "us-central1-f"
  initial_node_count = 1

  node_pool_defaults {
    node_config_defaults {
      logging_variant = "MAX_THROUGHPUT"
    }
  }
}
```
every newly created node pool in the cluster will have the max throughput logging agent unless this is explicitly overridden at the node pool level (see the [GKE docs](https://cloud.google.com/stackdriver/docs/solutions/gke/managing-logs#high_throughput_for_all_nodes_in_a_cluster) for more information).

GKE users will also be able to select a logging variant at the node pool level. For example, by specifying
```terraform
resource "google_container_cluster" "with_logging_variant_node_pool_default" {
  name               = "example-cluster"
  location           = "us-central1-f"
  initial_node_count = 1

  node_pool_defaults {
    node_config_defaults {
      logging_variant = "DEFAULT"
    }
  }
}
resource "google_container_node_pool" "with_high_throughput_logging_variant" {
  name    = "example-node-pool-0"
  cluster = google_container_cluster.with_logging_variant_node_pool_default.name
}
resource "google_container_node_pool" "with_high_throughput_logging_variant" {
  name    = "example-node-pool-1"
  cluster = google_container_cluster.with_logging_variant_node_pool_default.name
  node_config {
    logging_variant = "MAX_THROUGHPUT"
  }
}
```
example-node-pool-0 (as well as the default node pool) will have the default logging agent (see the [GKE docs](https://cloud.google.com/stackdriver/docs/solutions/gke/managing-logs#high_throughput_for_all_nodes_in_a_cluster) for more information), but example-node-pool-1 will have the max throughput agent.

Signed-off-by: Modular Magician <magic-modules@google.com>
modular-magician committed Nov 16, 2022
1 parent 7c98257 commit eca88a2
Showing 7 changed files with 382 additions and 3 deletions.
6 changes: 6 additions & 0 deletions .changelog/6744.txt
@@ -0,0 +1,6 @@
```release-note:enhancement
container: Added `node_pool_defaults.node_config_defaults.logging_variant`, `node_pool.node_config.logging_variant`, and `node_config.logging_variant` to `google_container_cluster`.
```
```release-note:enhancement
container: Added `node_config.logging_variant` to `google_container_node_pool`.
```
61 changes: 61 additions & 0 deletions google/node_config.go
@@ -16,6 +16,16 @@ var defaultOauthScopes = []string{
  "https://www.googleapis.com/auth/trace.append",
}

func schemaLoggingVariant() *schema.Schema {
  return &schema.Schema{
    Type:         schema.TypeString,
    Optional:     true,
    Description:  `Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT.`,
    Default:      "DEFAULT",
    ValidateFunc: validation.StringInSlice([]string{"DEFAULT", "MAX_THROUGHPUT"}, false),
  }
}
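For context, a minimal, hypothetical sketch (not part of the diff) of how the validator wired into `ValidateFunc` behaves; the `main` package and the literal inputs are assumptions for illustration:

```go
package main

import (
  "fmt"

  "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
)

func main() {
  // The same validator schemaLoggingVariant installs on the field.
  validate := validation.StringInSlice([]string{"DEFAULT", "MAX_THROUGHPUT"}, false)

  _, errs := validate("MAX_THROUGHPUT", "logging_variant")
  fmt.Println(len(errs)) // 0: accepted value

  _, errs = validate("BASIC", "logging_variant")
  fmt.Println(len(errs)) // 1: rejected, with an error naming the valid values
}
```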

func schemaGcfsConfig(forceNew bool) *schema.Schema {
return &schema.Schema{
Type: schema.TypeList,
@@ -148,6 +158,8 @@ func schemaNodeConfig() *schema.Schema {
        Description: `The number of local SSD disks to be attached to the node.`,
      },

      "logging_variant": schemaLoggingVariant(),

      "gcfs_config": schemaGcfsConfig(true),

      "gvnic": {
@@ -369,6 +381,24 @@ func schemaNodeConfig() *schema.Schema {
  }
}

func expandNodeConfigDefaults(configured interface{}) *container.NodeConfigDefaults {
  configs := configured.([]interface{})
  if len(configs) == 0 || configs[0] == nil {
    return nil
  }
  config := configs[0].(map[string]interface{})

  nodeConfigDefaults := &container.NodeConfigDefaults{}
  if variant, ok := config["logging_variant"]; ok {
    nodeConfigDefaults.LoggingConfig = &container.NodePoolLoggingConfig{
      VariantConfig: &container.LoggingVariantConfig{
        Variant: variant.(string),
      },
    }
  }
  return nodeConfigDefaults
}
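As a quick illustration of the mapping (a hypothetical in-package test sketch, not part of this PR): the expander turns the single-element list Terraform stores for a `MaxItems: 1` block into the API type.

```go
// Hypothetical unit test; the input mirrors
//   node_config_defaults { logging_variant = "MAX_THROUGHPUT" }
func TestExpandNodeConfigDefaults_loggingVariant(t *testing.T) {
  got := expandNodeConfigDefaults([]interface{}{
    map[string]interface{}{"logging_variant": "MAX_THROUGHPUT"},
  })
  if got.LoggingConfig.VariantConfig.Variant != "MAX_THROUGHPUT" {
    t.Fatalf("unexpected variant: %q", got.LoggingConfig.VariantConfig.Variant)
  }
}
```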

func expandNodeConfig(v interface{}) *container.NodeConfig {
  nodeConfigs := v.([]interface{})
  nc := &container.NodeConfig{
@@ -424,6 +454,14 @@ func expandNodeConfig(v interface{}) *container.NodeConfig {
    nc.LocalSsdCount = int64(v.(int))
  }

  if v, ok := nodeConfig["logging_variant"]; ok {
    nc.LoggingConfig = &container.NodePoolLoggingConfig{
      VariantConfig: &container.LoggingVariantConfig{
        Variant: v.(string),
      },
    }
  }

  if v, ok := nodeConfig["gcfs_config"]; ok && len(v.([]interface{})) > 0 {
    conf := v.([]interface{})[0].(map[string]interface{})
    nc.GcfsConfig = &container.GcfsConfig{
@@ -565,6 +603,20 @@ func expandWorkloadMetadataConfig(v interface{}) *container.WorkloadMetadataConfig {
  return wmc
}

func flattenNodeConfigDefaults(c *container.NodeConfigDefaults) []map[string]interface{} {
  result := make([]map[string]interface{}, 0, 1)

  if c == nil {
    return result
  }

  result = append(result, map[string]interface{}{})

  result[0]["logging_variant"] = flattenLoggingVariant(c.LoggingConfig)

  return result
}

func flattenNodeConfig(c *container.NodeConfig) []map[string]interface{} {
  config := make([]map[string]interface{}, 0, 1)

@@ -578,6 +630,7 @@ func flattenNodeConfig(c *container.NodeConfig) []map[string]interface{} {
      "disk_type":            c.DiskType,
      "guest_accelerator":    flattenContainerGuestAccelerators(c.Accelerators),
      "local_ssd_count":      c.LocalSsdCount,
      "logging_variant":      flattenLoggingVariant(c.LoggingConfig),
      "gcfs_config":          flattenGcfsConfig(c.GcfsConfig),
      "gvnic":                flattenGvnic(c.Gvnic),
      "reservation_affinity": flattenGKEReservationAffinity(c.ReservationAffinity),
@@ -635,6 +688,14 @@ func flattenShieldedInstanceConfig(c *container.ShieldedInstanceConfig) []map[string]interface{} {
  return result
}

func flattenLoggingVariant(c *container.NodePoolLoggingConfig) string {
  variant := "DEFAULT"
  if c != nil && c.VariantConfig != nil && c.VariantConfig.Variant != "" {
    variant = c.VariantConfig.Variant
  }
  return variant
}
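A brief illustration of the fallback (hypothetical calls, not part of the diff): a nil or empty API response flattens to the schema default, so clusters that never set a variant produce no spurious diff.

```go
_ = flattenLoggingVariant(nil)                                // "DEFAULT"
_ = flattenLoggingVariant(&container.NodePoolLoggingConfig{}) // "DEFAULT"

// An explicit API value is passed through unchanged.
_ = flattenLoggingVariant(&container.NodePoolLoggingConfig{
  VariantConfig: &container.LoggingVariantConfig{Variant: "MAX_THROUGHPUT"},
}) // "MAX_THROUGHPUT"
```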

func flattenGcfsConfig(c *container.GcfsConfig) []map[string]interface{} {
  result := []map[string]interface{}{}
  if c != nil {
86 changes: 86 additions & 0 deletions google/resource_container_cluster.go
@@ -89,6 +89,33 @@ func clusterSchemaNodeConfig() *schema.Schema {
  return nodeConfigSch
}

// Defines default node pool settings for the entire cluster. These settings are
// overridden if specified on the specific NodePool object.
func clusterSchemaNodePoolDefaults() *schema.Schema {
  return &schema.Schema{
    Type:        schema.TypeList,
    Optional:    true,
    Computed:    true,
    Description: `The default node pool settings for the entire cluster.`,
    MaxItems:    1,
    Elem: &schema.Resource{
      Schema: map[string]*schema.Schema{
        "node_config_defaults": {
          Type:        schema.TypeList,
          Optional:    true,
          Description: `Subset of NodeConfig message that has defaults.`,
          MaxItems:    1,
          Elem: &schema.Resource{
            Schema: map[string]*schema.Schema{
              "logging_variant": schemaLoggingVariant(),
            },
          },
        },
      },
    },
  }
}
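Because both nested blocks are `MaxItems: 1` lists, the value is addressed through indexed keys; the update path further down relies on this. A hypothetical read, for illustration:

```go
// Hypothetical: reading the nested default through the flattened SDK address.
if v, ok := d.GetOk("node_pool_defaults.0.node_config_defaults.0.logging_variant"); ok {
  loggingVariant := v.(string) // "DEFAULT" or "MAX_THROUGHPUT"
  _ = loggingVariant
}
```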

func rfc5545RecurrenceDiffSuppress(k, o, n string, d *schema.ResourceData) bool {
  // This diff gets applied in the cloud console if you specify
  // "FREQ=DAILY" in your config and add a maintenance exclusion.
@@ -959,6 +986,8 @@ func resourceContainerCluster() *schema.Resource {
        ConflictsWith: []string{"enable_autopilot"},
      },

      "node_pool_defaults": clusterSchemaNodePoolDefaults(),

      "node_version": {
        Type:     schema.TypeString,
        Optional: true,
@@ -1613,6 +1642,10 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) error {
    cluster.NodeConfig = expandNodeConfig([]interface{}{})
  }

  if v, ok := d.GetOk("node_pool_defaults"); ok {
    cluster.NodePoolDefaults = expandNodePoolDefaults(v)
  }

  if v, ok := d.GetOk("node_config"); ok {
    cluster.NodeConfig = expandNodeConfig(v)
  }
@@ -2008,6 +2041,10 @@ func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) error {
    return err
  }

  if err := d.Set("node_pool_defaults", flattenNodePoolDefaults(cluster.NodePoolDefaults)); err != nil {
    return err
  }

  return nil
}

@@ -2909,6 +2946,29 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) error {
    log.Printf("[INFO] GKE cluster %s resource usage export config has been updated", d.Id())
  }

  if d.HasChange("node_pool_defaults") && d.HasChange("node_pool_defaults.0.node_config_defaults.0.logging_variant") {
    if v, ok := d.GetOk("node_pool_defaults.0.node_config_defaults.0.logging_variant"); ok {
      loggingVariant := v.(string)
      req := &container.UpdateClusterRequest{
        Update: &container.ClusterUpdate{
          DesiredNodePoolLoggingConfig: &container.NodePoolLoggingConfig{
            VariantConfig: &container.LoggingVariantConfig{
              Variant: loggingVariant,
            },
          },
        },
      }

      updateF := updateFunc(req, "updating GKE cluster desired node pool logging configuration defaults.")
      // Call update serially.
      if err := lockedCall(lockKey, updateF); err != nil {
        return err
      }

      log.Printf("[INFO] GKE cluster %s node pool logging configuration defaults have been updated", d.Id())
    }
  }

  d.Partial(false)

  if _, err := containerClusterAwaitRestingState(config, project, location, clusterName, userAgent, d.Timeout(schema.TimeoutUpdate)); err != nil {
@@ -3749,6 +3809,32 @@ func expandContainerClusterAuthenticatorGroupsConfig(configured interface{}) *container.AuthenticatorGroupsConfig {
  }
}

func expandNodePoolDefaults(configured interface{}) *container.NodePoolDefaults {
  l, ok := configured.([]interface{})
  if !ok || l == nil || len(l) == 0 || l[0] == nil {
    return nil
  }
  nodePoolDefaults := &container.NodePoolDefaults{}
  config := l[0].(map[string]interface{})
  if v, ok := config["node_config_defaults"]; ok && len(v.([]interface{})) > 0 {
    nodePoolDefaults.NodeConfigDefaults = expandNodeConfigDefaults(v)
  }
  return nodePoolDefaults
}

func flattenNodePoolDefaults(c *container.NodePoolDefaults) []map[string]interface{} {
  if c == nil {
    return nil
  }

  result := make(map[string]interface{})
  if c.NodeConfigDefaults != nil {
    result["node_config_defaults"] = flattenNodeConfigDefaults(c.NodeConfigDefaults)
  }

  return []map[string]interface{}{result}
}
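To see how the two halves line up, a hypothetical round-trip (not part of the diff): flattening what the expander produced reproduces the original single-element list.

```go
// Hypothetical round-trip over the expand/flatten pair above.
in := []interface{}{
  map[string]interface{}{
    "node_config_defaults": []interface{}{
      map[string]interface{}{"logging_variant": "MAX_THROUGHPUT"},
    },
  },
}
out := flattenNodePoolDefaults(expandNodePoolDefaults(in))
// out[0]["node_config_defaults"].([]map[string]interface{})[0]["logging_variant"]
// == "MAX_THROUGHPUT"
```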

func flattenNotificationConfig(c *container.NotificationConfig) []map[string]interface{} {
  if c == nil {
    return nil
124 changes: 124 additions & 0 deletions google/resource_container_cluster_test.go
@@ -923,6 +923,83 @@ func TestAccContainerCluster_withNodeConfig(t *testing.T) {
  })
}

func TestAccContainerCluster_withLoggingVariantInNodeConfig(t *testing.T) {
  t.Parallel()
  clusterName := fmt.Sprintf("tf-test-cluster-%s", randString(t, 10))
  vcrTest(t, resource.TestCase{
    PreCheck:     func() { testAccPreCheck(t) },
    Providers:    testAccProviders,
    CheckDestroy: testAccCheckContainerClusterDestroyProducer(t),
    Steps: []resource.TestStep{
      {
        Config: testAccContainerCluster_withLoggingVariantInNodeConfig(clusterName, "MAX_THROUGHPUT"),
      },
      {
        ResourceName:      "google_container_cluster.with_logging_variant_in_node_config",
        ImportState:       true,
        ImportStateVerify: true,
      },
    },
  })
}

func TestAccContainerCluster_withLoggingVariantInNodePool(t *testing.T) {
  t.Parallel()
  clusterName := fmt.Sprintf("tf-test-cluster-%s", randString(t, 10))
  nodePoolName := fmt.Sprintf("tf-test-nodepool-%s", randString(t, 10))
  vcrTest(t, resource.TestCase{
    PreCheck:     func() { testAccPreCheck(t) },
    Providers:    testAccProviders,
    CheckDestroy: testAccCheckContainerClusterDestroyProducer(t),
    Steps: []resource.TestStep{
      {
        Config: testAccContainerCluster_withLoggingVariantInNodePool(clusterName, nodePoolName, "MAX_THROUGHPUT"),
      },
      {
        ResourceName:      "google_container_cluster.with_logging_variant_in_node_pool",
        ImportState:       true,
        ImportStateVerify: true,
      },
    },
  })
}

func TestAccContainerCluster_withLoggingVariantUpdates(t *testing.T) {
  t.Parallel()
  clusterName := fmt.Sprintf("tf-test-cluster-%s", randString(t, 10))
  vcrTest(t, resource.TestCase{
    PreCheck:     func() { testAccPreCheck(t) },
    Providers:    testAccProviders,
    CheckDestroy: testAccCheckContainerClusterDestroyProducer(t),
    Steps: []resource.TestStep{
      {
        Config: testAccContainerCluster_withLoggingVariantNodePoolDefault(clusterName, "DEFAULT"),
      },
      {
        ResourceName:      "google_container_cluster.with_logging_variant_node_pool_default",
        ImportState:       true,
        ImportStateVerify: true,
      },
      {
        Config: testAccContainerCluster_withLoggingVariantNodePoolDefault(clusterName, "MAX_THROUGHPUT"),
      },
      {
        ResourceName:      "google_container_cluster.with_logging_variant_node_pool_default",
        ImportState:       true,
        ImportStateVerify: true,
      },
      {
        Config: testAccContainerCluster_withLoggingVariantNodePoolDefault(clusterName, "DEFAULT"),
      },
      {
        ResourceName:      "google_container_cluster.with_logging_variant_node_pool_default",
        ImportState:       true,
        ImportStateVerify: true,
      },
    },
  })
}

func TestAccContainerCluster_withNodeConfigScopeAlias(t *testing.T) {
  t.Parallel()

@@ -3572,6 +3649,53 @@ resource "google_container_cluster" "with_node_config" {
`, clusterName)
}

func testAccContainerCluster_withLoggingVariantInNodeConfig(clusterName, loggingVariant string) string {
  return fmt.Sprintf(`
resource "google_container_cluster" "with_logging_variant_in_node_config" {
  name               = "%s"
  location           = "us-central1-f"
  initial_node_count = 1

  node_config {
    logging_variant = "%s"
  }
}
`, clusterName, loggingVariant)
}

func testAccContainerCluster_withLoggingVariantInNodePool(clusterName, nodePoolName, loggingVariant string) string {
  return fmt.Sprintf(`
resource "google_container_cluster" "with_logging_variant_in_node_pool" {
  name     = "%s"
  location = "us-central1-f"

  node_pool {
    name               = "%s"
    initial_node_count = 1

    node_config {
      logging_variant = "%s"
    }
  }
}
`, clusterName, nodePoolName, loggingVariant)
}

func testAccContainerCluster_withLoggingVariantNodePoolDefault(clusterName, loggingVariant string) string {
  return fmt.Sprintf(`
resource "google_container_cluster" "with_logging_variant_node_pool_default" {
  name               = "%s"
  location           = "us-central1-f"
  initial_node_count = 1

  node_pool_defaults {
    node_config_defaults {
      logging_variant = "%s"
    }
  }
}
`, clusterName, loggingVariant)
}

func testAccContainerCluster_withNodeConfigUpdate(clusterName string) string {
  return fmt.Sprintf(`
resource "google_container_cluster" "with_node_config" {
