diff --git a/azurerm/helpers/azure/hdinsight.go b/azurerm/helpers/azure/hdinsight.go index a2736cbd8dff..9b90c792b218 100644 --- a/azurerm/helpers/azure/hdinsight.go +++ b/azurerm/helpers/azure/hdinsight.go @@ -164,6 +164,23 @@ func SchemaHDInsightsExternalMetastore() *schema.Schema { } } +func SchemaHDInsightsExternalMetastores() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "hive": SchemaHDInsightsExternalMetastore(), + + "oozie": SchemaHDInsightsExternalMetastore(), + + "ambari": SchemaHDInsightsExternalMetastore(), + }, + }, + } +} + func ExpandHDInsightsConfigurations(input []interface{}) map[string]interface{} { vs := input[0].(map[string]interface{}) diff --git a/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go b/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go index 5c469ea317ca..9739ce076a5a 100644 --- a/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go +++ b/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go @@ -92,20 +92,7 @@ func resourceArmHDInsightHadoopCluster() *schema.Resource { "gateway": azure.SchemaHDInsightsGateway(), - "metastores": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "hive": azure.SchemaHDInsightsExternalMetastore(), - - "oozie": azure.SchemaHDInsightsExternalMetastore(), - - "ambari": azure.SchemaHDInsightsExternalMetastore(), - }, - }, - }, + "metastores": azure.SchemaHDInsightsExternalMetastores(), "storage_account": azure.SchemaHDInsightsStorageAccounts(), diff --git a/azurerm/internal/services/hdinsight/hdinsight_hbase_cluster_resource.go b/azurerm/internal/services/hdinsight/hdinsight_hbase_cluster_resource.go index be83294e4972..7e1812b7e1a2 100644 --- a/azurerm/internal/services/hdinsight/hdinsight_hbase_cluster_resource.go +++ b/azurerm/internal/services/hdinsight/hdinsight_hbase_cluster_resource.go @@ -87,6 +87,8 @@ func resourceArmHDInsightHBaseCluster() *schema.Resource { "gateway": azure.SchemaHDInsightsGateway(), + "metastores": azure.SchemaHDInsightsExternalMetastores(), + "storage_account": azure.SchemaHDInsightsStorageAccounts(), "storage_account_gen2": azure.SchemaHDInsightsGen2StorageAccounts(), @@ -138,7 +140,13 @@ func resourceArmHDInsightHBaseClusterCreate(d *schema.ResourceData, meta interfa componentVersions := expandHDInsightHBaseComponentVersion(componentVersionsRaw) gatewayRaw := d.Get("gateway").([]interface{}) - gateway := azure.ExpandHDInsightsConfigurations(gatewayRaw) + configurations := azure.ExpandHDInsightsConfigurations(gatewayRaw) + + metastoresRaw := d.Get("metastores").([]interface{}) + metastores := expandHDInsightsMetastore(metastoresRaw) + for k, v := range metastores { + configurations[k] = v + } storageAccountsRaw := d.Get("storage_account").([]interface{}) storageAccountsGen2Raw := d.Get("storage_account_gen2").([]interface{}) @@ -181,7 +189,7 @@ func resourceArmHDInsightHBaseClusterCreate(d *schema.ResourceData, meta interfa ClusterDefinition: &hdinsight.ClusterDefinition{ Kind: utils.String("HBase"), ComponentVersion: componentVersions, - Configurations: gateway, + Configurations: configurations, }, StorageProfile: &hdinsight.StorageProfile{ Storageaccounts: storageAccounts, @@ -241,11 +249,17 @@ func resourceArmHDInsightHBaseClusterRead(d *schema.ResourceData, meta interface return fmt.Errorf("Error retrieving HDInsight 
HBase Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } - configuration, err := configurationsClient.Get(ctx, resourceGroup, name, "gateway") + // Each call to configurationsClient methods is HTTP request. Getting all settings in one operation + configurations, err := configurationsClient.List(ctx, resourceGroup, name) if err != nil { return fmt.Errorf("Error retrieving Configuration for HDInsight HBase Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } + gateway, exists := configurations.Configurations["gateway"] + if !exists { + return fmt.Errorf("Error retrieving gateway for HDInsight HBase Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + } + d.Set("name", name) d.Set("resource_group_name", resourceGroup) if location := resp.Location; location != nil { @@ -263,9 +277,11 @@ func resourceArmHDInsightHBaseClusterRead(d *schema.ResourceData, meta interface return fmt.Errorf("Error flattening `component_version`: %+v", err) } - if err := d.Set("gateway", azure.FlattenHDInsightsConfigurations(configuration.Value)); err != nil { + if err := d.Set("gateway", azure.FlattenHDInsightsConfigurations(gateway)); err != nil { return fmt.Errorf("Error flattening `gateway`: %+v", err) } + + flattenHDInsightsMetastores(d, configurations.Configurations) } hbaseRoles := hdInsightRoleDefinition{ diff --git a/azurerm/internal/services/hdinsight/hdinsight_interactive_query_cluster_resource.go b/azurerm/internal/services/hdinsight/hdinsight_interactive_query_cluster_resource.go index 6a4a909786d7..07f9d4507c1e 100644 --- a/azurerm/internal/services/hdinsight/hdinsight_interactive_query_cluster_resource.go +++ b/azurerm/internal/services/hdinsight/hdinsight_interactive_query_cluster_resource.go @@ -87,6 +87,8 @@ func resourceArmHDInsightInteractiveQueryCluster() *schema.Resource { "gateway": azure.SchemaHDInsightsGateway(), + "metastores": azure.SchemaHDInsightsExternalMetastores(), + "storage_account": azure.SchemaHDInsightsStorageAccounts(), "storage_account_gen2": azure.SchemaHDInsightsGen2StorageAccounts(), @@ -138,7 +140,13 @@ func resourceArmHDInsightInteractiveQueryClusterCreate(d *schema.ResourceData, m componentVersions := expandHDInsightInteractiveQueryComponentVersion(componentVersionsRaw) gatewayRaw := d.Get("gateway").([]interface{}) - gateway := azure.ExpandHDInsightsConfigurations(gatewayRaw) + configurations := azure.ExpandHDInsightsConfigurations(gatewayRaw) + + metastoresRaw := d.Get("metastores").([]interface{}) + metastores := expandHDInsightsMetastore(metastoresRaw) + for k, v := range metastores { + configurations[k] = v + } storageAccountsRaw := d.Get("storage_account").([]interface{}) storageAccountsGen2Raw := d.Get("storage_account_gen2").([]interface{}) @@ -181,7 +189,7 @@ func resourceArmHDInsightInteractiveQueryClusterCreate(d *schema.ResourceData, m ClusterDefinition: &hdinsight.ClusterDefinition{ Kind: utils.String("INTERACTIVEHIVE"), ComponentVersion: componentVersions, - Configurations: gateway, + Configurations: configurations, }, StorageProfile: &hdinsight.StorageProfile{ Storageaccounts: storageAccounts, @@ -241,11 +249,17 @@ func resourceArmHDInsightInteractiveQueryClusterRead(d *schema.ResourceData, met return fmt.Errorf("Error retrieving HDInsight Interactive Query Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } - configuration, err := configurationsClient.Get(ctx, resourceGroup, name, "gateway") + // Each call to configurationsClient methods is HTTP request. 
Getting all settings in one operation + configurations, err := configurationsClient.List(ctx, resourceGroup, name) if err != nil { return fmt.Errorf("Error retrieving Configuration for HDInsight Interactive Query Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } + gateway, exists := configurations.Configurations["gateway"] + if !exists { + return fmt.Errorf("Error retrieving gateway for HDInsight Interactive Query Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + } + d.Set("name", name) d.Set("resource_group_name", resourceGroup) if location := resp.Location; location != nil { @@ -263,9 +277,11 @@ func resourceArmHDInsightInteractiveQueryClusterRead(d *schema.ResourceData, met return fmt.Errorf("Error flattening `component_version`: %+v", err) } - if err := d.Set("gateway", azure.FlattenHDInsightsConfigurations(configuration.Value)); err != nil { + if err := d.Set("gateway", azure.FlattenHDInsightsConfigurations(gateway)); err != nil { return fmt.Errorf("Error flattening `gateway`: %+v", err) } + + flattenHDInsightsMetastores(d, configurations.Configurations) } interactiveQueryRoles := hdInsightRoleDefinition{ diff --git a/azurerm/internal/services/hdinsight/hdinsight_kafka_cluster_resource.go b/azurerm/internal/services/hdinsight/hdinsight_kafka_cluster_resource.go index d8f8ccb250cf..1c7d1974305d 100644 --- a/azurerm/internal/services/hdinsight/hdinsight_kafka_cluster_resource.go +++ b/azurerm/internal/services/hdinsight/hdinsight_kafka_cluster_resource.go @@ -71,6 +71,8 @@ func resourceArmHDInsightKafkaCluster() *schema.Resource { "tls_min_version": azure.SchemaHDInsightTls(), + "metastores": azure.SchemaHDInsightsExternalMetastores(), + "component_version": { Type: schema.TypeList, Required: true, @@ -139,7 +141,13 @@ func resourceArmHDInsightKafkaClusterCreate(d *schema.ResourceData, meta interfa componentVersions := expandHDInsightKafkaComponentVersion(componentVersionsRaw) gatewayRaw := d.Get("gateway").([]interface{}) - gateway := azure.ExpandHDInsightsConfigurations(gatewayRaw) + configurations := azure.ExpandHDInsightsConfigurations(gatewayRaw) + + metastoresRaw := d.Get("metastores").([]interface{}) + metastores := expandHDInsightsMetastore(metastoresRaw) + for k, v := range metastores { + configurations[k] = v + } storageAccountsRaw := d.Get("storage_account").([]interface{}) storageAccountsGen2Raw := d.Get("storage_account_gen2").([]interface{}) @@ -182,7 +190,7 @@ func resourceArmHDInsightKafkaClusterCreate(d *schema.ResourceData, meta interfa ClusterDefinition: &hdinsight.ClusterDefinition{ Kind: utils.String("Kafka"), ComponentVersion: componentVersions, - Configurations: gateway, + Configurations: configurations, }, StorageProfile: &hdinsight.StorageProfile{ Storageaccounts: storageAccounts, @@ -242,9 +250,15 @@ func resourceArmHDInsightKafkaClusterRead(d *schema.ResourceData, meta interface return fmt.Errorf("Error retrieving HDInsight Kafka Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } - configuration, err := configurationsClient.Get(ctx, resourceGroup, name, "gateway") + // Each call to configurationsClient methods is HTTP request. 
Getting all settings in one operation + configurations, err := configurationsClient.List(ctx, resourceGroup, name) if err != nil { return fmt.Errorf("Error retrieving Configuration for HDInsight Kafka Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } + + gateway, exists := configurations.Configurations["gateway"] + if !exists { + return fmt.Errorf("Error retrieving gateway for HDInsight Kafka Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + } + d.Set("name", name) @@ -264,9 +278,11 @@ func resourceArmHDInsightKafkaClusterRead(d *schema.ResourceData, meta interface return fmt.Errorf("Error flattening `component_version`: %+v", err) } - if err := d.Set("gateway", azure.FlattenHDInsightsConfigurations(configuration.Value)); err != nil { + if err := d.Set("gateway", azure.FlattenHDInsightsConfigurations(gateway)); err != nil { return fmt.Errorf("Error flattening `gateway`: %+v", err) } + + flattenHDInsightsMetastores(d, configurations.Configurations) } kafkaRoles := hdInsightRoleDefinition{ diff --git a/azurerm/internal/services/hdinsight/hdinsight_spark_cluster_resource.go b/azurerm/internal/services/hdinsight/hdinsight_spark_cluster_resource.go index 6303ed1ed511..38e3d078e4e5 100644 --- a/azurerm/internal/services/hdinsight/hdinsight_spark_cluster_resource.go +++ b/azurerm/internal/services/hdinsight/hdinsight_spark_cluster_resource.go @@ -87,6 +87,8 @@ func resourceArmHDInsightSparkCluster() *schema.Resource { "gateway": azure.SchemaHDInsightsGateway(), + "metastores": azure.SchemaHDInsightsExternalMetastores(), + "storage_account": azure.SchemaHDInsightsStorageAccounts(), "storage_account_gen2": azure.SchemaHDInsightsGen2StorageAccounts(), @@ -138,7 +140,13 @@ func resourceArmHDInsightSparkClusterCreate(d *schema.ResourceData, meta interfa componentVersions := expandHDInsightSparkComponentVersion(componentVersionsRaw) gatewayRaw := d.Get("gateway").([]interface{}) - gateway := azure.ExpandHDInsightsConfigurations(gatewayRaw) + configurations := azure.ExpandHDInsightsConfigurations(gatewayRaw) + + metastoresRaw := d.Get("metastores").([]interface{}) + metastores := expandHDInsightsMetastore(metastoresRaw) + for k, v := range metastores { + configurations[k] = v + } storageAccountsRaw := d.Get("storage_account").([]interface{}) storageAccountsGen2Raw := d.Get("storage_account_gen2").([]interface{}) @@ -181,7 +189,7 @@ func resourceArmHDInsightSparkClusterCreate(d *schema.ResourceData, meta interfa ClusterDefinition: &hdinsight.ClusterDefinition{ Kind: utils.String("Spark"), ComponentVersion: componentVersions, - Configurations: gateway, + Configurations: configurations, }, StorageProfile: &hdinsight.StorageProfile{ Storageaccounts: storageAccounts, @@ -241,11 +249,17 @@ func resourceArmHDInsightSparkClusterRead(d *schema.ResourceData, meta interface return fmt.Errorf("Error retrieving HDInsight Spark Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } - configuration, err := configurationsClient.Get(ctx, resourceGroup, name, "gateway") + // Each call to configurationsClient methods is HTTP request. 
Getting all settings in one operation + configurations, err := configurationsClient.List(ctx, resourceGroup, name) if err != nil { return fmt.Errorf("Error retrieving Configuration for HDInsight Spark Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } + gateway, exists := configurations.Configurations["gateway"] + if !exists { + return fmt.Errorf("Error retrieving gateway for HDInsight Spark Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + } + d.Set("name", name) d.Set("resource_group_name", resourceGroup) if location := resp.Location; location != nil { @@ -263,9 +277,11 @@ func resourceArmHDInsightSparkClusterRead(d *schema.ResourceData, meta interface return fmt.Errorf("Error flattening `component_version`: %+v", err) } - if err := d.Set("gateway", azure.FlattenHDInsightsConfigurations(configuration.Value)); err != nil { + if err := d.Set("gateway", azure.FlattenHDInsightsConfigurations(gateway)); err != nil { return fmt.Errorf("Error flattening `gateway`: %+v", err) } + + flattenHDInsightsMetastores(d, configurations.Configurations) } sparkRoles := hdInsightRoleDefinition{ diff --git a/azurerm/internal/services/hdinsight/hdinsight_storm_cluster_resource.go b/azurerm/internal/services/hdinsight/hdinsight_storm_cluster_resource.go index 9f66197d27c9..bffffa1c149a 100644 --- a/azurerm/internal/services/hdinsight/hdinsight_storm_cluster_resource.go +++ b/azurerm/internal/services/hdinsight/hdinsight_storm_cluster_resource.go @@ -87,6 +87,8 @@ func resourceArmHDInsightStormCluster() *schema.Resource { "gateway": azure.SchemaHDInsightsGateway(), + "metastores": azure.SchemaHDInsightsExternalMetastores(), + "storage_account": azure.SchemaHDInsightsStorageAccounts(), "roles": { @@ -136,7 +138,13 @@ func resourceArmHDInsightStormClusterCreate(d *schema.ResourceData, meta interfa componentVersions := expandHDInsightStormComponentVersion(componentVersionsRaw) gatewayRaw := d.Get("gateway").([]interface{}) - gateway := azure.ExpandHDInsightsConfigurations(gatewayRaw) + configurations := azure.ExpandHDInsightsConfigurations(gatewayRaw) + + metastoresRaw := d.Get("metastores").([]interface{}) + metastores := expandHDInsightsMetastore(metastoresRaw) + for k, v := range metastores { + configurations[k] = v + } storageAccountsRaw := d.Get("storage_account").([]interface{}) storageAccounts, identity, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw, nil) @@ -178,7 +186,7 @@ func resourceArmHDInsightStormClusterCreate(d *schema.ResourceData, meta interfa ClusterDefinition: &hdinsight.ClusterDefinition{ Kind: utils.String("Storm"), ComponentVersion: componentVersions, - Configurations: gateway, + Configurations: configurations, }, StorageProfile: &hdinsight.StorageProfile{ Storageaccounts: storageAccounts, @@ -238,11 +246,17 @@ func resourceArmHDInsightStormClusterRead(d *schema.ResourceData, meta interface return fmt.Errorf("Error retrieving HDInsight Storm Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } - configuration, err := configurationsClient.Get(ctx, resourceGroup, name, "gateway") + // Each call to configurationsClient methods is HTTP request. 
Getting all settings in one operation + configurations, err := configurationsClient.List(ctx, resourceGroup, name) if err != nil { return fmt.Errorf("Error retrieving Configuration for HDInsight Storm Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } + gateway, exists := configurations.Configurations["gateway"] + if !exists { + return fmt.Errorf("Error retrieving gateway for HDInsight Storm Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + } + d.Set("name", name) d.Set("resource_group_name", resourceGroup) if location := resp.Location; location != nil { @@ -260,9 +274,11 @@ func resourceArmHDInsightStormClusterRead(d *schema.ResourceData, meta interface return fmt.Errorf("Error flattening `component_version`: %+v", err) } - if err := d.Set("gateway", azure.FlattenHDInsightsConfigurations(configuration.Value)); err != nil { + if err := d.Set("gateway", azure.FlattenHDInsightsConfigurations(gateway)); err != nil { return fmt.Errorf("Error flattening `gateway`: %+v", err) } + + flattenHDInsightsMetastores(d, configurations.Configurations) } stormRoles := hdInsightRoleDefinition{ diff --git a/azurerm/internal/services/hdinsight/tests/hdinsight_hbase_cluster_resource_test.go b/azurerm/internal/services/hdinsight/tests/hdinsight_hbase_cluster_resource_test.go index 19ac97b8ae83..85e50592c7b8 100644 --- a/azurerm/internal/services/hdinsight/tests/hdinsight_hbase_cluster_resource_test.go +++ b/azurerm/internal/services/hdinsight/tests/hdinsight_hbase_cluster_resource_test.go @@ -226,6 +226,101 @@ func TestAccAzureRMHDInsightHBaseCluster_tls(t *testing.T) { }) } +func TestAccAzureRMHDInsightHBaseCluster_allMetastores(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_hdinsight_hbase_cluster", "test") + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMHDInsightClusterDestroy(data.ResourceType), + Steps: []resource.TestStep{ + { + Config: testAccAzureRMHDInsightHBaseCluster_allMetastores(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account", + "metastores.0.hive.0.password", + "metastores.0.oozie.0.password", + "metastores.0.ambari.0.password"), + }, + }) +} + +func TestAccAzureRMHDInsightHBaseCluster_hiveMetastore(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_hdinsight_hbase_cluster", "test") + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMHDInsightClusterDestroy(data.ResourceType), + Steps: []resource.TestStep{ + { + Config: testAccAzureRMHDInsightHBaseCluster_hiveMetastore(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + }, + }) +} + +func TestAccAzureRMHDInsightHBaseCluster_updateMetastore(t *testing.T) { + data := 
acceptance.BuildTestData(t, "azurerm_hdinsight_hbase_cluster", "test") + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMHDInsightClusterDestroy(data.ResourceType), + Steps: []resource.TestStep{ + { + Config: testAccAzureRMHDInsightHBaseCluster_hiveMetastore(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account", + "metastores.0.hive.0.password", + "metastores.0.oozie.0.password", + "metastores.0.ambari.0.password"), + { + Config: testAccAzureRMHDInsightHBaseCluster_allMetastores(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account", + "metastores.0.hive.0.password", + "metastores.0.oozie.0.password", + "metastores.0.ambari.0.password"), + }, + }) +} + func testAccAzureRMHDInsightHBaseCluster_basic(data acceptance.TestData) string { template := testAccAzureRMHDInsightHBaseCluster_template(data) return fmt.Sprintf(` @@ -793,3 +888,187 @@ resource "azurerm_hdinsight_hbase_cluster" "test" { } `, template, data.RandomInteger) } + +func testAccAzureRMHDInsightHBaseCluster_allMetastores(data acceptance.TestData) string { + template := testAccAzureRMHDInsightHBaseCluster_template(data) + return fmt.Sprintf(` +%s +resource "azurerm_sql_server" "test" { + name = "acctestsql-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + administrator_login = "sql_admin" + administrator_login_password = "TerrAform123!" 
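+  # One SQL server backs all three external metastores: the hive, oozie and ambari databases below are created on it, and the cluster's metastores block reuses these admin credentials.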
+ version = "12.0" +} +resource "azurerm_sql_database" "hive" { + name = "hive" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + server_name = azurerm_sql_server.test.name + collation = "SQL_Latin1_General_CP1_CI_AS" + create_mode = "Default" + requested_service_objective_name = "GP_Gen5_2" +} +resource "azurerm_sql_database" "oozie" { + name = "oozie" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + server_name = azurerm_sql_server.test.name + collation = "SQL_Latin1_General_CP1_CI_AS" + create_mode = "Default" + requested_service_objective_name = "GP_Gen5_2" +} +resource "azurerm_sql_database" "ambari" { + name = "ambari" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + server_name = azurerm_sql_server.test.name + collation = "SQL_Latin1_General_CP1_CI_AS" + create_mode = "Default" + requested_service_objective_name = "GP_Gen5_2" +} +resource "azurerm_sql_firewall_rule" "AzureServices" { + name = "allow-azure-services" + resource_group_name = azurerm_resource_group.test.name + server_name = azurerm_sql_server.test.name + start_ip_address = "0.0.0.0" + end_ip_address = "0.0.0.0" +} +resource "azurerm_hdinsight_hbase_cluster" "test" { + name = "acctesthdi-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + cluster_version = "3.6" + tier = "Standard" + component_version { + hbase = "1.1" + } + gateway { + enabled = true + username = "acctestusrgw" + password = "TerrAform123!" + } + storage_account { + storage_container_id = azurerm_storage_container.test.id + storage_account_key = azurerm_storage_account.test.primary_access_key + is_default = true + } + roles { + head_node { + vm_size = "Standard_D3_v2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + } + worker_node { + vm_size = "Standard_D4_V2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + target_instance_count = 2 + } + zookeeper_node { + vm_size = "Standard_D3_v2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + } + } + metastores { + hive { + server = azurerm_sql_server.test.fully_qualified_domain_name + database_name = azurerm_sql_database.hive.name + username = azurerm_sql_server.test.administrator_login + password = azurerm_sql_server.test.administrator_login_password + } + oozie { + server = azurerm_sql_server.test.fully_qualified_domain_name + database_name = azurerm_sql_database.oozie.name + username = azurerm_sql_server.test.administrator_login + password = azurerm_sql_server.test.administrator_login_password + } + ambari { + server = azurerm_sql_server.test.fully_qualified_domain_name + database_name = azurerm_sql_database.ambari.name + username = azurerm_sql_server.test.administrator_login + password = azurerm_sql_server.test.administrator_login_password + } + } +} +`, template, data.RandomInteger, data.RandomInteger) +} + +func testAccAzureRMHDInsightHBaseCluster_hiveMetastore(data acceptance.TestData) string { + template := testAccAzureRMHDInsightHBaseCluster_template(data) + return fmt.Sprintf(` +%s +resource "azurerm_sql_server" "test" { + name = "acctestsql-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + administrator_login = "sql_admin" + administrator_login_password = "TerrAform123!" 
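+  # Hive-only variant: a single database on this server is all the hive metastore block below needs.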
+ version = "12.0" +} +resource "azurerm_sql_database" "hive" { + name = "hive" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + server_name = azurerm_sql_server.test.name + collation = "SQL_Latin1_General_CP1_CI_AS" + create_mode = "Default" + requested_service_objective_name = "GP_Gen5_2" +} +resource "azurerm_sql_firewall_rule" "AzureServices" { + name = "allow-azure-services" + resource_group_name = azurerm_resource_group.test.name + server_name = azurerm_sql_server.test.name + start_ip_address = "0.0.0.0" + end_ip_address = "0.0.0.0" +} +resource "azurerm_hdinsight_hbase_cluster" "test" { + name = "acctesthdi-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + cluster_version = "3.6" + tier = "Standard" + component_version { + hbase = "1.1" + } + gateway { + enabled = true + username = "acctestusrgw" + password = "TerrAform123!" + } + storage_account { + storage_container_id = azurerm_storage_container.test.id + storage_account_key = azurerm_storage_account.test.primary_access_key + is_default = true + } + roles { + head_node { + vm_size = "Standard_D3_v2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + } + worker_node { + vm_size = "Standard_D4_V2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + target_instance_count = 2 + } + zookeeper_node { + vm_size = "Standard_D3_v2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + } + } + metastores { + hive { + server = azurerm_sql_server.test.fully_qualified_domain_name + database_name = azurerm_sql_database.hive.name + username = azurerm_sql_server.test.administrator_login + password = azurerm_sql_server.test.administrator_login_password + } + } +} +`, template, data.RandomInteger, data.RandomInteger) +} diff --git a/azurerm/internal/services/hdinsight/tests/hdinsight_interactive_query_cluster_resource_test.go b/azurerm/internal/services/hdinsight/tests/hdinsight_interactive_query_cluster_resource_test.go index ed26c92c393a..947a8d1134c9 100644 --- a/azurerm/internal/services/hdinsight/tests/hdinsight_interactive_query_cluster_resource_test.go +++ b/azurerm/internal/services/hdinsight/tests/hdinsight_interactive_query_cluster_resource_test.go @@ -226,6 +226,101 @@ func TestAccAzureRMHDInsightInteractiveQueryCluster_tls(t *testing.T) { }) } +func TestAccAzureRMHDInsightInteractiveQueryCluster_allMetastores(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_hdinsight_interactive_query_cluster", "test") + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMHDInsightClusterDestroy(data.ResourceType), + Steps: []resource.TestStep{ + { + Config: testAccAzureRMHDInsightInteractiveQueryCluster_allMetastores(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account", + "metastores.0.hive.0.password", + "metastores.0.oozie.0.password", + "metastores.0.ambari.0.password"), + }, + }) +} + +func 
TestAccAzureRMHDInsightInteractiveQueryCluster_hiveMetastore(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_hdinsight_interactive_query_cluster", "test") + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMHDInsightClusterDestroy(data.ResourceType), + Steps: []resource.TestStep{ + { + Config: testAccAzureRMHDInsightInteractiveQueryCluster_hiveMetastore(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + }, + }) +} + +func TestAccAzureRMHDInsightInteractiveQueryCluster_updateMetastore(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_hdinsight_interactive_query_cluster", "test") + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMHDInsightClusterDestroy(data.ResourceType), + Steps: []resource.TestStep{ + { + Config: testAccAzureRMHDInsightInteractiveQueryCluster_hiveMetastore(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account", + "metastores.0.hive.0.password", + "metastores.0.oozie.0.password", + "metastores.0.ambari.0.password"), + { + Config: testAccAzureRMHDInsightInteractiveQueryCluster_allMetastores(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account", + "metastores.0.hive.0.password", + "metastores.0.oozie.0.password", + "metastores.0.ambari.0.password"), + }, + }) +} + func testAccAzureRMHDInsightInteractiveQueryCluster_basic(data acceptance.TestData) string { template := testAccAzureRMHDInsightInteractiveQueryCluster_template(data) return fmt.Sprintf(` @@ -793,3 +888,187 @@ resource "azurerm_hdinsight_interactive_query_cluster" "test" { } `, template, data.RandomInteger) } + +func testAccAzureRMHDInsightInteractiveQueryCluster_allMetastores(data acceptance.TestData) string { + template := testAccAzureRMHDInsightInteractiveQueryCluster_template(data) + return fmt.Sprintf(` +%s +resource "azurerm_sql_server" "test" { + name = "acctestsql-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + administrator_login = "sql_admin" + administrator_login_password = "TerrAform123!" 
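+  # The metastores block below addresses this server via its fully_qualified_domain_name export rather than the bare server name.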
+ version = "12.0" +} +resource "azurerm_sql_database" "hive" { + name = "hive" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + server_name = azurerm_sql_server.test.name + collation = "SQL_Latin1_General_CP1_CI_AS" + create_mode = "Default" + requested_service_objective_name = "GP_Gen5_2" +} +resource "azurerm_sql_database" "oozie" { + name = "oozie" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + server_name = azurerm_sql_server.test.name + collation = "SQL_Latin1_General_CP1_CI_AS" + create_mode = "Default" + requested_service_objective_name = "GP_Gen5_2" +} +resource "azurerm_sql_database" "ambari" { + name = "ambari" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + server_name = azurerm_sql_server.test.name + collation = "SQL_Latin1_General_CP1_CI_AS" + create_mode = "Default" + requested_service_objective_name = "GP_Gen5_2" +} +resource "azurerm_sql_firewall_rule" "AzureServices" { + name = "allow-azure-services" + resource_group_name = azurerm_resource_group.test.name + server_name = azurerm_sql_server.test.name + start_ip_address = "0.0.0.0" + end_ip_address = "0.0.0.0" +} +resource "azurerm_hdinsight_interactive_query_cluster" "test" { + name = "acctesthdi-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + cluster_version = "3.6" + tier = "Standard" + component_version { + interactive_hive = "2.1" + } + gateway { + enabled = true + username = "acctestusrgw" + password = "TerrAform123!" + } + storage_account { + storage_container_id = azurerm_storage_container.test.id + storage_account_key = azurerm_storage_account.test.primary_access_key + is_default = true + } + roles { + head_node { + vm_size = "Standard_D13_V2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + } + worker_node { + vm_size = "Standard_D13_V2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + target_instance_count = 2 + } + zookeeper_node { + vm_size = "Standard_D13_V2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + } + } + metastores { + hive { + server = azurerm_sql_server.test.fully_qualified_domain_name + database_name = azurerm_sql_database.hive.name + username = azurerm_sql_server.test.administrator_login + password = azurerm_sql_server.test.administrator_login_password + } + oozie { + server = azurerm_sql_server.test.fully_qualified_domain_name + database_name = azurerm_sql_database.oozie.name + username = azurerm_sql_server.test.administrator_login + password = azurerm_sql_server.test.administrator_login_password + } + ambari { + server = azurerm_sql_server.test.fully_qualified_domain_name + database_name = azurerm_sql_database.ambari.name + username = azurerm_sql_server.test.administrator_login + password = azurerm_sql_server.test.administrator_login_password + } + } +} +`, template, data.RandomInteger, data.RandomInteger) +} + +func testAccAzureRMHDInsightInteractiveQueryCluster_hiveMetastore(data acceptance.TestData) string { + template := testAccAzureRMHDInsightInteractiveQueryCluster_template(data) + return fmt.Sprintf(` +%s +resource "azurerm_sql_server" "test" { + name = "acctestsql-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + administrator_login = "sql_admin" + administrator_login_password = "TerrAform123!" 
+ version = "12.0" +} +resource "azurerm_sql_database" "hive" { + name = "hive" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + server_name = azurerm_sql_server.test.name + collation = "SQL_Latin1_General_CP1_CI_AS" + create_mode = "Default" + requested_service_objective_name = "GP_Gen5_2" +} +resource "azurerm_sql_firewall_rule" "AzureServices" { + name = "allow-azure-services" + resource_group_name = azurerm_resource_group.test.name + server_name = azurerm_sql_server.test.name + start_ip_address = "0.0.0.0" + end_ip_address = "0.0.0.0" +} +resource "azurerm_hdinsight_interactive_query_cluster" "test" { + name = "acctesthdi-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + cluster_version = "3.6" + tier = "Standard" + component_version { + interactive_hive = "2.1" + } + gateway { + enabled = true + username = "acctestusrgw" + password = "TerrAform123!" + } + storage_account { + storage_container_id = azurerm_storage_container.test.id + storage_account_key = azurerm_storage_account.test.primary_access_key + is_default = true + } + roles { + head_node { + vm_size = "Standard_D13_V2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + } + worker_node { + vm_size = "Standard_D13_V2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + target_instance_count = 2 + } + zookeeper_node { + vm_size = "Standard_D13_V2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + } + } + metastores { + hive { + server = azurerm_sql_server.test.fully_qualified_domain_name + database_name = azurerm_sql_database.hive.name + username = azurerm_sql_server.test.administrator_login + password = azurerm_sql_server.test.administrator_login_password + } + } +} +`, template, data.RandomInteger, data.RandomInteger) +} diff --git a/azurerm/internal/services/hdinsight/tests/hdinsight_kafka_cluster_resource_test.go b/azurerm/internal/services/hdinsight/tests/hdinsight_kafka_cluster_resource_test.go index be9e082c04c3..c4433ca0f86c 100644 --- a/azurerm/internal/services/hdinsight/tests/hdinsight_kafka_cluster_resource_test.go +++ b/azurerm/internal/services/hdinsight/tests/hdinsight_kafka_cluster_resource_test.go @@ -229,6 +229,101 @@ func TestAccAzureRMHDInsightKafkaCluster_tls(t *testing.T) { }) } +func TestAccAzureRMHDInsightKafkaCluster_allMetastores(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_hdinsight_kafka_cluster", "test") + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMHDInsightClusterDestroy(data.ResourceType), + Steps: []resource.TestStep{ + { + Config: testAccAzureRMHDInsightKafkaCluster_allMetastores(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account", + "metastores.0.hive.0.password", + "metastores.0.oozie.0.password", + "metastores.0.ambari.0.password"), + }, + }) +} + +func TestAccAzureRMHDInsightKafkaCluster_hiveMetastore(t *testing.T) { + data := 
acceptance.BuildTestData(t, "azurerm_hdinsight_kafka_cluster", "test") + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMHDInsightClusterDestroy(data.ResourceType), + Steps: []resource.TestStep{ + { + Config: testAccAzureRMHDInsightKafkaCluster_hiveMetastore(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + }, + }) +} + +func TestAccAzureRMHDInsightKafkaCluster_updateMetastore(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_hdinsight_kafka_cluster", "test") + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMHDInsightClusterDestroy(data.ResourceType), + Steps: []resource.TestStep{ + { + Config: testAccAzureRMHDInsightKafkaCluster_hiveMetastore(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account", + "metastores.0.hive.0.password", + "metastores.0.oozie.0.password", + "metastores.0.ambari.0.password"), + { + Config: testAccAzureRMHDInsightKafkaCluster_allMetastores(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account", + "metastores.0.hive.0.password", + "metastores.0.oozie.0.password", + "metastores.0.ambari.0.password"), + }, + }) +} + func testAccAzureRMHDInsightKafkaCluster_basic(data acceptance.TestData) string { template := testAccAzureRMHDInsightKafkaCluster_template(data) return fmt.Sprintf(` @@ -804,3 +899,189 @@ resource "azurerm_hdinsight_kafka_cluster" "test" { } `, template, data.RandomInteger) } + +func testAccAzureRMHDInsightKafkaCluster_allMetastores(data acceptance.TestData) string { + template := testAccAzureRMHDInsightKafkaCluster_template(data) + return fmt.Sprintf(` +%s +resource "azurerm_sql_server" "test" { + name = "acctestsql-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + administrator_login = "sql_admin" + administrator_login_password = "TerrAform123!" 
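+  # The 0.0.0.0 firewall rule below is Azure SQL's sentinel for "allow Azure services", letting the HDInsight nodes reach this metastore server.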
+ version = "12.0" +} +resource "azurerm_sql_database" "hive" { + name = "hive" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + server_name = azurerm_sql_server.test.name + collation = "SQL_Latin1_General_CP1_CI_AS" + create_mode = "Default" + requested_service_objective_name = "GP_Gen5_2" +} +resource "azurerm_sql_database" "oozie" { + name = "oozie" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + server_name = azurerm_sql_server.test.name + collation = "SQL_Latin1_General_CP1_CI_AS" + create_mode = "Default" + requested_service_objective_name = "GP_Gen5_2" +} +resource "azurerm_sql_database" "ambari" { + name = "ambari" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + server_name = azurerm_sql_server.test.name + collation = "SQL_Latin1_General_CP1_CI_AS" + create_mode = "Default" + requested_service_objective_name = "GP_Gen5_2" +} +resource "azurerm_sql_firewall_rule" "AzureServices" { + name = "allow-azure-services" + resource_group_name = azurerm_resource_group.test.name + server_name = azurerm_sql_server.test.name + start_ip_address = "0.0.0.0" + end_ip_address = "0.0.0.0" +} +resource "azurerm_hdinsight_kafka_cluster" "test" { + name = "acctesthdi-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + cluster_version = "3.6" + tier = "Standard" + component_version { + kafka = "1.1" + } + gateway { + enabled = true + username = "acctestusrgw" + password = "TerrAform123!" + } + storage_account { + storage_container_id = azurerm_storage_container.test.id + storage_account_key = azurerm_storage_account.test.primary_access_key + is_default = true + } + roles { + head_node { + vm_size = "Standard_D3_v2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + } + worker_node { + vm_size = "Standard_D4_V2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + target_instance_count = 3 + number_of_disks_per_node = 2 + } + zookeeper_node { + vm_size = "Standard_D3_v2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + } + } + metastores { + hive { + server = azurerm_sql_server.test.fully_qualified_domain_name + database_name = azurerm_sql_database.hive.name + username = azurerm_sql_server.test.administrator_login + password = azurerm_sql_server.test.administrator_login_password + } + oozie { + server = azurerm_sql_server.test.fully_qualified_domain_name + database_name = azurerm_sql_database.oozie.name + username = azurerm_sql_server.test.administrator_login + password = azurerm_sql_server.test.administrator_login_password + } + ambari { + server = azurerm_sql_server.test.fully_qualified_domain_name + database_name = azurerm_sql_database.ambari.name + username = azurerm_sql_server.test.administrator_login + password = azurerm_sql_server.test.administrator_login_password + } + } +} +`, template, data.RandomInteger, data.RandomInteger) +} + +func testAccAzureRMHDInsightKafkaCluster_hiveMetastore(data acceptance.TestData) string { + template := testAccAzureRMHDInsightKafkaCluster_template(data) + return fmt.Sprintf(` +%s +resource "azurerm_sql_server" "test" { + name = "acctestsql-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + administrator_login = "sql_admin" + administrator_login_password = "TerrAform123!" 
+ version = "12.0" +} +resource "azurerm_sql_database" "hive" { + name = "hive" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + server_name = azurerm_sql_server.test.name + collation = "SQL_Latin1_General_CP1_CI_AS" + create_mode = "Default" + requested_service_objective_name = "GP_Gen5_2" +} +resource "azurerm_sql_firewall_rule" "AzureServices" { + name = "allow-azure-services" + resource_group_name = azurerm_resource_group.test.name + server_name = azurerm_sql_server.test.name + start_ip_address = "0.0.0.0" + end_ip_address = "0.0.0.0" +} +resource "azurerm_hdinsight_kafka_cluster" "test" { + name = "acctesthdi-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + cluster_version = "3.6" + tier = "Standard" + component_version { + kafka = "1.1" + } + gateway { + enabled = true + username = "acctestusrgw" + password = "TerrAform123!" + } + storage_account { + storage_container_id = azurerm_storage_container.test.id + storage_account_key = azurerm_storage_account.test.primary_access_key + is_default = true + } + roles { + head_node { + vm_size = "Standard_D3_v2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + } + worker_node { + vm_size = "Standard_D4_V2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + target_instance_count = 3 + number_of_disks_per_node = 2 + } + zookeeper_node { + vm_size = "Standard_D3_v2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + } + } + metastores { + hive { + server = azurerm_sql_server.test.fully_qualified_domain_name + database_name = azurerm_sql_database.hive.name + username = azurerm_sql_server.test.administrator_login + password = azurerm_sql_server.test.administrator_login_password + } + } +} +`, template, data.RandomInteger, data.RandomInteger) +} diff --git a/azurerm/internal/services/hdinsight/tests/hdinsight_spark_cluster_resource_test.go b/azurerm/internal/services/hdinsight/tests/hdinsight_spark_cluster_resource_test.go index 83b76fc41748..1be3c4c8742e 100644 --- a/azurerm/internal/services/hdinsight/tests/hdinsight_spark_cluster_resource_test.go +++ b/azurerm/internal/services/hdinsight/tests/hdinsight_spark_cluster_resource_test.go @@ -226,6 +226,101 @@ func TestAccAzureRMHDInsightSparkCluster_tls(t *testing.T) { }) } +func TestAccAzureRMHDInsightSparkCluster_allMetastores(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_hdinsight_spark_cluster", "test") + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMHDInsightClusterDestroy(data.ResourceType), + Steps: []resource.TestStep{ + { + Config: testAccAzureRMHDInsightSparkCluster_allMetastores(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account", + "metastores.0.hive.0.password", + "metastores.0.oozie.0.password", + "metastores.0.ambari.0.password"), + }, + }) +} + +func TestAccAzureRMHDInsightSparkCluster_hiveMetastore(t *testing.T) { + data := 
acceptance.BuildTestData(t, "azurerm_hdinsight_spark_cluster", "test") + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMHDInsightClusterDestroy(data.ResourceType), + Steps: []resource.TestStep{ + { + Config: testAccAzureRMHDInsightSparkCluster_hiveMetastore(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + }, + }) +} + +func TestAccAzureRMHDInsightSparkCluster_updateMetastore(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_hdinsight_spark_cluster", "test") + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMHDInsightClusterDestroy(data.ResourceType), + Steps: []resource.TestStep{ + { + Config: testAccAzureRMHDInsightSparkCluster_hiveMetastore(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account", + "metastores.0.hive.0.password", + "metastores.0.oozie.0.password", + "metastores.0.ambari.0.password"), + { + Config: testAccAzureRMHDInsightSparkCluster_allMetastores(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account", + "metastores.0.hive.0.password", + "metastores.0.oozie.0.password", + "metastores.0.ambari.0.password"), + }, + }) +} + func testAccAzureRMHDInsightSparkCluster_basic(data acceptance.TestData) string { template := testAccAzureRMHDInsightSparkCluster_template(data) return fmt.Sprintf(` @@ -793,3 +888,187 @@ resource "azurerm_hdinsight_spark_cluster" "test" { } `, template, data.RandomInteger) } + +func testAccAzureRMHDInsightSparkCluster_allMetastores(data acceptance.TestData) string { + template := testAccAzureRMHDInsightSparkCluster_template(data) + return fmt.Sprintf(` +%s +resource "azurerm_sql_server" "test" { + name = "acctestsql-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + administrator_login = "sql_admin" + administrator_login_password = "TerrAform123!" 
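+  # The metastore connection uses plain SQL authentication, so this admin login and password are handed straight to the cluster's metastores block.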
+ version = "12.0" +} +resource "azurerm_sql_database" "hive" { + name = "hive" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + server_name = azurerm_sql_server.test.name + collation = "SQL_Latin1_General_CP1_CI_AS" + create_mode = "Default" + requested_service_objective_name = "GP_Gen5_2" +} +resource "azurerm_sql_database" "oozie" { + name = "oozie" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + server_name = azurerm_sql_server.test.name + collation = "SQL_Latin1_General_CP1_CI_AS" + create_mode = "Default" + requested_service_objective_name = "GP_Gen5_2" +} +resource "azurerm_sql_database" "ambari" { + name = "ambari" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + server_name = azurerm_sql_server.test.name + collation = "SQL_Latin1_General_CP1_CI_AS" + create_mode = "Default" + requested_service_objective_name = "GP_Gen5_2" +} +resource "azurerm_sql_firewall_rule" "AzureServices" { + name = "allow-azure-services" + resource_group_name = azurerm_resource_group.test.name + server_name = azurerm_sql_server.test.name + start_ip_address = "0.0.0.0" + end_ip_address = "0.0.0.0" +} +resource "azurerm_hdinsight_spark_cluster" "test" { + name = "acctesthdi-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + cluster_version = "3.6" + tier = "Standard" + component_version { + spark = "2.3" + } + gateway { + enabled = true + username = "acctestusrgw" + password = "TerrAform123!" + } + storage_account { + storage_container_id = azurerm_storage_container.test.id + storage_account_key = azurerm_storage_account.test.primary_access_key + is_default = true + } + roles { + head_node { + vm_size = "Standard_D3_v2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + } + worker_node { + vm_size = "Standard_D4_V2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + target_instance_count = 2 + } + zookeeper_node { + vm_size = "Standard_D3_v2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + } + } + metastores { + hive { + server = azurerm_sql_server.test.fully_qualified_domain_name + database_name = azurerm_sql_database.hive.name + username = azurerm_sql_server.test.administrator_login + password = azurerm_sql_server.test.administrator_login_password + } + oozie { + server = azurerm_sql_server.test.fully_qualified_domain_name + database_name = azurerm_sql_database.oozie.name + username = azurerm_sql_server.test.administrator_login + password = azurerm_sql_server.test.administrator_login_password + } + ambari { + server = azurerm_sql_server.test.fully_qualified_domain_name + database_name = azurerm_sql_database.ambari.name + username = azurerm_sql_server.test.administrator_login + password = azurerm_sql_server.test.administrator_login_password + } + } +} +`, template, data.RandomInteger, data.RandomInteger) +} + +func testAccAzureRMHDInsightSparkCluster_hiveMetastore(data acceptance.TestData) string { + template := testAccAzureRMHDInsightSparkCluster_template(data) + return fmt.Sprintf(` +%s +resource "azurerm_sql_server" "test" { + name = "acctestsql-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + administrator_login = "sql_admin" + administrator_login_password = "TerrAform123!" 
+ version = "12.0" +} +resource "azurerm_sql_database" "hive" { + name = "hive" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + server_name = azurerm_sql_server.test.name + collation = "SQL_Latin1_General_CP1_CI_AS" + create_mode = "Default" + requested_service_objective_name = "GP_Gen5_2" +} +resource "azurerm_sql_firewall_rule" "AzureServices" { + name = "allow-azure-services" + resource_group_name = azurerm_resource_group.test.name + server_name = azurerm_sql_server.test.name + start_ip_address = "0.0.0.0" + end_ip_address = "0.0.0.0" +} +resource "azurerm_hdinsight_spark_cluster" "test" { + name = "acctesthdi-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + cluster_version = "3.6" + tier = "Standard" + component_version { + spark = "2.3" + } + gateway { + enabled = true + username = "acctestusrgw" + password = "TerrAform123!" + } + storage_account { + storage_container_id = azurerm_storage_container.test.id + storage_account_key = azurerm_storage_account.test.primary_access_key + is_default = true + } + roles { + head_node { + vm_size = "Standard_D3_v2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + } + worker_node { + vm_size = "Standard_D4_V2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + target_instance_count = 2 + } + zookeeper_node { + vm_size = "Standard_D3_v2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + } + } + metastores { + hive { + server = azurerm_sql_server.test.fully_qualified_domain_name + database_name = azurerm_sql_database.hive.name + username = azurerm_sql_server.test.administrator_login + password = azurerm_sql_server.test.administrator_login_password + } + } +} +`, template, data.RandomInteger, data.RandomInteger) +} diff --git a/azurerm/internal/services/hdinsight/tests/hdinsight_storm_cluster_resource_test.go b/azurerm/internal/services/hdinsight/tests/hdinsight_storm_cluster_resource_test.go index af62f1de8d98..acbae46f87a9 100644 --- a/azurerm/internal/services/hdinsight/tests/hdinsight_storm_cluster_resource_test.go +++ b/azurerm/internal/services/hdinsight/tests/hdinsight_storm_cluster_resource_test.go @@ -201,6 +201,101 @@ func TestAccAzureRMHDInsightStormCluster_tls(t *testing.T) { }) } +func TestAccAzureRMHDInsightStormCluster_allMetastores(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_hdinsight_storm_cluster", "test") + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMHDInsightClusterDestroy(data.ResourceType), + Steps: []resource.TestStep{ + { + Config: testAccAzureRMHDInsightStormCluster_allMetastores(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account", + "metastores.0.hive.0.password", + "metastores.0.oozie.0.password", + "metastores.0.ambari.0.password"), + }, + }) +} + +func TestAccAzureRMHDInsightStormCluster_hiveMetastore(t *testing.T) { + data := acceptance.BuildTestData(t, 
"azurerm_hdinsight_storm_cluster", "test") + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMHDInsightClusterDestroy(data.ResourceType), + Steps: []resource.TestStep{ + { + Config: testAccAzureRMHDInsightStormCluster_hiveMetastore(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + }, + }) +} + +func TestAccAzureRMHDInsightStormCluster_updateMetastore(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_hdinsight_storm_cluster", "test") + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMHDInsightClusterDestroy(data.ResourceType), + Steps: []resource.TestStep{ + { + Config: testAccAzureRMHDInsightStormCluster_hiveMetastore(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account", + "metastores.0.hive.0.password", + "metastores.0.oozie.0.password", + "metastores.0.ambari.0.password"), + { + Config: testAccAzureRMHDInsightStormCluster_allMetastores(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account", + "metastores.0.hive.0.password", + "metastores.0.oozie.0.password", + "metastores.0.ambari.0.password"), + }, + }) +} + func testAccAzureRMHDInsightStormCluster_basic(data acceptance.TestData) string { template := testAccAzureRMHDInsightStormCluster_template(data) return fmt.Sprintf(` @@ -670,3 +765,187 @@ resource "azurerm_hdinsight_storm_cluster" "test" { } `, template, data.RandomInteger) } + +func testAccAzureRMHDInsightStormCluster_allMetastores(data acceptance.TestData) string { + template := testAccAzureRMHDInsightStormCluster_template(data) + return fmt.Sprintf(` +%s +resource "azurerm_sql_server" "test" { + name = "acctestsql-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + administrator_login = "sql_admin" + administrator_login_password = "TerrAform123!" 
+ version = "12.0" +} +resource "azurerm_sql_database" "hive" { + name = "hive" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + server_name = azurerm_sql_server.test.name + collation = "SQL_Latin1_General_CP1_CI_AS" + create_mode = "Default" + requested_service_objective_name = "GP_Gen5_2" +} +resource "azurerm_sql_database" "oozie" { + name = "oozie" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + server_name = azurerm_sql_server.test.name + collation = "SQL_Latin1_General_CP1_CI_AS" + create_mode = "Default" + requested_service_objective_name = "GP_Gen5_2" +} +resource "azurerm_sql_database" "ambari" { + name = "ambari" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + server_name = azurerm_sql_server.test.name + collation = "SQL_Latin1_General_CP1_CI_AS" + create_mode = "Default" + requested_service_objective_name = "GP_Gen5_2" +} +resource "azurerm_sql_firewall_rule" "AzureServices" { + name = "allow-azure-services" + resource_group_name = azurerm_resource_group.test.name + server_name = azurerm_sql_server.test.name + start_ip_address = "0.0.0.0" + end_ip_address = "0.0.0.0" +} +resource "azurerm_hdinsight_storm_cluster" "test" { + name = "acctesthdi-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + cluster_version = "3.6" + tier = "Standard" + component_version { + storm = "1.1" + } + gateway { + enabled = true + username = "acctestusrgw" + password = "TerrAform123!" + } + storage_account { + storage_container_id = azurerm_storage_container.test.id + storage_account_key = azurerm_storage_account.test.primary_access_key + is_default = true + } + roles { + head_node { + vm_size = "Standard_D3_v2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + } + worker_node { + vm_size = "Standard_D4_V2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + target_instance_count = 2 + } + zookeeper_node { + vm_size = "Standard_D3_v2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + } + } + metastores { + hive { + server = azurerm_sql_server.test.fully_qualified_domain_name + database_name = azurerm_sql_database.hive.name + username = azurerm_sql_server.test.administrator_login + password = azurerm_sql_server.test.administrator_login_password + } + oozie { + server = azurerm_sql_server.test.fully_qualified_domain_name + database_name = azurerm_sql_database.oozie.name + username = azurerm_sql_server.test.administrator_login + password = azurerm_sql_server.test.administrator_login_password + } + ambari { + server = azurerm_sql_server.test.fully_qualified_domain_name + database_name = azurerm_sql_database.ambari.name + username = azurerm_sql_server.test.administrator_login + password = azurerm_sql_server.test.administrator_login_password + } + } +} +`, template, data.RandomInteger, data.RandomInteger) +} + +func testAccAzureRMHDInsightStormCluster_hiveMetastore(data acceptance.TestData) string { + template := testAccAzureRMHDInsightStormCluster_template(data) + return fmt.Sprintf(` +%s +resource "azurerm_sql_server" "test" { + name = "acctestsql-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + administrator_login = "sql_admin" + administrator_login_password = "TerrAform123!" 
+ version = "12.0" +} +resource "azurerm_sql_database" "hive" { + name = "hive" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + server_name = azurerm_sql_server.test.name + collation = "SQL_Latin1_General_CP1_CI_AS" + create_mode = "Default" + requested_service_objective_name = "GP_Gen5_2" +} +resource "azurerm_sql_firewall_rule" "AzureServices" { + name = "allow-azure-services" + resource_group_name = azurerm_resource_group.test.name + server_name = azurerm_sql_server.test.name + start_ip_address = "0.0.0.0" + end_ip_address = "0.0.0.0" +} +resource "azurerm_hdinsight_storm_cluster" "test" { + name = "acctesthdi-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + cluster_version = "3.6" + tier = "Standard" + component_version { + storm = "1.1" + } + gateway { + enabled = true + username = "acctestusrgw" + password = "TerrAform123!" + } + storage_account { + storage_container_id = azurerm_storage_container.test.id + storage_account_key = azurerm_storage_account.test.primary_access_key + is_default = true + } + roles { + head_node { + vm_size = "Standard_D3_v2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + } + worker_node { + vm_size = "Standard_D4_V2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + target_instance_count = 2 + } + zookeeper_node { + vm_size = "Standard_D3_v2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + } + } + metastores { + hive { + server = azurerm_sql_server.test.fully_qualified_domain_name + database_name = azurerm_sql_database.hive.name + username = azurerm_sql_server.test.administrator_login + password = azurerm_sql_server.test.administrator_login_password + } + } +} +`, template, data.RandomInteger, data.RandomInteger) +} diff --git a/vendor/github.com/tombuildsstuff/giovanni/storage/2018-11-09/datalakestore/paths/client.go b/vendor/github.com/tombuildsstuff/giovanni/storage/2018-11-09/datalakestore/paths/client.go deleted file mode 100644 index 3fb087bf8c8d..000000000000 --- a/vendor/github.com/tombuildsstuff/giovanni/storage/2018-11-09/datalakestore/paths/client.go +++ /dev/null @@ -1,25 +0,0 @@ -package paths - -import ( - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" -) - -// Client is the base client for Data Lake Storage Path -type Client struct { - autorest.Client - BaseURI string -} - -// New creates an instance of the Data Lake Storage Path client. -func New() Client { - return NewWithEnvironment(azure.PublicCloud) -} - -// NewWithEnvironment creates an instance of the Data Lake Storage Path client. 
-func NewWithEnvironment(environment azure.Environment) Client { - return Client{ - Client: autorest.NewClientWithUserAgent(UserAgent()), - BaseURI: environment.StorageEndpointSuffix, - } -} diff --git a/vendor/github.com/tombuildsstuff/giovanni/storage/2018-11-09/datalakestore/paths/create.go b/vendor/github.com/tombuildsstuff/giovanni/storage/2018-11-09/datalakestore/paths/create.go deleted file mode 100644 index 600bd75a9a87..000000000000 --- a/vendor/github.com/tombuildsstuff/giovanni/storage/2018-11-09/datalakestore/paths/create.go +++ /dev/null @@ -1,95 +0,0 @@ -package paths - -import ( - "context" - "net/http" - - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/tombuildsstuff/giovanni/storage/internal/endpoints" -) - -type PathResource string - -const PathResourceFile PathResource = "file" -const PathResourceDirectory PathResource = "directory" - -type CreateInput struct { - Resource PathResource -} - -// Create creates a Data Lake Store Gen2 Path within a Storage Account -func (client Client) Create(ctx context.Context, accountName string, fileSystemName string, path string, input CreateInput) (result autorest.Response, err error) { - if accountName == "" { - return result, validation.NewError("datalakestore.Client", "Create", "`accountName` cannot be an empty string.") - } - if fileSystemName == "" { - return result, validation.NewError("datalakestore.Client", "Create", "`fileSystemName` cannot be an empty string.") - } - - req, err := client.CreatePreparer(ctx, accountName, fileSystemName, path, input) - if err != nil { - err = autorest.NewErrorWithError(err, "datalakestore.Client", "Create", nil, "Failure preparing request") - return - } - - resp, err := client.CreateSender(req) - if err != nil { - result = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "datalakestore.Client", "Create", resp, "Failure sending request") - return - } - - result, err = client.CreateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "datalakestore.Client", "Create", resp, "Failure responding to request") - } - - return -} - -// CreatePreparer prepares the Create request. -func (client Client) CreatePreparer(ctx context.Context, accountName string, fileSystemName string, path string, input CreateInput) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "fileSystemName": autorest.Encode("path", fileSystemName), - "path": autorest.Encode("path", path), - } - - queryParameters := map[string]interface{}{ - "resource": autorest.Encode("query", input.Resource), - } - - headers := map[string]interface{}{ - "x-ms-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPut(), - autorest.WithBaseURL(endpoints.GetDataLakeStoreEndpoint(client.BaseURI, accountName)), - autorest.WithPathParameters("/{fileSystemName}/{path}", pathParameters), - autorest.WithQueryParameters(queryParameters), - autorest.WithHeaders(headers)) - - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CreateSender sends the Create request. The method will close the -// http.Response Body if it receives an error. -func (client Client) CreateSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) -} - -// CreateResponder handles the response to the Create request. The method always -// closes the http.Response Body. 
-func (client Client) CreateResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusCreated), - autorest.ByClosing()) - result = autorest.Response{Response: resp} - - return -} diff --git a/vendor/github.com/tombuildsstuff/giovanni/storage/2018-11-09/datalakestore/paths/delete.go b/vendor/github.com/tombuildsstuff/giovanni/storage/2018-11-09/datalakestore/paths/delete.go deleted file mode 100644 index abe122d74434..000000000000 --- a/vendor/github.com/tombuildsstuff/giovanni/storage/2018-11-09/datalakestore/paths/delete.go +++ /dev/null @@ -1,81 +0,0 @@ -package paths - -import ( - "context" - "net/http" - - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/tombuildsstuff/giovanni/storage/internal/endpoints" -) - -// Delete deletes a Data Lake Store Gen2 FileSystem within a Storage Account -func (client Client) Delete(ctx context.Context, accountName string, fileSystemName string, path string) (result autorest.Response, err error) { - if accountName == "" { - return result, validation.NewError("datalakestore.Client", "Delete", "`accountName` cannot be an empty string.") - } - if fileSystemName == "" { - return result, validation.NewError("datalakestore.Client", "Delete", "`fileSystemName` cannot be an empty string.") - } - - req, err := client.DeletePreparer(ctx, accountName, fileSystemName, path) - if err != nil { - err = autorest.NewErrorWithError(err, "datalakestore.Client", "Delete", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteSender(req) - if err != nil { - result = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "datalakestore.Client", "Delete", resp, "Failure sending request") - return - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "datalakestore.Client", "Delete", resp, "Failure responding to request") - } - - return -} - -// DeletePreparer prepares the Delete request. -func (client Client) DeletePreparer(ctx context.Context, accountName string, fileSystemName string, path string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "fileSystemName": autorest.Encode("path", fileSystemName), - "path": autorest.Encode("path", fileSystemName), - } - - headers := map[string]interface{}{ - "x-ms-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithBaseURL(endpoints.GetDataLakeStoreEndpoint(client.BaseURI, accountName)), - autorest.WithPathParameters("/{fileSystemName}/{path}", pathParameters), - autorest.WithHeaders(headers)) - - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteSender sends the Delete request. The method will close the -// http.Response Body if it receives an error. -func (client Client) DeleteSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) -} - -// DeleteResponder handles the response to the Delete request. The method always -// closes the http.Response Body. 
-func (client Client) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusAccepted), - autorest.ByClosing()) - result = autorest.Response{Response: resp} - - return -} diff --git a/vendor/github.com/tombuildsstuff/giovanni/storage/2018-11-09/datalakestore/paths/helpers.go b/vendor/github.com/tombuildsstuff/giovanni/storage/2018-11-09/datalakestore/paths/helpers.go deleted file mode 100644 index e6ef6f4bfb9f..000000000000 --- a/vendor/github.com/tombuildsstuff/giovanni/storage/2018-11-09/datalakestore/paths/helpers.go +++ /dev/null @@ -1,15 +0,0 @@ -package paths - -import ( - "fmt" -) - -func parsePathResource(input string) (PathResource, error) { - switch input { - case "file": - return PathResourceFile, nil - case "directory": - return PathResourceDirectory, nil - } - return "", fmt.Errorf("Unhandled path resource type %q", input) -} diff --git a/vendor/github.com/tombuildsstuff/giovanni/storage/2018-11-09/datalakestore/paths/properties_get.go b/vendor/github.com/tombuildsstuff/giovanni/storage/2018-11-09/datalakestore/paths/properties_get.go deleted file mode 100644 index 9120d72e029d..000000000000 --- a/vendor/github.com/tombuildsstuff/giovanni/storage/2018-11-09/datalakestore/paths/properties_get.go +++ /dev/null @@ -1,134 +0,0 @@ -package paths - -import ( - "context" - "net/http" - "time" - - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/tombuildsstuff/giovanni/storage/internal/endpoints" -) - -type GetPropertiesResponse struct { - autorest.Response - - ETag string - LastModified time.Time - // ResourceType is only returned for GetPropertiesActionGetStatus requests - ResourceType PathResource - Owner string - Group string - // ACL is only returned for GetPropertiesActionGetAccessControl requests - ACL string -} - -type GetPropertiesAction string - -const ( - GetPropertiesActionGetStatus GetPropertiesAction = "getStatus" - GetPropertiesActionGetAccessControl GetPropertiesAction = "getAccessControl" -) - -// GetProperties gets the properties for a Data Lake Store Gen2 Path in a FileSystem within a Storage Account -func (client Client) GetProperties(ctx context.Context, accountName string, fileSystemName string, path string, action GetPropertiesAction) (result GetPropertiesResponse, err error) { - if accountName == "" { - return result, validation.NewError("datalakestore.Client", "GetProperties", "`accountName` cannot be an empty string.") - } - if fileSystemName == "" { - return result, validation.NewError("datalakestore.Client", "GetProperties", "`fileSystemName` cannot be an empty string.") - } - - req, err := client.GetPropertiesPreparer(ctx, accountName, fileSystemName, path, action) - if err != nil { - err = autorest.NewErrorWithError(err, "datalakestore.Client", "GetProperties", nil, "Failure preparing request") - return - } - - resp, err := client.GetPropertiesSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "datalakestore.Client", "GetProperties", resp, "Failure sending request") - return - } - - result, err = client.GetPropertiesResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "datalakestore.Client", "GetProperties", resp, "Failure responding to request") - } - - return -} - -// GetPropertiesPreparer prepares the GetProperties request. 
-func (client Client) GetPropertiesPreparer(ctx context.Context, accountName string, fileSystemName string, path string, action GetPropertiesAction) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "fileSystemName": autorest.Encode("path", fileSystemName), - "path": autorest.Encode("path", path), - } - - queryParameters := map[string]interface{}{ - "action": autorest.Encode("query", string(action)), - } - - headers := map[string]interface{}{ - "x-ms-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsHead(), - autorest.WithBaseURL(endpoints.GetDataLakeStoreEndpoint(client.BaseURI, accountName)), - autorest.WithPathParameters("/{fileSystemName}/{path}", pathParameters), - autorest.WithQueryParameters(queryParameters), - autorest.WithHeaders(headers)) - - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetPropertiesSender sends the GetProperties request. The method will close the -// http.Response Body if it receives an error. -func (client Client) GetPropertiesSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) -} - -// GetPropertiesResponder handles the response to the GetProperties request. The method always -// closes the http.Response Body. -func (client Client) GetPropertiesResponder(resp *http.Response) (result GetPropertiesResponse, err error) { - result = GetPropertiesResponse{} - if resp != nil && resp.Header != nil { - - resourceTypeRaw := resp.Header.Get("x-ms-resource-type") - var resourceType PathResource - if resourceTypeRaw != "" { - resourceType, err = parsePathResource(resourceTypeRaw) - if err != nil { - return GetPropertiesResponse{}, err - } - result.ResourceType = resourceType - } - result.ETag = resp.Header.Get("ETag") - - if lastModifiedRaw := resp.Header.Get("Last-Modified"); lastModifiedRaw != "" { - lastModified, err := time.Parse(time.RFC1123, lastModifiedRaw) - if err != nil { - return GetPropertiesResponse{}, err - } - result.LastModified = lastModified - } - - result.Owner = resp.Header.Get("x-ms-owner") - result.Group = resp.Header.Get("x-ms-group") - result.ACL = resp.Header.Get("x-ms-acl") - } - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - - return result, nil -} diff --git a/vendor/github.com/tombuildsstuff/giovanni/storage/2018-11-09/datalakestore/paths/properties_set.go b/vendor/github.com/tombuildsstuff/giovanni/storage/2018-11-09/datalakestore/paths/properties_set.go deleted file mode 100644 index 7c0d677647d0..000000000000 --- a/vendor/github.com/tombuildsstuff/giovanni/storage/2018-11-09/datalakestore/paths/properties_set.go +++ /dev/null @@ -1,116 +0,0 @@ -package paths - -import ( - "context" - "net/http" - - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/tombuildsstuff/giovanni/storage/internal/endpoints" -) - -type SetAccessControlInput struct { - Owner *string - Group *string - ACL *string - - // Optional - A date and time value. - // Specify this header to perform the operation only if the resource has been modified since the specified date and time. - IfModifiedSince *string - - // Optional - A date and time value. 
- // Specify this header to perform the operation only if the resource has not been modified since the specified date and time. - IfUnmodifiedSince *string -} - -// SetProperties sets the access control properties for a Data Lake Store Gen2 Path within a Storage Account File System -func (client Client) SetAccessControl(ctx context.Context, accountName string, fileSystemName string, path string, input SetAccessControlInput) (result autorest.Response, err error) { - if accountName == "" { - return result, validation.NewError("datalakestore.Client", "SetAccessControl", "`accountName` cannot be an empty string.") - } - if fileSystemName == "" { - return result, validation.NewError("datalakestore.Client", "SetAccessControl", "`fileSystemName` cannot be an empty string.") - } - - req, err := client.SetAccessControlPreparer(ctx, accountName, fileSystemName, path, input) - if err != nil { - err = autorest.NewErrorWithError(err, "datalakestore.Client", "SetAccessControl", nil, "Failure preparing request") - return - } - - resp, err := client.SetAccessControlSender(req) - if err != nil { - result = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "datalakestore.Client", "SetAccessControl", resp, "Failure sending request") - return - } - - result, err = client.SetAccessControlResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "datalakestore.Client", "SetAccessControl", resp, "Failure responding to request") - } - - return -} - -// SetAccessControlPreparer prepares the SetAccessControl request. -func (client Client) SetAccessControlPreparer(ctx context.Context, accountName string, fileSystemName string, path string, input SetAccessControlInput) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "fileSystemName": autorest.Encode("path", fileSystemName), - "path": autorest.Encode("path", path), - } - - queryParameters := map[string]interface{}{ - "action": autorest.Encode("query", "setAccessControl"), - } - - headers := map[string]interface{}{ - "x-ms-version": APIVersion, - } - - if input.Owner != nil { - headers["x-ms-owner"] = *input.Owner - } - if input.Group != nil { - headers["x-ms-group"] = *input.Group - } - if input.ACL != nil { - headers["x-ms-acl"] = *input.ACL - } - - if input.IfModifiedSince != nil { - headers["If-Modified-Since"] = *input.IfModifiedSince - } - if input.IfUnmodifiedSince != nil { - headers["If-Unmodified-Since"] = *input.IfUnmodifiedSince - } - - preparer := autorest.CreatePreparer( - autorest.AsPatch(), - autorest.WithBaseURL(endpoints.GetDataLakeStoreEndpoint(client.BaseURI, accountName)), - autorest.WithPathParameters("/{fileSystemName}/{path}", pathParameters), - autorest.WithQueryParameters(queryParameters), - autorest.WithHeaders(headers)) - - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// SetAccessControlSender sends the SetAccessControl request. The method will close the -// http.Response Body if it receives an error. -func (client Client) SetAccessControlSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req, - azure.DoRetryWithRegistration(client.Client)) -} - -// SetAccessControlResponder handles the response to the SetAccessControl request. The method always -// closes the http.Response Body. 
-func (client Client) SetAccessControlResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByClosing()) - result = autorest.Response{Response: resp} - return -} diff --git a/vendor/github.com/tombuildsstuff/giovanni/storage/2018-11-09/datalakestore/paths/resource_id.go b/vendor/github.com/tombuildsstuff/giovanni/storage/2018-11-09/datalakestore/paths/resource_id.go deleted file mode 100644 index c32841877067..000000000000 --- a/vendor/github.com/tombuildsstuff/giovanni/storage/2018-11-09/datalakestore/paths/resource_id.go +++ /dev/null @@ -1,57 +0,0 @@ -package paths - -import ( - "fmt" - "net/url" - "strings" - - "github.com/tombuildsstuff/giovanni/storage/internal/endpoints" -) - -// GetResourceID returns the Resource ID for the given Data Lake Storage FileSystem -// This can be useful when, for example, you're using this as a unique identifier -func (client Client) GetResourceID(accountName, fileSystemName, path string) string { - domain := endpoints.GetDataLakeStoreEndpoint(client.BaseURI, accountName) - return fmt.Sprintf("%s/%s/%s", domain, fileSystemName, path) -} - -type ResourceID struct { - AccountName string - FileSystemName string - Path string -} - -// ParseResourceID parses the specified Resource ID and returns an object -// which can be used to interact with the Data Lake Storage FileSystem API's -func ParseResourceID(id string) (*ResourceID, error) { - // example: https://foo.dfs.core.windows.net/Bar - if id == "" { - return nil, fmt.Errorf("`id` was empty") - } - - uri, err := url.Parse(id) - if err != nil { - return nil, fmt.Errorf("Error parsing ID as a URL: %s", err) - } - - accountName, err := endpoints.GetAccountNameFromEndpoint(uri.Host) - if err != nil { - return nil, fmt.Errorf("Error parsing Account Name: %s", err) - } - - fileSystemAndPath := strings.TrimPrefix(uri.Path, "/") - separatorIndex := strings.Index(fileSystemAndPath, "/") - var fileSystem, path string - if separatorIndex < 0 { - fileSystem = fileSystemAndPath - path = "" - } else { - fileSystem = fileSystemAndPath[0:separatorIndex] - path = fileSystemAndPath[separatorIndex+1:] - } - return &ResourceID{ - AccountName: *accountName, - FileSystemName: fileSystem, - Path: path, - }, nil -} diff --git a/vendor/github.com/tombuildsstuff/giovanni/storage/2018-11-09/datalakestore/paths/version.go b/vendor/github.com/tombuildsstuff/giovanni/storage/2018-11-09/datalakestore/paths/version.go deleted file mode 100644 index 2148b768007f..000000000000 --- a/vendor/github.com/tombuildsstuff/giovanni/storage/2018-11-09/datalakestore/paths/version.go +++ /dev/null @@ -1,14 +0,0 @@ -package paths - -import ( - "fmt" - - "github.com/tombuildsstuff/giovanni/version" -) - -// APIVersion is the version of the API used for all Storage API Operations -const APIVersion = "2018-11-09" - -func UserAgent() string { - return fmt.Sprintf("tombuildsstuff/giovanni/%s storage/%s", version.Number, APIVersion) -} diff --git a/vendor/github.com/tombuildsstuff/giovanni/storage/accesscontrol/ace.go b/vendor/github.com/tombuildsstuff/giovanni/storage/accesscontrol/ace.go deleted file mode 100644 index f2c140ece9c3..000000000000 --- a/vendor/github.com/tombuildsstuff/giovanni/storage/accesscontrol/ace.go +++ /dev/null @@ -1,123 +0,0 @@ -package accesscontrol - -import ( - "fmt" - "regexp" - "strings" - - "github.com/google/uuid" -) - -type TagType string - -const ( - TagTypeUser TagType = "user" - 
TagTypeGroup TagType = "group" - TagTypeMask TagType = "mask" - TagTypeOther TagType = "other" -) - -// ACE is roughly modelled on https://linux.die.net/man/5/acl -// Have collapsed ACL_USER_OBJ to be TagType of user with a nil Qualifier - -type ACE struct { - IsDefault bool - TagType TagType - TagQualifier *uuid.UUID - Permissions string // TODO break the rwx into permission flags? -} - -var permissionsRegex *regexp.Regexp - -func init() { - permissionsRegex = regexp.MustCompile("[r-][w-][x-]") -} - -// ValidateACEPermissions checks the format of the ACE permission string. Returns nil on success. -func ValidateACEPermissions(permissions string) error { - if !permissionsRegex.MatchString(permissions) { - return fmt.Errorf("Permissions must be of the form [r-][w-][x-]") - } - return nil -} - -// Validate checks the formatting and combination of values in the ACE. Returns nil on success -func (ace *ACE) Validate() error { - switch ace.TagType { - case TagTypeMask, TagTypeOther: - if ace.TagQualifier != nil { - return fmt.Errorf("TagQualifier cannot be set for 'mask' or 'other' TagTypes") - } - } - - if err := ValidateACEPermissions(ace.Permissions); err != nil { - return err - } - - if err := validateTagType(ace.TagType); err != nil { - return err - } - - return nil -} - -// ParseACE parses an ACE string and returns the ACE -func ParseACE(input string) (ACE, error) { - ace := ACE{} - - parts := strings.Split(input, ":") - if len(parts) == 4 { - if parts[0] == "default" { - ace.IsDefault = true - parts = parts[1:] - } else { - return ACE{}, fmt.Errorf("When specifying a 4-part ACE the first part must be 'default'") - } - } - - if len(parts) != 3 { - return ACE{}, fmt.Errorf("ACE string should have either 3 or 4 parts") - } - - ace.TagType = TagType(parts[0]) - - qualiferString := parts[1] - if qualiferString != "" { - qualifier, err := uuid.Parse(qualiferString) - if err != nil { - return ACE{}, fmt.Errorf("Error parsing qualifer %q: %s", qualiferString, err) - } - ace.TagQualifier = &qualifier - } - - ace.Permissions = parts[2] - - if err := ace.Validate(); err != nil { - return ACE{}, err - } - return ace, nil -} - -// String returns the string form of the ACE - this does not check that it is valid -func (ace *ACE) String() string { - prefix := "" - if ace.IsDefault { - prefix = "default:" - } - qualifierString := "" - if ace.TagQualifier != nil { - qualifierString = ace.TagQualifier.String() - } - return fmt.Sprintf("%s%s:%s:%s", prefix, ace.TagType, qualifierString, ace.Permissions) -} - -func validateTagType(tagType TagType) error { - switch tagType { - case TagTypeUser, - TagTypeGroup, - TagTypeMask, - TagTypeOther: - return nil - } - return fmt.Errorf("Unrecognized TagType: %q", tagType) -} diff --git a/vendor/github.com/tombuildsstuff/giovanni/storage/accesscontrol/acl.go b/vendor/github.com/tombuildsstuff/giovanni/storage/accesscontrol/acl.go deleted file mode 100644 index 1dbd3fefb01a..000000000000 --- a/vendor/github.com/tombuildsstuff/giovanni/storage/accesscontrol/acl.go +++ /dev/null @@ -1,53 +0,0 @@ -package accesscontrol - -import ( - "strings" -) - -type ACL struct { - Entries []ACE -} - -// Validate checks the ACL. 
Returns nil on success -func (acl *ACL) Validate() error { - - // TODO - // - check each user/group is only listed once (per default/non-default) - - for _, v := range acl.Entries { - if err := v.Validate(); err != nil { - return err - } - } - - return nil -} - -// ParseACL parses an ACL string -func ParseACL(input string) (ACL, error) { - - aceStrings := strings.Split(input, ",") - entries := make([]ACE, len(aceStrings)) - - for i := 0; i < len(aceStrings); i++ { - aceString := aceStrings[i] - entry, err := ParseACE(aceString) - if err != nil { - return ACL{}, err - } - entries[i] = entry - } - return ACL{Entries: entries}, nil -} - -// String returns the string form of the ACL - this does not check that it is valid -func (acl *ACL) String() string { - - aceStrings := make([]string, len(acl.Entries)) - - for i := 0; i < len(acl.Entries); i++ { - ace := acl.Entries[i] - aceStrings[i] = ace.String() - } - return strings.Join(aceStrings, ",") -} diff --git a/website/docs/r/hdinsight_hbase_cluster.html.markdown b/website/docs/r/hdinsight_hbase_cluster.html.markdown index dcf22a2ce697..08ced804b8b4 100644 --- a/website/docs/r/hdinsight_hbase_cluster.html.markdown +++ b/website/docs/r/hdinsight_hbase_cluster.html.markdown @@ -110,6 +110,8 @@ The following arguments are supported: * `tags` - (Optional) A map of Tags which should be assigned to this HDInsight HBase Cluster. +* `metastores` - (Optional) A `metastores` block as defined below. + --- A `component_version` block supports the following: @@ -232,6 +234,53 @@ A `zookeeper_node` block supports the following: * `virtual_network_id` - (Optional) The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. +--- + +A `metastores` block supports the following: + +* `hive` - (Optional) A `hive` block as defined below. + +* `oozie` - (Optional) An `oozie` block as defined below. + +* `ambari` - (Optional) An `ambari` block as defined below. + +--- + +A `hive` block supports the following: + +* `server` - (Required) The fully-qualified domain name (FQDN) of the SQL server to use for the external Hive metastore. Changing this forces a new resource to be created. + +* `database_name` - (Required) The external Hive metastore's existing SQL database. Changing this forces a new resource to be created. + +* `username` - (Required) The external Hive metastore's existing SQL server admin username. Changing this forces a new resource to be created. + +* `password` - (Required) The external Hive metastore's existing SQL server admin password. Changing this forces a new resource to be created. + + +--- + +An `oozie` block supports the following: + +* `server` - (Required) The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created. + +* `database_name` - (Required) The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created. + +* `username` - (Required) The external Oozie metastore's existing SQL server admin username. Changing this forces a new resource to be created. + +* `password` - (Required) The external Oozie metastore's existing SQL server admin password. Changing this forces a new resource to be created. + +--- + +An `ambari` block supports the following: + +* `server` - (Required) The fully-qualified domain name (FQDN) of the SQL server to use for the external Ambari metastore. Changing this forces a new resource to be created. 
+
+* `database_name` - (Required) The external Ambari metastore's existing SQL database. Changing this forces a new resource to be created.
+
+* `username` - (Required) The external Ambari metastore's existing SQL server admin username. Changing this forces a new resource to be created.
+
+* `password` - (Required) The external Ambari metastore's existing SQL server admin password. Changing this forces a new resource to be created.
+
 ## Attributes Reference

 The following attributes are exported:
diff --git a/website/docs/r/hdinsight_interactive_query_cluster.html.markdown b/website/docs/r/hdinsight_interactive_query_cluster.html.markdown
index 3273260f62df..e6d6460a3ba2 100644
--- a/website/docs/r/hdinsight_interactive_query_cluster.html.markdown
+++ b/website/docs/r/hdinsight_interactive_query_cluster.html.markdown
@@ -109,6 +109,8 @@ The following arguments are supported:

 * `tags` - (Optional) A map of Tags which should be assigned to this HDInsight Interactive Query Cluster.

+* `metastores` - (Optional) A `metastores` block as defined below.
+
 ---

 A `component_version` block supports the following:
@@ -235,6 +237,53 @@ A `zookeeper_node` block supports the following:

 * `virtual_network_id` - (Optional) The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created.

+---
+
+A `metastores` block supports the following:
+
+* `hive` - (Optional) A `hive` block as defined below.
+
+* `oozie` - (Optional) An `oozie` block as defined below.
+
+* `ambari` - (Optional) An `ambari` block as defined below.
+
+---
+
+A `hive` block supports the following:
+
+* `server` - (Required) The fully-qualified domain name (FQDN) of the SQL server to use for the external Hive metastore. Changing this forces a new resource to be created.
+
+* `database_name` - (Required) The external Hive metastore's existing SQL database. Changing this forces a new resource to be created.
+
+* `username` - (Required) The external Hive metastore's existing SQL server admin username. Changing this forces a new resource to be created.
+
+* `password` - (Required) The external Hive metastore's existing SQL server admin password. Changing this forces a new resource to be created.
+
+
+---
+
+An `oozie` block supports the following:
+
+* `server` - (Required) The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created.
+
+* `database_name` - (Required) The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created.
+
+* `username` - (Required) The external Oozie metastore's existing SQL server admin username. Changing this forces a new resource to be created.
+
+* `password` - (Required) The external Oozie metastore's existing SQL server admin password. Changing this forces a new resource to be created.
+
+---
+
+An `ambari` block supports the following:
+
+* `server` - (Required) The fully-qualified domain name (FQDN) of the SQL server to use for the external Ambari metastore. Changing this forces a new resource to be created.
+
+* `database_name` - (Required) The external Ambari metastore's existing SQL database. Changing this forces a new resource to be created.
+
+* `username` - (Required) The external Ambari metastore's existing SQL server admin username. Changing this forces a new resource to be created.
+
+* `password` - (Required) The external Ambari metastore's existing SQL server admin password. Changing this forces a new resource to be created.
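+
+For reference, a `metastores` block that points all three metastores at databases on a single SQL server might look like the following sketch. The `azurerm_sql_server.example` and `azurerm_sql_database.*` resources are illustrative placeholders assumed to be defined elsewhere in the configuration:
+
+```hcl
+metastores {
+  # NOTE: the SQL server and databases referenced below are assumed to
+  # already exist elsewhere in this configuration.
+  hive {
+    server        = azurerm_sql_server.example.fully_qualified_domain_name
+    database_name = azurerm_sql_database.hive.name
+    username      = azurerm_sql_server.example.administrator_login
+    password      = azurerm_sql_server.example.administrator_login_password
+  }
+
+  oozie {
+    server        = azurerm_sql_server.example.fully_qualified_domain_name
+    database_name = azurerm_sql_database.oozie.name
+    username      = azurerm_sql_server.example.administrator_login
+    password      = azurerm_sql_server.example.administrator_login_password
+  }
+
+  ambari {
+    server        = azurerm_sql_server.example.fully_qualified_domain_name
+    database_name = azurerm_sql_database.ambari.name
+    username      = azurerm_sql_server.example.administrator_login
+    password      = azurerm_sql_server.example.administrator_login_password
+  }
+}
+```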
+
 ## Attributes Reference

 The following attributes are exported:
diff --git a/website/docs/r/hdinsight_kafka_cluster.html.markdown b/website/docs/r/hdinsight_kafka_cluster.html.markdown
index 197d9ca45aba..93bbcba60962 100644
--- a/website/docs/r/hdinsight_kafka_cluster.html.markdown
+++ b/website/docs/r/hdinsight_kafka_cluster.html.markdown
@@ -111,6 +111,8 @@ The following arguments are supported:

 * `tags` - (Optional) A map of Tags which should be assigned to this HDInsight Kafka Cluster.

+* `metastores` - (Optional) A `metastores` block as defined below.
+
 ---

 A `component_version` block supports the following:
@@ -235,6 +237,53 @@ A `zookeeper_node` block supports the following:

 * `virtual_network_id` - (Optional) The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created.

+---
+
+A `metastores` block supports the following:
+
+* `hive` - (Optional) A `hive` block as defined below.
+
+* `oozie` - (Optional) An `oozie` block as defined below.
+
+* `ambari` - (Optional) An `ambari` block as defined below.
+
+---
+
+A `hive` block supports the following:
+
+* `server` - (Required) The fully-qualified domain name (FQDN) of the SQL server to use for the external Hive metastore. Changing this forces a new resource to be created.
+
+* `database_name` - (Required) The external Hive metastore's existing SQL database. Changing this forces a new resource to be created.
+
+* `username` - (Required) The external Hive metastore's existing SQL server admin username. Changing this forces a new resource to be created.
+
+* `password` - (Required) The external Hive metastore's existing SQL server admin password. Changing this forces a new resource to be created.
+
+
+---
+
+An `oozie` block supports the following:
+
+* `server` - (Required) The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created.
+
+* `database_name` - (Required) The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created.
+
+* `username` - (Required) The external Oozie metastore's existing SQL server admin username. Changing this forces a new resource to be created.
+
+* `password` - (Required) The external Oozie metastore's existing SQL server admin password. Changing this forces a new resource to be created.
+
+---
+
+An `ambari` block supports the following:
+
+* `server` - (Required) The fully-qualified domain name (FQDN) of the SQL server to use for the external Ambari metastore. Changing this forces a new resource to be created.
+
+* `database_name` - (Required) The external Ambari metastore's existing SQL database. Changing this forces a new resource to be created.
+
+* `username` - (Required) The external Ambari metastore's existing SQL server admin username. Changing this forces a new resource to be created.
+
+* `password` - (Required) The external Ambari metastore's existing SQL server admin password. Changing this forces a new resource to be created.
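+
+The `metastores` block sits at the top level of the cluster resource, alongside `gateway` and `roles`. A trimmed sketch follows; the resource names and values are illustrative, and the other required blocks are omitted for brevity:
+
+```hcl
+resource "azurerm_hdinsight_kafka_cluster" "example" {
+  name                = "example-hdicluster"
+  resource_group_name = azurerm_resource_group.example.name
+  location            = azurerm_resource_group.example.location
+  cluster_version     = "3.6"
+  tier                = "Standard"
+
+  # component_version, gateway, storage_account and roles blocks omitted for brevity
+
+  metastores {
+    hive {
+      server        = azurerm_sql_server.example.fully_qualified_domain_name
+      database_name = azurerm_sql_database.hive.name
+      username      = azurerm_sql_server.example.administrator_login
+      password      = azurerm_sql_server.example.administrator_login_password
+    }
+  }
+}
+```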
+
 ## Attributes Reference

 The following attributes are exported:
diff --git a/website/docs/r/hdinsight_spark_cluster.html.markdown b/website/docs/r/hdinsight_spark_cluster.html.markdown
index e864eadeedb1..771c6ad1ef40 100644
--- a/website/docs/r/hdinsight_spark_cluster.html.markdown
+++ b/website/docs/r/hdinsight_spark_cluster.html.markdown
@@ -110,6 +110,8 @@ The following arguments are supported:

 * `tags` - (Optional) A map of Tags which should be assigned to this HDInsight Spark Cluster.

+* `metastores` - (Optional) A `metastores` block as defined below.
+
 ---

 A `component_version` block supports the following:
@@ -232,6 +234,54 @@ A `zookeeper_node` block supports the following:

 * `virtual_network_id` - (Optional) The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created.

+---
+
+A `metastores` block supports the following:
+
+* `hive` - (Optional) A `hive` block as defined below.
+
+* `oozie` - (Optional) An `oozie` block as defined below.
+
+* `ambari` - (Optional) An `ambari` block as defined below.
+
+---
+
+A `hive` block supports the following:
+
+* `server` - (Required) The fully-qualified domain name (FQDN) of the SQL server to use for the external Hive metastore. Changing this forces a new resource to be created.
+
+* `database_name` - (Required) The external Hive metastore's existing SQL database. Changing this forces a new resource to be created.
+
+* `username` - (Required) The external Hive metastore's existing SQL server admin username. Changing this forces a new resource to be created.
+
+* `password` - (Required) The external Hive metastore's existing SQL server admin password. Changing this forces a new resource to be created.
+
+
+---
+
+An `oozie` block supports the following:
+
+* `server` - (Required) The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created.
+
+* `database_name` - (Required) The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created.
+
+* `username` - (Required) The external Oozie metastore's existing SQL server admin username. Changing this forces a new resource to be created.
+
+* `password` - (Required) The external Oozie metastore's existing SQL server admin password. Changing this forces a new resource to be created.
+
+---
+
+An `ambari` block supports the following:
+
+* `server` - (Required) The fully-qualified domain name (FQDN) of the SQL server to use for the external Ambari metastore. Changing this forces a new resource to be created.
+
+* `database_name` - (Required) The external Ambari metastore's existing SQL database. Changing this forces a new resource to be created.
+
+* `username` - (Required) The external Ambari metastore's existing SQL server admin username. Changing this forces a new resource to be created.
+
+* `password` - (Required) The external Ambari metastore's existing SQL server admin password. Changing this forces a new resource to be created.
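+
+As a minimal sketch, configuring only an external Hive metastore could look like this; the server FQDN, database name, and credentials are illustrative placeholders:
+
+```hcl
+metastores {
+  hive {
+    # All four arguments are required; the values below are placeholders.
+    server        = "example-sql-server.database.windows.net"
+    database_name = "hive"
+    username      = "sql_admin"
+    password      = "TerrAform123!"
+  }
+}
+```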
+
+
 ## Attributes Reference

 The following attributes are exported:
diff --git a/website/docs/r/hdinsight_storm_cluster.html.markdown b/website/docs/r/hdinsight_storm_cluster.html.markdown
index c0925dbb1f1f..ace3366c4d6c 100644
--- a/website/docs/r/hdinsight_storm_cluster.html.markdown
+++ b/website/docs/r/hdinsight_storm_cluster.html.markdown
@@ -108,6 +108,8 @@ The following arguments are supported:

 * `tags` - (Optional) A map of Tags which should be assigned to this HDInsight Storm Cluster.

+* `metastores` - (Optional) A `metastores` block as defined below.
+
 ---

 A `component_version` block supports the following:
@@ -218,6 +220,53 @@ A `zookeeper_node` block supports the following:

 * `virtual_network_id` - (Optional) The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created.

+---
+
+A `metastores` block supports the following:
+
+* `hive` - (Optional) A `hive` block as defined below.
+
+* `oozie` - (Optional) An `oozie` block as defined below.
+
+* `ambari` - (Optional) An `ambari` block as defined below.
+
+---
+
+A `hive` block supports the following:
+
+* `server` - (Required) The fully-qualified domain name (FQDN) of the SQL server to use for the external Hive metastore. Changing this forces a new resource to be created.
+
+* `database_name` - (Required) The external Hive metastore's existing SQL database. Changing this forces a new resource to be created.
+
+* `username` - (Required) The external Hive metastore's existing SQL server admin username. Changing this forces a new resource to be created.
+
+* `password` - (Required) The external Hive metastore's existing SQL server admin password. Changing this forces a new resource to be created.
+
+
+---
+
+An `oozie` block supports the following:
+
+* `server` - (Required) The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created.
+
+* `database_name` - (Required) The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created.
+
+* `username` - (Required) The external Oozie metastore's existing SQL server admin username. Changing this forces a new resource to be created.
+
+* `password` - (Required) The external Oozie metastore's existing SQL server admin password. Changing this forces a new resource to be created.
+
+---
+
+An `ambari` block supports the following:
+
+* `server` - (Required) The fully-qualified domain name (FQDN) of the SQL server to use for the external Ambari metastore. Changing this forces a new resource to be created.
+
+* `database_name` - (Required) The external Ambari metastore's existing SQL database. Changing this forces a new resource to be created.
+
+* `username` - (Required) The external Ambari metastore's existing SQL server admin username. Changing this forces a new resource to be created.
+
+* `password` - (Required) The external Ambari metastore's existing SQL server admin password. Changing this forces a new resource to be created.
+
 ## Attributes Reference

 The following attributes are exported: