diff --git a/azurerm/helpers/azure/hdinsight.go b/azurerm/helpers/azure/hdinsight.go
index a1d9ec7d8c93..ca129004101e 100644
--- a/azurerm/helpers/azure/hdinsight.go
+++ b/azurerm/helpers/azure/hdinsight.go
@@ -40,7 +40,7 @@ func SchemaHDInsightTier() *schema.Schema {
 		ValidateFunc: validation.StringInSlice([]string{
 			string(hdinsight.Standard),
 			string(hdinsight.Premium),
-		}, false),
+		}, true), // TODO: file a bug about this
 		DiffSuppressFunc: location.DiffSuppressFunc,
 	}
 
@@ -119,6 +119,43 @@ func SchemaHDInsightsGateway() *schema.Schema {
 	}
 }
 
+func SchemaHDInsightsExternalMetastore() *schema.Schema {
+	return &schema.Schema{
+		Type:     schema.TypeList,
+		Optional: true,
+		MaxItems: 1,
+		Elem: &schema.Resource{
+			Schema: map[string]*schema.Schema{
+				"server": {
+					Type:     schema.TypeString,
+					Required: true,
+					ForceNew: true,
+				},
+				"database_name": {
+					Type:     schema.TypeString,
+					Required: true,
+					ForceNew: true,
+				},
+				"username": {
+					Type:     schema.TypeString,
+					Required: true,
+					ForceNew: true,
+				},
+				"password": {
+					Type:      schema.TypeString,
+					Required:  true,
+					ForceNew:  true,
+					Sensitive: true,
+					// Azure returns the key as "*****". We'll suppress that here.
+					DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool {
+						return (new == d.Get(k).(string)) && (old == "*****")
+					},
+				},
+			},
+		},
+	}
+}
+
 func ExpandHDInsightsConfigurations(input []interface{}) map[string]interface{} {
 	vs := input[0].(map[string]interface{})
 
@@ -136,6 +173,86 @@ func ExpandHDInsightsConfigurations(input []interface{}) map[string]interface{}
 	}
 }
 
+func ExpandHDInsightsHiveMetastore(input []interface{}) map[string]interface{} {
+	if len(input) == 0 {
+		return nil
+	}
+	vs := input[0].(map[string]interface{})
+
+	server := vs["server"].(string)
+	database := vs["database_name"].(string)
+	username := vs["username"].(string)
+	password := vs["password"].(string)
+
+	return map[string]interface{}{
+		"hive-site": map[string]interface{}{
+			"javax.jdo.option.ConnectionDriverName": "com.microsoft.sqlserver.jdbc.SQLServerDriver",
+			"javax.jdo.option.ConnectionURL":        fmt.Sprintf("jdbc:sqlserver://%s;database=%s;encrypt=true;trustServerCertificate=true;create=false;loginTimeout=300", server, database),
+			"javax.jdo.option.ConnectionUserName":   username,
+			"javax.jdo.option.ConnectionPassword":   password,
+		},
+		"hive-env": map[string]interface{}{
+			"hive_database":                       "Existing MSSQL Server database with SQL authentication",
+			"hive_database_name":                  database,
+			"hive_database_type":                  "mssql",
+			"hive_existing_mssql_server_database": database,
+			"hive_existing_mssql_server_host":     server,
+			"hive_hostname":                       server,
+		},
+	}
+}
+
+func ExpandHDInsightsOozieMetastore(input []interface{}) map[string]interface{} {
+	if len(input) == 0 {
+		return nil
+	}
+	vs := input[0].(map[string]interface{})
+
+	server := vs["server"].(string)
+	database := vs["database_name"].(string)
+	username := vs["username"].(string)
+	password := vs["password"].(string)
+
+	return map[string]interface{}{
+		"oozie-site": map[string]interface{}{
+			"oozie.service.JPAService.jdbc.driver":   "com.microsoft.sqlserver.jdbc.SQLServerDriver",
+			"oozie.service.JPAService.jdbc.url":      fmt.Sprintf("jdbc:sqlserver://%s;database=%s;encrypt=true;trustServerCertificate=true;create=false;loginTimeout=300", server, database),
+			"oozie.service.JPAService.jdbc.username": username,
+			"oozie.service.JPAService.jdbc.password": password,
+			"oozie.db.schema.name":                   "oozie",
+		},
+		"oozie-env": map[string]interface{}{
+			"oozie_database":                       "Existing MSSQL Server database with SQL authentication",
+			"oozie_database_name":                  database,
+			"oozie_database_type":                  "mssql",
+			"oozie_existing_mssql_server_database": database,
+			"oozie_existing_mssql_server_host":     server,
+			"oozie_hostname":                       server,
+		},
+	}
+}
+
+func ExpandHDInsightsAmbariMetastore(input []interface{}) map[string]interface{} {
+	if len(input) == 0 {
+		return nil
+	}
+	vs := input[0].(map[string]interface{})
+
+	server := vs["server"].(string)
+	database := vs["database_name"].(string)
+	username := vs["username"].(string)
+	password := vs["password"].(string)
+
+	return map[string]interface{}{
+		"ambari-conf": map[string]interface{}{
+			"database-server":        server,
+			"database-name":          database,
+			"database-user-name":     username,
+			"database-user-password": password,
+		},
+	}
+}
+
 func FlattenHDInsightsConfigurations(input map[string]*string) []interface{} {
 	enabled := false
 	if v, exists := input["restAuthCredential.isEnabled"]; exists && v != nil {
@@ -164,6 +281,111 @@ func FlattenHDInsightsConfigurations(input map[string]*string) []interface{}
 	}
 }
 
+func FlattenHDInsightsHiveMetastore(env map[string]*string, site map[string]*string) []interface{} {
+	server := ""
+	if v, exists := env["hive_hostname"]; exists && v != nil {
+		server = *v
+	}
+
+	database := ""
+	if v, exists := env["hive_database_name"]; exists && v != nil {
+		database = *v
+	}
+
+	username := ""
+	if v, exists := site["javax.jdo.option.ConnectionUserName"]; exists && v != nil {
+		username = *v
+	}
+
+	password := ""
+	if v, exists := site["javax.jdo.option.ConnectionPassword"]; exists && v != nil {
+		password = *v
+	}
+
+	if server != "" && database != "" {
+		return []interface{}{
+			map[string]interface{}{
+				"server":        server,
+				"database_name": database,
+				"username":      username,
+				"password":      password,
+			},
+		}
+	}
+
+	return nil
+}
+
+func FlattenHDInsightsOozieMetastore(env map[string]*string, site map[string]*string) []interface{} {
+	server := ""
+	if v, exists := env["oozie_hostname"]; exists && v != nil {
+		server = *v
+	}
+
+	database := ""
+	if v, exists := env["oozie_database_name"]; exists && v != nil {
+		database = *v
+	}
+
+	username := ""
+	if v, exists := site["oozie.service.JPAService.jdbc.username"]; exists && v != nil {
+		username = *v
+	}
+
+	password := ""
+	if v, exists := site["oozie.service.JPAService.jdbc.password"]; exists && v != nil {
+		password = *v
+	}
+
+	if server != "" && database != "" {
+		return []interface{}{
+			map[string]interface{}{
+				"server":        server,
+				"database_name": database,
+				"username":      username,
+				"password":      password,
+			},
+		}
+	}
+
+	return nil
+}
+
+func FlattenHDInsightsAmbariMetastore(conf map[string]*string) []interface{} {
+	server := ""
+	if v, exists := conf["database-server"]; exists && v != nil {
+		server = *v
+	}
+
+	database := ""
+	if v, exists := conf["database-name"]; exists && v != nil {
+		database = *v
+	}
+
+	username := ""
+	if v, exists := conf["database-user-name"]; exists && v != nil {
+		username = *v
+	}
+
+	password := ""
+	if v, exists := conf["database-user-password"]; exists && v != nil {
+		password = *v
+	}
+
+	if server != "" && database != "" {
+		return []interface{}{
+			map[string]interface{}{
+				"server":        server,
+				"database_name": database,
+				"username":      username,
+				"password":      password,
+			},
+		}
+	}
+
+	return nil
+}
+
 func SchemaHDInsightsStorageAccounts() *schema.Schema {
 	return &schema.Schema{
 		Type:     schema.TypeList,
diff --git a/azurerm/internal/services/hdinsight/common_hdinsight.go b/azurerm/internal/services/hdinsight/common_hdinsight.go
index 98aed1c185cb..265c68e56dc0 100644
--- a/azurerm/internal/services/hdinsight/common_hdinsight.go
+++ b/azurerm/internal/services/hdinsight/common_hdinsight.go
@@ -272,3 +272,56 @@ func deleteHDInsightEdgeNodes(ctx context.Context, client *hdinsight.Application
 
 	return nil
 }
+
+func expandHDInsightsMetastore(input []interface{}) map[string]interface{} {
+	v := input[0].(map[string]interface{})
+
+	config := map[string]interface{}{}
+
+	if hiveRaw, ok := v["hive"]; ok {
+		for k, val := range azure.ExpandHDInsightsHiveMetastore(hiveRaw.([]interface{})) {
+			config[k] = val
+		}
+	}
+
+	if oozieRaw, ok := v["oozie"]; ok {
+		for k, val := range azure.ExpandHDInsightsOozieMetastore(oozieRaw.([]interface{})) {
+			config[k] = val
+		}
+	}
+
+	if ambariRaw, ok := v["ambari"]; ok {
+		for k, val := range azure.ExpandHDInsightsAmbariMetastore(ambariRaw.([]interface{})) {
+			config[k] = val
+		}
+	}
+
+	return config
+}
+
+func flattenHDInsightsMetastores(d *schema.ResourceData, configurations map[string]map[string]*string) {
+	result := map[string]interface{}{}
+
+	hiveEnv, envExists := configurations["hive-env"]
+	hiveSite, siteExists := configurations["hive-site"]
+	if envExists && siteExists {
+		result["hive"] = azure.FlattenHDInsightsHiveMetastore(hiveEnv, hiveSite)
+	}
+
+	oozieEnv, envExists := configurations["oozie-env"]
+	oozieSite, siteExists := configurations["oozie-site"]
+	if envExists && siteExists {
+		result["oozie"] = azure.FlattenHDInsightsOozieMetastore(oozieEnv, oozieSite)
+	}
+
+	ambari, ambariExists := configurations["ambari-conf"]
+	if ambariExists {
+		result["ambari"] = azure.FlattenHDInsightsAmbariMetastore(ambari)
+	}
+
+	if len(result) > 0 {
+		d.Set("metastores", []interface{}{
+			result,
+		})
+	}
+}
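Aside, not part of the diff: a small self-contained sketch of the merge expandHDInsightsMetastore performs above. Each metastore expander returns a map keyed by Ambari configuration section name ("hive-site", "hive-env", and so on), and those sections are folded into the single configurations map that already carries the gateway settings. The literal values below are made up for illustration.

package main

import "fmt"

func main() {
	// Stand-in for the map produced from the `gateway` block.
	configurations := map[string]interface{}{
		"gateway": map[string]interface{}{"restAuthCredential.isEnabled": true},
	}

	// Stand-in for the output of azure.ExpandHDInsightsHiveMetastore.
	hive := map[string]interface{}{
		"hive-site": map[string]interface{}{"javax.jdo.option.ConnectionUserName": "sql_admin"},
		"hive-env":  map[string]interface{}{"hive_database_type": "mssql"},
	}

	// The merge itself: each section is added alongside the gateway section,
	// so a single map ends up describing the whole cluster configuration.
	for k, v := range hive {
		configurations[k] = v
	}

	fmt.Println(len(configurations)) // 3: gateway, hive-site and hive-env
}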
utils.String("Hadoop"), ComponentVersion: componentVersions, - Configurations: gateway, + Configurations: configurations, }, StorageProfile: &hdinsight.StorageProfile{ Storageaccounts: storageAccounts, @@ -315,11 +337,17 @@ func resourceArmHDInsightHadoopClusterRead(d *schema.ResourceData, meta interfac return fmt.Errorf("Error retrieving HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } - configuration, err := configurationsClient.Get(ctx, resourceGroup, name, "gateway") + // Each call to configurationsClient methods is HTTP request. Getting all settings in one operation + configurations, err := configurationsClient.List(ctx, resourceGroup, name) if err != nil { return fmt.Errorf("Error retrieving Configuration for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } + gateway, exists := configurations.Configurations["gateway"] + if !exists { + return fmt.Errorf("Error retrieving gateway for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + } + d.Set("name", name) d.Set("resource_group_name", resourceGroup) if location := resp.Location; location != nil { @@ -337,9 +365,11 @@ func resourceArmHDInsightHadoopClusterRead(d *schema.ResourceData, meta interfac return fmt.Errorf("Error flattening `component_version`: %+v", err) } - if err := d.Set("gateway", azure.FlattenHDInsightsConfigurations(configuration.Value)); err != nil { + if err := d.Set("gateway", azure.FlattenHDInsightsConfigurations(gateway)); err != nil { return fmt.Errorf("Error flattening `gateway`: %+v", err) } + + flattenHDInsightsMetastores(d, configurations.Configurations) } hadoopRoles := hdInsightRoleDefinition{ diff --git a/azurerm/internal/services/hdinsight/tests/resource_arm_hdinsight_hadoop_cluster_test.go b/azurerm/internal/services/hdinsight/tests/resource_arm_hdinsight_hadoop_cluster_test.go index 8254b1905e0f..05780c4b5fbc 100644 --- a/azurerm/internal/services/hdinsight/tests/resource_arm_hdinsight_hadoop_cluster_test.go +++ b/azurerm/internal/services/hdinsight/tests/resource_arm_hdinsight_hadoop_cluster_test.go @@ -340,6 +340,101 @@ func TestAccAzureRMHDInsightHadoopCluster_tls(t *testing.T) { }) } +func TestAccAzureRMHDInsightHadoopCluster_allMetastores(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_hdinsight_hadoop_cluster", "test") + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMHDInsightClusterDestroy(data.ResourceType), + Steps: []resource.TestStep{ + { + Config: testAccAzureRMHDInsightHadoopCluster_allMetastores(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account", + "metastores.0.hive.0.password", + "metastores.0.oozie.0.password", + "metastores.0.ambari.0.password"), + }, + }) +} + +func TestAccAzureRMHDInsightHadoopCluster_hiveMetastore(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_hdinsight_hadoop_cluster", "test") + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) 
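Aside, not part of the diff: a sketch of the lookup pattern the Read function above switches to. Because configurationsClient.List returns every configuration section in one response, the gateway and all metastore sections can be read from a single HTTP round trip instead of one Get call per key. The map below is a hand-built stand-in for the SDK's configurations.Configurations field; no real client calls are made.

package main

import "fmt"

func strPtr(s string) *string { return &s }

func main() {
	// Stand-in for configurations.Configurations from one List call.
	configurations := map[string]map[string]*string{
		"gateway":   {"restAuthCredential.username": strPtr("acctestusrgw")},
		"hive-env":  {"hive_hostname": strPtr("sqlserver.example.com")},
		"hive-site": {"javax.jdo.option.ConnectionUserName": strPtr("sql_admin")},
	}

	// One guarded lookup per section; no further round trips are needed.
	if gateway, ok := configurations["gateway"]; ok {
		fmt.Println(*gateway["restAuthCredential.username"])
	}
	if hiveEnv, ok := configurations["hive-env"]; ok {
		fmt.Println(*hiveEnv["hive_hostname"])
	}
}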
diff --git a/azurerm/internal/services/hdinsight/tests/resource_arm_hdinsight_hadoop_cluster_test.go b/azurerm/internal/services/hdinsight/tests/resource_arm_hdinsight_hadoop_cluster_test.go
index 8254b1905e0f..05780c4b5fbc 100644
--- a/azurerm/internal/services/hdinsight/tests/resource_arm_hdinsight_hadoop_cluster_test.go
+++ b/azurerm/internal/services/hdinsight/tests/resource_arm_hdinsight_hadoop_cluster_test.go
@@ -340,6 +340,101 @@ func TestAccAzureRMHDInsightHadoopCluster_tls(t *testing.T) {
 	})
 }
 
+func TestAccAzureRMHDInsightHadoopCluster_allMetastores(t *testing.T) {
+	data := acceptance.BuildTestData(t, "azurerm_hdinsight_hadoop_cluster", "test")
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:     func() { acceptance.PreCheck(t) },
+		Providers:    acceptance.SupportedProviders,
+		CheckDestroy: testCheckAzureRMHDInsightClusterDestroy(data.ResourceType),
+		Steps: []resource.TestStep{
+			{
+				Config: testAccAzureRMHDInsightHadoopCluster_allMetastores(data),
+				Check: resource.ComposeTestCheckFunc(
+					testCheckAzureRMHDInsightClusterExists(data.ResourceName),
+					resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"),
+					resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"),
+				),
+			},
+			data.ImportStep("roles.0.head_node.0.password",
+				"roles.0.head_node.0.vm_size",
+				"roles.0.worker_node.0.password",
+				"roles.0.worker_node.0.vm_size",
+				"roles.0.zookeeper_node.0.password",
+				"roles.0.zookeeper_node.0.vm_size",
+				"storage_account",
+				"metastores.0.hive.0.password",
+				"metastores.0.oozie.0.password",
+				"metastores.0.ambari.0.password"),
+		},
+	})
+}
+
+func TestAccAzureRMHDInsightHadoopCluster_hiveMetastore(t *testing.T) {
+	data := acceptance.BuildTestData(t, "azurerm_hdinsight_hadoop_cluster", "test")
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:     func() { acceptance.PreCheck(t) },
+		Providers:    acceptance.SupportedProviders,
+		CheckDestroy: testCheckAzureRMHDInsightClusterDestroy(data.ResourceType),
+		Steps: []resource.TestStep{
+			{
+				Config: testAccAzureRMHDInsightHadoopCluster_hiveMetastore(data),
+				Check: resource.ComposeTestCheckFunc(
+					testCheckAzureRMHDInsightClusterExists(data.ResourceName),
+					resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"),
+					resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"),
+				),
+			},
+		},
+	})
+}
+
+func TestAccAzureRMHDInsightHadoopCluster_updateMetastore(t *testing.T) {
+	data := acceptance.BuildTestData(t, "azurerm_hdinsight_hadoop_cluster", "test")
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:     func() { acceptance.PreCheck(t) },
+		Providers:    acceptance.SupportedProviders,
+		CheckDestroy: testCheckAzureRMHDInsightClusterDestroy(data.ResourceType),
+		Steps: []resource.TestStep{
+			{
+				Config: testAccAzureRMHDInsightHadoopCluster_hiveMetastore(data),
+				Check: resource.ComposeTestCheckFunc(
+					testCheckAzureRMHDInsightClusterExists(data.ResourceName),
+					resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"),
+					resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"),
+				),
+			},
+			data.ImportStep("roles.0.head_node.0.password",
+				"roles.0.head_node.0.vm_size",
+				"roles.0.worker_node.0.password",
+				"roles.0.worker_node.0.vm_size",
+				"roles.0.zookeeper_node.0.password",
+				"roles.0.zookeeper_node.0.vm_size",
+				"storage_account",
+				"metastores.0.hive.0.password",
+				"metastores.0.oozie.0.password",
+				"metastores.0.ambari.0.password"),
+			{
+				Config: testAccAzureRMHDInsightHadoopCluster_allMetastores(data),
+				Check: resource.ComposeTestCheckFunc(
+					testCheckAzureRMHDInsightClusterExists(data.ResourceName),
+					resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"),
+					resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"),
+				),
+			},
+			data.ImportStep("roles.0.head_node.0.password",
+				"roles.0.head_node.0.vm_size",
+				"roles.0.worker_node.0.password",
+				"roles.0.worker_node.0.vm_size",
+				"roles.0.zookeeper_node.0.password",
+				"roles.0.zookeeper_node.0.vm_size",
+				"storage_account",
+				"metastores.0.hive.0.password",
+				"metastores.0.oozie.0.password",
+				"metastores.0.ambari.0.password"),
+		},
+	})
+}
+
 func testAccAzureRMHDInsightHadoopCluster_basic(data acceptance.TestData) string {
 	template := testAccAzureRMHDInsightHadoopCluster_template(data)
 	return fmt.Sprintf(`
@@ -448,7 +543,6 @@ resource "azurerm_hdinsight_hadoop_cluster" "import" {
       for_each = lookup(roles.value, "head_node", [])
       content {
         password           = lookup(head_node.value, "password", null)
-        ssh_keys           = lookup(head_node.value, "ssh_keys", null)
        subnet_id          = lookup(head_node.value, "subnet_id", null)
         username           = head_node.value.username
         virtual_network_id = lookup(head_node.value, "virtual_network_id", null)
@@ -459,9 +553,7 @@ resource "azurerm_hdinsight_hadoop_cluster" "import" {
    dynamic "worker_node" {
       for_each = lookup(roles.value, "worker_node", [])
       content {
-        min_instance_count    = lookup(worker_node.value, "min_instance_count", null)
         password              = lookup(worker_node.value, "password", null)
-        ssh_keys              = lookup(worker_node.value, "ssh_keys", null)
         subnet_id             = lookup(worker_node.value, "subnet_id", null)
         target_instance_count = worker_node.value.target_instance_count
         username              = worker_node.value.username
@@ -474,7 +566,6 @@ resource "azurerm_hdinsight_hadoop_cluster" "import" {
       for_each = lookup(roles.value, "zookeeper_node", [])
       content {
         password           = lookup(zookeeper_node.value, "password", null)
"password", null) - ssh_keys = lookup(zookeeper_node.value, "ssh_keys", null) subnet_id = lookup(zookeeper_node.value, "subnet_id", null) username = zookeeper_node.value.username virtual_network_id = lookup(zookeeper_node.value, "virtual_network_id", null) @@ -1000,7 +1091,6 @@ func testAccAzureRMHDInsightHadoopCluster_tls(data acceptance.TestData) string { template := testAccAzureRMHDInsightHadoopCluster_template(data) return fmt.Sprintf(` %s - resource "azurerm_hdinsight_hadoop_cluster" "test" { name = "acctesthdi-%d" resource_group_name = azurerm_resource_group.test.name @@ -1008,43 +1098,221 @@ resource "azurerm_hdinsight_hadoop_cluster" "test" { cluster_version = "3.6" tier = "Standard" tls_min_version = "1.2" - component_version { hadoop = "2.7" } - gateway { enabled = true username = "acctestusrgw" password = "TerrAform123!" } - storage_account { storage_container_id = azurerm_storage_container.test.id storage_account_key = azurerm_storage_account.test.primary_access_key is_default = true } - roles { head_node { vm_size = "Standard_D3_v2" username = "acctestusrvm" password = "AccTestvdSC4daf986!" } + worker_node { + vm_size = "Standard_D4_V2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + target_instance_count = 2 + } + zookeeper_node { + vm_size = "Standard_D3_v2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + } + } +} +`, template, data.RandomInteger) +} +func testAccAzureRMHDInsightHadoopCluster_allMetastores(data acceptance.TestData) string { + template := testAccAzureRMHDInsightHadoopCluster_template(data) + return fmt.Sprintf(` +%s +resource "azurerm_sql_server" "test" { + name = "acctestsql-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + administrator_login = "sql_admin" + administrator_login_password = "TerrAform123!" + version = "12.0" +} +resource "azurerm_sql_database" "hive" { + name = "hive" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + server_name = azurerm_sql_server.test.name + collation = "SQL_Latin1_General_CP1_CI_AS" + create_mode = "Default" + requested_service_objective_name = "GP_Gen5_2" +} +resource "azurerm_sql_database" "oozie" { + name = "oozie" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + server_name = azurerm_sql_server.test.name + collation = "SQL_Latin1_General_CP1_CI_AS" + create_mode = "Default" + requested_service_objective_name = "GP_Gen5_2" +} +resource "azurerm_sql_database" "ambari" { + name = "ambari" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + server_name = azurerm_sql_server.test.name + collation = "SQL_Latin1_General_CP1_CI_AS" + create_mode = "Default" + requested_service_objective_name = "GP_Gen5_2" +} +resource "azurerm_sql_firewall_rule" "AzureServices" { + name = "allow-azure-services" + resource_group_name = azurerm_resource_group.test.name + server_name = azurerm_sql_server.test.name + start_ip_address = "0.0.0.0" + end_ip_address = "0.0.0.0" +} +resource "azurerm_hdinsight_hadoop_cluster" "test" { + name = "acctesthdi-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + cluster_version = "3.6" + tier = "Standard" + component_version { + hadoop = "2.7" + } + gateway { + enabled = true + username = "acctestusrgw" + password = "TerrAform123!" 
+  }
+  storage_account {
+    storage_container_id = azurerm_storage_container.test.id
+    storage_account_key  = azurerm_storage_account.test.primary_access_key
+    is_default           = true
+  }
+  roles {
+    head_node {
+      vm_size  = "Standard_D3_v2"
+      username = "acctestusrvm"
+      password = "AccTestvdSC4daf986!"
+    }
     worker_node {
       vm_size               = "Standard_D4_V2"
       username              = "acctestusrvm"
       password              = "AccTestvdSC4daf986!"
       target_instance_count = 2
     }
+    zookeeper_node {
+      vm_size  = "Standard_D3_v2"
+      username = "acctestusrvm"
+      password = "AccTestvdSC4daf986!"
+    }
+  }
+  metastores {
+    hive {
+      server        = azurerm_sql_server.test.fully_qualified_domain_name
+      database_name = azurerm_sql_database.hive.name
+      username      = azurerm_sql_server.test.administrator_login
+      password      = azurerm_sql_server.test.administrator_login_password
+    }
+    oozie {
+      server        = azurerm_sql_server.test.fully_qualified_domain_name
+      database_name = azurerm_sql_database.oozie.name
+      username      = azurerm_sql_server.test.administrator_login
+      password      = azurerm_sql_server.test.administrator_login_password
+    }
+    ambari {
+      server        = azurerm_sql_server.test.fully_qualified_domain_name
+      database_name = azurerm_sql_database.ambari.name
+      username      = azurerm_sql_server.test.administrator_login
+      password      = azurerm_sql_server.test.administrator_login_password
+    }
+  }
+}
+`, template, data.RandomInteger, data.RandomInteger)
+}
+
+func testAccAzureRMHDInsightHadoopCluster_hiveMetastore(data acceptance.TestData) string {
+	template := testAccAzureRMHDInsightHadoopCluster_template(data)
+	return fmt.Sprintf(`
+%s
+resource "azurerm_sql_server" "test" {
+  name                         = "acctestsql-%d"
+  resource_group_name          = azurerm_resource_group.test.name
+  location                     = azurerm_resource_group.test.location
+  administrator_login          = "sql_admin"
+  administrator_login_password = "TerrAform123!"
+  version                      = "12.0"
+}
+resource "azurerm_sql_database" "hive" {
+  name                             = "hive"
+  resource_group_name              = azurerm_resource_group.test.name
+  location                         = azurerm_resource_group.test.location
+  server_name                      = azurerm_sql_server.test.name
+  collation                        = "SQL_Latin1_General_CP1_CI_AS"
+  create_mode                      = "Default"
+  requested_service_objective_name = "GP_Gen5_2"
+}
+resource "azurerm_sql_firewall_rule" "AzureServices" {
+  name                = "allow-azure-services"
+  resource_group_name = azurerm_resource_group.test.name
+  server_name         = azurerm_sql_server.test.name
+  start_ip_address    = "0.0.0.0"
+  end_ip_address      = "0.0.0.0"
+}
+resource "azurerm_hdinsight_hadoop_cluster" "test" {
+  name                = "acctesthdi-%d"
+  resource_group_name = azurerm_resource_group.test.name
+  location            = azurerm_resource_group.test.location
+  cluster_version     = "3.6"
+  tier                = "Standard"
+  component_version {
+    hadoop = "2.7"
+  }
+  gateway {
+    enabled  = true
+    username = "acctestusrgw"
+    password = "TerrAform123!"
+  }
+  storage_account {
+    storage_container_id = azurerm_storage_container.test.id
+    storage_account_key  = azurerm_storage_account.test.primary_access_key
+    is_default           = true
+  }
+  roles {
+    head_node {
+      vm_size  = "Standard_D3_v2"
+      username = "acctestusrvm"
+      password = "AccTestvdSC4daf986!"
+    }
+    worker_node {
+      vm_size               = "Standard_D4_V2"
+      username              = "acctestusrvm"
+      password              = "AccTestvdSC4daf986!"
+      target_instance_count = 2
+    }
     zookeeper_node {
       vm_size  = "Standard_D3_v2"
       username = "acctestusrvm"
       password = "AccTestvdSC4daf986!"
     }
   }
+  metastores {
+    hive {
+      server        = azurerm_sql_server.test.fully_qualified_domain_name
+      database_name = azurerm_sql_database.hive.name
+      username      = azurerm_sql_server.test.administrator_login
+      password      = azurerm_sql_server.test.administrator_login_password
+    }
+  }
 }
-`, template, data.RandomInteger)
+`, template, data.RandomInteger, data.RandomInteger)
 }
diff --git a/website/docs/r/hdinsight_hadoop_cluster.html.markdown b/website/docs/r/hdinsight_hadoop_cluster.html.markdown
index ae6edaa5cb15..bccdc6d1b00b 100644
--- a/website/docs/r/hdinsight_hadoop_cluster.html.markdown
+++ b/website/docs/r/hdinsight_hadoop_cluster.html.markdown
@@ -111,6 +111,8 @@ The following arguments are supported:
 
 * `tags` - (Optional) A map of Tags which should be assigned to this HDInsight Hadoop Cluster.
 
+* `metastores` - (Optional) A `metastores` block as defined below.
+
 ---
 
 A `component_version` block supports the following:
@@ -251,6 +253,54 @@ A `install_script_action` block supports the following:
 
 * `uri` - (Required) The URI pointing to the script to run during the installation of the edge node. Changing this forces a new resource to be created.
 
+---
+
+A `metastores` block supports the following:
+
+* `hive` - (Optional) A `hive` block as defined below.
+
+* `oozie` - (Optional) An `oozie` block as defined below.
+
+* `ambari` - (Optional) An `ambari` block as defined below.
+
+---
+
+A `hive` block supports the following:
+
+* `server` - (Required) The fully-qualified domain name (FQDN) of the SQL server to use for the external Hive metastore. Changing this forces a new resource to be created.
+
+* `database_name` - (Required) The external Hive metastore's existing SQL database. Changing this forces a new resource to be created.
+
+* `username` - (Required) The external Hive metastore's existing SQL server admin username. Changing this forces a new resource to be created.
+
+* `password` - (Required) The external Hive metastore's existing SQL server admin password. Changing this forces a new resource to be created.
+
+---
+
+An `oozie` block supports the following:
+
+* `server` - (Required) The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created.
+
+* `database_name` - (Required) The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created.
+
+* `username` - (Required) The external Oozie metastore's existing SQL server admin username. Changing this forces a new resource to be created.
+
+* `password` - (Required) The external Oozie metastore's existing SQL server admin password. Changing this forces a new resource to be created.
+
+---
+
+An `ambari` block supports the following:
+
+* `server` - (Required) The fully-qualified domain name (FQDN) of the SQL server to use for the external Ambari metastore. Changing this forces a new resource to be created.
+
+* `database_name` - (Required) The external Ambari metastore's existing SQL database. Changing this forces a new resource to be created.
+
+* `username` - (Required) The external Ambari metastore's existing SQL server admin username. Changing this forces a new resource to be created.
+
+* `password` - (Required) The external Ambari metastore's existing SQL server admin password. Changing this forces a new resource to be created.
+
 ## Attributes Reference
 
 The following attributes are exported: