1. Packages
  2. Azure Classic
  3. API Docs
  4. hdinsight
  5. SparkCluster

We recommend using the Azure Native provider instead of Azure Classic.

Azure Classic v5.81.0 published on Monday, Jun 24, 2024 by Pulumi

azure.hdinsight.SparkCluster

Explore with Pulumi AI

azure logo

We recommend using the Azure Native provider instead of Azure Classic.

Azure Classic v5.81.0 published on Monday, Jun 24, 2024 by Pulumi

    Manages an HDInsight Spark Cluster.

    Example Usage

    import * as pulumi from "@pulumi/pulumi";
    import * as azure from "@pulumi/azure";
    
    const example = new azure.core.ResourceGroup("example", {
        name: "example-resources",
        location: "West Europe",
    });
    const exampleAccount = new azure.storage.Account("example", {
        name: "hdinsightstor",
        resourceGroupName: example.name,
        location: example.location,
        accountTier: "Standard",
        accountReplicationType: "LRS",
    });
    const exampleContainer = new azure.storage.Container("example", {
        name: "hdinsight",
        storageAccountName: exampleAccount.name,
        containerAccessType: "private",
    });
    const exampleSparkCluster = new azure.hdinsight.SparkCluster("example", {
        name: "example-hdicluster",
        resourceGroupName: example.name,
        location: example.location,
        clusterVersion: "3.6",
        tier: "Standard",
        componentVersion: {
            spark: "2.3",
        },
        gateway: {
            username: "acctestusrgw",
            password: "Password123!",
        },
        storageAccounts: [{
            storageContainerId: exampleContainer.id,
            storageAccountKey: exampleAccount.primaryAccessKey,
            isDefault: true,
        }],
        roles: {
            headNode: {
                vmSize: "Standard_A3",
                username: "acctestusrvm",
                password: "AccTestvdSC4daf986!",
            },
            workerNode: {
                vmSize: "Standard_A3",
                username: "acctestusrvm",
                password: "AccTestvdSC4daf986!",
                targetInstanceCount: 3,
            },
            zookeeperNode: {
                vmSize: "Medium",
                username: "acctestusrvm",
                password: "AccTestvdSC4daf986!",
            },
        },
    });
    
    import pulumi
    import pulumi_azure as azure
    
    example = azure.core.ResourceGroup("example",
        name="example-resources",
        location="West Europe")
    example_account = azure.storage.Account("example",
        name="hdinsightstor",
        resource_group_name=example.name,
        location=example.location,
        account_tier="Standard",
        account_replication_type="LRS")
    example_container = azure.storage.Container("example",
        name="hdinsight",
        storage_account_name=example_account.name,
        container_access_type="private")
    example_spark_cluster = azure.hdinsight.SparkCluster("example",
        name="example-hdicluster",
        resource_group_name=example.name,
        location=example.location,
        cluster_version="3.6",
        tier="Standard",
        component_version=azure.hdinsight.SparkClusterComponentVersionArgs(
            spark="2.3",
        ),
        gateway=azure.hdinsight.SparkClusterGatewayArgs(
            username="acctestusrgw",
            password="Password123!",
        ),
        storage_accounts=[azure.hdinsight.SparkClusterStorageAccountArgs(
            storage_container_id=example_container.id,
            storage_account_key=example_account.primary_access_key,
            is_default=True,
        )],
        roles=azure.hdinsight.SparkClusterRolesArgs(
            head_node=azure.hdinsight.SparkClusterRolesHeadNodeArgs(
                vm_size="Standard_A3",
                username="acctestusrvm",
                password="AccTestvdSC4daf986!",
            ),
            worker_node=azure.hdinsight.SparkClusterRolesWorkerNodeArgs(
                vm_size="Standard_A3",
                username="acctestusrvm",
                password="AccTestvdSC4daf986!",
                target_instance_count=3,
            ),
            zookeeper_node=azure.hdinsight.SparkClusterRolesZookeeperNodeArgs(
                vm_size="Medium",
                username="acctestusrvm",
                password="AccTestvdSC4daf986!",
            ),
        ))
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-azure/sdk/v5/go/azure/core"
    	"github.com/pulumi/pulumi-azure/sdk/v5/go/azure/hdinsight"
    	"github.com/pulumi/pulumi-azure/sdk/v5/go/azure/storage"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		example, err := core.NewResourceGroup(ctx, "example", &core.ResourceGroupArgs{
    			Name:     pulumi.String("example-resources"),
    			Location: pulumi.String("West Europe"),
    		})
    		if err != nil {
    			return err
    		}
    		exampleAccount, err := storage.NewAccount(ctx, "example", &storage.AccountArgs{
    			Name:                   pulumi.String("hdinsightstor"),
    			ResourceGroupName:      example.Name,
    			Location:               example.Location,
    			AccountTier:            pulumi.String("Standard"),
    			AccountReplicationType: pulumi.String("LRS"),
    		})
    		if err != nil {
    			return err
    		}
    		exampleContainer, err := storage.NewContainer(ctx, "example", &storage.ContainerArgs{
    			Name:                pulumi.String("hdinsight"),
    			StorageAccountName:  exampleAccount.Name,
    			ContainerAccessType: pulumi.String("private"),
    		})
    		if err != nil {
    			return err
    		}
    		_, err = hdinsight.NewSparkCluster(ctx, "example", &hdinsight.SparkClusterArgs{
    			Name:              pulumi.String("example-hdicluster"),
    			ResourceGroupName: example.Name,
    			Location:          example.Location,
    			ClusterVersion:    pulumi.String("3.6"),
    			Tier:              pulumi.String("Standard"),
    			ComponentVersion: &hdinsight.SparkClusterComponentVersionArgs{
    				Spark: pulumi.String("2.3"),
    			},
    			Gateway: &hdinsight.SparkClusterGatewayArgs{
    				Username: pulumi.String("acctestusrgw"),
    				Password: pulumi.String("Password123!"),
    			},
    			StorageAccounts: hdinsight.SparkClusterStorageAccountArray{
    				&hdinsight.SparkClusterStorageAccountArgs{
    					StorageContainerId: exampleContainer.ID(),
    					StorageAccountKey:  exampleAccount.PrimaryAccessKey,
    					IsDefault:          pulumi.Bool(true),
    				},
    			},
    			Roles: &hdinsight.SparkClusterRolesArgs{
    				HeadNode: &hdinsight.SparkClusterRolesHeadNodeArgs{
    					VmSize:   pulumi.String("Standard_A3"),
    					Username: pulumi.String("acctestusrvm"),
    					Password: pulumi.String("AccTestvdSC4daf986!"),
    				},
    				WorkerNode: &hdinsight.SparkClusterRolesWorkerNodeArgs{
    					VmSize:              pulumi.String("Standard_A3"),
    					Username:            pulumi.String("acctestusrvm"),
    					Password:            pulumi.String("AccTestvdSC4daf986!"),
    					TargetInstanceCount: pulumi.Int(3),
    				},
    				ZookeeperNode: &hdinsight.SparkClusterRolesZookeeperNodeArgs{
    					VmSize:   pulumi.String("Medium"),
    					Username: pulumi.String("acctestusrvm"),
    					Password: pulumi.String("AccTestvdSC4daf986!"),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Azure = Pulumi.Azure;
    
    return await Deployment.RunAsync(() => 
    {
        var example = new Azure.Core.ResourceGroup("example", new()
        {
            Name = "example-resources",
            Location = "West Europe",
        });
    
        var exampleAccount = new Azure.Storage.Account("example", new()
        {
            Name = "hdinsightstor",
            ResourceGroupName = example.Name,
            Location = example.Location,
            AccountTier = "Standard",
            AccountReplicationType = "LRS",
        });
    
        var exampleContainer = new Azure.Storage.Container("example", new()
        {
            Name = "hdinsight",
            StorageAccountName = exampleAccount.Name,
            ContainerAccessType = "private",
        });
    
        var exampleSparkCluster = new Azure.HDInsight.SparkCluster("example", new()
        {
            Name = "example-hdicluster",
            ResourceGroupName = example.Name,
            Location = example.Location,
            ClusterVersion = "3.6",
            Tier = "Standard",
            ComponentVersion = new Azure.HDInsight.Inputs.SparkClusterComponentVersionArgs
            {
                Spark = "2.3",
            },
            Gateway = new Azure.HDInsight.Inputs.SparkClusterGatewayArgs
            {
                Username = "acctestusrgw",
                Password = "Password123!",
            },
            StorageAccounts = new[]
            {
                new Azure.HDInsight.Inputs.SparkClusterStorageAccountArgs
                {
                    StorageContainerId = exampleContainer.Id,
                    StorageAccountKey = exampleAccount.PrimaryAccessKey,
                    IsDefault = true,
                },
            },
            Roles = new Azure.HDInsight.Inputs.SparkClusterRolesArgs
            {
                HeadNode = new Azure.HDInsight.Inputs.SparkClusterRolesHeadNodeArgs
                {
                    VmSize = "Standard_A3",
                    Username = "acctestusrvm",
                    Password = "AccTestvdSC4daf986!",
                },
                WorkerNode = new Azure.HDInsight.Inputs.SparkClusterRolesWorkerNodeArgs
                {
                    VmSize = "Standard_A3",
                    Username = "acctestusrvm",
                    Password = "AccTestvdSC4daf986!",
                    TargetInstanceCount = 3,
                },
                ZookeeperNode = new Azure.HDInsight.Inputs.SparkClusterRolesZookeeperNodeArgs
                {
                    VmSize = "Medium",
                    Username = "acctestusrvm",
                    Password = "AccTestvdSC4daf986!",
                },
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.azure.core.ResourceGroup;
    import com.pulumi.azure.core.ResourceGroupArgs;
    import com.pulumi.azure.storage.Account;
    import com.pulumi.azure.storage.AccountArgs;
    import com.pulumi.azure.storage.Container;
    import com.pulumi.azure.storage.ContainerArgs;
    import com.pulumi.azure.hdinsight.SparkCluster;
    import com.pulumi.azure.hdinsight.SparkClusterArgs;
    import com.pulumi.azure.hdinsight.inputs.SparkClusterComponentVersionArgs;
    import com.pulumi.azure.hdinsight.inputs.SparkClusterGatewayArgs;
    import com.pulumi.azure.hdinsight.inputs.SparkClusterStorageAccountArgs;
    import com.pulumi.azure.hdinsight.inputs.SparkClusterRolesArgs;
    import com.pulumi.azure.hdinsight.inputs.SparkClusterRolesHeadNodeArgs;
    import com.pulumi.azure.hdinsight.inputs.SparkClusterRolesWorkerNodeArgs;
    import com.pulumi.azure.hdinsight.inputs.SparkClusterRolesZookeeperNodeArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var example = new ResourceGroup("example", ResourceGroupArgs.builder()
                .name("example-resources")
                .location("West Europe")
                .build());
    
            var exampleAccount = new Account("exampleAccount", AccountArgs.builder()
                .name("hdinsightstor")
                .resourceGroupName(example.name())
                .location(example.location())
                .accountTier("Standard")
                .accountReplicationType("LRS")
                .build());
    
            var exampleContainer = new Container("exampleContainer", ContainerArgs.builder()
                .name("hdinsight")
                .storageAccountName(exampleAccount.name())
                .containerAccessType("private")
                .build());
    
            var exampleSparkCluster = new SparkCluster("exampleSparkCluster", SparkClusterArgs.builder()
                .name("example-hdicluster")
                .resourceGroupName(example.name())
                .location(example.location())
                .clusterVersion("3.6")
                .tier("Standard")
                .componentVersion(SparkClusterComponentVersionArgs.builder()
                    .spark("2.3")
                    .build())
                .gateway(SparkClusterGatewayArgs.builder()
                    .username("acctestusrgw")
                    .password("Password123!")
                    .build())
                .storageAccounts(SparkClusterStorageAccountArgs.builder()
                    .storageContainerId(exampleContainer.id())
                    .storageAccountKey(exampleAccount.primaryAccessKey())
                    .isDefault(true)
                    .build())
                .roles(SparkClusterRolesArgs.builder()
                    .headNode(SparkClusterRolesHeadNodeArgs.builder()
                        .vmSize("Standard_A3")
                        .username("acctestusrvm")
                        .password("AccTestvdSC4daf986!")
                        .build())
                    .workerNode(SparkClusterRolesWorkerNodeArgs.builder()
                        .vmSize("Standard_A3")
                        .username("acctestusrvm")
                        .password("AccTestvdSC4daf986!")
                        .targetInstanceCount(3)
                        .build())
                    .zookeeperNode(SparkClusterRolesZookeeperNodeArgs.builder()
                        .vmSize("Medium")
                        .username("acctestusrvm")
                        .password("AccTestvdSC4daf986!")
                        .build())
                    .build())
                .build());
    
        }
    }
    
    resources:
      example:
        type: azure:core:ResourceGroup
        properties:
          name: example-resources
          location: West Europe
      exampleAccount:
        type: azure:storage:Account
        name: example
        properties:
          name: hdinsightstor
          resourceGroupName: ${example.name}
          location: ${example.location}
          accountTier: Standard
          accountReplicationType: LRS
      exampleContainer:
        type: azure:storage:Container
        name: example
        properties:
          name: hdinsight
          storageAccountName: ${exampleAccount.name}
          containerAccessType: private
      exampleSparkCluster:
        type: azure:hdinsight:SparkCluster
        name: example
        properties:
          name: example-hdicluster
          resourceGroupName: ${example.name}
          location: ${example.location}
          clusterVersion: '3.6'
          tier: Standard
          componentVersion:
            spark: '2.3'
          gateway:
            username: acctestusrgw
            password: Password123!
          storageAccounts:
            - storageContainerId: ${exampleContainer.id}
              storageAccountKey: ${exampleAccount.primaryAccessKey}
              isDefault: true
          roles:
            headNode:
              vmSize: Standard_A3
              username: acctestusrvm
              password: AccTestvdSC4daf986!
            workerNode:
              vmSize: Standard_A3
              username: acctestusrvm
              password: AccTestvdSC4daf986!
              targetInstanceCount: 3
            zookeeperNode:
              vmSize: Medium
              username: acctestusrvm
              password: AccTestvdSC4daf986!
    

    Create SparkCluster Resource

    Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

    Constructor syntax

    new SparkCluster(name: string, args: SparkClusterArgs, opts?: CustomResourceOptions);
    @overload
    def SparkCluster(resource_name: str,
                     args: SparkClusterArgs,
                     opts: Optional[ResourceOptions] = None)
    
    @overload
    def SparkCluster(resource_name: str,
                     opts: Optional[ResourceOptions] = None,
                     gateway: Optional[SparkClusterGatewayArgs] = None,
                     component_version: Optional[SparkClusterComponentVersionArgs] = None,
                     tier: Optional[str] = None,
                     roles: Optional[SparkClusterRolesArgs] = None,
                     resource_group_name: Optional[str] = None,
                     cluster_version: Optional[str] = None,
                     name: Optional[str] = None,
                     encryption_in_transit_enabled: Optional[bool] = None,
                     metastores: Optional[SparkClusterMetastoresArgs] = None,
                     monitor: Optional[SparkClusterMonitorArgs] = None,
                     extension: Optional[SparkClusterExtensionArgs] = None,
                     network: Optional[SparkClusterNetworkArgs] = None,
                     private_link_configuration: Optional[SparkClusterPrivateLinkConfigurationArgs] = None,
                     location: Optional[str] = None,
                     disk_encryptions: Optional[Sequence[SparkClusterDiskEncryptionArgs]] = None,
                     security_profile: Optional[SparkClusterSecurityProfileArgs] = None,
                     storage_account_gen2: Optional[SparkClusterStorageAccountGen2Args] = None,
                     storage_accounts: Optional[Sequence[SparkClusterStorageAccountArgs]] = None,
                     tags: Optional[Mapping[str, str]] = None,
                     compute_isolation: Optional[SparkClusterComputeIsolationArgs] = None,
                     tls_min_version: Optional[str] = None)
    func NewSparkCluster(ctx *Context, name string, args SparkClusterArgs, opts ...ResourceOption) (*SparkCluster, error)
    public SparkCluster(string name, SparkClusterArgs args, CustomResourceOptions? opts = null)
    public SparkCluster(String name, SparkClusterArgs args)
    public SparkCluster(String name, SparkClusterArgs args, CustomResourceOptions options)
    
    type: azure:hdinsight:SparkCluster
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    

    Parameters

    name string
    The unique name of the resource.
    args SparkClusterArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    resource_name str
    The unique name of the resource.
    args SparkClusterArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args SparkClusterArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.
    name string
    The unique name of the resource.
    args SparkClusterArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    name String
    The unique name of the resource.
    args SparkClusterArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.

    Constructor example

    The following reference example uses placeholder values for all input properties.

    var sparkClusterResource = new Azure.HDInsight.SparkCluster("sparkClusterResource", new()
    {
        Gateway = new Azure.HDInsight.Inputs.SparkClusterGatewayArgs
        {
            Password = "string",
            Username = "string",
        },
        ComponentVersion = new Azure.HDInsight.Inputs.SparkClusterComponentVersionArgs
        {
            Spark = "string",
        },
        Tier = "string",
        Roles = new Azure.HDInsight.Inputs.SparkClusterRolesArgs
        {
            HeadNode = new Azure.HDInsight.Inputs.SparkClusterRolesHeadNodeArgs
            {
                Username = "string",
                VmSize = "string",
                Password = "string",
                ScriptActions = new[]
                {
                    new Azure.HDInsight.Inputs.SparkClusterRolesHeadNodeScriptActionArgs
                    {
                        Name = "string",
                        Uri = "string",
                        Parameters = "string",
                    },
                },
                SshKeys = new[]
                {
                    "string",
                },
                SubnetId = "string",
                VirtualNetworkId = "string",
            },
            WorkerNode = new Azure.HDInsight.Inputs.SparkClusterRolesWorkerNodeArgs
            {
                TargetInstanceCount = 0,
                Username = "string",
                VmSize = "string",
                Autoscale = new Azure.HDInsight.Inputs.SparkClusterRolesWorkerNodeAutoscaleArgs
                {
                    Capacity = new Azure.HDInsight.Inputs.SparkClusterRolesWorkerNodeAutoscaleCapacityArgs
                    {
                        MaxInstanceCount = 0,
                        MinInstanceCount = 0,
                    },
                    Recurrence = new Azure.HDInsight.Inputs.SparkClusterRolesWorkerNodeAutoscaleRecurrenceArgs
                    {
                        Schedules = new[]
                        {
                            new Azure.HDInsight.Inputs.SparkClusterRolesWorkerNodeAutoscaleRecurrenceScheduleArgs
                            {
                                Days = new[]
                                {
                                    "string",
                                },
                                TargetInstanceCount = 0,
                                Time = "string",
                            },
                        },
                        Timezone = "string",
                    },
                },
                Password = "string",
                ScriptActions = new[]
                {
                    new Azure.HDInsight.Inputs.SparkClusterRolesWorkerNodeScriptActionArgs
                    {
                        Name = "string",
                        Uri = "string",
                        Parameters = "string",
                    },
                },
                SshKeys = new[]
                {
                    "string",
                },
                SubnetId = "string",
                VirtualNetworkId = "string",
            },
            ZookeeperNode = new Azure.HDInsight.Inputs.SparkClusterRolesZookeeperNodeArgs
            {
                Username = "string",
                VmSize = "string",
                Password = "string",
                ScriptActions = new[]
                {
                    new Azure.HDInsight.Inputs.SparkClusterRolesZookeeperNodeScriptActionArgs
                    {
                        Name = "string",
                        Uri = "string",
                        Parameters = "string",
                    },
                },
                SshKeys = new[]
                {
                    "string",
                },
                SubnetId = "string",
                VirtualNetworkId = "string",
            },
        },
        ResourceGroupName = "string",
        ClusterVersion = "string",
        Name = "string",
        EncryptionInTransitEnabled = false,
        Metastores = new Azure.HDInsight.Inputs.SparkClusterMetastoresArgs
        {
            Ambari = new Azure.HDInsight.Inputs.SparkClusterMetastoresAmbariArgs
            {
                DatabaseName = "string",
                Password = "string",
                Server = "string",
                Username = "string",
            },
            Hive = new Azure.HDInsight.Inputs.SparkClusterMetastoresHiveArgs
            {
                DatabaseName = "string",
                Password = "string",
                Server = "string",
                Username = "string",
            },
            Oozie = new Azure.HDInsight.Inputs.SparkClusterMetastoresOozieArgs
            {
                DatabaseName = "string",
                Password = "string",
                Server = "string",
                Username = "string",
            },
        },
        Monitor = new Azure.HDInsight.Inputs.SparkClusterMonitorArgs
        {
            LogAnalyticsWorkspaceId = "string",
            PrimaryKey = "string",
        },
        Extension = new Azure.HDInsight.Inputs.SparkClusterExtensionArgs
        {
            LogAnalyticsWorkspaceId = "string",
            PrimaryKey = "string",
        },
        Network = new Azure.HDInsight.Inputs.SparkClusterNetworkArgs
        {
            ConnectionDirection = "string",
            PrivateLinkEnabled = false,
        },
        PrivateLinkConfiguration = new Azure.HDInsight.Inputs.SparkClusterPrivateLinkConfigurationArgs
        {
            GroupId = "string",
            IpConfiguration = new Azure.HDInsight.Inputs.SparkClusterPrivateLinkConfigurationIpConfigurationArgs
            {
                Name = "string",
                Primary = false,
                PrivateIpAddress = "string",
                PrivateIpAllocationMethod = "string",
                SubnetId = "string",
            },
            Name = "string",
        },
        Location = "string",
        DiskEncryptions = new[]
        {
            new Azure.HDInsight.Inputs.SparkClusterDiskEncryptionArgs
            {
                EncryptionAlgorithm = "string",
                EncryptionAtHostEnabled = false,
                KeyVaultKeyId = "string",
                KeyVaultManagedIdentityId = "string",
            },
        },
        SecurityProfile = new Azure.HDInsight.Inputs.SparkClusterSecurityProfileArgs
        {
            AaddsResourceId = "string",
            DomainName = "string",
            DomainUserPassword = "string",
            DomainUsername = "string",
            LdapsUrls = new[]
            {
                "string",
            },
            MsiResourceId = "string",
            ClusterUsersGroupDns = new[]
            {
                "string",
            },
        },
        StorageAccountGen2 = new Azure.HDInsight.Inputs.SparkClusterStorageAccountGen2Args
        {
            FilesystemId = "string",
            IsDefault = false,
            ManagedIdentityResourceId = "string",
            StorageResourceId = "string",
        },
        StorageAccounts = new[]
        {
            new Azure.HDInsight.Inputs.SparkClusterStorageAccountArgs
            {
                IsDefault = false,
                StorageAccountKey = "string",
                StorageContainerId = "string",
                StorageResourceId = "string",
            },
        },
        Tags = 
        {
            { "string", "string" },
        },
        ComputeIsolation = new Azure.HDInsight.Inputs.SparkClusterComputeIsolationArgs
        {
            ComputeIsolationEnabled = false,
            HostSku = "string",
        },
        TlsMinVersion = "string",
    });
    
    example, err := hdinsight.NewSparkCluster(ctx, "sparkClusterResource", &hdinsight.SparkClusterArgs{
    	Gateway: &hdinsight.SparkClusterGatewayArgs{
    		Password: pulumi.String("string"),
    		Username: pulumi.String("string"),
    	},
    	ComponentVersion: &hdinsight.SparkClusterComponentVersionArgs{
    		Spark: pulumi.String("string"),
    	},
    	Tier: pulumi.String("string"),
    	Roles: &hdinsight.SparkClusterRolesArgs{
    		HeadNode: &hdinsight.SparkClusterRolesHeadNodeArgs{
    			Username: pulumi.String("string"),
    			VmSize:   pulumi.String("string"),
    			Password: pulumi.String("string"),
    			ScriptActions: hdinsight.SparkClusterRolesHeadNodeScriptActionArray{
    				&hdinsight.SparkClusterRolesHeadNodeScriptActionArgs{
    					Name:       pulumi.String("string"),
    					Uri:        pulumi.String("string"),
    					Parameters: pulumi.String("string"),
    				},
    			},
    			SshKeys: pulumi.StringArray{
    				pulumi.String("string"),
    			},
    			SubnetId:         pulumi.String("string"),
    			VirtualNetworkId: pulumi.String("string"),
    		},
    		WorkerNode: &hdinsight.SparkClusterRolesWorkerNodeArgs{
    			TargetInstanceCount: pulumi.Int(0),
    			Username:            pulumi.String("string"),
    			VmSize:              pulumi.String("string"),
    			Autoscale: &hdinsight.SparkClusterRolesWorkerNodeAutoscaleArgs{
    				Capacity: &hdinsight.SparkClusterRolesWorkerNodeAutoscaleCapacityArgs{
    					MaxInstanceCount: pulumi.Int(0),
    					MinInstanceCount: pulumi.Int(0),
    				},
    				Recurrence: &hdinsight.SparkClusterRolesWorkerNodeAutoscaleRecurrenceArgs{
    					Schedules: hdinsight.SparkClusterRolesWorkerNodeAutoscaleRecurrenceScheduleArray{
    						&hdinsight.SparkClusterRolesWorkerNodeAutoscaleRecurrenceScheduleArgs{
    							Days: pulumi.StringArray{
    								pulumi.String("string"),
    							},
    							TargetInstanceCount: pulumi.Int(0),
    							Time:                pulumi.String("string"),
    						},
    					},
    					Timezone: pulumi.String("string"),
    				},
    			},
    			Password: pulumi.String("string"),
    			ScriptActions: hdinsight.SparkClusterRolesWorkerNodeScriptActionArray{
    				&hdinsight.SparkClusterRolesWorkerNodeScriptActionArgs{
    					Name:       pulumi.String("string"),
    					Uri:        pulumi.String("string"),
    					Parameters: pulumi.String("string"),
    				},
    			},
    			SshKeys: pulumi.StringArray{
    				pulumi.String("string"),
    			},
    			SubnetId:         pulumi.String("string"),
    			VirtualNetworkId: pulumi.String("string"),
    		},
    		ZookeeperNode: &hdinsight.SparkClusterRolesZookeeperNodeArgs{
    			Username: pulumi.String("string"),
    			VmSize:   pulumi.String("string"),
    			Password: pulumi.String("string"),
    			ScriptActions: hdinsight.SparkClusterRolesZookeeperNodeScriptActionArray{
    				&hdinsight.SparkClusterRolesZookeeperNodeScriptActionArgs{
    					Name:       pulumi.String("string"),
    					Uri:        pulumi.String("string"),
    					Parameters: pulumi.String("string"),
    				},
    			},
    			SshKeys: pulumi.StringArray{
    				pulumi.String("string"),
    			},
    			SubnetId:         pulumi.String("string"),
    			VirtualNetworkId: pulumi.String("string"),
    		},
    	},
    	ResourceGroupName:          pulumi.String("string"),
    	ClusterVersion:             pulumi.String("string"),
    	Name:                       pulumi.String("string"),
    	EncryptionInTransitEnabled: pulumi.Bool(false),
    	Metastores: &hdinsight.SparkClusterMetastoresArgs{
    		Ambari: &hdinsight.SparkClusterMetastoresAmbariArgs{
    			DatabaseName: pulumi.String("string"),
    			Password:     pulumi.String("string"),
    			Server:       pulumi.String("string"),
    			Username:     pulumi.String("string"),
    		},
    		Hive: &hdinsight.SparkClusterMetastoresHiveArgs{
    			DatabaseName: pulumi.String("string"),
    			Password:     pulumi.String("string"),
    			Server:       pulumi.String("string"),
    			Username:     pulumi.String("string"),
    		},
    		Oozie: &hdinsight.SparkClusterMetastoresOozieArgs{
    			DatabaseName: pulumi.String("string"),
    			Password:     pulumi.String("string"),
    			Server:       pulumi.String("string"),
    			Username:     pulumi.String("string"),
    		},
    	},
    	Monitor: &hdinsight.SparkClusterMonitorArgs{
    		LogAnalyticsWorkspaceId: pulumi.String("string"),
    		PrimaryKey:              pulumi.String("string"),
    	},
    	Extension: &hdinsight.SparkClusterExtensionArgs{
    		LogAnalyticsWorkspaceId: pulumi.String("string"),
    		PrimaryKey:              pulumi.String("string"),
    	},
    	Network: &hdinsight.SparkClusterNetworkArgs{
    		ConnectionDirection: pulumi.String("string"),
    		PrivateLinkEnabled:  pulumi.Bool(false),
    	},
    	PrivateLinkConfiguration: &hdinsight.SparkClusterPrivateLinkConfigurationArgs{
    		GroupId: pulumi.String("string"),
    		IpConfiguration: &hdinsight.SparkClusterPrivateLinkConfigurationIpConfigurationArgs{
    			Name:                      pulumi.String("string"),
    			Primary:                   pulumi.Bool(false),
    			PrivateIpAddress:          pulumi.String("string"),
    			PrivateIpAllocationMethod: pulumi.String("string"),
    			SubnetId:                  pulumi.String("string"),
    		},
    		Name: pulumi.String("string"),
    	},
    	Location: pulumi.String("string"),
    	DiskEncryptions: hdinsight.SparkClusterDiskEncryptionArray{
    		&hdinsight.SparkClusterDiskEncryptionArgs{
    			EncryptionAlgorithm:       pulumi.String("string"),
    			EncryptionAtHostEnabled:   pulumi.Bool(false),
    			KeyVaultKeyId:             pulumi.String("string"),
    			KeyVaultManagedIdentityId: pulumi.String("string"),
    		},
    	},
    	SecurityProfile: &hdinsight.SparkClusterSecurityProfileArgs{
    		AaddsResourceId:    pulumi.String("string"),
    		DomainName:         pulumi.String("string"),
    		DomainUserPassword: pulumi.String("string"),
    		DomainUsername:     pulumi.String("string"),
    		LdapsUrls: pulumi.StringArray{
    			pulumi.String("string"),
    		},
    		MsiResourceId: pulumi.String("string"),
    		ClusterUsersGroupDns: pulumi.StringArray{
    			pulumi.String("string"),
    		},
    	},
    	StorageAccountGen2: &hdinsight.SparkClusterStorageAccountGen2Args{
    		FilesystemId:              pulumi.String("string"),
    		IsDefault:                 pulumi.Bool(false),
    		ManagedIdentityResourceId: pulumi.String("string"),
    		StorageResourceId:         pulumi.String("string"),
    	},
    	StorageAccounts: hdinsight.SparkClusterStorageAccountArray{
    		&hdinsight.SparkClusterStorageAccountArgs{
    			IsDefault:          pulumi.Bool(false),
    			StorageAccountKey:  pulumi.String("string"),
    			StorageContainerId: pulumi.String("string"),
    			StorageResourceId:  pulumi.String("string"),
    		},
    	},
    	Tags: pulumi.StringMap{
    		"string": pulumi.String("string"),
    	},
    	ComputeIsolation: &hdinsight.SparkClusterComputeIsolationArgs{
    		ComputeIsolationEnabled: pulumi.Bool(false),
    		HostSku:                 pulumi.String("string"),
    	},
    	TlsMinVersion: pulumi.String("string"),
    })
    
    var sparkClusterResource = new SparkCluster("sparkClusterResource", SparkClusterArgs.builder()
        .gateway(SparkClusterGatewayArgs.builder()
            .password("string")
            .username("string")
            .build())
        .componentVersion(SparkClusterComponentVersionArgs.builder()
            .spark("string")
            .build())
        .tier("string")
        .roles(SparkClusterRolesArgs.builder()
            .headNode(SparkClusterRolesHeadNodeArgs.builder()
                .username("string")
                .vmSize("string")
                .password("string")
                .scriptActions(SparkClusterRolesHeadNodeScriptActionArgs.builder()
                    .name("string")
                    .uri("string")
                    .parameters("string")
                    .build())
                .sshKeys("string")
                .subnetId("string")
                .virtualNetworkId("string")
                .build())
            .workerNode(SparkClusterRolesWorkerNodeArgs.builder()
                .targetInstanceCount(0)
                .username("string")
                .vmSize("string")
                .autoscale(SparkClusterRolesWorkerNodeAutoscaleArgs.builder()
                    .capacity(SparkClusterRolesWorkerNodeAutoscaleCapacityArgs.builder()
                        .maxInstanceCount(0)
                        .minInstanceCount(0)
                        .build())
                    .recurrence(SparkClusterRolesWorkerNodeAutoscaleRecurrenceArgs.builder()
                        .schedules(SparkClusterRolesWorkerNodeAutoscaleRecurrenceScheduleArgs.builder()
                            .days("string")
                            .targetInstanceCount(0)
                            .time("string")
                            .build())
                        .timezone("string")
                        .build())
                    .build())
                .password("string")
                .scriptActions(SparkClusterRolesWorkerNodeScriptActionArgs.builder()
                    .name("string")
                    .uri("string")
                    .parameters("string")
                    .build())
                .sshKeys("string")
                .subnetId("string")
                .virtualNetworkId("string")
                .build())
            .zookeeperNode(SparkClusterRolesZookeeperNodeArgs.builder()
                .username("string")
                .vmSize("string")
                .password("string")
                .scriptActions(SparkClusterRolesZookeeperNodeScriptActionArgs.builder()
                    .name("string")
                    .uri("string")
                    .parameters("string")
                    .build())
                .sshKeys("string")
                .subnetId("string")
                .virtualNetworkId("string")
                .build())
            .build())
        .resourceGroupName("string")
        .clusterVersion("string")
        .name("string")
        .encryptionInTransitEnabled(false)
        .metastores(SparkClusterMetastoresArgs.builder()
            .ambari(SparkClusterMetastoresAmbariArgs.builder()
                .databaseName("string")
                .password("string")
                .server("string")
                .username("string")
                .build())
            .hive(SparkClusterMetastoresHiveArgs.builder()
                .databaseName("string")
                .password("string")
                .server("string")
                .username("string")
                .build())
            .oozie(SparkClusterMetastoresOozieArgs.builder()
                .databaseName("string")
                .password("string")
                .server("string")
                .username("string")
                .build())
            .build())
        .monitor(SparkClusterMonitorArgs.builder()
            .logAnalyticsWorkspaceId("string")
            .primaryKey("string")
            .build())
        .extension(SparkClusterExtensionArgs.builder()
            .logAnalyticsWorkspaceId("string")
            .primaryKey("string")
            .build())
        .network(SparkClusterNetworkArgs.builder()
            .connectionDirection("string")
            .privateLinkEnabled(false)
            .build())
        .privateLinkConfiguration(SparkClusterPrivateLinkConfigurationArgs.builder()
            .groupId("string")
            .ipConfiguration(SparkClusterPrivateLinkConfigurationIpConfigurationArgs.builder()
                .name("string")
                .primary(false)
                .privateIpAddress("string")
                .privateIpAllocationMethod("string")
                .subnetId("string")
                .build())
            .name("string")
            .build())
        .location("string")
        .diskEncryptions(SparkClusterDiskEncryptionArgs.builder()
            .encryptionAlgorithm("string")
            .encryptionAtHostEnabled(false)
            .keyVaultKeyId("string")
            .keyVaultManagedIdentityId("string")
            .build())
        .securityProfile(SparkClusterSecurityProfileArgs.builder()
            .aaddsResourceId("string")
            .domainName("string")
            .domainUserPassword("string")
            .domainUsername("string")
            .ldapsUrls("string")
            .msiResourceId("string")
            .clusterUsersGroupDns("string")
            .build())
        .storageAccountGen2(SparkClusterStorageAccountGen2Args.builder()
            .filesystemId("string")
            .isDefault(false)
            .managedIdentityResourceId("string")
            .storageResourceId("string")
            .build())
        .storageAccounts(SparkClusterStorageAccountArgs.builder()
            .isDefault(false)
            .storageAccountKey("string")
            .storageContainerId("string")
            .storageResourceId("string")
            .build())
        .tags(Map.of("string", "string"))
        .computeIsolation(SparkClusterComputeIsolationArgs.builder()
            .computeIsolationEnabled(false)
            .hostSku("string")
            .build())
        .tlsMinVersion("string")
        .build());
    
    spark_cluster_resource = azure.hdinsight.SparkCluster("sparkClusterResource",
        gateway=azure.hdinsight.SparkClusterGatewayArgs(
            password="string",
            username="string",
        ),
        component_version=azure.hdinsight.SparkClusterComponentVersionArgs(
            spark="string",
        ),
        tier="string",
        roles=azure.hdinsight.SparkClusterRolesArgs(
            head_node=azure.hdinsight.SparkClusterRolesHeadNodeArgs(
                username="string",
                vm_size="string",
                password="string",
                script_actions=[azure.hdinsight.SparkClusterRolesHeadNodeScriptActionArgs(
                    name="string",
                    uri="string",
                    parameters="string",
                )],
                ssh_keys=["string"],
                subnet_id="string",
                virtual_network_id="string",
            ),
            worker_node=azure.hdinsight.SparkClusterRolesWorkerNodeArgs(
                target_instance_count=0,
                username="string",
                vm_size="string",
                autoscale=azure.hdinsight.SparkClusterRolesWorkerNodeAutoscaleArgs(
                    capacity=azure.hdinsight.SparkClusterRolesWorkerNodeAutoscaleCapacityArgs(
                        max_instance_count=0,
                        min_instance_count=0,
                    ),
                    recurrence=azure.hdinsight.SparkClusterRolesWorkerNodeAutoscaleRecurrenceArgs(
                        schedules=[azure.hdinsight.SparkClusterRolesWorkerNodeAutoscaleRecurrenceScheduleArgs(
                            days=["string"],
                            target_instance_count=0,
                            time="string",
                        )],
                        timezone="string",
                    ),
                ),
                password="string",
                script_actions=[azure.hdinsight.SparkClusterRolesWorkerNodeScriptActionArgs(
                    name="string",
                    uri="string",
                    parameters="string",
                )],
                ssh_keys=["string"],
                subnet_id="string",
                virtual_network_id="string",
            ),
            zookeeper_node=azure.hdinsight.SparkClusterRolesZookeeperNodeArgs(
                username="string",
                vm_size="string",
                password="string",
                script_actions=[azure.hdinsight.SparkClusterRolesZookeeperNodeScriptActionArgs(
                    name="string",
                    uri="string",
                    parameters="string",
                )],
                ssh_keys=["string"],
                subnet_id="string",
                virtual_network_id="string",
            ),
        ),
        resource_group_name="string",
        cluster_version="string",
        name="string",
        encryption_in_transit_enabled=False,
        metastores=azure.hdinsight.SparkClusterMetastoresArgs(
            ambari=azure.hdinsight.SparkClusterMetastoresAmbariArgs(
                database_name="string",
                password="string",
                server="string",
                username="string",
            ),
            hive=azure.hdinsight.SparkClusterMetastoresHiveArgs(
                database_name="string",
                password="string",
                server="string",
                username="string",
            ),
            oozie=azure.hdinsight.SparkClusterMetastoresOozieArgs(
                database_name="string",
                password="string",
                server="string",
                username="string",
            ),
        ),
        monitor=azure.hdinsight.SparkClusterMonitorArgs(
            log_analytics_workspace_id="string",
            primary_key="string",
        ),
        extension=azure.hdinsight.SparkClusterExtensionArgs(
            log_analytics_workspace_id="string",
            primary_key="string",
        ),
        network=azure.hdinsight.SparkClusterNetworkArgs(
            connection_direction="string",
            private_link_enabled=False,
        ),
        private_link_configuration=azure.hdinsight.SparkClusterPrivateLinkConfigurationArgs(
            group_id="string",
            ip_configuration=azure.hdinsight.SparkClusterPrivateLinkConfigurationIpConfigurationArgs(
                name="string",
                primary=False,
                private_ip_address="string",
                private_ip_allocation_method="string",
                subnet_id="string",
            ),
            name="string",
        ),
        location="string",
        disk_encryptions=[azure.hdinsight.SparkClusterDiskEncryptionArgs(
            encryption_algorithm="string",
            encryption_at_host_enabled=False,
            key_vault_key_id="string",
            key_vault_managed_identity_id="string",
        )],
        security_profile=azure.hdinsight.SparkClusterSecurityProfileArgs(
            aadds_resource_id="string",
            domain_name="string",
            domain_user_password="string",
            domain_username="string",
            ldaps_urls=["string"],
            msi_resource_id="string",
            cluster_users_group_dns=["string"],
        ),
        storage_account_gen2=azure.hdinsight.SparkClusterStorageAccountGen2Args(
            filesystem_id="string",
            is_default=False,
            managed_identity_resource_id="string",
            storage_resource_id="string",
        ),
        storage_accounts=[azure.hdinsight.SparkClusterStorageAccountArgs(
            is_default=False,
            storage_account_key="string",
            storage_container_id="string",
            storage_resource_id="string",
        )],
        tags={
            "string": "string",
        },
        compute_isolation=azure.hdinsight.SparkClusterComputeIsolationArgs(
            compute_isolation_enabled=False,
            host_sku="string",
        ),
        tls_min_version="string")
    
    const sparkClusterResource = new azure.hdinsight.SparkCluster("sparkClusterResource", {
        gateway: {
            password: "string",
            username: "string",
        },
        componentVersion: {
            spark: "string",
        },
        tier: "string",
        roles: {
            headNode: {
                username: "string",
                vmSize: "string",
                password: "string",
                scriptActions: [{
                    name: "string",
                    uri: "string",
                    parameters: "string",
                }],
                sshKeys: ["string"],
                subnetId: "string",
                virtualNetworkId: "string",
            },
            workerNode: {
                targetInstanceCount: 0,
                username: "string",
                vmSize: "string",
                autoscale: {
                    capacity: {
                        maxInstanceCount: 0,
                        minInstanceCount: 0,
                    },
                    recurrence: {
                        schedules: [{
                            days: ["string"],
                            targetInstanceCount: 0,
                            time: "string",
                        }],
                        timezone: "string",
                    },
                },
                password: "string",
                scriptActions: [{
                    name: "string",
                    uri: "string",
                    parameters: "string",
                }],
                sshKeys: ["string"],
                subnetId: "string",
                virtualNetworkId: "string",
            },
            zookeeperNode: {
                username: "string",
                vmSize: "string",
                password: "string",
                scriptActions: [{
                    name: "string",
                    uri: "string",
                    parameters: "string",
                }],
                sshKeys: ["string"],
                subnetId: "string",
                virtualNetworkId: "string",
            },
        },
        resourceGroupName: "string",
        clusterVersion: "string",
        name: "string",
        encryptionInTransitEnabled: false,
        metastores: {
            ambari: {
                databaseName: "string",
                password: "string",
                server: "string",
                username: "string",
            },
            hive: {
                databaseName: "string",
                password: "string",
                server: "string",
                username: "string",
            },
            oozie: {
                databaseName: "string",
                password: "string",
                server: "string",
                username: "string",
            },
        },
        monitor: {
            logAnalyticsWorkspaceId: "string",
            primaryKey: "string",
        },
        extension: {
            logAnalyticsWorkspaceId: "string",
            primaryKey: "string",
        },
        network: {
            connectionDirection: "string",
            privateLinkEnabled: false,
        },
        privateLinkConfiguration: {
            groupId: "string",
            ipConfiguration: {
                name: "string",
                primary: false,
                privateIpAddress: "string",
                privateIpAllocationMethod: "string",
                subnetId: "string",
            },
            name: "string",
        },
        location: "string",
        diskEncryptions: [{
            encryptionAlgorithm: "string",
            encryptionAtHostEnabled: false,
            keyVaultKeyId: "string",
            keyVaultManagedIdentityId: "string",
        }],
        securityProfile: {
            aaddsResourceId: "string",
            domainName: "string",
            domainUserPassword: "string",
            domainUsername: "string",
            ldapsUrls: ["string"],
            msiResourceId: "string",
            clusterUsersGroupDns: ["string"],
        },
        storageAccountGen2: {
            filesystemId: "string",
            isDefault: false,
            managedIdentityResourceId: "string",
            storageResourceId: "string",
        },
        storageAccounts: [{
            isDefault: false,
            storageAccountKey: "string",
            storageContainerId: "string",
            storageResourceId: "string",
        }],
        tags: {
            string: "string",
        },
        computeIsolation: {
            computeIsolationEnabled: false,
            hostSku: "string",
        },
        tlsMinVersion: "string",
    });
    
    type: azure:hdinsight:SparkCluster
    properties:
        clusterVersion: string
        componentVersion:
            spark: string
        computeIsolation:
            computeIsolationEnabled: false
            hostSku: string
        diskEncryptions:
            - encryptionAlgorithm: string
              encryptionAtHostEnabled: false
              keyVaultKeyId: string
              keyVaultManagedIdentityId: string
        encryptionInTransitEnabled: false
        extension:
            logAnalyticsWorkspaceId: string
            primaryKey: string
        gateway:
            password: string
            username: string
        location: string
        metastores:
            ambari:
                databaseName: string
                password: string
                server: string
                username: string
            hive:
                databaseName: string
                password: string
                server: string
                username: string
            oozie:
                databaseName: string
                password: string
                server: string
                username: string
        monitor:
            logAnalyticsWorkspaceId: string
            primaryKey: string
        name: string
        network:
            connectionDirection: string
            privateLinkEnabled: false
        privateLinkConfiguration:
            groupId: string
            ipConfiguration:
                name: string
                primary: false
                privateIpAddress: string
                privateIpAllocationMethod: string
                subnetId: string
            name: string
        resourceGroupName: string
        roles:
            headNode:
                password: string
                scriptActions:
                    - name: string
                      parameters: string
                      uri: string
                sshKeys:
                    - string
                subnetId: string
                username: string
                virtualNetworkId: string
                vmSize: string
            workerNode:
                autoscale:
                    capacity:
                        maxInstanceCount: 0
                        minInstanceCount: 0
                    recurrence:
                        schedules:
                            - days:
                                - string
                              targetInstanceCount: 0
                              time: string
                        timezone: string
                password: string
                scriptActions:
                    - name: string
                      parameters: string
                      uri: string
                sshKeys:
                    - string
                subnetId: string
                targetInstanceCount: 0
                username: string
                virtualNetworkId: string
                vmSize: string
            zookeeperNode:
                password: string
                scriptActions:
                    - name: string
                      parameters: string
                      uri: string
                sshKeys:
                    - string
                subnetId: string
                username: string
                virtualNetworkId: string
                vmSize: string
        securityProfile:
            aaddsResourceId: string
            clusterUsersGroupDns:
                - string
            domainName: string
            domainUserPassword: string
            domainUsername: string
            ldapsUrls:
                - string
            msiResourceId: string
        storageAccountGen2:
            filesystemId: string
            isDefault: false
            managedIdentityResourceId: string
            storageResourceId: string
        storageAccounts:
            - isDefault: false
              storageAccountKey: string
              storageContainerId: string
              storageResourceId: string
        tags:
            string: string
        tier: string
        tlsMinVersion: string
    

    SparkCluster Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    The SparkCluster resource accepts the following input properties:

    ClusterVersion string
    Specifies the Version of HDInsight which should be used for this Cluster. Changing this forces a new resource to be created.
    ComponentVersion SparkClusterComponentVersion
    A component_version block as defined below.
    Gateway SparkClusterGateway
    A gateway block as defined below.
    ResourceGroupName string
    Specifies the name of the Resource Group in which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
    Roles SparkClusterRoles
    A roles block as defined below.
    Tier string
    Specifies the Tier which should be used for this HDInsight Spark Cluster. Possible values are Standard or Premium. Changing this forces a new resource to be created.
    ComputeIsolation SparkClusterComputeIsolation
    A compute_isolation block as defined below.
    DiskEncryptions List<SparkClusterDiskEncryption>
    One or more disk_encryption blocks as defined below.
    EncryptionInTransitEnabled bool
    Whether encryption in transit is enabled for this Cluster. Changing this forces a new resource to be created.
    Extension SparkClusterExtension
    An extension block as defined below.
    Location string
    Specifies the Azure Region in which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
    Metastores SparkClusterMetastores
    A metastores block as defined below.
    Monitor SparkClusterMonitor
    A monitor block as defined below.
    Name string
    Specifies the name for this HDInsight Spark Cluster. Changing this forces a new resource to be created.
    Network SparkClusterNetwork
    A network block as defined below.
    PrivateLinkConfiguration SparkClusterPrivateLinkConfiguration
    A private_link_configuration block as defined below.
    SecurityProfile SparkClusterSecurityProfile
    A security_profile block as defined below. Changing this forces a new resource to be created.
    StorageAccountGen2 SparkClusterStorageAccountGen2
    A storage_account_gen2 block as defined below.
    StorageAccounts List<SparkClusterStorageAccount>
    One or more storage_account blocks as defined below.
    Tags Dictionary<string, string>
    A map of Tags which should be assigned to this HDInsight Spark Cluster.
    TlsMinVersion string

    The minimum supported TLS version. Possible values are 1.0, 1.1 or 1.2. Changing this forces a new resource to be created.

    NOTE: Starting on June 30, 2020, Azure HDInsight will enforce TLS 1.2 or later versions for all HTTPS connections. For more information, see Azure HDInsight TLS 1.2 Enforcement.

    ClusterVersion string
    Specifies the Version of HDInsight which should be used for this Cluster. Changing this forces a new resource to be created.
    ComponentVersion SparkClusterComponentVersionArgs
    A component_version block as defined below.
    Gateway SparkClusterGatewayArgs
    A gateway block as defined below.
    ResourceGroupName string
    Specifies the name of the Resource Group in which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
    Roles SparkClusterRolesArgs
    A roles block as defined below.
    Tier string
    Specifies the Tier which should be used for this HDInsight Spark Cluster. Possible values are Standard or Premium. Changing this forces a new resource to be created.
    ComputeIsolation SparkClusterComputeIsolationArgs
    A compute_isolation block as defined below.
    DiskEncryptions []SparkClusterDiskEncryptionArgs
    One or more disk_encryption blocks as defined below.
    EncryptionInTransitEnabled bool
    Whether encryption in transit is enabled for this Cluster. Changing this forces a new resource to be created.
    Extension SparkClusterExtensionArgs
    An extension block as defined below.
    Location string
    Specifies the Azure Region in which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
    Metastores SparkClusterMetastoresArgs
    A metastores block as defined below.
    Monitor SparkClusterMonitorArgs
    A monitor block as defined below.
    Name string
    Specifies the name for this HDInsight Spark Cluster. Changing this forces a new resource to be created.
    Network SparkClusterNetworkArgs
    A network block as defined below.
    PrivateLinkConfiguration SparkClusterPrivateLinkConfigurationArgs
    A private_link_configuration block as defined below.
    SecurityProfile SparkClusterSecurityProfileArgs
    A security_profile block as defined below. Changing this forces a new resource to be created.
    StorageAccountGen2 SparkClusterStorageAccountGen2Args
    A storage_account_gen2 block as defined below.
    StorageAccounts []SparkClusterStorageAccountArgs
    One or more storage_account blocks as defined below.
    Tags map[string]string
    A map of Tags which should be assigned to this HDInsight Spark Cluster.
    TlsMinVersion string

    The minimum supported TLS version. Possible values are 1.0, 1.1 or 1.2. Changing this forces a new resource to be created.

    NOTE: Starting on June 30, 2020, Azure HDInsight will enforce TLS 1.2 or later versions for all HTTPS connections. For more information, see Azure HDInsight TLS 1.2 Enforcement.

    clusterVersion String
    Specifies the Version of HDInsight which should be used for this Cluster. Changing this forces a new resource to be created.
    componentVersion SparkClusterComponentVersion
    A component_version block as defined below.
    gateway SparkClusterGateway
    A gateway block as defined below.
    resourceGroupName String
    Specifies the name of the Resource Group in which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
    roles SparkClusterRoles
    A roles block as defined below.
    tier String
    Specifies the Tier which should be used for this HDInsight Spark Cluster. Possible values are Standard or Premium. Changing this forces a new resource to be created.
    computeIsolation SparkClusterComputeIsolation
    A compute_isolation block as defined below.
    diskEncryptions List<SparkClusterDiskEncryption>
    One or more disk_encryption blocks as defined below.
    encryptionInTransitEnabled Boolean
    Whether encryption in transit is enabled for this Cluster. Changing this forces a new resource to be created.
    extension SparkClusterExtension
    An extension block as defined below.
    location String
    Specifies the Azure Region in which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
    metastores SparkClusterMetastores
    A metastores block as defined below.
    monitor SparkClusterMonitor
    A monitor block as defined below.
    name String
    Specifies the name for this HDInsight Spark Cluster. Changing this forces a new resource to be created.
    network SparkClusterNetwork
    A network block as defined below.
    privateLinkConfiguration SparkClusterPrivateLinkConfiguration
    A private_link_configuration block as defined below.
    securityProfile SparkClusterSecurityProfile
    A security_profile block as defined below. Changing this forces a new resource to be created.
    storageAccountGen2 SparkClusterStorageAccountGen2
    A storage_account_gen2 block as defined below.
    storageAccounts List<SparkClusterStorageAccount>
    One or more storage_account blocks as defined below.
    tags Map<String,String>
    A map of Tags which should be assigned to this HDInsight Spark Cluster.
    tlsMinVersion String

    The minimum supported TLS version. Possible values are 1.0, 1.1 or 1.2. Changing this forces a new resource to be created.

    NOTE: Starting on June 30, 2020, Azure HDInsight will enforce TLS 1.2 or later versions for all HTTPS connections. For more information, see Azure HDInsight TLS 1.2 Enforcement.

    clusterVersion string
    Specifies the Version of HDInsight which should be used for this Cluster. Changing this forces a new resource to be created.
    componentVersion SparkClusterComponentVersion
    A component_version block as defined below.
    gateway SparkClusterGateway
    A gateway block as defined below.
    resourceGroupName string
    Specifies the name of the Resource Group in which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
    roles SparkClusterRoles
    A roles block as defined below.
    tier string
    Specifies the Tier which should be used for this HDInsight Spark Cluster. Possible values are Standard or Premium. Changing this forces a new resource to be created.
    computeIsolation SparkClusterComputeIsolation
    A compute_isolation block as defined below.
    diskEncryptions SparkClusterDiskEncryption[]
    One or more disk_encryption blocks as defined below.
    encryptionInTransitEnabled boolean
    Whether encryption in transit is enabled for this Cluster. Changing this forces a new resource to be created.
    extension SparkClusterExtension
    An extension block as defined below.
    location string
    Specifies the Azure Region in which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
    metastores SparkClusterMetastores
    A metastores block as defined below.
    monitor SparkClusterMonitor
    A monitor block as defined below.
    name string
    Specifies the name for this HDInsight Spark Cluster. Changing this forces a new resource to be created.
    network SparkClusterNetwork
    A network block as defined below.
    privateLinkConfiguration SparkClusterPrivateLinkConfiguration
    A private_link_configuration block as defined below.
    securityProfile SparkClusterSecurityProfile
    A security_profile block as defined below. Changing this forces a new resource to be created.
    storageAccountGen2 SparkClusterStorageAccountGen2
    A storage_account_gen2 block as defined below.
    storageAccounts SparkClusterStorageAccount[]
    One or more storage_account blocks as defined below.
    tags {[key: string]: string}
    A map of Tags which should be assigned to this HDInsight Spark Cluster.
    tlsMinVersion string

    The minimal supported TLS version. Possible values are 1.0, 1.1 or 1.2. Changing this forces a new resource to be created.

    NOTE: Starting on June 30, 2020, Azure HDInsight will enforce TLS 1.2 or later versions for all HTTPS connections. For more information, see Azure HDInsight TLS 1.2 Enforcement.

    cluster_version str
    Specifies the Version of HDInsight which should be used for this Cluster. Changing this forces a new resource to be created.
    component_version SparkClusterComponentVersionArgs
    A component_version block as defined below.
    gateway SparkClusterGatewayArgs
    A gateway block as defined below.
    resource_group_name str
    Specifies the name of the Resource Group in which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
    roles SparkClusterRolesArgs
    A roles block as defined below.
    tier str
    Specifies the Tier which should be used for this HDInsight Spark Cluster. Possible values are Standard or Premium. Changing this forces a new resource to be created.
    compute_isolation SparkClusterComputeIsolationArgs
    A compute_isolation block as defined below.
    disk_encryptions Sequence[SparkClusterDiskEncryptionArgs]
    One or more disk_encryption blocks as defined below.
    encryption_in_transit_enabled bool
    Whether encryption in transit is enabled for this Cluster. Changing this forces a new resource to be created.
    extension SparkClusterExtensionArgs
    An extension block as defined below.
    location str
    Specifies the Azure Region in which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
    metastores SparkClusterMetastoresArgs
    A metastores block as defined below.
    monitor SparkClusterMonitorArgs
    A monitor block as defined below.
    name str
    Specifies the name for this HDInsight Spark Cluster. Changing this forces a new resource to be created.
    network SparkClusterNetworkArgs
    A network block as defined below.
    private_link_configuration SparkClusterPrivateLinkConfigurationArgs
    A private_link_configuration block as defined below.
    security_profile SparkClusterSecurityProfileArgs
    A security_profile block as defined below. Changing this forces a new resource to be created.
    storage_account_gen2 SparkClusterStorageAccountGen2Args
    A storage_account_gen2 block as defined below.
    storage_accounts Sequence[SparkClusterStorageAccountArgs]
    One or more storage_account blocks as defined below.
    tags Mapping[str, str]
    A map of Tags which should be assigned to this HDInsight Spark Cluster.
    tls_min_version str

    The minimal supported TLS version. Possible values are 1.0, 1.1 or 1.2. Changing this forces a new resource to be created.

    NOTE: Starting on June 30, 2020, Azure HDInsight will enforce TLS 1.2 or later versions for all HTTPS connections. For more information, see Azure HDInsight TLS 1.2 Enforcement.

    clusterVersion String
    Specifies the Version of HDInsight which should be used for this Cluster. Changing this forces a new resource to be created.
    componentVersion Property Map
    A component_version block as defined below.
    gateway Property Map
    A gateway block as defined below.
    resourceGroupName String
    Specifies the name of the Resource Group in which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
    roles Property Map
    A roles block as defined below.
    tier String
    Specifies the Tier which should be used for this HDInsight Spark Cluster. Possible values are Standard or Premium. Changing this forces a new resource to be created.
    computeIsolation Property Map
    A compute_isolation block as defined below.
    diskEncryptions List<Property Map>
    One or more disk_encryption blocks as defined below.
    encryptionInTransitEnabled Boolean
    Whether encryption in transit is enabled for this Cluster. Changing this forces a new resource to be created.
    extension Property Map
    An extension block as defined below.
    location String
    Specifies the Azure Region in which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
    metastores Property Map
    A metastores block as defined below.
    monitor Property Map
    A monitor block as defined below.
    name String
    Specifies the name for this HDInsight Spark Cluster. Changing this forces a new resource to be created.
    network Property Map
    A network block as defined below.
    privateLinkConfiguration Property Map
    A private_link_configuration block as defined below.
    securityProfile Property Map
    A security_profile block as defined below. Changing this forces a new resource to be created.
    storageAccountGen2 Property Map
    A storage_account_gen2 block as defined below.
    storageAccounts List<Property Map>
    One or more storage_account blocks as defined below.
    tags Map<String>
    A map of Tags which should be assigned to this HDInsight Spark Cluster.
    tlsMinVersion String

    The minimal supported TLS version. Possible values are 1.0, 1.1 or 1.2. Changing this forces a new resource to be created.

    NOTE: Starting on June 30, 2020, Azure HDInsight will enforce TLS 1.2 or later versions for all HTTPS connections. For more information, see Azure HDInsight TLS 1.2 Enforcement.

    Outputs

    All input properties are implicitly available as output properties. Additionally, the SparkCluster resource produces the following output properties:

    HttpsEndpoint string
    The HTTPS Connectivity Endpoint for this HDInsight Spark Cluster.
    Id string
    The provider-assigned unique ID for this managed resource.
    SshEndpoint string
    The SSH Connectivity Endpoint for this HDInsight Spark Cluster.
    HttpsEndpoint string
    The HTTPS Connectivity Endpoint for this HDInsight Spark Cluster.
    Id string
    The provider-assigned unique ID for this managed resource.
    SshEndpoint string
    The SSH Connectivity Endpoint for this HDInsight Spark Cluster.
    httpsEndpoint String
    The HTTPS Connectivity Endpoint for this HDInsight Spark Cluster.
    id String
    The provider-assigned unique ID for this managed resource.
    sshEndpoint String
    The SSH Connectivity Endpoint for this HDInsight Spark Cluster.
    httpsEndpoint string
    The HTTPS Connectivity Endpoint for this HDInsight Spark Cluster.
    id string
    The provider-assigned unique ID for this managed resource.
    sshEndpoint string
    The SSH Connectivity Endpoint for this HDInsight Spark Cluster.
    https_endpoint str
    The HTTPS Connectivity Endpoint for this HDInsight Spark Cluster.
    id str
    The provider-assigned unique ID for this managed resource.
    ssh_endpoint str
    The SSH Connectivity Endpoint for this HDInsight Spark Cluster.
    httpsEndpoint String
    The HTTPS Connectivity Endpoint for this HDInsight Spark Cluster.
    id String
    The provider-assigned unique ID for this managed resource.
    sshEndpoint String
    The SSH Connectivity Endpoint for this HDInsight Spark Cluster.

    Look up Existing SparkCluster Resource

    Get an existing SparkCluster resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: SparkClusterState, opts?: CustomResourceOptions): SparkCluster
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            cluster_version: Optional[str] = None,
            component_version: Optional[SparkClusterComponentVersionArgs] = None,
            compute_isolation: Optional[SparkClusterComputeIsolationArgs] = None,
            disk_encryptions: Optional[Sequence[SparkClusterDiskEncryptionArgs]] = None,
            encryption_in_transit_enabled: Optional[bool] = None,
            extension: Optional[SparkClusterExtensionArgs] = None,
            gateway: Optional[SparkClusterGatewayArgs] = None,
            https_endpoint: Optional[str] = None,
            location: Optional[str] = None,
            metastores: Optional[SparkClusterMetastoresArgs] = None,
            monitor: Optional[SparkClusterMonitorArgs] = None,
            name: Optional[str] = None,
            network: Optional[SparkClusterNetworkArgs] = None,
            private_link_configuration: Optional[SparkClusterPrivateLinkConfigurationArgs] = None,
            resource_group_name: Optional[str] = None,
            roles: Optional[SparkClusterRolesArgs] = None,
            security_profile: Optional[SparkClusterSecurityProfileArgs] = None,
            ssh_endpoint: Optional[str] = None,
            storage_account_gen2: Optional[SparkClusterStorageAccountGen2Args] = None,
            storage_accounts: Optional[Sequence[SparkClusterStorageAccountArgs]] = None,
            tags: Optional[Mapping[str, str]] = None,
            tier: Optional[str] = None,
            tls_min_version: Optional[str] = None) -> SparkCluster
    func GetSparkCluster(ctx *Context, name string, id IDInput, state *SparkClusterState, opts ...ResourceOption) (*SparkCluster, error)
    public static SparkCluster Get(string name, Input<string> id, SparkClusterState? state, CustomResourceOptions? opts = null)
    public static SparkCluster get(String name, Output<String> id, SparkClusterState state, CustomResourceOptions options)
    Resource lookup is not supported in YAML
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    The following state arguments are supported:
    ClusterVersion string
    Specifies the Version of HDInsight which should be used for this Cluster. Changing this forces a new resource to be created.
    ComponentVersion SparkClusterComponentVersion
    A component_version block as defined below.
    ComputeIsolation SparkClusterComputeIsolation
    A compute_isolation block as defined below.
    DiskEncryptions List<SparkClusterDiskEncryption>
    One or more disk_encryption blocks as defined below.
    EncryptionInTransitEnabled bool
    Whether encryption in transit is enabled for this Cluster. Changing this forces a new resource to be created.
    Extension SparkClusterExtension
    An extension block as defined below.
    Gateway SparkClusterGateway
    A gateway block as defined below.
    HttpsEndpoint string
    The HTTPS Connectivity Endpoint for this HDInsight Spark Cluster.
    Location string
    Specifies the Azure Region in which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
    Metastores SparkClusterMetastores
    A metastores block as defined below.
    Monitor SparkClusterMonitor
    A monitor block as defined below.
    Name string
    Specifies the name for this HDInsight Spark Cluster. Changing this forces a new resource to be created.
    Network SparkClusterNetwork
    A network block as defined below.
    PrivateLinkConfiguration SparkClusterPrivateLinkConfiguration
    A private_link_configuration block as defined below.
    ResourceGroupName string
    Specifies the name of the Resource Group in which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
    Roles SparkClusterRoles
    A roles block as defined below.
    SecurityProfile SparkClusterSecurityProfile
    A security_profile block as defined below. Changing this forces a new resource to be created.
    SshEndpoint string
    The SSH Connectivity Endpoint for this HDInsight Spark Cluster.
    StorageAccountGen2 SparkClusterStorageAccountGen2
    A storage_account_gen2 block as defined below.
    StorageAccounts List<SparkClusterStorageAccount>
    One or more storage_account blocks as defined below.
    Tags Dictionary<string, string>
    A map of Tags which should be assigned to this HDInsight Spark Cluster.
    Tier string
    Specifies the Tier which should be used for this HDInsight Spark Cluster. Possible values are Standard or Premium. Changing this forces a new resource to be created.
    TlsMinVersion string

    The minimal supported TLS version. Possible values are 1.0, 1.1 or 1.2. Changing this forces a new resource to be created.

    NOTE: Starting on June 30, 2020, Azure HDInsight will enforce TLS 1.2 or later versions for all HTTPS connections. For more information, see Azure HDInsight TLS 1.2 Enforcement.

    ClusterVersion string
    Specifies the Version of HDInsight which should be used for this Cluster. Changing this forces a new resource to be created.
    ComponentVersion SparkClusterComponentVersionArgs
    A component_version block as defined below.
    ComputeIsolation SparkClusterComputeIsolationArgs
    A compute_isolation block as defined below.
    DiskEncryptions []SparkClusterDiskEncryptionArgs
    One or more disk_encryption blocks as defined below.
    EncryptionInTransitEnabled bool
    Whether encryption in transit is enabled for this Cluster. Changing this forces a new resource to be created.
    Extension SparkClusterExtensionArgs
    An extension block as defined below.
    Gateway SparkClusterGatewayArgs
    A gateway block as defined below.
    HttpsEndpoint string
    The HTTPS Connectivity Endpoint for this HDInsight Spark Cluster.
    Location string
    Specifies the Azure Region in which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
    Metastores SparkClusterMetastoresArgs
    A metastores block as defined below.
    Monitor SparkClusterMonitorArgs
    A monitor block as defined below.
    Name string
    Specifies the name for this HDInsight Spark Cluster. Changing this forces a new resource to be created.
    Network SparkClusterNetworkArgs
    A network block as defined below.
    PrivateLinkConfiguration SparkClusterPrivateLinkConfigurationArgs
    A private_link_configuration block as defined below.
    ResourceGroupName string
    Specifies the name of the Resource Group in which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
    Roles SparkClusterRolesArgs
    A roles block as defined below.
    SecurityProfile SparkClusterSecurityProfileArgs
    A security_profile block as defined below. Changing this forces a new resource to be created.
    SshEndpoint string
    The SSH Connectivity Endpoint for this HDInsight Spark Cluster.
    StorageAccountGen2 SparkClusterStorageAccountGen2Args
    A storage_account_gen2 block as defined below.
    StorageAccounts []SparkClusterStorageAccountArgs
    One or more storage_account blocks as defined below.
    Tags map[string]string
    A map of Tags which should be assigned to this HDInsight Spark Cluster.
    Tier string
    Specifies the Tier which should be used for this HDInsight Spark Cluster. Possible values are Standard or Premium. Changing this forces a new resource to be created.
    TlsMinVersion string

    The minimal supported TLS version. Possible values are 1.0, 1.1 or 1.2. Changing this forces a new resource to be created.

    NOTE: Starting on June 30, 2020, Azure HDInsight will enforce TLS 1.2 or later versions for all HTTPS connections. For more information, see Azure HDInsight TLS 1.2 Enforcement.

    clusterVersion String
    Specifies the Version of HDInsight which should be used for this Cluster. Changing this forces a new resource to be created.
    componentVersion SparkClusterComponentVersion
    A component_version block as defined below.
    computeIsolation SparkClusterComputeIsolation
    A compute_isolation block as defined below.
    diskEncryptions List<SparkClusterDiskEncryption>
    One or more disk_encryption blocks as defined below.
    encryptionInTransitEnabled Boolean
    Whether encryption in transit is enabled for this Cluster. Changing this forces a new resource to be created.
    extension SparkClusterExtension
    An extension block as defined below.
    gateway SparkClusterGateway
    A gateway block as defined below.
    httpsEndpoint String
    The HTTPS Connectivity Endpoint for this HDInsight Spark Cluster.
    location String
    Specifies the Azure Region in which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
    metastores SparkClusterMetastores
    A metastores block as defined below.
    monitor SparkClusterMonitor
    A monitor block as defined below.
    name String
    Specifies the name for this HDInsight Spark Cluster. Changing this forces a new resource to be created.
    network SparkClusterNetwork
    A network block as defined below.
    privateLinkConfiguration SparkClusterPrivateLinkConfiguration
    A private_link_configuration block as defined below.
    resourceGroupName String
    Specifies the name of the Resource Group in which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
    roles SparkClusterRoles
    A roles block as defined below.
    securityProfile SparkClusterSecurityProfile
    A security_profile block as defined below. Changing this forces a new resource to be created.
    sshEndpoint String
    The SSH Connectivity Endpoint for this HDInsight Spark Cluster.
    storageAccountGen2 SparkClusterStorageAccountGen2
    A storage_account_gen2 block as defined below.
    storageAccounts List<SparkClusterStorageAccount>
    One or more storage_account blocks as defined below.
    tags Map<String,String>
    A map of Tags which should be assigned to this HDInsight Spark Cluster.
    tier String
    Specifies the Tier which should be used for this HDInsight Spark Cluster. Possible values are Standard or Premium. Changing this forces a new resource to be created.
    tlsMinVersion String

    The minimal supported TLS version. Possible values are 1.0, 1.1 or 1.2. Changing this forces a new resource to be created.

    NOTE: Starting on June 30, 2020, Azure HDInsight will enforce TLS 1.2 or later versions for all HTTPS connections. For more information, see Azure HDInsight TLS 1.2 Enforcement.

    clusterVersion string
    Specifies the Version of HDInsight which should be used for this Cluster. Changing this forces a new resource to be created.
    componentVersion SparkClusterComponentVersion
    A component_version block as defined below.
    computeIsolation SparkClusterComputeIsolation
    A compute_isolation block as defined below.
    diskEncryptions SparkClusterDiskEncryption[]
    One or more disk_encryption blocks as defined below.
    encryptionInTransitEnabled boolean
    Whether encryption in transit is enabled for this Cluster. Changing this forces a new resource to be created.
    extension SparkClusterExtension
    An extension block as defined below.
    gateway SparkClusterGateway
    A gateway block as defined below.
    httpsEndpoint string
    The HTTPS Connectivity Endpoint for this HDInsight Spark Cluster.
    location string
    Specifies the Azure Region in which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
    metastores SparkClusterMetastores
    A metastores block as defined below.
    monitor SparkClusterMonitor
    A monitor block as defined below.
    name string
    Specifies the name for this HDInsight Spark Cluster. Changing this forces a new resource to be created.
    network SparkClusterNetwork
    A network block as defined below.
    privateLinkConfiguration SparkClusterPrivateLinkConfiguration
    A private_link_configuration block as defined below.
    resourceGroupName string
    Specifies the name of the Resource Group in which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
    roles SparkClusterRoles
    A roles block as defined below.
    securityProfile SparkClusterSecurityProfile
    A security_profile block as defined below. Changing this forces a new resource to be created.
    sshEndpoint string
    The SSH Connectivity Endpoint for this HDInsight Spark Cluster.
    storageAccountGen2 SparkClusterStorageAccountGen2
    A storage_account_gen2 block as defined below.
    storageAccounts SparkClusterStorageAccount[]
    One or more storage_account blocks as defined below.
    tags {[key: string]: string}
    A map of Tags which should be assigned to this HDInsight Spark Cluster.
    tier string
    Specifies the Tier which should be used for this HDInsight Spark Cluster. Possible values are Standard or Premium. Changing this forces a new resource to be created.
    tlsMinVersion string

    The minimal supported TLS version. Possible values are 1.0, 1.1 or 1.2. Changing this forces a new resource to be created.

    NOTE: Starting on June 30, 2020, Azure HDInsight will enforce TLS 1.2 or later versions for all HTTPS connections. For more information, see Azure HDInsight TLS 1.2 Enforcement.

    cluster_version str
    Specifies the Version of HDInsight which should be used for this Cluster. Changing this forces a new resource to be created.
    component_version SparkClusterComponentVersionArgs
    A component_version block as defined below.
    compute_isolation SparkClusterComputeIsolationArgs
    A compute_isolation block as defined below.
    disk_encryptions Sequence[SparkClusterDiskEncryptionArgs]
    One or more disk_encryption blocks as defined below.
    encryption_in_transit_enabled bool
    Whether encryption in transit is enabled for this Cluster. Changing this forces a new resource to be created.
    extension SparkClusterExtensionArgs
    An extension block as defined below.
    gateway SparkClusterGatewayArgs
    A gateway block as defined below.
    https_endpoint str
    The HTTPS Connectivity Endpoint for this HDInsight Spark Cluster.
    location str
    Specifies the Azure Region in which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
    metastores SparkClusterMetastoresArgs
    A metastores block as defined below.
    monitor SparkClusterMonitorArgs
    A monitor block as defined below.
    name str
    Specifies the name for this HDInsight Spark Cluster. Changing this forces a new resource to be created.
    network SparkClusterNetworkArgs
    A network block as defined below.
    private_link_configuration SparkClusterPrivateLinkConfigurationArgs
    A private_link_configuration block as defined below.
    resource_group_name str
    Specifies the name of the Resource Group in which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
    roles SparkClusterRolesArgs
    A roles block as defined below.
    security_profile SparkClusterSecurityProfileArgs
    A security_profile block as defined below. Changing this forces a new resource to be created.
    ssh_endpoint str
    The SSH Connectivity Endpoint for this HDInsight Spark Cluster.
    storage_account_gen2 SparkClusterStorageAccountGen2Args
    A storage_account_gen2 block as defined below.
    storage_accounts Sequence[SparkClusterStorageAccountArgs]
    One or more storage_account blocks as defined below.
    tags Mapping[str, str]
    A map of Tags which should be assigned to this HDInsight Spark Cluster.
    tier str
    Specifies the Tier which should be used for this HDInsight Spark Cluster. Possible values are Standard or Premium. Changing this forces a new resource to be created.
    tls_min_version str

    The minimal supported TLS version. Possible values are 1.0, 1.1 or 1.2. Changing this forces a new resource to be created.

    NOTE: Starting on June 30, 2020, Azure HDInsight will enforce TLS 1.2 or later versions for all HTTPS connections. For more information, see Azure HDInsight TLS 1.2 Enforcement.

    clusterVersion String
    Specifies the Version of HDInsight which should be used for this Cluster. Changing this forces a new resource to be created.
    componentVersion Property Map
    A component_version block as defined below.
    computeIsolation Property Map
    A compute_isolation block as defined below.
    diskEncryptions List<Property Map>
    One or more disk_encryption blocks as defined below.
    encryptionInTransitEnabled Boolean
    Whether encryption in transit is enabled for this Cluster. Changing this forces a new resource to be created.
    extension Property Map
    An extension block as defined below.
    gateway Property Map
    A gateway block as defined below.
    httpsEndpoint String
    The HTTPS Connectivity Endpoint for this HDInsight Spark Cluster.
    location String
    Specifies the Azure Region in which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
    metastores Property Map
    A metastores block as defined below.
    monitor Property Map
    A monitor block as defined below.
    name String
    Specifies the name for this HDInsight Spark Cluster. Changing this forces a new resource to be created.
    network Property Map
    A network block as defined below.
    privateLinkConfiguration Property Map
    A private_link_configuration block as defined below.
    resourceGroupName String
    Specifies the name of the Resource Group in which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
    roles Property Map
    A roles block as defined below.
    securityProfile Property Map
    A security_profile block as defined below. Changing this forces a new resource to be created.
    sshEndpoint String
    The SSH Connectivity Endpoint for this HDInsight Spark Cluster.
    storageAccountGen2 Property Map
    A storage_account_gen2 block as defined below.
    storageAccounts List<Property Map>
    One or more storage_account blocks as defined below.
    tags Map<String>
    A map of Tags which should be assigned to this HDInsight Spark Cluster.
    tier String
    Specifies the Tier which should be used for this HDInsight Spark Cluster. Possible values are Standard or Premium. Changing this forces a new resource to be created.
    tlsMinVersion String

    The minimal supported TLS version. Possible values are 1.0, 1.1 or 1.2. Changing this forces a new resource to be created.

    NOTE: Starting on June 30, 2020, Azure HDInsight will enforce TLS 1.2 or later versions for all HTTPS connections. For more information, see Azure HDInsight TLS 1.2 Enforcement.

    Supporting Types

    SparkClusterComponentVersion, SparkClusterComponentVersionArgs

    Spark string
    The version of Spark which should be used for this HDInsight Spark Cluster. Changing this forces a new resource to be created.
    Spark string
    The version of Spark which should be used for this HDInsight Spark Cluster. Changing this forces a new resource to be created.
    spark String
    The version of Spark which should be used for this HDInsight Spark Cluster. Changing this forces a new resource to be created.
    spark string
    The version of Spark which should be used for this HDInsight Spark Cluster. Changing this forces a new resource to be created.
    spark str
    The version of Spark which should be used for this HDInsight Spark Cluster. Changing this forces a new resource to be created.
    spark String
    The version of Spark which should be used for this HDInsight Spark Cluster. Changing this forces a new resource to be created.

    SparkClusterComputeIsolation, SparkClusterComputeIsolationArgs

    ComputeIsolationEnabled bool
    This field indicates whether to enable compute isolation or not. Possible values are true or false.
    HostSku string
    The name of the host SKU.
    ComputeIsolationEnabled bool
    This field indicates whether to enable compute isolation or not. Possible values are true or false.
    HostSku string
    The name of the host SKU.
    computeIsolationEnabled Boolean
    This field indicates whether to enable compute isolation or not. Possible values are true or false.
    hostSku String
    The name of the host SKU.
    computeIsolationEnabled boolean
    This field indicates whether to enable compute isolation or not. Possible values are true or false.
    hostSku string
    The name of the host SKU.
    compute_isolation_enabled bool
    This field indicates whether to enable compute isolation or not. Possible values are true or false.
    host_sku str
    The name of the host SKU.
    computeIsolationEnabled Boolean
    This field indicates whether to enable compute isolation or not. Possible values are true or false.
    hostSku String
    The name of the host SKU.

    SparkClusterDiskEncryption, SparkClusterDiskEncryptionArgs

    EncryptionAlgorithm string
    This is an algorithm identifier for encryption. Possible values are RSA1_5, RSA-OAEP, RSA-OAEP-256.
    EncryptionAtHostEnabled bool
    This is an indicator showing whether resource disk encryption is enabled.
    KeyVaultKeyId string
    The ID of the key vault key.
    KeyVaultManagedIdentityId string
    This is the resource ID of Managed Identity used to access the key vault.
    EncryptionAlgorithm string
    This is an algorithm identifier for encryption. Possible values are RSA1_5, RSA-OAEP, RSA-OAEP-256.
    EncryptionAtHostEnabled bool
    This is an indicator showing whether resource disk encryption is enabled.
    KeyVaultKeyId string
    The ID of the key vault key.
    KeyVaultManagedIdentityId string
    This is the resource ID of Managed Identity used to access the key vault.
    encryptionAlgorithm String
    This is an algorithm identifier for encryption. Possible values are RSA1_5, RSA-OAEP, RSA-OAEP-256.
    encryptionAtHostEnabled Boolean
    This is an indicator showing whether resource disk encryption is enabled.
    keyVaultKeyId String
    The ID of the key vault key.
    keyVaultManagedIdentityId String
    This is the resource ID of Managed Identity used to access the key vault.
    encryptionAlgorithm string
    This is an algorithm identifier for encryption. Possible values are RSA1_5, RSA-OAEP, RSA-OAEP-256.
    encryptionAtHostEnabled boolean
    This is an indicator showing whether resource disk encryption is enabled.
    keyVaultKeyId string
    The ID of the key vault key.
    keyVaultManagedIdentityId string
    This is the resource ID of Managed Identity used to access the key vault.
    encryption_algorithm str
    This is an algorithm identifier for encryption. Possible values are RSA1_5, RSA-OAEP, RSA-OAEP-256.
    encryption_at_host_enabled bool
    This is an indicator showing whether resource disk encryption is enabled.
    key_vault_key_id str
    The ID of the key vault key.
    key_vault_managed_identity_id str
    This is the resource ID of Managed Identity used to access the key vault.
    encryptionAlgorithm String
    This is an algorithm identifier for encryption. Possible values are RSA1_5, RSA-OAEP, RSA-OAEP-256.
    encryptionAtHostEnabled Boolean
    This is an indicator showing whether resource disk encryption is enabled.
    keyVaultKeyId String
    The ID of the key vault key.
    keyVaultManagedIdentityId String
    This is the resource ID of Managed Identity used to access the key vault.

    SparkClusterExtension, SparkClusterExtensionArgs

    LogAnalyticsWorkspaceId string
    The workspace ID of the log analytics extension.
    PrimaryKey string
    The workspace key of the log analytics extension.
    LogAnalyticsWorkspaceId string
    The workspace ID of the log analytics extension.
    PrimaryKey string
    The workspace key of the log analytics extension.
    logAnalyticsWorkspaceId String
    The workspace ID of the log analytics extension.
    primaryKey String
    The workspace key of the log analytics extension.
    logAnalyticsWorkspaceId string
    The workspace ID of the log analytics extension.
    primaryKey string
    The workspace key of the log analytics extension.
    log_analytics_workspace_id str
    The workspace ID of the log analytics extension.
    primary_key str
    The workspace key of the log analytics extension.
    logAnalyticsWorkspaceId String
    The workspace ID of the log analytics extension.
    primaryKey String
    The workspace key of the log analytics extension.

    SparkClusterGateway, SparkClusterGatewayArgs

    Password string

    The password used for the Ambari Portal.

    NOTE: This password must be different from the one used for the head_node, worker_node and zookeeper_node roles.

    Username string
    The username used for the Ambari Portal. Changing this forces a new resource to be created.
    Password string

    The password used for the Ambari Portal.

    NOTE: This password must be different from the one used for the head_node, worker_node and zookeeper_node roles.

    Username string
    The username used for the Ambari Portal. Changing this forces a new resource to be created.
    password String

    The password used for the Ambari Portal.

    NOTE: This password must be different from the one used for the head_node, worker_node and zookeeper_node roles.

    username String
    The username used for the Ambari Portal. Changing this forces a new resource to be created.
    password string

    The password used for the Ambari Portal.

    NOTE: This password must be different from the one used for the head_node, worker_node and zookeeper_node roles.

    username string
    The username used for the Ambari Portal. Changing this forces a new resource to be created.
    password str

    The password used for the Ambari Portal.

    NOTE: This password must be different from the one used for the head_node, worker_node and zookeeper_node roles.

    username str
    The username used for the Ambari Portal. Changing this forces a new resource to be created.
    password String

    The password used for the Ambari Portal.

    NOTE: This password must be different from the one used for the head_node, worker_node and zookeeper_node roles.

    username String
    The username used for the Ambari Portal. Changing this forces a new resource to be created.

    SparkClusterMetastores, SparkClusterMetastoresArgs

    Ambari SparkClusterMetastoresAmbari
    An ambari block as defined below.
    Hive SparkClusterMetastoresHive
    A hive block as defined below.
    Oozie SparkClusterMetastoresOozie
    An oozie block as defined below.
    Ambari SparkClusterMetastoresAmbari
    An ambari block as defined below.
    Hive SparkClusterMetastoresHive
    A hive block as defined below.
    Oozie SparkClusterMetastoresOozie
    An oozie block as defined below.
    ambari SparkClusterMetastoresAmbari
    An ambari block as defined below.
    hive SparkClusterMetastoresHive
    A hive block as defined below.
    oozie SparkClusterMetastoresOozie
    An oozie block as defined below.
    ambari SparkClusterMetastoresAmbari
    An ambari block as defined below.
    hive SparkClusterMetastoresHive
    A hive block as defined below.
    oozie SparkClusterMetastoresOozie
    An oozie block as defined below.
    ambari SparkClusterMetastoresAmbari
    An ambari block as defined below.
    hive SparkClusterMetastoresHive
    A hive block as defined below.
    oozie SparkClusterMetastoresOozie
    An oozie block as defined below.
    ambari Property Map
    An ambari block as defined below.
    hive Property Map
    A hive block as defined below.
    oozie Property Map
    An oozie block as defined below.

    SparkClusterMetastoresAmbari, SparkClusterMetastoresAmbariArgs

    DatabaseName string
    The external Ambari metastore's existing SQL database. Changing this forces a new resource to be created.
    Password string
    The external Ambari metastore's existing SQL server admin password. Changing this forces a new resource to be created.
    Server string
    The fully-qualified domain name (FQDN) of the SQL server to use for the external Ambari metastore. Changing this forces a new resource to be created.
    Username string
    The external Ambari metastore's existing SQL server admin username. Changing this forces a new resource to be created.
    DatabaseName string
    The external Ambari metastore's existing SQL database. Changing this forces a new resource to be created.
    Password string
    The external Ambari metastore's existing SQL server admin password. Changing this forces a new resource to be created.
    Server string
    The fully-qualified domain name (FQDN) of the SQL server to use for the external Ambari metastore. Changing this forces a new resource to be created.
    Username string
    The external Ambari metastore's existing SQL server admin username. Changing this forces a new resource to be created.
    databaseName String
    The external Ambari metastore's existing SQL database. Changing this forces a new resource to be created.
    password String
    The external Ambari metastore's existing SQL server admin password. Changing this forces a new resource to be created.
    server String
    The fully-qualified domain name (FQDN) of the SQL server to use for the external Ambari metastore. Changing this forces a new resource to be created.
    username String
    The external Ambari metastore's existing SQL server admin username. Changing this forces a new resource to be created.
    databaseName string
    The external Ambari metastore's existing SQL database. Changing this forces a new resource to be created.
    password string
    The external Ambari metastore's existing SQL server admin password. Changing this forces a new resource to be created.
    server string
    The fully-qualified domain name (FQDN) of the SQL server to use for the external Ambari metastore. Changing this forces a new resource to be created.
    username string
    The external Ambari metastore's existing SQL server admin username. Changing this forces a new resource to be created.
    database_name str
    The external Ambari metastore's existing SQL database. Changing this forces a new resource to be created.
    password str
    The external Ambari metastore's existing SQL server admin password. Changing this forces a new resource to be created.
    server str
    The fully-qualified domain name (FQDN) of the SQL server to use for the external Ambari metastore. Changing this forces a new resource to be created.
    username str
    The external Ambari metastore's existing SQL server admin username. Changing this forces a new resource to be created.
    databaseName String
    The external Ambari metastore's existing SQL database. Changing this forces a new resource to be created.
    password String
    The external Ambari metastore's existing SQL server admin password. Changing this forces a new resource to be created.
    server String
    The fully-qualified domain name (FQDN) of the SQL server to use for the external Ambari metastore. Changing this forces a new resource to be created.
    username String
    The external Ambari metastore's existing SQL server admin username. Changing this forces a new resource to be created.

    SparkClusterMetastoresHive, SparkClusterMetastoresHiveArgs

    DatabaseName string
    The external Hive metastore's existing SQL database. Changing this forces a new resource to be created.
    Password string
    The external Hive metastore's existing SQL server admin password. Changing this forces a new resource to be created.
    Server string
    The fully-qualified domain name (FQDN) of the SQL server to use for the external Hive metastore. Changing this forces a new resource to be created.
    Username string
    The external Hive metastore's existing SQL server admin username. Changing this forces a new resource to be created.
    DatabaseName string
    The external Hive metastore's existing SQL database. Changing this forces a new resource to be created.
    Password string
    The external Hive metastore's existing SQL server admin password. Changing this forces a new resource to be created.
    Server string
    The fully-qualified domain name (FQDN) of the SQL server to use for the external Hive metastore. Changing this forces a new resource to be created.
    Username string
    The external Hive metastore's existing SQL server admin username. Changing this forces a new resource to be created.
    databaseName String
    The external Hive metastore's existing SQL database. Changing this forces a new resource to be created.
    password String
    The external Hive metastore's existing SQL server admin password. Changing this forces a new resource to be created.
    server String
    The fully-qualified domain name (FQDN) of the SQL server to use for the external Hive metastore. Changing this forces a new resource to be created.
    username String
    The external Hive metastore's existing SQL server admin username. Changing this forces a new resource to be created.
    databaseName string
    The external Hive metastore's existing SQL database. Changing this forces a new resource to be created.
    password string
    The external Hive metastore's existing SQL server admin password. Changing this forces a new resource to be created.
    server string
    The fully-qualified domain name (FQDN) of the SQL server to use for the external Hive metastore. Changing this forces a new resource to be created.
    username string
    The external Hive metastore's existing SQL server admin username. Changing this forces a new resource to be created.
    database_name str
    The external Hive metastore's existing SQL database. Changing this forces a new resource to be created.
    password str
    The external Hive metastore's existing SQL server admin password. Changing this forces a new resource to be created.
    server str
    The fully-qualified domain name (FQDN) of the SQL server to use for the external Hive metastore. Changing this forces a new resource to be created.
    username str
    The external Hive metastore's existing SQL server admin username. Changing this forces a new resource to be created.
    databaseName String
    The external Hive metastore's existing SQL database. Changing this forces a new resource to be created.
    password String
    The external Hive metastore's existing SQL server admin password. Changing this forces a new resource to be created.
    server String
    The fully-qualified domain name (FQDN) of the SQL server to use for the external Hive metastore. Changing this forces a new resource to be created.
    username String
    The external Hive metastore's existing SQL server admin username. Changing this forces a new resource to be created.

    SparkClusterMetastoresOozie, SparkClusterMetastoresOozieArgs

    DatabaseName string
    The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created.
    Password string
    The external Oozie metastore's existing SQL server admin password. Changing this forces a new resource to be created.
    Server string
    The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created.
    Username string
    The external Oozie metastore's existing SQL server admin username. Changing this forces a new resource to be created.
    DatabaseName string
    The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created.
    Password string
    The external Oozie metastore's existing SQL server admin password. Changing this forces a new resource to be created.
    Server string
    The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created.
    Username string
    The external Oozie metastore's existing SQL server admin username. Changing this forces a new resource to be created.
    databaseName String
    The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created.
    password String
    The external Oozie metastore's existing SQL server admin password. Changing this forces a new resource to be created.
    server String
    The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created.
    username String
    The external Oozie metastore's existing SQL server admin username. Changing this forces a new resource to be created.
    databaseName string
    The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created.
    password string
    The external Oozie metastore's existing SQL server admin password. Changing this forces a new resource to be created.
    server string
    The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created.
    username string
    The external Oozie metastore's existing SQL server admin username. Changing this forces a new resource to be created.
    database_name str
    The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created.
    password str
    The external Oozie metastore's existing SQL server admin password. Changing this forces a new resource to be created.
    server str
    The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created.
    username str
    The external Oozie metastore's existing SQL server admin username. Changing this forces a new resource to be created.
    databaseName String
    The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created.
    password String
    The external Oozie metastore's existing SQL server admin password. Changing this forces a new resource to be created.
    server String
    The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created.
    username String
    The external Oozie metastore's existing SQL server admin username. Changing this forces a new resource to be created.

    SparkClusterMonitor, SparkClusterMonitorArgs

    LogAnalyticsWorkspaceId string
    The Operations Management Suite (OMS) workspace ID.
    PrimaryKey string
    The Operations Management Suite (OMS) workspace key.
    LogAnalyticsWorkspaceId string
    The Operations Management Suite (OMS) workspace ID.
    PrimaryKey string
    The Operations Management Suite (OMS) workspace key.
    logAnalyticsWorkspaceId String
    The Operations Management Suite (OMS) workspace ID.
    primaryKey String
    The Operations Management Suite (OMS) workspace key.
    logAnalyticsWorkspaceId string
    The Operations Management Suite (OMS) workspace ID.
    primaryKey string
    The Operations Management Suite (OMS) workspace key.
    log_analytics_workspace_id str
    The Operations Management Suite (OMS) workspace ID.
    primary_key str
    The Operations Management Suite (OMS) workspace key.
    logAnalyticsWorkspaceId String
    The Operations Management Suite (OMS) workspace ID.
    primaryKey String
    The Operations Management Suite (OMS) workspace key.

    SparkClusterNetwork, SparkClusterNetworkArgs

    ConnectionDirection string

    The direction of the resource provider connection. Possible values include Inbound or Outbound. Defaults to Inbound. Changing this forces a new resource to be created.

    NOTE: To enable the private link, the connection_direction must be set to Outbound.

    PrivateLinkEnabled bool
    Is the private link enabled? Possible values include true or false. Defaults to false. Changing this forces a new resource to be created.
    ConnectionDirection string

    The direction of the resource provider connection. Possible values include Inbound or Outbound. Defaults to Inbound. Changing this forces a new resource to be created.

    NOTE: To enable the private link, the connection_direction must be set to Outbound.

    PrivateLinkEnabled bool
    Is the private link enabled? Possible values include true or false. Defaults to false. Changing this forces a new resource to be created.
    connectionDirection String

    The direction of the resource provider connection. Possible values include Inbound or Outbound. Defaults to Inbound. Changing this forces a new resource to be created.

    NOTE: To enable the private link, the connection_direction must be set to Outbound.

    privateLinkEnabled Boolean
    Is the private link enabled? Possible values include true or false. Defaults to false. Changing this forces a new resource to be created.
    connectionDirection string

    The direction of the resource provider connection. Possible values include Inbound or Outbound. Defaults to Inbound. Changing this forces a new resource to be created.

    NOTE: To enable the private link, the connection_direction must be set to Outbound.

    privateLinkEnabled boolean
    Is the private link enabled? Possible values include true or false. Defaults to false. Changing this forces a new resource to be created.
    connection_direction str

    The direction of the resource provider connection. Possible values include Inbound or Outbound. Defaults to Inbound. Changing this forces a new resource to be created.

    NOTE: To enable the private link, the connection_direction must be set to Outbound.

    private_link_enabled bool
    Is the private link enabled? Possible values include true or false. Defaults to false. Changing this forces a new resource to be created.
    connectionDirection String

    The direction of the resource provider connection. Possible values include Inbound or Outbound. Defaults to Inbound. Changing this forces a new resource to be created.

    NOTE: To enable the private link, the connection_direction must be set to Outbound.

    privateLinkEnabled Boolean
    Is the private link enabled? Possible values include true or false. Defaults to false. Changing this forces a new resource to be created.

    SparkClusterPrivateLinkConfiguration, SparkClusterPrivateLinkConfigurationArgs

    GroupId string
    The ID of the private link service group.
    IpConfiguration SparkClusterPrivateLinkConfigurationIpConfiguration
    Name string
    The name of the private link configuration.
    GroupId string
    The ID of the private link service group.
    IpConfiguration SparkClusterPrivateLinkConfigurationIpConfiguration
    Name string
    The name of the private link configuration.
    groupId String
    The ID of the private link service group.
    ipConfiguration SparkClusterPrivateLinkConfigurationIpConfiguration
    name String
    The name of the private link configuration.
    groupId string
    The ID of the private link service group.
    ipConfiguration SparkClusterPrivateLinkConfigurationIpConfiguration
    name string
    The name of the private link configuration.
    group_id str
    The ID of the private link service group.
    ip_configuration SparkClusterPrivateLinkConfigurationIpConfiguration
    name str
    The name of the private link configuration.
    groupId String
    The ID of the private link service group.
    ipConfiguration Property Map
    name String
    The name of the private link configuration.

    SparkClusterPrivateLinkConfigurationIpConfiguration, SparkClusterPrivateLinkConfigurationIpConfigurationArgs

    Name string
    Specifies the name for this HDInsight Spark Cluster. Changing this forces a new resource to be created.
    Primary bool
    PrivateIpAddress string
    PrivateIpAllocationMethod string
    SubnetId string
    Name string
    Specifies the name for this HDInsight Spark Cluster. Changing this forces a new resource to be created.
    Primary bool
    PrivateIpAddress string
    PrivateIpAllocationMethod string
    SubnetId string
    name String
    Specifies the name for this HDInsight Spark Cluster. Changing this forces a new resource to be created.
    primary Boolean
    privateIpAddress String
    privateIpAllocationMethod String
    subnetId String
    name string
    Specifies the name for this HDInsight Spark Cluster. Changing this forces a new resource to be created.
    primary boolean
    privateIpAddress string
    privateIpAllocationMethod string
    subnetId string
    name str
    Specifies the name for this HDInsight Spark Cluster. Changing this forces a new resource to be created.
    primary bool
    private_ip_address str
    private_ip_allocation_method str
    subnet_id str
    name String
    Specifies the name for this HDInsight Spark Cluster. Changing this forces a new resource to be created.
    primary Boolean
    privateIpAddress String
    privateIpAllocationMethod String
    subnetId String

    SparkClusterRoles, SparkClusterRolesArgs

    HeadNode SparkClusterRolesHeadNode
    A head_node block as defined above.
    WorkerNode SparkClusterRolesWorkerNode
    A worker_node block as defined below.
    ZookeeperNode SparkClusterRolesZookeeperNode
    A zookeeper_node block as defined below.
    HeadNode SparkClusterRolesHeadNode
    A head_node block as defined above.
    WorkerNode SparkClusterRolesWorkerNode
    A worker_node block as defined below.
    ZookeeperNode SparkClusterRolesZookeeperNode
    A zookeeper_node block as defined below.
    headNode SparkClusterRolesHeadNode
    A head_node block as defined above.
    workerNode SparkClusterRolesWorkerNode
    A worker_node block as defined below.
    zookeeperNode SparkClusterRolesZookeeperNode
    A zookeeper_node block as defined below.
    headNode SparkClusterRolesHeadNode
    A head_node block as defined above.
    workerNode SparkClusterRolesWorkerNode
    A worker_node block as defined below.
    zookeeperNode SparkClusterRolesZookeeperNode
    A zookeeper_node block as defined below.
    head_node SparkClusterRolesHeadNode
    A head_node block as defined above.
    worker_node SparkClusterRolesWorkerNode
    A worker_node block as defined below.
    zookeeper_node SparkClusterRolesZookeeperNode
    A zookeeper_node block as defined below.
    headNode Property Map
    A head_node block as defined above.
    workerNode Property Map
    A worker_node block as defined below.
    zookeeperNode Property Map
    A zookeeper_node block as defined below.

    SparkClusterRolesHeadNode, SparkClusterRolesHeadNodeArgs

    Username string
    The Username of the local administrator for the Head Nodes. Changing this forces a new resource to be created.
    VmSize string
    The Size of the Virtual Machine which should be used as the Head Nodes. Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created.
    Password string

    The Password associated with the local administrator for the Head Nodes. Changing this forces a new resource to be created.

    NOTE: If specified, this password must be at least 10 characters in length and must contain at least one digit, one uppercase and one lower case letter, one non-alphanumeric character (except characters ' " ` ).

    ScriptActions List<SparkClusterRolesHeadNodeScriptAction>
    The script action which will run on the cluster. One or more script_actions blocks as defined below.
    SshKeys List<string>

    A list of SSH Keys which should be used for the local administrator on the Head Nodes. Changing this forces a new resource to be created.

    NOTE: Either a password or one or more ssh_keys must be specified - but not both.

    SubnetId string
    The ID of the Subnet within the Virtual Network where the Head Nodes should be provisioned within. Changing this forces a new resource to be created.
    VirtualNetworkId string
    The ID of the Virtual Network where the Head Nodes should be provisioned within. Changing this forces a new resource to be created.
    Username string
    The Username of the local administrator for the Head Nodes. Changing this forces a new resource to be created.
    VmSize string
    The Size of the Virtual Machine which should be used as the Head Nodes. Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created.
    Password string

    The Password associated with the local administrator for the Head Nodes. Changing this forces a new resource to be created.

    NOTE: If specified, this password must be at least 10 characters in length and must contain at least one digit, one uppercase and one lower case letter, one non-alphanumeric character (except characters ' " ` ).

    ScriptActions []SparkClusterRolesHeadNodeScriptAction
    The script action which will run on the cluster. One or more script_actions blocks as defined below.
    SshKeys []string

    A list of SSH Keys which should be used for the local administrator on the Head Nodes. Changing this forces a new resource to be created.

    NOTE: Either a password or one or more ssh_keys must be specified - but not both.

    SubnetId string
    The ID of the Subnet within the Virtual Network where the Head Nodes should be provisioned within. Changing this forces a new resource to be created.
    VirtualNetworkId string
    The ID of the Virtual Network where the Head Nodes should be provisioned within. Changing this forces a new resource to be created.
    username String
    The Username of the local administrator for the Head Nodes. Changing this forces a new resource to be created.
    vmSize String
    The Size of the Virtual Machine which should be used as the Head Nodes. Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created.
    password String

    The Password associated with the local administrator for the Head Nodes. Changing this forces a new resource to be created.

    NOTE: If specified, this password must be at least 10 characters in length and must contain at least one digit, one uppercase and one lower case letter, one non-alphanumeric character (except characters ' " ` ).

    scriptActions List<SparkClusterRolesHeadNodeScriptAction>
    The script action which will run on the cluster. One or more script_actions blocks as defined below.
    sshKeys List<String>

    A list of SSH Keys which should be used for the local administrator on the Head Nodes. Changing this forces a new resource to be created.

    NOTE: Either a password or one or more ssh_keys must be specified - but not both.

    subnetId String
    The ID of the Subnet within the Virtual Network where the Head Nodes should be provisioned within. Changing this forces a new resource to be created.
    virtualNetworkId String
    The ID of the Virtual Network where the Head Nodes should be provisioned within. Changing this forces a new resource to be created.
    username string
    The Username of the local administrator for the Head Nodes. Changing this forces a new resource to be created.
    vmSize string
    The Size of the Virtual Machine which should be used as the Head Nodes. Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created.
    password string

    The Password associated with the local administrator for the Head Nodes. Changing this forces a new resource to be created.

    NOTE: If specified, this password must be at least 10 characters in length and must contain at least one digit, one uppercase and one lower case letter, one non-alphanumeric character (except characters ' " ` ).

    scriptActions SparkClusterRolesHeadNodeScriptAction[]
    The script action which will run on the cluster. One or more script_actions blocks as defined below.
    sshKeys string[]

    A list of SSH Keys which should be used for the local administrator on the Head Nodes. Changing this forces a new resource to be created.

    NOTE: Either a password or one or more ssh_keys must be specified - but not both.

    subnetId string
    The ID of the Subnet within the Virtual Network where the Head Nodes should be provisioned within. Changing this forces a new resource to be created.
    virtualNetworkId string
    The ID of the Virtual Network where the Head Nodes should be provisioned within. Changing this forces a new resource to be created.
    username str
    The Username of the local administrator for the Head Nodes. Changing this forces a new resource to be created.
    vm_size str
    The Size of the Virtual Machine which should be used as the Head Nodes. Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created.
    password str

    The Password associated with the local administrator for the Head Nodes. Changing this forces a new resource to be created.

    NOTE: If specified, this password must be at least 10 characters in length and must contain at least one digit, one uppercase and one lower case letter, one non-alphanumeric character (except characters ' " ` ).

    script_actions Sequence[SparkClusterRolesHeadNodeScriptAction]
    The script action which will run on the cluster. One or more script_actions blocks as defined below.
    ssh_keys Sequence[str]

    A list of SSH Keys which should be used for the local administrator on the Head Nodes. Changing this forces a new resource to be created.

    NOTE: Either a password or one or more ssh_keys must be specified - but not both.

    subnet_id str
    The ID of the Subnet within the Virtual Network where the Head Nodes should be provisioned within. Changing this forces a new resource to be created.
    virtual_network_id str
    The ID of the Virtual Network where the Head Nodes should be provisioned within. Changing this forces a new resource to be created.
    username String
    The Username of the local administrator for the Head Nodes. Changing this forces a new resource to be created.
    vmSize String
    The Size of the Virtual Machine which should be used as the Head Nodes. Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created.
    password String

    The Password associated with the local administrator for the Head Nodes. Changing this forces a new resource to be created.

    NOTE: If specified, this password must be at least 10 characters in length and must contain at least one digit, one uppercase and one lower case letter, one non-alphanumeric character (except characters ' " ` ).

    scriptActions List<Property Map>
    The script action which will run on the cluster. One or more script_actions blocks as defined below.
    sshKeys List<String>

    A list of SSH Keys which should be used for the local administrator on the Head Nodes. Changing this forces a new resource to be created.

    NOTE: Either a password or one or more ssh_keys must be specified - but not both.

    subnetId String
    The ID of the Subnet within the Virtual Network where the Head Nodes should be provisioned within. Changing this forces a new resource to be created.
    virtualNetworkId String
    The ID of the Virtual Network where the Head Nodes should be provisioned within. Changing this forces a new resource to be created.

    SparkClusterRolesHeadNodeScriptAction, SparkClusterRolesHeadNodeScriptActionArgs

    Name string
    The name of the script action.
    Uri string
    The URI to the script.
    Parameters string
    The parameters for the script provided.
    Name string
    The name of the script action.
    Uri string
    The URI to the script.
    Parameters string
    The parameters for the script provided.
    name String
    The name of the script action.
    uri String
    The URI to the script.
    parameters String
    The parameters for the script provided.
    name string
    The name of the script action.
    uri string
    The URI to the script.
    parameters string
    The parameters for the script provided.
    name str
    The name of the script action.
    uri str
    The URI to the script.
    parameters str
    The parameters for the script provided.
    name String
    The name of the script action.
    uri String
    The URI to the script.
    parameters String
    The parameters for the script provided.

    SparkClusterRolesWorkerNode, SparkClusterRolesWorkerNodeArgs

    TargetInstanceCount int
    The number of instances which should be run for the Worker Nodes.
    Username string
    The Username of the local administrator for the Worker Nodes. Changing this forces a new resource to be created.
    VmSize string
    The Size of the Virtual Machine which should be used as the Worker Nodes. Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created.
    Autoscale SparkClusterRolesWorkerNodeAutoscale
    An autoscale block as defined below.
    Password string

    The Password associated with the local administrator for the Worker Nodes. Changing this forces a new resource to be created.

    NOTE: If specified, this password must be at least 10 characters in length and must contain at least one digit, one uppercase and one lower case letter, one non-alphanumeric character (except characters ' " ` ).

    ScriptActions List<SparkClusterRolesWorkerNodeScriptAction>
    The script action which will run on the cluster. One or more script_actions blocks as defined above.
    SshKeys List<string>

    A list of SSH Keys which should be used for the local administrator on the Worker Nodes. Changing this forces a new resource to be created.

    NOTE: Either a password or one or more ssh_keys must be specified - but not both.

    SubnetId string
    The ID of the Subnet within the Virtual Network where the Worker Nodes should be provisioned within. Changing this forces a new resource to be created.
    VirtualNetworkId string
    The ID of the Virtual Network where the Worker Nodes should be provisioned within. Changing this forces a new resource to be created.
    TargetInstanceCount int
    The number of instances which should be run for the Worker Nodes.
    Username string
    The Username of the local administrator for the Worker Nodes. Changing this forces a new resource to be created.
    VmSize string
    The Size of the Virtual Machine which should be used as the Worker Nodes. Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created.
    Autoscale SparkClusterRolesWorkerNodeAutoscale
    An autoscale block as defined below.
    Password string

    The Password associated with the local administrator for the Worker Nodes. Changing this forces a new resource to be created.

    NOTE: If specified, this password must be at least 10 characters in length and must contain at least one digit, one uppercase and one lower case letter, one non-alphanumeric character (except characters ' " ` ).

    ScriptActions []SparkClusterRolesWorkerNodeScriptAction
    The script action which will run on the cluster. One or more script_actions blocks as defined above.
    SshKeys []string

    A list of SSH Keys which should be used for the local administrator on the Worker Nodes. Changing this forces a new resource to be created.

    NOTE: Either a password or one or more ssh_keys must be specified - but not both.

    SubnetId string
    The ID of the Subnet within the Virtual Network where the Worker Nodes should be provisioned within. Changing this forces a new resource to be created.
    VirtualNetworkId string
    The ID of the Virtual Network where the Worker Nodes should be provisioned within. Changing this forces a new resource to be created.
    targetInstanceCount Integer
    The number of instances which should be run for the Worker Nodes.
    username String
    The Username of the local administrator for the Worker Nodes. Changing this forces a new resource to be created.
    vmSize String
    The Size of the Virtual Machine which should be used as the Worker Nodes. Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created.
    autoscale SparkClusterRolesWorkerNodeAutoscale
    An autoscale block as defined below.
    password String

    The Password associated with the local administrator for the Worker Nodes. Changing this forces a new resource to be created.

    NOTE: If specified, this password must be at least 10 characters in length and must contain at least one digit, one uppercase and one lower case letter, one non-alphanumeric character (except characters ' " ` ).

    scriptActions List<SparkClusterRolesWorkerNodeScriptAction>
    The script action which will run on the cluster. One or more script_actions blocks as defined above.
    sshKeys List<String>

    A list of SSH Keys which should be used for the local administrator on the Worker Nodes. Changing this forces a new resource to be created.

    NOTE: Either a password or one or more ssh_keys must be specified - but not both.

    subnetId String
    The ID of the Subnet within the Virtual Network where the Worker Nodes should be provisioned within. Changing this forces a new resource to be created.
    virtualNetworkId String
    The ID of the Virtual Network where the Worker Nodes should be provisioned within. Changing this forces a new resource to be created.
    targetInstanceCount number
    The number of instances which should be run for the Worker Nodes.
    username string
    The Username of the local administrator for the Worker Nodes. Changing this forces a new resource to be created.
    vmSize string
    The Size of the Virtual Machine which should be used as the Worker Nodes. Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created.
    autoscale SparkClusterRolesWorkerNodeAutoscale
    An autoscale block as defined below.
    password string

    The Password associated with the local administrator for the Worker Nodes. Changing this forces a new resource to be created.

    NOTE: If specified, this password must be at least 10 characters in length and must contain at least one digit, one uppercase and one lower case letter, one non-alphanumeric character (except characters ' " ` ).

    scriptActions SparkClusterRolesWorkerNodeScriptAction[]
    The script action which will run on the cluster. One or more script_actions blocks as defined above.
    sshKeys string[]

    A list of SSH Keys which should be used for the local administrator on the Worker Nodes. Changing this forces a new resource to be created.

    NOTE: Either a password or one or more ssh_keys must be specified - but not both.

    subnetId string
    The ID of the Subnet within the Virtual Network where the Worker Nodes should be provisioned within. Changing this forces a new resource to be created.
    virtualNetworkId string
    The ID of the Virtual Network where the Worker Nodes should be provisioned within. Changing this forces a new resource to be created.
    target_instance_count int
    The number of instances which should be run for the Worker Nodes.
    username str
    The Username of the local administrator for the Worker Nodes. Changing this forces a new resource to be created.
    vm_size str
    The Size of the Virtual Machine which should be used as the Worker Nodes. Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created.
    autoscale SparkClusterRolesWorkerNodeAutoscale
    An autoscale block as defined below.
    password str

    The Password associated with the local administrator for the Worker Nodes. Changing this forces a new resource to be created.

    NOTE: If specified, this password must be at least 10 characters in length and must contain at least one digit, one uppercase and one lower case letter, one non-alphanumeric character (except characters ' " ` ).

    script_actions Sequence[SparkClusterRolesWorkerNodeScriptAction]
    The script action which will run on the cluster. One or more script_actions blocks as defined above.
    ssh_keys Sequence[str]

    A list of SSH Keys which should be used for the local administrator on the Worker Nodes. Changing this forces a new resource to be created.

    NOTE: Either a password or one or more ssh_keys must be specified - but not both.

    subnet_id str
    The ID of the Subnet within the Virtual Network where the Worker Nodes should be provisioned within. Changing this forces a new resource to be created.
    virtual_network_id str
    The ID of the Virtual Network where the Worker Nodes should be provisioned within. Changing this forces a new resource to be created.
    targetInstanceCount Number
    The number of instances which should be run for the Worker Nodes.
    username String
    The Username of the local administrator for the Worker Nodes. Changing this forces a new resource to be created.
    vmSize String
    The Size of the Virtual Machine which should be used as the Worker Nodes. Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created.
    autoscale Property Map
    An autoscale block as defined below.
    password String

    The Password associated with the local administrator for the Worker Nodes. Changing this forces a new resource to be created.

    NOTE: If specified, this password must be at least 10 characters in length and must contain at least one digit, one uppercase and one lower case letter, one non-alphanumeric character (except characters ' " ` ).

    scriptActions List<Property Map>
    The script action which will run on the cluster. One or more script_actions blocks as defined above.
    sshKeys List<String>

    A list of SSH Keys which should be used for the local administrator on the Worker Nodes. Changing this forces a new resource to be created.

    NOTE: Either a password or one or more ssh_keys must be specified - but not both.

    subnetId String
    The ID of the Subnet within the Virtual Network where the Worker Nodes should be provisioned within. Changing this forces a new resource to be created.
    virtualNetworkId String
    The ID of the Virtual Network where the Worker Nodes should be provisioned within. Changing this forces a new resource to be created.

    SparkClusterRolesWorkerNodeAutoscale, SparkClusterRolesWorkerNodeAutoscaleArgs

    Capacity SparkClusterRolesWorkerNodeAutoscaleCapacity
    A capacity block as defined below.
    Recurrence SparkClusterRolesWorkerNodeAutoscaleRecurrence

    A recurrence block as defined below.

    NOTE: Either a capacity or recurrence block must be specified - but not both.

    Capacity SparkClusterRolesWorkerNodeAutoscaleCapacity
    A capacity block as defined below.
    Recurrence SparkClusterRolesWorkerNodeAutoscaleRecurrence

    A recurrence block as defined below.

    NOTE: Either a capacity or recurrence block must be specified - but not both.

    capacity SparkClusterRolesWorkerNodeAutoscaleCapacity
    A capacity block as defined below.
    recurrence SparkClusterRolesWorkerNodeAutoscaleRecurrence

    A recurrence block as defined below.

    NOTE: Either a capacity or recurrence block must be specified - but not both.

    capacity SparkClusterRolesWorkerNodeAutoscaleCapacity
    A capacity block as defined below.
    recurrence SparkClusterRolesWorkerNodeAutoscaleRecurrence

    A recurrence block as defined below.

    NOTE: Either a capacity or recurrence block must be specified - but not both.

    capacity SparkClusterRolesWorkerNodeAutoscaleCapacity
    A capacity block as defined below.
    recurrence SparkClusterRolesWorkerNodeAutoscaleRecurrence

    A recurrence block as defined below.

    NOTE: Either a capacity or recurrence block must be specified - but not both.

    capacity Property Map
    A capacity block as defined below.
    recurrence Property Map

    A recurrence block as defined below.

    NOTE: Either a capacity or recurrence block must be specified - but not both.

    SparkClusterRolesWorkerNodeAutoscaleCapacity, SparkClusterRolesWorkerNodeAutoscaleCapacityArgs

    MaxInstanceCount int
    The maximum number of worker nodes to autoscale to based on the cluster's activity.
    MinInstanceCount int
    The minimum number of worker nodes to autoscale to based on the cluster's activity.
    MaxInstanceCount int
    The maximum number of worker nodes to autoscale to based on the cluster's activity.
    MinInstanceCount int
    The minimum number of worker nodes to autoscale to based on the cluster's activity.
    maxInstanceCount Integer
    The maximum number of worker nodes to autoscale to based on the cluster's activity.
    minInstanceCount Integer
    The minimum number of worker nodes to autoscale to based on the cluster's activity.
    maxInstanceCount number
    The maximum number of worker nodes to autoscale to based on the cluster's activity.
    minInstanceCount number
    The minimum number of worker nodes to autoscale to based on the cluster's activity.
    max_instance_count int
    The maximum number of worker nodes to autoscale to based on the cluster's activity.
    min_instance_count int
    The minimum number of worker nodes to autoscale to based on the cluster's activity.
    maxInstanceCount Number
    The maximum number of worker nodes to autoscale to based on the cluster's activity.
    minInstanceCount Number
    The minimum number of worker nodes to autoscale to based on the cluster's activity.

    SparkClusterRolesWorkerNodeAutoscaleRecurrence, SparkClusterRolesWorkerNodeAutoscaleRecurrenceArgs

    Schedules List<SparkClusterRolesWorkerNodeAutoscaleRecurrenceSchedule>
    A list of schedule blocks as defined below.
    Timezone string
    The time zone for the autoscale schedule times.
    Schedules []SparkClusterRolesWorkerNodeAutoscaleRecurrenceSchedule
    A list of schedule blocks as defined below.
    Timezone string
    The time zone for the autoscale schedule times.
    schedules List<SparkClusterRolesWorkerNodeAutoscaleRecurrenceSchedule>
    A list of schedule blocks as defined below.
    timezone String
    The time zone for the autoscale schedule times.
    schedules SparkClusterRolesWorkerNodeAutoscaleRecurrenceSchedule[]
    A list of schedule blocks as defined below.
    timezone string
    The time zone for the autoscale schedule times.
    schedules Sequence[SparkClusterRolesWorkerNodeAutoscaleRecurrenceSchedule]
    A list of schedule blocks as defined below.
    timezone str
    The time zone for the autoscale schedule times.
    schedules List<Property Map>
    A list of schedule blocks as defined below.
    timezone String
    The time zone for the autoscale schedule times.

    SparkClusterRolesWorkerNodeAutoscaleRecurrenceSchedule, SparkClusterRolesWorkerNodeAutoscaleRecurrenceScheduleArgs

    Days List<string>
    The days of the week to perform autoscale. Possible values are Monday, Tuesday, Wednesday, Thursday, Friday, Saturday and Sunday.
    TargetInstanceCount int
    The number of worker nodes to autoscale at the specified time.
    Time string
    The time of day to perform the autoscale, in 24-hour format.
    Days []string
    The days of the week to perform autoscale. Possible values are Monday, Tuesday, Wednesday, Thursday, Friday, Saturday and Sunday.
    TargetInstanceCount int
    The number of worker nodes to autoscale at the specified time.
    Time string
    The time of day to perform the autoscale, in 24-hour format.
    days List<String>
    The days of the week to perform autoscale. Possible values are Monday, Tuesday, Wednesday, Thursday, Friday, Saturday and Sunday.
    targetInstanceCount Integer
    The number of worker nodes to autoscale at the specified time.
    time String
    The time of day to perform the autoscale, in 24-hour format.
    days string[]
    The days of the week to perform autoscale. Possible values are Monday, Tuesday, Wednesday, Thursday, Friday, Saturday and Sunday.
    targetInstanceCount number
    The number of worker nodes to autoscale at the specified time.
    time string
    The time of day to perform the autoscale, in 24-hour format.
    days Sequence[str]
    The days of the week to perform autoscale. Possible values are Monday, Tuesday, Wednesday, Thursday, Friday, Saturday and Sunday.
    target_instance_count int
    The number of worker nodes to autoscale at the specified time.
    time str
    The time of day to perform the autoscale, in 24-hour format.
    days List<String>
    The days of the week to perform autoscale. Possible values are Monday, Tuesday, Wednesday, Thursday, Friday, Saturday and Sunday.
    targetInstanceCount Number
    The number of worker nodes to autoscale at the specified time.
    time String
    The time of day to perform the autoscale, in 24-hour format.

    SparkClusterRolesWorkerNodeScriptAction, SparkClusterRolesWorkerNodeScriptActionArgs

    Name string
    The name of the script action.
    Uri string
    The URI to the script.
    Parameters string
    The parameters for the script provided.
    Name string
    The name of the script action.
    Uri string
    The URI to the script.
    Parameters string
    The parameters for the script provided.
    name String
    The name of the script action.
    uri String
    The URI to the script.
    parameters String
    The parameters for the script provided.
    name string
    The name of the script action.
    uri string
    The URI to the script.
    parameters string
    The parameters for the script provided.
    name str
    The name of the script action.
    uri str
    The URI to the script.
    parameters str
    The parameters for the script provided.
    name String
    The name of the script action.
    uri String
    The URI to the script.
    parameters String
    The parameters for the script provided.

    SparkClusterRolesZookeeperNode, SparkClusterRolesZookeeperNodeArgs

    Username string
    The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created.
    VmSize string
    The Size of the Virtual Machine which should be used as the Zookeeper Nodes. Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created.
    Password string

    The Password associated with the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created.

    NOTE: If specified, this password must be at least 10 characters in length and must contain at least one digit, one uppercase and one lower case letter, one non-alphanumeric character (except characters ' " ` ).

    ScriptActions List<SparkClusterRolesZookeeperNodeScriptAction>
    The script action which will run on the cluster. One or more script_actions blocks as defined above.
    SshKeys List<string>

    A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created.

    NOTE: Either a password or one or more ssh_keys must be specified - but not both.

    SubnetId string
    The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned. Changing this forces a new resource to be created.
    VirtualNetworkId string
    The ID of the Virtual Network where the Zookeeper Nodes should be provisioned. Changing this forces a new resource to be created.
    Username string
    The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created.
    VmSize string
    The Size of the Virtual Machine which should be used as the Zookeeper Nodes. Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created.
    Password string

    The Password associated with the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created.

    NOTE: If specified, this password must be at least 10 characters in length and must contain at least one digit, one uppercase and one lower case letter, one non-alphanumeric character (except characters ' " ` ).

    ScriptActions []SparkClusterRolesZookeeperNodeScriptAction
    The script action which will run on the cluster. One or more script_actions blocks as defined above.
    SshKeys []string

    A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created.

    NOTE: Either a password or one or more ssh_keys must be specified - but not both.

    SubnetId string
    The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned. Changing this forces a new resource to be created.
    VirtualNetworkId string
    The ID of the Virtual Network where the Zookeeper Nodes should be provisioned. Changing this forces a new resource to be created.
    username String
    The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created.
    vmSize String
    The Size of the Virtual Machine which should be used as the Zookeeper Nodes. Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created.
    password String

    The Password associated with the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created.

    NOTE: If specified, this password must be at least 10 characters in length and must contain at least one digit, one uppercase and one lower case letter, one non-alphanumeric character (except characters ' " ` ).

    scriptActions List<SparkClusterRolesZookeeperNodeScriptAction>
    The script action which will run on the cluster. One or more script_actions blocks as defined above.
    sshKeys List<String>

    A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created.

    NOTE: Either a password or one or more ssh_keys must be specified - but not both.

    subnetId String
    The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned. Changing this forces a new resource to be created.
    virtualNetworkId String
    The ID of the Virtual Network where the Zookeeper Nodes should be provisioned. Changing this forces a new resource to be created.
    username string
    The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created.
    vmSize string
    The Size of the Virtual Machine which should be used as the Zookeeper Nodes. Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created.
    password string

    The Password associated with the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created.

    NOTE: If specified, this password must be at least 10 characters in length and must contain at least one digit, one uppercase and one lower case letter, one non-alphanumeric character (except characters ' " ` ).

    scriptActions SparkClusterRolesZookeeperNodeScriptAction[]
    The script action which will run on the cluster. One or more script_actions blocks as defined above.
    sshKeys string[]

    A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created.

    NOTE: Either a password or one or more ssh_keys must be specified - but not both.

    subnetId string
    The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned. Changing this forces a new resource to be created.
    virtualNetworkId string
    The ID of the Virtual Network where the Zookeeper Nodes should be provisioned. Changing this forces a new resource to be created.
    username str
    The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created.
    vm_size str
    The Size of the Virtual Machine which should be used as the Zookeeper Nodes. Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created.
    password str

    The Password associated with the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created.

    NOTE: If specified, this password must be at least 10 characters in length and must contain at least one digit, one uppercase and one lower case letter, one non-alphanumeric character (except characters ' " ` ).

    script_actions Sequence[SparkClusterRolesZookeeperNodeScriptAction]
    The script action which will run on the cluster. One or more script_actions blocks as defined above.
    ssh_keys Sequence[str]

    A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created.

    NOTE: Either a password or one or more ssh_keys must be specified - but not both.

    subnet_id str
    The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned. Changing this forces a new resource to be created.
    virtual_network_id str
    The ID of the Virtual Network where the Zookeeper Nodes should be provisioned. Changing this forces a new resource to be created.
    username String
    The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created.
    vmSize String
    The Size of the Virtual Machine which should be used as the Zookeeper Nodes. Possible values are ExtraSmall, Small, Medium, Large, ExtraLarge, A5, A6, A7, A8, A9, A10, A11, Standard_A1_V2, Standard_A2_V2, Standard_A2m_V2, Standard_A3, Standard_A4_V2, Standard_A4m_V2, Standard_A8_V2, Standard_A8m_V2, Standard_D1, Standard_D2, Standard_D3, Standard_D4, Standard_D11, Standard_D12, Standard_D13, Standard_D14, Standard_D1_V2, Standard_D2_V2, Standard_D3_V2, Standard_D4_V2, Standard_D5_V2, Standard_D11_V2, Standard_D12_V2, Standard_D13_V2, Standard_D14_V2, Standard_DS1_V2, Standard_DS2_V2, Standard_DS3_V2, Standard_DS4_V2, Standard_DS5_V2, Standard_DS11_V2, Standard_DS12_V2, Standard_DS13_V2, Standard_DS14_V2, Standard_E2_V3, Standard_E4_V3, Standard_E8_V3, Standard_E16_V3, Standard_E20_V3, Standard_E32_V3, Standard_E64_V3, Standard_E64i_V3, Standard_E2s_V3, Standard_E4s_V3, Standard_E8s_V3, Standard_E16s_V3, Standard_E20s_V3, Standard_E32s_V3, Standard_E64s_V3, Standard_E64is_V3, Standard_D2a_V4, Standard_D4a_V4, Standard_D8a_V4, Standard_D16a_V4, Standard_D32a_V4, Standard_D48a_V4, Standard_D64a_V4, Standard_D96a_V4, Standard_E2a_V4, Standard_E4a_V4, Standard_E8a_V4, Standard_E16a_V4, Standard_E20a_V4, Standard_E32a_V4, Standard_E48a_V4, Standard_E64a_V4, Standard_E96a_V4, Standard_G1, Standard_G2, Standard_G3, Standard_G4, Standard_G5, Standard_F2s_V2, Standard_F4s_V2, Standard_F8s_V2, Standard_F16s_V2, Standard_F32s_V2, Standard_F64s_V2, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created.
    password String

    The Password associated with the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created.

    NOTE: If specified, this password must be at least 10 characters in length and must contain at least one digit, one uppercase and one lower case letter, one non-alphanumeric character (except characters ' " ` ).

    scriptActions List<Property Map>
    The script action which will run on the cluster. One or more script_actions blocks as defined above.
    sshKeys List<String>

    A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created.

    NOTE: Either a password or one or more ssh_keys must be specified - but not both.

    subnetId String
    The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned. Changing this forces a new resource to be created.
    virtualNetworkId String
    The ID of the Virtual Network where the Zookeeper Nodes should be provisioned. Changing this forces a new resource to be created.

    SparkClusterRolesZookeeperNodeScriptAction, SparkClusterRolesZookeeperNodeScriptActionArgs

    Name string
    The name of the script action.
    Uri string
    The URI to the script.
    Parameters string
    The parameters for the script provided.
    Name string
    The name of the script action.
    Uri string
    The URI to the script.
    Parameters string
    The parameters for the script provided.
    name String
    The name of the script action.
    uri String
    The URI to the script.
    parameters String
    The parameters for the script provided.
    name string
    The name of the script action.
    uri string
    The URI to the script.
    parameters string
    The parameters for the script provided.
    name str
    The name of the script action.
    uri str
    The URI to the script.
    parameters str
    The parameters for the script provided.
    name String
    The name of the script action.
    uri String
    The URI to the script.
    parameters String
    The parameters for the script provided.

    SparkClusterSecurityProfile, SparkClusterSecurityProfileArgs

    AaddsResourceId string
    The resource ID of the Azure Active Directory Domain Service. Changing this forces a new resource to be created.
    DomainName string
    The name of the Azure Active Directory Domain. Changing this forces a new resource to be created.
    DomainUserPassword string
    The user password of the Azure Active Directory Domain. Changing this forces a new resource to be created.
    DomainUsername string
    The username of the Azure Active Directory Domain. Changing this forces a new resource to be created.
    LdapsUrls List<string>
    A list of the LDAPS URLs to communicate with the Azure Active Directory. Changing this forces a new resource to be created.
    MsiResourceId string
    The User Assigned Identity for the HDInsight Cluster. Changing this forces a new resource to be created.
    ClusterUsersGroupDns List<string>
    A list of the distinguished names for the cluster user groups. Changing this forces a new resource to be created.
    AaddsResourceId string
    The resource ID of the Azure Active Directory Domain Service. Changing this forces a new resource to be created.
    DomainName string
    The name of the Azure Active Directory Domain. Changing this forces a new resource to be created.
    DomainUserPassword string
    The user password of the Azure Active Directory Domain. Changing this forces a new resource to be created.
    DomainUsername string
    The username of the Azure Active Directory Domain. Changing this forces a new resource to be created.
    LdapsUrls []string
    A list of the LDAPS URLs to communicate with the Azure Active Directory. Changing this forces a new resource to be created.
    MsiResourceId string
    The User Assigned Identity for the HDInsight Cluster. Changing this forces a new resource to be created.
    ClusterUsersGroupDns []string
    A list of the distinguished names for the cluster user groups. Changing this forces a new resource to be created.
    aaddsResourceId String
    The resource ID of the Azure Active Directory Domain Service. Changing this forces a new resource to be created.
    domainName String
    The name of the Azure Active Directory Domain. Changing this forces a new resource to be created.
    domainUserPassword String
    The user password of the Azure Active Directory Domain. Changing this forces a new resource to be created.
    domainUsername String
    The username of the Azure Active Directory Domain. Changing this forces a new resource to be created.
    ldapsUrls List<String>
    A list of the LDAPS URLs to communicate with the Azure Active Directory. Changing this forces a new resource to be created.
    msiResourceId String
    The User Assigned Identity for the HDInsight Cluster. Changing this forces a new resource to be created.
    clusterUsersGroupDns List<String>
    A list of the distinguished names for the cluster user groups. Changing this forces a new resource to be created.
    aaddsResourceId string
    The resource ID of the Azure Active Directory Domain Service. Changing this forces a new resource to be created.
    domainName string
    The name of the Azure Active Directory Domain. Changing this forces a new resource to be created.
    domainUserPassword string
    The user password of the Azure Active Directory Domain. Changing this forces a new resource to be created.
    domainUsername string
    The username of the Azure Active Directory Domain. Changing this forces a new resource to be created.
    ldapsUrls string[]
    A list of the LDAPS URLs to communicate with the Azure Active Directory. Changing this forces a new resource to be created.
    msiResourceId string
    The User Assigned Identity for the HDInsight Cluster. Changing this forces a new resource to be created.
    clusterUsersGroupDns string[]
    A list of the distinguished names for the cluster user groups. Changing this forces a new resource to be created.
    aadds_resource_id str
    The resource ID of the Azure Active Directory Domain Service. Changing this forces a new resource to be created.
    domain_name str
    The name of the Azure Active Directory Domain. Changing this forces a new resource to be created.
    domain_user_password str
    The user password of the Azure Active Directory Domain. Changing this forces a new resource to be created.
    domain_username str
    The username of the Azure Active Directory Domain. Changing this forces a new resource to be created.
    ldaps_urls Sequence[str]
    A list of the LDAPS URLs to communicate with the Azure Active Directory. Changing this forces a new resource to be created.
    msi_resource_id str
    The User Assigned Identity for the HDInsight Cluster. Changing this forces a new resource to be created.
    cluster_users_group_dns Sequence[str]
    A list of the distinguished names for the cluster user groups. Changing this forces a new resource to be created.
    aaddsResourceId String
    The resource ID of the Azure Active Directory Domain Service. Changing this forces a new resource to be created.
    domainName String
    The name of the Azure Active Directory Domain. Changing this forces a new resource to be created.
    domainUserPassword String
    The user password of the Azure Active Directory Domain. Changing this forces a new resource to be created.
    domainUsername String
    The username of the Azure Active Directory Domain. Changing this forces a new resource to be created.
    ldapsUrls List<String>
    A list of the LDAPS URLs to communicate with the Azure Active Directory. Changing this forces a new resource to be created.
    msiResourceId String
    The User Assigned Identity for the HDInsight Cluster. Changing this forces a new resource to be created.
    clusterUsersGroupDns List<String>
    A list of the distinguished names for the cluster user groups. Changing this forces a new resource to be created.

    SparkClusterStorageAccount, SparkClusterStorageAccountArgs

    IsDefault bool

    Is this the Default Storage Account for the HDInsight Spark Cluster? Changing this forces a new resource to be created.

    NOTE: One of the storage_account or storage_account_gen2 blocks must be marked as the default.

    StorageAccountKey string
    The Access Key which should be used to connect to the Storage Account. Changing this forces a new resource to be created.
    StorageContainerId string

    The ID of the Storage Container. Changing this forces a new resource to be created.

    NOTE: This can be obtained from the id of the azure.storage.Container resource.

    StorageResourceId string
    The ID of the Storage Account. Changing this forces a new resource to be created.
    IsDefault bool

    Is this the Default Storage Account for the HDInsight Spark Cluster? Changing this forces a new resource to be created.

    NOTE: One of the storage_account or storage_account_gen2 blocks must be marked as the default.

    StorageAccountKey string
    The Access Key which should be used to connect to the Storage Account. Changing this forces a new resource to be created.
    StorageContainerId string

    The ID of the Storage Container. Changing this forces a new resource to be created.

    NOTE: This can be obtained from the id of the azure.storage.Container resource.

    StorageResourceId string
    The ID of the Storage Account. Changing this forces a new resource to be created.
    isDefault Boolean

    Is this the Default Storage Account for the HDInsight Spark Cluster? Changing this forces a new resource to be created.

    NOTE: One of the storage_account or storage_account_gen2 blocks must be marked as the default.

    storageAccountKey String
    The Access Key which should be used to connect to the Storage Account. Changing this forces a new resource to be created.
    storageContainerId String

    The ID of the Storage Container. Changing this forces a new resource to be created.

    NOTE: This can be obtained from the id of the azure.storage.Container resource.

    storageResourceId String
    The ID of the Storage Account. Changing this forces a new resource to be created.
    isDefault boolean

    Is this the Default Storage Account for the HDInsight Spark Cluster? Changing this forces a new resource to be created.

    NOTE: One of the storage_account or storage_account_gen2 blocks must be marked as the default.

    storageAccountKey string
    The Access Key which should be used to connect to the Storage Account. Changing this forces a new resource to be created.
    storageContainerId string

    The ID of the Storage Container. Changing this forces a new resource to be created.

    NOTE: This can be obtained from the id of the azure.storage.Container resource.

    storageResourceId string
    The ID of the Storage Account. Changing this forces a new resource to be created.
    is_default bool

    Is this the Default Storage Account for the HDInsight Spark Cluster? Changing this forces a new resource to be created.

    NOTE: One of the storage_account or storage_account_gen2 blocks must be marked as the default.

    storage_account_key str
    The Access Key which should be used to connect to the Storage Account. Changing this forces a new resource to be created.
    storage_container_id str

    The ID of the Storage Container. Changing this forces a new resource to be created.

    NOTE: This can be obtained from the id of the azure.storage.Container resource.

    storage_resource_id str
    The ID of the Storage Account. Changing this forces a new resource to be created.
    isDefault Boolean

    Is this the Default Storage Account for the HDInsight Spark Cluster? Changing this forces a new resource to be created.

    NOTE: One of the storage_account or storage_account_gen2 blocks must be marked as the default.

    storageAccountKey String
    The Access Key which should be used to connect to the Storage Account. Changing this forces a new resource to be created.
    storageContainerId String

    The ID of the Storage Container. Changing this forces a new resource to be created.

    NOTE: This can be obtained from the id of the azure.storage.Container resource.

    storageResourceId String
    The ID of the Storage Account. Changing this forces a new resource to be created.

    SparkClusterStorageAccountGen2, SparkClusterStorageAccountGen2Args

    FilesystemId string
    The ID of the Gen2 Filesystem. Changing this forces a new resource to be created.
    IsDefault bool

    Is this the Default Storage Account for the HDInsight Spark Cluster? Changing this forces a new resource to be created.

    NOTE: One of the storage_account or storage_account_gen2 blocks must be marked as the default.

    ManagedIdentityResourceId string

    The ID of Managed Identity to use for accessing the Gen2 filesystem. Changing this forces a new resource to be created.

    NOTE: This can be obtained from the id of the azure.authorization.UserAssignedIdentity resource.

    StorageResourceId string
    The ID of the Storage Account. Changing this forces a new resource to be created.
    FilesystemId string
    The ID of the Gen2 Filesystem. Changing this forces a new resource to be created.
    IsDefault bool

    Is this the Default Storage Account for the HDInsight Spark Cluster? Changing this forces a new resource to be created.

    NOTE: One of the storage_account or storage_account_gen2 blocks must be marked as the default.

    ManagedIdentityResourceId string

    The ID of Managed Identity to use for accessing the Gen2 filesystem. Changing this forces a new resource to be created.

    NOTE: This can be obtained from the id of the azure.authorization.UserAssignedIdentity resource.

    StorageResourceId string
    The ID of the Storage Account. Changing this forces a new resource to be created.
    filesystemId String
    The ID of the Gen2 Filesystem. Changing this forces a new resource to be created.
    isDefault Boolean

    Is this the Default Storage Account for the HDInsight Spark Cluster? Changing this forces a new resource to be created.

    NOTE: One of the storage_account or storage_account_gen2 blocks must be marked as the default.

    managedIdentityResourceId String

    The ID of Managed Identity to use for accessing the Gen2 filesystem. Changing this forces a new resource to be created.

    NOTE: This can be obtained from the id of the azure.authorization.UserAssignedIdentity resource.

    storageResourceId String
    The ID of the Storage Account. Changing this forces a new resource to be created.
    filesystemId string
    The ID of the Gen2 Filesystem. Changing this forces a new resource to be created.
    isDefault boolean

    Is this the Default Storage Account for the HDInsight Spark Cluster? Changing this forces a new resource to be created.

    NOTE: One of the storage_account or storage_account_gen2 blocks must be marked as the default.

    managedIdentityResourceId string

    The ID of Managed Identity to use for accessing the Gen2 filesystem. Changing this forces a new resource to be created.

    NOTE: This can be obtained from the id of the azure.authorization.UserAssignedIdentity resource.

    storageResourceId string
    The ID of the Storage Account. Changing this forces a new resource to be created.
    filesystem_id str
    The ID of the Gen2 Filesystem. Changing this forces a new resource to be created.
    is_default bool

    Is this the Default Storage Account for the HDInsight Spark Cluster? Changing this forces a new resource to be created.

    NOTE: One of the storage_account or storage_account_gen2 blocks must be marked as the default.

    managed_identity_resource_id str

    The ID of Managed Identity to use for accessing the Gen2 filesystem. Changing this forces a new resource to be created.

    NOTE: This can be obtained from the id of the azure.authorization.UserAssignedIdentity resource.

    storage_resource_id str
    The ID of the Storage Account. Changing this forces a new resource to be created.
    filesystemId String
    The ID of the Gen2 Filesystem. Changing this forces a new resource to be created.
    isDefault Boolean

    Is this the Default Storage Account for the HDInsight Spark Cluster? Changing this forces a new resource to be created.

    NOTE: One of the storage_account or storage_account_gen2 blocks must be marked as the default.

    managedIdentityResourceId String

    The ID of Managed Identity to use for accessing the Gen2 filesystem. Changing this forces a new resource to be created.

    NOTE: This can be obtained from the id of the azure.authorization.UserAssignedIdentity resource.

    storageResourceId String
    The ID of the Storage Account. Changing this forces a new resource to be created.

    Import

    HDInsight Spark Clusters can be imported using the resource id, e.g.

    $ pulumi import azure:hdinsight/sparkCluster:SparkCluster example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.HDInsight/clusters/cluster1
    

    To learn more about importing existing cloud resources, see Importing resources.

    Package Details

    Repository
    Azure Classic pulumi/pulumi-azure
    License
    Apache-2.0
    Notes
    This Pulumi package is based on the azurerm Terraform Provider.
    azure logo

    We recommend using Azure Native.

    Azure Classic v5.81.0 published on Monday, Jun 24, 2024 by Pulumi