We recommend using the Azure Native provider.
azure.hdinsight.SparkCluster
Explore with Pulumi AI
Manages an HDInsight Spark Cluster.
Example Usage
// Example: provision an HDInsight Spark Cluster backed by an Azure Storage
// account and blob container (TypeScript).
import * as pulumi from "@pulumi/pulumi";
import * as azure from "@pulumi/azure";
// Resource group that holds every resource in this example.
const example = new azure.core.ResourceGroup("example", {
    name: "example-resources",
    location: "West Europe",
});
// Storage account that backs the cluster's default storage.
const exampleAccount = new azure.storage.Account("example", {
    name: "hdinsightstor",
    resourceGroupName: example.name,
    location: example.location,
    accountTier: "Standard",
    accountReplicationType: "LRS",
});
// Private blob container the cluster writes its data to.
const exampleContainer = new azure.storage.Container("example", {
    name: "hdinsight",
    storageAccountName: exampleAccount.name,
    containerAccessType: "private",
});
// The Spark cluster itself. Credentials are inlined for brevity; in real
// code load them from Pulumi config/secrets instead of hard-coding them.
const exampleSparkCluster = new azure.hdinsight.SparkCluster("example", {
    name: "example-hdicluster",
    resourceGroupName: example.name,
    location: example.location,
    clusterVersion: "3.6",
    tier: "Standard",
    componentVersion: {
        spark: "2.3",
    },
    gateway: {
        username: "acctestusrgw",
        password: "Password123!",
    },
    storageAccounts: [{
        storageContainerId: exampleContainer.id,
        storageAccountKey: exampleAccount.primaryAccessKey,
        isDefault: true, // marks this account as the cluster's default storage
    }],
    roles: {
        headNode: {
            vmSize: "Standard_A3",
            username: "acctestusrvm",
            password: "AccTestvdSC4daf986!",
        },
        workerNode: {
            vmSize: "Standard_A3",
            username: "acctestusrvm",
            password: "AccTestvdSC4daf986!",
            targetInstanceCount: 3, // number of worker nodes to provision
        },
        zookeeperNode: {
            vmSize: "Medium",
            username: "acctestusrvm",
            password: "AccTestvdSC4daf986!",
        },
    },
});
# Example: provision an HDInsight Spark Cluster backed by an Azure Storage
# account and blob container (Python).
import pulumi
import pulumi_azure as azure
# Resource group that holds every resource in this example.
example = azure.core.ResourceGroup("example",
    name="example-resources",
    location="West Europe")
# Storage account that backs the cluster's default storage.
example_account = azure.storage.Account("example",
    name="hdinsightstor",
    resource_group_name=example.name,
    location=example.location,
    account_tier="Standard",
    account_replication_type="LRS")
# Private blob container the cluster writes its data to.
example_container = azure.storage.Container("example",
    name="hdinsight",
    storage_account_name=example_account.name,
    container_access_type="private")
# The Spark cluster itself. Credentials are inlined for brevity; in real
# code load them from Pulumi config/secrets instead of hard-coding them.
example_spark_cluster = azure.hdinsight.SparkCluster("example",
    name="example-hdicluster",
    resource_group_name=example.name,
    location=example.location,
    cluster_version="3.6",
    tier="Standard",
    component_version={
        "spark": "2.3",
    },
    gateway={
        "username": "acctestusrgw",
        "password": "Password123!",
    },
    storage_accounts=[{
        "storage_container_id": example_container.id,
        "storage_account_key": example_account.primary_access_key,
        # Marks this account as the cluster's default storage.
        "is_default": True,
    }],
    roles={
        "head_node": {
            "vm_size": "Standard_A3",
            "username": "acctestusrvm",
            "password": "AccTestvdSC4daf986!",
        },
        "worker_node": {
            "vm_size": "Standard_A3",
            "username": "acctestusrvm",
            "password": "AccTestvdSC4daf986!",
            # Number of worker nodes to provision.
            "target_instance_count": 3,
        },
        "zookeeper_node": {
            "vm_size": "Medium",
            "username": "acctestusrvm",
            "password": "AccTestvdSC4daf986!",
        },
    })
// Example: provision an HDInsight Spark Cluster backed by an Azure Storage
// account and blob container (Go).
package main

import (
	"github.com/pulumi/pulumi-azure/sdk/v6/go/azure/core"
	"github.com/pulumi/pulumi-azure/sdk/v6/go/azure/hdinsight"
	"github.com/pulumi/pulumi-azure/sdk/v6/go/azure/storage"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Resource group that holds every resource in this example.
		example, err := core.NewResourceGroup(ctx, "example", &core.ResourceGroupArgs{
			Name:     pulumi.String("example-resources"),
			Location: pulumi.String("West Europe"),
		})
		if err != nil {
			return err
		}
		// Storage account that backs the cluster's default storage.
		exampleAccount, err := storage.NewAccount(ctx, "example", &storage.AccountArgs{
			Name:                   pulumi.String("hdinsightstor"),
			ResourceGroupName:      example.Name,
			Location:               example.Location,
			AccountTier:            pulumi.String("Standard"),
			AccountReplicationType: pulumi.String("LRS"),
		})
		if err != nil {
			return err
		}
		// Private blob container the cluster writes its data to.
		exampleContainer, err := storage.NewContainer(ctx, "example", &storage.ContainerArgs{
			Name:                pulumi.String("hdinsight"),
			StorageAccountName:  exampleAccount.Name,
			ContainerAccessType: pulumi.String("private"),
		})
		if err != nil {
			return err
		}
		// The Spark cluster itself. Credentials are inlined for brevity; in
		// real code load them from Pulumi config/secrets.
		_, err = hdinsight.NewSparkCluster(ctx, "example", &hdinsight.SparkClusterArgs{
			Name:              pulumi.String("example-hdicluster"),
			ResourceGroupName: example.Name,
			Location:          example.Location,
			ClusterVersion:    pulumi.String("3.6"),
			Tier:              pulumi.String("Standard"),
			ComponentVersion: &hdinsight.SparkClusterComponentVersionArgs{
				Spark: pulumi.String("2.3"),
			},
			Gateway: &hdinsight.SparkClusterGatewayArgs{
				Username: pulumi.String("acctestusrgw"),
				Password: pulumi.String("Password123!"),
			},
			StorageAccounts: hdinsight.SparkClusterStorageAccountArray{
				&hdinsight.SparkClusterStorageAccountArgs{
					StorageContainerId: exampleContainer.ID(),
					StorageAccountKey:  exampleAccount.PrimaryAccessKey,
					// Marks this account as the cluster's default storage.
					IsDefault: pulumi.Bool(true),
				},
			},
			Roles: &hdinsight.SparkClusterRolesArgs{
				HeadNode: &hdinsight.SparkClusterRolesHeadNodeArgs{
					VmSize:   pulumi.String("Standard_A3"),
					Username: pulumi.String("acctestusrvm"),
					Password: pulumi.String("AccTestvdSC4daf986!"),
				},
				WorkerNode: &hdinsight.SparkClusterRolesWorkerNodeArgs{
					VmSize:   pulumi.String("Standard_A3"),
					Username: pulumi.String("acctestusrvm"),
					Password: pulumi.String("AccTestvdSC4daf986!"),
					// Number of worker nodes to provision.
					TargetInstanceCount: pulumi.Int(3),
				},
				ZookeeperNode: &hdinsight.SparkClusterRolesZookeeperNodeArgs{
					VmSize:   pulumi.String("Medium"),
					Username: pulumi.String("acctestusrvm"),
					Password: pulumi.String("AccTestvdSC4daf986!"),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
// Example: provision an HDInsight Spark Cluster backed by an Azure Storage
// account and blob container (C#).
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Azure = Pulumi.Azure;
return await Deployment.RunAsync(() => 
{
    // Resource group that holds every resource in this example.
    var example = new Azure.Core.ResourceGroup("example", new()
    {
        Name = "example-resources",
        Location = "West Europe",
    });
    // Storage account that backs the cluster's default storage.
    var exampleAccount = new Azure.Storage.Account("example", new()
    {
        Name = "hdinsightstor",
        ResourceGroupName = example.Name,
        Location = example.Location,
        AccountTier = "Standard",
        AccountReplicationType = "LRS",
    });
    // Private blob container the cluster writes its data to.
    var exampleContainer = new Azure.Storage.Container("example", new()
    {
        Name = "hdinsight",
        StorageAccountName = exampleAccount.Name,
        ContainerAccessType = "private",
    });
    // The Spark cluster itself. Credentials are inlined for brevity; in real
    // code load them from Pulumi config/secrets.
    var exampleSparkCluster = new Azure.HDInsight.SparkCluster("example", new()
    {
        Name = "example-hdicluster",
        ResourceGroupName = example.Name,
        Location = example.Location,
        ClusterVersion = "3.6",
        Tier = "Standard",
        ComponentVersion = new Azure.HDInsight.Inputs.SparkClusterComponentVersionArgs
        {
            Spark = "2.3",
        },
        Gateway = new Azure.HDInsight.Inputs.SparkClusterGatewayArgs
        {
            Username = "acctestusrgw",
            Password = "Password123!",
        },
        StorageAccounts = new[]
        {
            new Azure.HDInsight.Inputs.SparkClusterStorageAccountArgs
            {
                StorageContainerId = exampleContainer.Id,
                StorageAccountKey = exampleAccount.PrimaryAccessKey,
                // Marks this account as the cluster's default storage.
                IsDefault = true,
            },
        },
        Roles = new Azure.HDInsight.Inputs.SparkClusterRolesArgs
        {
            HeadNode = new Azure.HDInsight.Inputs.SparkClusterRolesHeadNodeArgs
            {
                VmSize = "Standard_A3",
                Username = "acctestusrvm",
                Password = "AccTestvdSC4daf986!",
            },
            WorkerNode = new Azure.HDInsight.Inputs.SparkClusterRolesWorkerNodeArgs
            {
                VmSize = "Standard_A3",
                Username = "acctestusrvm",
                Password = "AccTestvdSC4daf986!",
                // Number of worker nodes to provision.
                TargetInstanceCount = 3,
            },
            ZookeeperNode = new Azure.HDInsight.Inputs.SparkClusterRolesZookeeperNodeArgs
            {
                VmSize = "Medium",
                Username = "acctestusrvm",
                Password = "AccTestvdSC4daf986!",
            },
        },
    });
});
// Example: provision an HDInsight Spark Cluster backed by an Azure Storage
// account and blob container (Java).
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.azure.core.ResourceGroup;
import com.pulumi.azure.core.ResourceGroupArgs;
import com.pulumi.azure.storage.Account;
import com.pulumi.azure.storage.AccountArgs;
import com.pulumi.azure.storage.Container;
import com.pulumi.azure.storage.ContainerArgs;
import com.pulumi.azure.hdinsight.SparkCluster;
import com.pulumi.azure.hdinsight.SparkClusterArgs;
import com.pulumi.azure.hdinsight.inputs.SparkClusterComponentVersionArgs;
import com.pulumi.azure.hdinsight.inputs.SparkClusterGatewayArgs;
import com.pulumi.azure.hdinsight.inputs.SparkClusterStorageAccountArgs;
import com.pulumi.azure.hdinsight.inputs.SparkClusterRolesArgs;
import com.pulumi.azure.hdinsight.inputs.SparkClusterRolesHeadNodeArgs;
import com.pulumi.azure.hdinsight.inputs.SparkClusterRolesWorkerNodeArgs;
import com.pulumi.azure.hdinsight.inputs.SparkClusterRolesZookeeperNodeArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    // NOTE(review): this snippet uses logical resource names like
    // "exampleAccount" where the other language examples use "example" —
    // presumably equivalent in intent; verify against the doc generator.
    public static void stack(Context ctx) {
        // Resource group that holds every resource in this example.
        var example = new ResourceGroup("example", ResourceGroupArgs.builder()
            .name("example-resources")
            .location("West Europe")
            .build());
        // Storage account that backs the cluster's default storage.
        var exampleAccount = new Account("exampleAccount", AccountArgs.builder()
            .name("hdinsightstor")
            .resourceGroupName(example.name())
            .location(example.location())
            .accountTier("Standard")
            .accountReplicationType("LRS")
            .build());
        // Private blob container the cluster writes its data to.
        var exampleContainer = new Container("exampleContainer", ContainerArgs.builder()
            .name("hdinsight")
            .storageAccountName(exampleAccount.name())
            .containerAccessType("private")
            .build());
        // The Spark cluster itself. Credentials are inlined for brevity; in
        // real code load them from Pulumi config/secrets.
        var exampleSparkCluster = new SparkCluster("exampleSparkCluster", SparkClusterArgs.builder()
            .name("example-hdicluster")
            .resourceGroupName(example.name())
            .location(example.location())
            .clusterVersion("3.6")
            .tier("Standard")
            .componentVersion(SparkClusterComponentVersionArgs.builder()
                .spark("2.3")
                .build())
            .gateway(SparkClusterGatewayArgs.builder()
                .username("acctestusrgw")
                .password("Password123!")
                .build())
            .storageAccounts(SparkClusterStorageAccountArgs.builder()
                .storageContainerId(exampleContainer.id())
                .storageAccountKey(exampleAccount.primaryAccessKey())
                .isDefault(true)
                .build())
            .roles(SparkClusterRolesArgs.builder()
                .headNode(SparkClusterRolesHeadNodeArgs.builder()
                    .vmSize("Standard_A3")
                    .username("acctestusrvm")
                    .password("AccTestvdSC4daf986!")
                    .build())
                .workerNode(SparkClusterRolesWorkerNodeArgs.builder()
                    .vmSize("Standard_A3")
                    .username("acctestusrvm")
                    .password("AccTestvdSC4daf986!")
                    .targetInstanceCount(3)
                    .build())
                .zookeeperNode(SparkClusterRolesZookeeperNodeArgs.builder()
                    .vmSize("Medium")
                    .username("acctestusrvm")
                    .password("AccTestvdSC4daf986!")
                    .build())
                .build())
            .build());
    }
}
# Example: provision an HDInsight Spark Cluster backed by an Azure Storage
# account and blob container (Pulumi YAML).
resources:
  # Resource group that holds every resource in this example.
  example:
    type: azure:core:ResourceGroup
    properties:
      name: example-resources
      location: West Europe
  # Storage account that backs the cluster's default storage.
  exampleAccount:
    type: azure:storage:Account
    name: example
    properties:
      name: hdinsightstor
      resourceGroupName: ${example.name}
      location: ${example.location}
      accountTier: Standard
      accountReplicationType: LRS
  # Private blob container the cluster writes its data to.
  exampleContainer:
    type: azure:storage:Container
    name: example
    properties:
      name: hdinsight
      storageAccountName: ${exampleAccount.name}
      containerAccessType: private
  # The Spark cluster itself. Credentials are inlined for brevity; prefer
  # Pulumi config/secrets in real programs.
  exampleSparkCluster:
    type: azure:hdinsight:SparkCluster
    name: example
    properties:
      name: example-hdicluster
      resourceGroupName: ${example.name}
      location: ${example.location}
      clusterVersion: '3.6'
      tier: Standard
      componentVersion:
        spark: '2.3'
      gateway:
        username: acctestusrgw
        password: Password123!
      storageAccounts:
        - storageContainerId: ${exampleContainer.id}
          storageAccountKey: ${exampleAccount.primaryAccessKey}
          # Marks this account as the cluster's default storage.
          isDefault: true
      roles:
        headNode:
          vmSize: Standard_A3
          username: acctestusrvm
          password: AccTestvdSC4daf986!
        workerNode:
          vmSize: Standard_A3
          username: acctestusrvm
          password: AccTestvdSC4daf986!
          # Number of worker nodes to provision.
          targetInstanceCount: 3
        zookeeperNode:
          vmSize: Medium
          username: acctestusrvm
          password: AccTestvdSC4daf986!
Create SparkCluster Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new SparkCluster(name: string, args: SparkClusterArgs, opts?: CustomResourceOptions);
@overload
def SparkCluster(resource_name: str,
                 args: SparkClusterArgs,
                 opts: Optional[ResourceOptions] = None)
@overload
def SparkCluster(resource_name: str,
                 opts: Optional[ResourceOptions] = None,
                 gateway: Optional[SparkClusterGatewayArgs] = None,
                 component_version: Optional[SparkClusterComponentVersionArgs] = None,
                 tier: Optional[str] = None,
                 roles: Optional[SparkClusterRolesArgs] = None,
                 resource_group_name: Optional[str] = None,
                 cluster_version: Optional[str] = None,
                 name: Optional[str] = None,
                 encryption_in_transit_enabled: Optional[bool] = None,
                 metastores: Optional[SparkClusterMetastoresArgs] = None,
                 monitor: Optional[SparkClusterMonitorArgs] = None,
                 extension: Optional[SparkClusterExtensionArgs] = None,
                 network: Optional[SparkClusterNetworkArgs] = None,
                 private_link_configuration: Optional[SparkClusterPrivateLinkConfigurationArgs] = None,
                 location: Optional[str] = None,
                 disk_encryptions: Optional[Sequence[SparkClusterDiskEncryptionArgs]] = None,
                 security_profile: Optional[SparkClusterSecurityProfileArgs] = None,
                 storage_account_gen2: Optional[SparkClusterStorageAccountGen2Args] = None,
                 storage_accounts: Optional[Sequence[SparkClusterStorageAccountArgs]] = None,
                 tags: Optional[Mapping[str, str]] = None,
                 compute_isolation: Optional[SparkClusterComputeIsolationArgs] = None,
                 tls_min_version: Optional[str] = None)
func NewSparkCluster(ctx *Context, name string, args SparkClusterArgs, opts ...ResourceOption) (*SparkCluster, error)
public SparkCluster(string name, SparkClusterArgs args, CustomResourceOptions? opts = null)
public SparkCluster(String name, SparkClusterArgs args)
public SparkCluster(String name, SparkClusterArgs args, CustomResourceOptions options)
type: azure:hdinsight:SparkCluster
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args SparkClusterArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args SparkClusterArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args SparkClusterArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args SparkClusterArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args SparkClusterArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
// Auto-generated constructor reference (C#): every input property of
// SparkClusterArgs shown once with a placeholder value ("string", 0, false).
// This listing documents the full input shape; it is not a runnable example.
var sparkClusterResource = new Azure.HDInsight.SparkCluster("sparkClusterResource", new()
{
    Gateway = new Azure.HDInsight.Inputs.SparkClusterGatewayArgs
    {
        Password = "string",
        Username = "string",
    },
    ComponentVersion = new Azure.HDInsight.Inputs.SparkClusterComponentVersionArgs
    {
        Spark = "string",
    },
    Tier = "string",
    Roles = new Azure.HDInsight.Inputs.SparkClusterRolesArgs
    {
        HeadNode = new Azure.HDInsight.Inputs.SparkClusterRolesHeadNodeArgs
        {
            Username = "string",
            VmSize = "string",
            Password = "string",
            ScriptActions = new[]
            {
                new Azure.HDInsight.Inputs.SparkClusterRolesHeadNodeScriptActionArgs
                {
                    Name = "string",
                    Uri = "string",
                    Parameters = "string",
                },
            },
            SshKeys = new[]
            {
                "string",
            },
            SubnetId = "string",
            VirtualNetworkId = "string",
        },
        WorkerNode = new Azure.HDInsight.Inputs.SparkClusterRolesWorkerNodeArgs
        {
            TargetInstanceCount = 0,
            Username = "string",
            VmSize = "string",
            Autoscale = new Azure.HDInsight.Inputs.SparkClusterRolesWorkerNodeAutoscaleArgs
            {
                Capacity = new Azure.HDInsight.Inputs.SparkClusterRolesWorkerNodeAutoscaleCapacityArgs
                {
                    MaxInstanceCount = 0,
                    MinInstanceCount = 0,
                },
                Recurrence = new Azure.HDInsight.Inputs.SparkClusterRolesWorkerNodeAutoscaleRecurrenceArgs
                {
                    Schedules = new[]
                    {
                        new Azure.HDInsight.Inputs.SparkClusterRolesWorkerNodeAutoscaleRecurrenceScheduleArgs
                        {
                            Days = new[]
                            {
                                "string",
                            },
                            TargetInstanceCount = 0,
                            Time = "string",
                        },
                    },
                    Timezone = "string",
                },
            },
            Password = "string",
            ScriptActions = new[]
            {
                new Azure.HDInsight.Inputs.SparkClusterRolesWorkerNodeScriptActionArgs
                {
                    Name = "string",
                    Uri = "string",
                    Parameters = "string",
                },
            },
            SshKeys = new[]
            {
                "string",
            },
            SubnetId = "string",
            VirtualNetworkId = "string",
        },
        ZookeeperNode = new Azure.HDInsight.Inputs.SparkClusterRolesZookeeperNodeArgs
        {
            Username = "string",
            VmSize = "string",
            Password = "string",
            ScriptActions = new[]
            {
                new Azure.HDInsight.Inputs.SparkClusterRolesZookeeperNodeScriptActionArgs
                {
                    Name = "string",
                    Uri = "string",
                    Parameters = "string",
                },
            },
            SshKeys = new[]
            {
                "string",
            },
            SubnetId = "string",
            VirtualNetworkId = "string",
        },
    },
    ResourceGroupName = "string",
    ClusterVersion = "string",
    Name = "string",
    EncryptionInTransitEnabled = false,
    Metastores = new Azure.HDInsight.Inputs.SparkClusterMetastoresArgs
    {
        Ambari = new Azure.HDInsight.Inputs.SparkClusterMetastoresAmbariArgs
        {
            DatabaseName = "string",
            Password = "string",
            Server = "string",
            Username = "string",
        },
        Hive = new Azure.HDInsight.Inputs.SparkClusterMetastoresHiveArgs
        {
            DatabaseName = "string",
            Password = "string",
            Server = "string",
            Username = "string",
        },
        Oozie = new Azure.HDInsight.Inputs.SparkClusterMetastoresOozieArgs
        {
            DatabaseName = "string",
            Password = "string",
            Server = "string",
            Username = "string",
        },
    },
    Monitor = new Azure.HDInsight.Inputs.SparkClusterMonitorArgs
    {
        LogAnalyticsWorkspaceId = "string",
        PrimaryKey = "string",
    },
    Extension = new Azure.HDInsight.Inputs.SparkClusterExtensionArgs
    {
        LogAnalyticsWorkspaceId = "string",
        PrimaryKey = "string",
    },
    Network = new Azure.HDInsight.Inputs.SparkClusterNetworkArgs
    {
        ConnectionDirection = "string",
        PrivateLinkEnabled = false,
    },
    PrivateLinkConfiguration = new Azure.HDInsight.Inputs.SparkClusterPrivateLinkConfigurationArgs
    {
        GroupId = "string",
        IpConfiguration = new Azure.HDInsight.Inputs.SparkClusterPrivateLinkConfigurationIpConfigurationArgs
        {
            Name = "string",
            Primary = false,
            PrivateIpAddress = "string",
            PrivateIpAllocationMethod = "string",
            SubnetId = "string",
        },
        Name = "string",
    },
    Location = "string",
    DiskEncryptions = new[]
    {
        new Azure.HDInsight.Inputs.SparkClusterDiskEncryptionArgs
        {
            EncryptionAlgorithm = "string",
            EncryptionAtHostEnabled = false,
            KeyVaultKeyId = "string",
            KeyVaultManagedIdentityId = "string",
        },
    },
    SecurityProfile = new Azure.HDInsight.Inputs.SparkClusterSecurityProfileArgs
    {
        AaddsResourceId = "string",
        DomainName = "string",
        DomainUserPassword = "string",
        DomainUsername = "string",
        LdapsUrls = new[]
        {
            "string",
        },
        MsiResourceId = "string",
        ClusterUsersGroupDns = new[]
        {
            "string",
        },
    },
    StorageAccountGen2 = new Azure.HDInsight.Inputs.SparkClusterStorageAccountGen2Args
    {
        FilesystemId = "string",
        IsDefault = false,
        ManagedIdentityResourceId = "string",
        StorageResourceId = "string",
    },
    StorageAccounts = new[]
    {
        new Azure.HDInsight.Inputs.SparkClusterStorageAccountArgs
        {
            IsDefault = false,
            StorageAccountKey = "string",
            StorageContainerId = "string",
            StorageResourceId = "string",
        },
    },
    Tags = 
    {
        { "string", "string" },
    },
    ComputeIsolation = new Azure.HDInsight.Inputs.SparkClusterComputeIsolationArgs
    {
        ComputeIsolationEnabled = false,
        HostSku = "string",
    },
    TlsMinVersion = "string",
});
// Auto-generated constructor reference (Go): every input property of
// SparkClusterArgs shown once with a placeholder value ("string", 0, false).
// This listing documents the full input shape; it is not a runnable example.
example, err := hdinsight.NewSparkCluster(ctx, "sparkClusterResource", &hdinsight.SparkClusterArgs{
	Gateway: &hdinsight.SparkClusterGatewayArgs{
		Password: pulumi.String("string"),
		Username: pulumi.String("string"),
	},
	ComponentVersion: &hdinsight.SparkClusterComponentVersionArgs{
		Spark: pulumi.String("string"),
	},
	Tier: pulumi.String("string"),
	Roles: &hdinsight.SparkClusterRolesArgs{
		HeadNode: &hdinsight.SparkClusterRolesHeadNodeArgs{
			Username: pulumi.String("string"),
			VmSize:   pulumi.String("string"),
			Password: pulumi.String("string"),
			ScriptActions: hdinsight.SparkClusterRolesHeadNodeScriptActionArray{
				&hdinsight.SparkClusterRolesHeadNodeScriptActionArgs{
					Name:       pulumi.String("string"),
					Uri:        pulumi.String("string"),
					Parameters: pulumi.String("string"),
				},
			},
			SshKeys: pulumi.StringArray{
				pulumi.String("string"),
			},
			SubnetId:         pulumi.String("string"),
			VirtualNetworkId: pulumi.String("string"),
		},
		WorkerNode: &hdinsight.SparkClusterRolesWorkerNodeArgs{
			TargetInstanceCount: pulumi.Int(0),
			Username:            pulumi.String("string"),
			VmSize:              pulumi.String("string"),
			Autoscale: &hdinsight.SparkClusterRolesWorkerNodeAutoscaleArgs{
				Capacity: &hdinsight.SparkClusterRolesWorkerNodeAutoscaleCapacityArgs{
					MaxInstanceCount: pulumi.Int(0),
					MinInstanceCount: pulumi.Int(0),
				},
				Recurrence: &hdinsight.SparkClusterRolesWorkerNodeAutoscaleRecurrenceArgs{
					Schedules: hdinsight.SparkClusterRolesWorkerNodeAutoscaleRecurrenceScheduleArray{
						&hdinsight.SparkClusterRolesWorkerNodeAutoscaleRecurrenceScheduleArgs{
							Days: pulumi.StringArray{
								pulumi.String("string"),
							},
							TargetInstanceCount: pulumi.Int(0),
							Time:                pulumi.String("string"),
						},
					},
					Timezone: pulumi.String("string"),
				},
			},
			Password: pulumi.String("string"),
			ScriptActions: hdinsight.SparkClusterRolesWorkerNodeScriptActionArray{
				&hdinsight.SparkClusterRolesWorkerNodeScriptActionArgs{
					Name:       pulumi.String("string"),
					Uri:        pulumi.String("string"),
					Parameters: pulumi.String("string"),
				},
			},
			SshKeys: pulumi.StringArray{
				pulumi.String("string"),
			},
			SubnetId:         pulumi.String("string"),
			VirtualNetworkId: pulumi.String("string"),
		},
		ZookeeperNode: &hdinsight.SparkClusterRolesZookeeperNodeArgs{
			Username: pulumi.String("string"),
			VmSize:   pulumi.String("string"),
			Password: pulumi.String("string"),
			ScriptActions: hdinsight.SparkClusterRolesZookeeperNodeScriptActionArray{
				&hdinsight.SparkClusterRolesZookeeperNodeScriptActionArgs{
					Name:       pulumi.String("string"),
					Uri:        pulumi.String("string"),
					Parameters: pulumi.String("string"),
				},
			},
			SshKeys: pulumi.StringArray{
				pulumi.String("string"),
			},
			SubnetId:         pulumi.String("string"),
			VirtualNetworkId: pulumi.String("string"),
		},
	},
	ResourceGroupName:          pulumi.String("string"),
	ClusterVersion:             pulumi.String("string"),
	Name:                       pulumi.String("string"),
	EncryptionInTransitEnabled: pulumi.Bool(false),
	Metastores: &hdinsight.SparkClusterMetastoresArgs{
		Ambari: &hdinsight.SparkClusterMetastoresAmbariArgs{
			DatabaseName: pulumi.String("string"),
			Password:     pulumi.String("string"),
			Server:       pulumi.String("string"),
			Username:     pulumi.String("string"),
		},
		Hive: &hdinsight.SparkClusterMetastoresHiveArgs{
			DatabaseName: pulumi.String("string"),
			Password:     pulumi.String("string"),
			Server:       pulumi.String("string"),
			Username:     pulumi.String("string"),
		},
		Oozie: &hdinsight.SparkClusterMetastoresOozieArgs{
			DatabaseName: pulumi.String("string"),
			Password:     pulumi.String("string"),
			Server:       pulumi.String("string"),
			Username:     pulumi.String("string"),
		},
	},
	Monitor: &hdinsight.SparkClusterMonitorArgs{
		LogAnalyticsWorkspaceId: pulumi.String("string"),
		PrimaryKey:              pulumi.String("string"),
	},
	Extension: &hdinsight.SparkClusterExtensionArgs{
		LogAnalyticsWorkspaceId: pulumi.String("string"),
		PrimaryKey:              pulumi.String("string"),
	},
	Network: &hdinsight.SparkClusterNetworkArgs{
		ConnectionDirection: pulumi.String("string"),
		PrivateLinkEnabled:  pulumi.Bool(false),
	},
	PrivateLinkConfiguration: &hdinsight.SparkClusterPrivateLinkConfigurationArgs{
		GroupId: pulumi.String("string"),
		IpConfiguration: &hdinsight.SparkClusterPrivateLinkConfigurationIpConfigurationArgs{
			Name:                      pulumi.String("string"),
			Primary:                   pulumi.Bool(false),
			PrivateIpAddress:          pulumi.String("string"),
			PrivateIpAllocationMethod: pulumi.String("string"),
			SubnetId:                  pulumi.String("string"),
		},
		Name: pulumi.String("string"),
	},
	Location: pulumi.String("string"),
	DiskEncryptions: hdinsight.SparkClusterDiskEncryptionArray{
		&hdinsight.SparkClusterDiskEncryptionArgs{
			EncryptionAlgorithm:       pulumi.String("string"),
			EncryptionAtHostEnabled:   pulumi.Bool(false),
			KeyVaultKeyId:             pulumi.String("string"),
			KeyVaultManagedIdentityId: pulumi.String("string"),
		},
	},
	SecurityProfile: &hdinsight.SparkClusterSecurityProfileArgs{
		AaddsResourceId:    pulumi.String("string"),
		DomainName:         pulumi.String("string"),
		DomainUserPassword: pulumi.String("string"),
		DomainUsername:     pulumi.String("string"),
		LdapsUrls: pulumi.StringArray{
			pulumi.String("string"),
		},
		MsiResourceId: pulumi.String("string"),
		ClusterUsersGroupDns: pulumi.StringArray{
			pulumi.String("string"),
		},
	},
	StorageAccountGen2: &hdinsight.SparkClusterStorageAccountGen2Args{
		FilesystemId:              pulumi.String("string"),
		IsDefault:                 pulumi.Bool(false),
		ManagedIdentityResourceId: pulumi.String("string"),
		StorageResourceId:         pulumi.String("string"),
	},
	StorageAccounts: hdinsight.SparkClusterStorageAccountArray{
		&hdinsight.SparkClusterStorageAccountArgs{
			IsDefault:          pulumi.Bool(false),
			StorageAccountKey:  pulumi.String("string"),
			StorageContainerId: pulumi.String("string"),
			StorageResourceId:  pulumi.String("string"),
		},
	},
	Tags: pulumi.StringMap{
		"string": pulumi.String("string"),
	},
	ComputeIsolation: &hdinsight.SparkClusterComputeIsolationArgs{
		ComputeIsolationEnabled: pulumi.Bool(false),
		HostSku:                 pulumi.String("string"),
	},
	TlsMinVersion: pulumi.String("string"),
})
// Java example: constructs an HDInsight Spark Cluster with every supported
// input property populated with placeholder values ("string", 0, false).
// Replace the placeholders with real values; see the property reference below.
var sparkClusterResource = new SparkCluster("sparkClusterResource", SparkClusterArgs.builder()
    // Gateway (cluster HTTPS endpoint) credentials.
    .gateway(SparkClusterGatewayArgs.builder()
        .password("string")
        .username("string")
        .build())
    // Version of the Spark component to deploy.
    .componentVersion(SparkClusterComponentVersionArgs.builder()
        .spark("string")
        .build())
    .tier("string")
    // Node layout: head node, worker node (with optional autoscale), and zookeeper node.
    .roles(SparkClusterRolesArgs.builder()
        .headNode(SparkClusterRolesHeadNodeArgs.builder()
            .username("string")
            .vmSize("string")
            .password("string")
            .scriptActions(SparkClusterRolesHeadNodeScriptActionArgs.builder()
                .name("string")
                .uri("string")
                .parameters("string")
                .build())
            .sshKeys("string")
            .subnetId("string")
            .virtualNetworkId("string")
            .build())
        .workerNode(SparkClusterRolesWorkerNodeArgs.builder()
            .targetInstanceCount(0)
            .username("string")
            .vmSize("string")
            // Autoscale can be capacity-based and/or schedule-based (recurrence).
            .autoscale(SparkClusterRolesWorkerNodeAutoscaleArgs.builder()
                .capacity(SparkClusterRolesWorkerNodeAutoscaleCapacityArgs.builder()
                    .maxInstanceCount(0)
                    .minInstanceCount(0)
                    .build())
                .recurrence(SparkClusterRolesWorkerNodeAutoscaleRecurrenceArgs.builder()
                    .schedules(SparkClusterRolesWorkerNodeAutoscaleRecurrenceScheduleArgs.builder()
                        .days("string")
                        .targetInstanceCount(0)
                        .time("string")
                        .build())
                    .timezone("string")
                    .build())
                .build())
            .password("string")
            .scriptActions(SparkClusterRolesWorkerNodeScriptActionArgs.builder()
                .name("string")
                .uri("string")
                .parameters("string")
                .build())
            .sshKeys("string")
            .subnetId("string")
            .virtualNetworkId("string")
            .build())
        .zookeeperNode(SparkClusterRolesZookeeperNodeArgs.builder()
            .username("string")
            .vmSize("string")
            .password("string")
            .scriptActions(SparkClusterRolesZookeeperNodeScriptActionArgs.builder()
                .name("string")
                .uri("string")
                .parameters("string")
                .build())
            .sshKeys("string")
            .subnetId("string")
            .virtualNetworkId("string")
            .build())
        .build())
    .resourceGroupName("string")
    .clusterVersion("string")
    .name("string")
    .encryptionInTransitEnabled(false)
    // Optional external metastore databases for Ambari, Hive and Oozie.
    .metastores(SparkClusterMetastoresArgs.builder()
        .ambari(SparkClusterMetastoresAmbariArgs.builder()
            .databaseName("string")
            .password("string")
            .server("string")
            .username("string")
            .build())
        .hive(SparkClusterMetastoresHiveArgs.builder()
            .databaseName("string")
            .password("string")
            .server("string")
            .username("string")
            .build())
        .oozie(SparkClusterMetastoresOozieArgs.builder()
            .databaseName("string")
            .password("string")
            .server("string")
            .username("string")
            .build())
        .build())
    // Log Analytics integration (monitor and extension blocks).
    .monitor(SparkClusterMonitorArgs.builder()
        .logAnalyticsWorkspaceId("string")
        .primaryKey("string")
        .build())
    .extension(SparkClusterExtensionArgs.builder()
        .logAnalyticsWorkspaceId("string")
        .primaryKey("string")
        .build())
    .network(SparkClusterNetworkArgs.builder()
        .connectionDirection("string")
        .privateLinkEnabled(false)
        .build())
    .privateLinkConfiguration(SparkClusterPrivateLinkConfigurationArgs.builder()
        .groupId("string")
        .ipConfiguration(SparkClusterPrivateLinkConfigurationIpConfigurationArgs.builder()
            .name("string")
            .primary(false)
            .privateIpAddress("string")
            .privateIpAllocationMethod("string")
            .subnetId("string")
            .build())
        .name("string")
        .build())
    .location("string")
    .diskEncryptions(SparkClusterDiskEncryptionArgs.builder()
        .encryptionAlgorithm("string")
        .encryptionAtHostEnabled(false)
        .keyVaultKeyId("string")
        .keyVaultManagedIdentityId("string")
        .build())
    // Optional security profile (e.g. Azure AD DS integration).
    .securityProfile(SparkClusterSecurityProfileArgs.builder()
        .aaddsResourceId("string")
        .domainName("string")
        .domainUserPassword("string")
        .domainUsername("string")
        .ldapsUrls("string")
        .msiResourceId("string")
        .clusterUsersGroupDns("string")
        .build())
    // Storage: Gen2 (ADLS) account and/or classic storage accounts.
    .storageAccountGen2(SparkClusterStorageAccountGen2Args.builder()
        .filesystemId("string")
        .isDefault(false)
        .managedIdentityResourceId("string")
        .storageResourceId("string")
        .build())
    .storageAccounts(SparkClusterStorageAccountArgs.builder()
        .isDefault(false)
        .storageAccountKey("string")
        .storageContainerId("string")
        .storageResourceId("string")
        .build())
    .tags(Map.of("string", "string"))
    .computeIsolation(SparkClusterComputeIsolationArgs.builder()
        .computeIsolationEnabled(false)
        .hostSku("string")
        .build())
    .tlsMinVersion("string")
    .build());
# Python example: constructs an HDInsight Spark Cluster with every supported
# input property populated with placeholder values ("string", 0, False).
# Replace the placeholders with real values; see the property reference below.
spark_cluster_resource = azure.hdinsight.SparkCluster("sparkClusterResource",
    # Gateway (cluster HTTPS endpoint) credentials.
    gateway={
        "password": "string",
        "username": "string",
    },
    # Version of the Spark component to deploy.
    component_version={
        "spark": "string",
    },
    tier="string",
    # Node layout: head node, worker node (with optional autoscale), and zookeeper node.
    roles={
        "headNode": {
            "username": "string",
            "vmSize": "string",
            "password": "string",
            "scriptActions": [{
                "name": "string",
                "uri": "string",
                "parameters": "string",
            }],
            "sshKeys": ["string"],
            "subnetId": "string",
            "virtualNetworkId": "string",
        },
        "workerNode": {
            "targetInstanceCount": 0,
            "username": "string",
            "vmSize": "string",
            # Autoscale can be capacity-based and/or schedule-based (recurrence).
            "autoscale": {
                "capacity": {
                    "maxInstanceCount": 0,
                    "minInstanceCount": 0,
                },
                "recurrence": {
                    "schedules": [{
                        "days": ["string"],
                        "targetInstanceCount": 0,
                        "time": "string",
                    }],
                    "timezone": "string",
                },
            },
            "password": "string",
            "scriptActions": [{
                "name": "string",
                "uri": "string",
                "parameters": "string",
            }],
            "sshKeys": ["string"],
            "subnetId": "string",
            "virtualNetworkId": "string",
        },
        "zookeeperNode": {
            "username": "string",
            "vmSize": "string",
            "password": "string",
            "scriptActions": [{
                "name": "string",
                "uri": "string",
                "parameters": "string",
            }],
            "sshKeys": ["string"],
            "subnetId": "string",
            "virtualNetworkId": "string",
        },
    },
    resource_group_name="string",
    cluster_version="string",
    name="string",
    encryption_in_transit_enabled=False,
    # Optional external metastore databases for Ambari, Hive and Oozie.
    metastores={
        "ambari": {
            "databaseName": "string",
            "password": "string",
            "server": "string",
            "username": "string",
        },
        "hive": {
            "databaseName": "string",
            "password": "string",
            "server": "string",
            "username": "string",
        },
        "oozie": {
            "databaseName": "string",
            "password": "string",
            "server": "string",
            "username": "string",
        },
    },
    # Log Analytics integration (monitor and extension blocks).
    monitor={
        "logAnalyticsWorkspaceId": "string",
        "primaryKey": "string",
    },
    extension={
        "logAnalyticsWorkspaceId": "string",
        "primaryKey": "string",
    },
    network={
        "connectionDirection": "string",
        "privateLinkEnabled": False,
    },
    private_link_configuration={
        "groupId": "string",
        "ipConfiguration": {
            "name": "string",
            "primary": False,
            "privateIpAddress": "string",
            "privateIpAllocationMethod": "string",
            "subnetId": "string",
        },
        "name": "string",
    },
    location="string",
    disk_encryptions=[{
        "encryptionAlgorithm": "string",
        "encryptionAtHostEnabled": False,
        "keyVaultKeyId": "string",
        "keyVaultManagedIdentityId": "string",
    }],
    # Optional security profile (e.g. Azure AD DS integration).
    security_profile={
        "aaddsResourceId": "string",
        "domainName": "string",
        "domainUserPassword": "string",
        "domainUsername": "string",
        "ldapsUrls": ["string"],
        "msiResourceId": "string",
        "clusterUsersGroupDns": ["string"],
    },
    # Storage: Gen2 (ADLS) account and/or classic storage accounts.
    storage_account_gen2={
        "filesystemId": "string",
        "isDefault": False,
        "managedIdentityResourceId": "string",
        "storageResourceId": "string",
    },
    storage_accounts=[{
        "isDefault": False,
        "storageAccountKey": "string",
        "storageContainerId": "string",
        "storageResourceId": "string",
    }],
    tags={
        "string": "string",
    },
    compute_isolation={
        "computeIsolationEnabled": False,
        "hostSku": "string",
    },
    tls_min_version="string")
// TypeScript example: constructs an HDInsight Spark Cluster with every supported
// input property populated with placeholder values ("string", 0, false).
// Replace the placeholders with real values; see the property reference below.
const sparkClusterResource = new azure.hdinsight.SparkCluster("sparkClusterResource", {
    // Gateway (cluster HTTPS endpoint) credentials.
    gateway: {
        password: "string",
        username: "string",
    },
    // Version of the Spark component to deploy.
    componentVersion: {
        spark: "string",
    },
    tier: "string",
    // Node layout: head node, worker node (with optional autoscale), and zookeeper node.
    roles: {
        headNode: {
            username: "string",
            vmSize: "string",
            password: "string",
            scriptActions: [{
                name: "string",
                uri: "string",
                parameters: "string",
            }],
            sshKeys: ["string"],
            subnetId: "string",
            virtualNetworkId: "string",
        },
        workerNode: {
            targetInstanceCount: 0,
            username: "string",
            vmSize: "string",
            // Autoscale can be capacity-based and/or schedule-based (recurrence).
            autoscale: {
                capacity: {
                    maxInstanceCount: 0,
                    minInstanceCount: 0,
                },
                recurrence: {
                    schedules: [{
                        days: ["string"],
                        targetInstanceCount: 0,
                        time: "string",
                    }],
                    timezone: "string",
                },
            },
            password: "string",
            scriptActions: [{
                name: "string",
                uri: "string",
                parameters: "string",
            }],
            sshKeys: ["string"],
            subnetId: "string",
            virtualNetworkId: "string",
        },
        zookeeperNode: {
            username: "string",
            vmSize: "string",
            password: "string",
            scriptActions: [{
                name: "string",
                uri: "string",
                parameters: "string",
            }],
            sshKeys: ["string"],
            subnetId: "string",
            virtualNetworkId: "string",
        },
    },
    resourceGroupName: "string",
    clusterVersion: "string",
    name: "string",
    encryptionInTransitEnabled: false,
    // Optional external metastore databases for Ambari, Hive and Oozie.
    metastores: {
        ambari: {
            databaseName: "string",
            password: "string",
            server: "string",
            username: "string",
        },
        hive: {
            databaseName: "string",
            password: "string",
            server: "string",
            username: "string",
        },
        oozie: {
            databaseName: "string",
            password: "string",
            server: "string",
            username: "string",
        },
    },
    // Log Analytics integration (monitor and extension blocks).
    monitor: {
        logAnalyticsWorkspaceId: "string",
        primaryKey: "string",
    },
    extension: {
        logAnalyticsWorkspaceId: "string",
        primaryKey: "string",
    },
    network: {
        connectionDirection: "string",
        privateLinkEnabled: false,
    },
    privateLinkConfiguration: {
        groupId: "string",
        ipConfiguration: {
            name: "string",
            primary: false,
            privateIpAddress: "string",
            privateIpAllocationMethod: "string",
            subnetId: "string",
        },
        name: "string",
    },
    location: "string",
    diskEncryptions: [{
        encryptionAlgorithm: "string",
        encryptionAtHostEnabled: false,
        keyVaultKeyId: "string",
        keyVaultManagedIdentityId: "string",
    }],
    // Optional security profile (e.g. Azure AD DS integration).
    securityProfile: {
        aaddsResourceId: "string",
        domainName: "string",
        domainUserPassword: "string",
        domainUsername: "string",
        ldapsUrls: ["string"],
        msiResourceId: "string",
        clusterUsersGroupDns: ["string"],
    },
    // Storage: Gen2 (ADLS) account and/or classic storage accounts.
    storageAccountGen2: {
        filesystemId: "string",
        isDefault: false,
        managedIdentityResourceId: "string",
        storageResourceId: "string",
    },
    storageAccounts: [{
        isDefault: false,
        storageAccountKey: "string",
        storageContainerId: "string",
        storageResourceId: "string",
    }],
    tags: {
        string: "string",
    },
    computeIsolation: {
        computeIsolationEnabled: false,
        hostSku: "string",
    },
    tlsMinVersion: "string",
});
# YAML example: declares an HDInsight Spark Cluster with every supported
# input property populated with placeholder values (string, 0, false).
# Replace the placeholders with real values; see the property reference below.
type: azure:hdinsight:SparkCluster
properties:
    clusterVersion: string
    componentVersion:
        spark: string
    computeIsolation:
        computeIsolationEnabled: false
        hostSku: string
    diskEncryptions:
        - encryptionAlgorithm: string
          encryptionAtHostEnabled: false
          keyVaultKeyId: string
          keyVaultManagedIdentityId: string
    encryptionInTransitEnabled: false
    # Log Analytics integration (extension and monitor blocks).
    extension:
        logAnalyticsWorkspaceId: string
        primaryKey: string
    # Gateway (cluster HTTPS endpoint) credentials.
    gateway:
        password: string
        username: string
    location: string
    # Optional external metastore databases for Ambari, Hive and Oozie.
    metastores:
        ambari:
            databaseName: string
            password: string
            server: string
            username: string
        hive:
            databaseName: string
            password: string
            server: string
            username: string
        oozie:
            databaseName: string
            password: string
            server: string
            username: string
    monitor:
        logAnalyticsWorkspaceId: string
        primaryKey: string
    name: string
    network:
        connectionDirection: string
        privateLinkEnabled: false
    privateLinkConfiguration:
        groupId: string
        ipConfiguration:
            name: string
            primary: false
            privateIpAddress: string
            privateIpAllocationMethod: string
            subnetId: string
        name: string
    resourceGroupName: string
    # Node layout: head node, worker node (with optional autoscale), and zookeeper node.
    roles:
        headNode:
            password: string
            scriptActions:
                - name: string
                  parameters: string
                  uri: string
            sshKeys:
                - string
            subnetId: string
            username: string
            virtualNetworkId: string
            vmSize: string
        workerNode:
            # Autoscale can be capacity-based and/or schedule-based (recurrence).
            autoscale:
                capacity:
                    maxInstanceCount: 0
                    minInstanceCount: 0
                recurrence:
                    schedules:
                        - days:
                            - string
                          targetInstanceCount: 0
                          time: string
                    timezone: string
            password: string
            scriptActions:
                - name: string
                  parameters: string
                  uri: string
            sshKeys:
                - string
            subnetId: string
            targetInstanceCount: 0
            username: string
            virtualNetworkId: string
            vmSize: string
        zookeeperNode:
            password: string
            scriptActions:
                - name: string
                  parameters: string
                  uri: string
            sshKeys:
                - string
            subnetId: string
            username: string
            virtualNetworkId: string
            vmSize: string
    # Optional security profile (e.g. Azure AD DS integration).
    securityProfile:
        aaddsResourceId: string
        clusterUsersGroupDns:
            - string
        domainName: string
        domainUserPassword: string
        domainUsername: string
        ldapsUrls:
            - string
        msiResourceId: string
    # Storage: Gen2 (ADLS) account and/or classic storage accounts.
    storageAccountGen2:
        filesystemId: string
        isDefault: false
        managedIdentityResourceId: string
        storageResourceId: string
    storageAccounts:
        - isDefault: false
          storageAccountKey: string
          storageContainerId: string
          storageResourceId: string
    tags:
        string: string
    tier: string
    tlsMinVersion: string
SparkCluster Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
The SparkCluster resource accepts the following input properties:
- ClusterVersion string
- Specifies the Version of HDInsights which should be used for this Cluster. Changing this forces a new resource to be created.
- ComponentVersion SparkCluster Component Version 
- A component_versionblock as defined below.
- Gateway
SparkCluster Gateway 
- A gatewayblock as defined below.
- ResourceGroupName string
- Specifies the name of the Resource Group in which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
- Roles
SparkCluster Roles 
- A rolesblock as defined below.
- Tier string
- Specifies the Tier which should be used for this HDInsight Spark Cluster. Possible values are Standard or Premium. Changing this forces a new resource to be created.
- ComputeIsolation SparkCluster Compute Isolation 
- A compute_isolationblock as defined below.
- DiskEncryptions List<SparkCluster Disk Encryption> 
- One or more disk_encryptionblock as defined below.
- EncryptionInTransitEnabled bool
- Whether encryption in transit is enabled for this Cluster. Changing this forces a new resource to be created.
- Extension
SparkCluster Extension 
- An extensionblock as defined below.
- Location string
- Specifies the Azure Region which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
- Metastores
SparkCluster Metastores 
- A metastoresblock as defined below.
- Monitor
SparkCluster Monitor 
- A monitorblock as defined below.
- Name string
- Specifies the name for this HDInsight Spark Cluster. Changing this forces a new resource to be created.
- Network
SparkCluster Network 
- A networkblock as defined below.
- PrivateLinkConfiguration SparkClusterPrivateLinkConfiguration
- A private_link_configurationblock as defined below.
- SecurityProfile SparkCluster Security Profile 
- A security_profileblock as defined below. Changing this forces a new resource to be created.
- StorageAccountGen2 SparkClusterStorageAccountGen2
- A storage_account_gen2block as defined below.
- StorageAccounts List<SparkCluster Storage Account> 
- One or more storage_accountblock as defined below.
- Tags Dictionary<string, string>
- A map of Tags which should be assigned to this HDInsight Spark Cluster.
- TlsMinVersion string
- The minimal supported TLS version. Possible values are 1.0, 1.1 or 1.2. Changing this forces a new resource to be created. - NOTE: Starting on June 30, 2020, Azure HDInsight will enforce TLS 1.2 or later versions for all HTTPS connections. For more information, see Azure HDInsight TLS 1.2 Enforcement. 
- ClusterVersion string
- Specifies the Version of HDInsights which should be used for this Cluster. Changing this forces a new resource to be created.
- ComponentVersion SparkCluster Component Version Args 
- A component_versionblock as defined below.
- Gateway
SparkCluster Gateway Args 
- A gatewayblock as defined below.
- ResourceGroupName string
- Specifies the name of the Resource Group in which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
- Roles
SparkCluster Roles Args 
- A rolesblock as defined below.
- Tier string
- Specifies the Tier which should be used for this HDInsight Spark Cluster. Possible values are StandardorPremium. Changing this forces a new resource to be created.
- ComputeIsolation SparkCluster Compute Isolation Args 
- A compute_isolationblock as defined below.
- DiskEncryptions []SparkCluster Disk Encryption Args 
- One or more disk_encryptionblock as defined below.
- EncryptionIn boolTransit Enabled 
- Whether encryption in transit is enabled for this Cluster. Changing this forces a new resource to be created.
- Extension
SparkCluster Extension Args 
- An extensionblock as defined below.
- Location string
- Specifies the Azure Region which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
- Metastores
SparkCluster Metastores Args 
- A metastoresblock as defined below.
- Monitor
SparkCluster Monitor Args 
- A monitorblock as defined below.
- Name string
- Specifies the name for this HDInsight Spark Cluster. Changing this forces a new resource to be created.
- Network
SparkCluster Network Args 
- A networkblock as defined below.
- PrivateLinkConfiguration SparkClusterPrivateLinkConfigurationArgs
- A private_link_configurationblock as defined below.
- SecurityProfile SparkCluster Security Profile Args 
- A security_profileblock as defined below. Changing this forces a new resource to be created.
- StorageAccountGen2 SparkClusterStorageAccountGen2Args
- A storage_account_gen2block as defined below.
- StorageAccounts []SparkCluster Storage Account Args 
- One or more storage_accountblock as defined below.
- Tags map[string]string
- A map of Tags which should be assigned to this HDInsight Spark Cluster.
- TlsMin stringVersion 
- The minimal supported TLS version. Possible values are 1.0, 1.1 or 1.2. Changing this forces a new resource to be created. - NOTE: Starting on June 30, 2020, Azure HDInsight will enforce TLS 1.2 or later versions for all HTTPS connections. For more information, see Azure HDInsight TLS 1.2 Enforcement. 
- clusterVersion String
- Specifies the Version of HDInsights which should be used for this Cluster. Changing this forces a new resource to be created.
- componentVersion SparkCluster Component Version 
- A component_versionblock as defined below.
- gateway
SparkCluster Gateway 
- A gatewayblock as defined below.
- resourceGroup StringName 
- Specifies the name of the Resource Group in which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
- roles
SparkCluster Roles 
- A rolesblock as defined below.
- tier String
- Specifies the Tier which should be used for this HDInsight Spark Cluster. Possible values are StandardorPremium. Changing this forces a new resource to be created.
- computeIsolation SparkCluster Compute Isolation 
- A compute_isolationblock as defined below.
- diskEncryptions List<SparkCluster Disk Encryption> 
- One or more disk_encryptionblock as defined below.
- encryptionIn BooleanTransit Enabled 
- Whether encryption in transit is enabled for this Cluster. Changing this forces a new resource to be created.
- extension
SparkCluster Extension 
- An extensionblock as defined below.
- location String
- Specifies the Azure Region which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
- metastores
SparkCluster Metastores 
- A metastoresblock as defined below.
- monitor
SparkCluster Monitor 
- A monitorblock as defined below.
- name String
- Specifies the name for this HDInsight Spark Cluster. Changing this forces a new resource to be created.
- network
SparkCluster Network 
- A networkblock as defined below.
- privateLink SparkConfiguration Cluster Private Link Configuration 
- A private_link_configurationblock as defined below.
- securityProfile SparkCluster Security Profile 
- A security_profileblock as defined below. Changing this forces a new resource to be created.
- storageAccount SparkGen2 Cluster Storage Account Gen2 
- A storage_account_gen2block as defined below.
- storageAccounts List<SparkCluster Storage Account> 
- One or more storage_accountblock as defined below.
- Map<String,String>
- A map of Tags which should be assigned to this HDInsight Spark Cluster.
- tlsMin StringVersion 
- The minimal supported TLS version. Possible values are 1.0, 1.1 or 1.2. Changing this forces a new resource to be created. - NOTE: Starting on June 30, 2020, Azure HDInsight will enforce TLS 1.2 or later versions for all HTTPS connections. For more information, see Azure HDInsight TLS 1.2 Enforcement. 
- clusterVersion string
- Specifies the Version of HDInsights which should be used for this Cluster. Changing this forces a new resource to be created.
- componentVersion SparkCluster Component Version 
- A component_versionblock as defined below.
- gateway
SparkCluster Gateway 
- A gatewayblock as defined below.
- resourceGroup stringName 
- Specifies the name of the Resource Group in which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
- roles
SparkCluster Roles 
- A rolesblock as defined below.
- tier string
- Specifies the Tier which should be used for this HDInsight Spark Cluster. Possible values are StandardorPremium. Changing this forces a new resource to be created.
- computeIsolation SparkCluster Compute Isolation 
- A compute_isolationblock as defined below.
- diskEncryptions SparkCluster Disk Encryption[] 
- One or more disk_encryptionblock as defined below.
- encryptionIn booleanTransit Enabled 
- Whether encryption in transit is enabled for this Cluster. Changing this forces a new resource to be created.
- extension
SparkCluster Extension 
- An extensionblock as defined below.
- location string
- Specifies the Azure Region which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
- metastores
SparkCluster Metastores 
- A metastoresblock as defined below.
- monitor
SparkCluster Monitor 
- A monitorblock as defined below.
- name string
- Specifies the name for this HDInsight Spark Cluster. Changing this forces a new resource to be created.
- network
SparkCluster Network 
- A networkblock as defined below.
- privateLink SparkConfiguration Cluster Private Link Configuration 
- A private_link_configurationblock as defined below.
- securityProfile SparkCluster Security Profile 
- A security_profileblock as defined below. Changing this forces a new resource to be created.
- storageAccount SparkGen2 Cluster Storage Account Gen2 
- A storage_account_gen2block as defined below.
- storageAccounts SparkCluster Storage Account[] 
- One or more storage_accountblock as defined below.
- {[key: string]: string}
- A map of Tags which should be assigned to this HDInsight Spark Cluster.
- tlsMin stringVersion 
- The minimal supported TLS version. Possible values are 1.0, 1.1 or 1.2. Changing this forces a new resource to be created. - NOTE: Starting on June 30, 2020, Azure HDInsight will enforce TLS 1.2 or later versions for all HTTPS connections. For more information, see Azure HDInsight TLS 1.2 Enforcement. 
- cluster_version str
- Specifies the Version of HDInsights which should be used for this Cluster. Changing this forces a new resource to be created.
- component_version SparkCluster Component Version Args 
- A component_versionblock as defined below.
- gateway
SparkCluster Gateway Args 
- A gatewayblock as defined below.
- resource_group_name str
- Specifies the name of the Resource Group in which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
- roles
SparkCluster Roles Args 
- A rolesblock as defined below.
- tier str
- Specifies the Tier which should be used for this HDInsight Spark Cluster. Possible values are StandardorPremium. Changing this forces a new resource to be created.
- compute_isolation SparkCluster Compute Isolation Args 
- A compute_isolationblock as defined below.
- disk_encryptions Sequence[SparkCluster Disk Encryption Args] 
- One or more disk_encryptionblock as defined below.
- encryption_in_transit_enabled bool
- Whether encryption in transit is enabled for this Cluster. Changing this forces a new resource to be created.
- extension
SparkCluster Extension Args 
- An extensionblock as defined below.
- location str
- Specifies the Azure Region which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
- metastores
SparkCluster Metastores Args 
- A metastoresblock as defined below.
- monitor
SparkCluster Monitor Args 
- A monitorblock as defined below.
- name str
- Specifies the name for this HDInsight Spark Cluster. Changing this forces a new resource to be created.
- network
SparkCluster Network Args 
- A networkblock as defined below.
- private_link_configuration SparkClusterPrivateLinkConfigurationArgs
- A private_link_configurationblock as defined below.
- security_profile SparkCluster Security Profile Args 
- A security_profileblock as defined below. Changing this forces a new resource to be created.
- storage_account_ Sparkgen2 Cluster Storage Account Gen2Args 
- A storage_account_gen2block as defined below.
- storage_accounts Sequence[SparkCluster Storage Account Args] 
- One or more storage_accountblock as defined below.
- Mapping[str, str]
- A map of Tags which should be assigned to this HDInsight Spark Cluster.
- tls_min_ strversion 
- The minimal supported TLS version. Possible values are 1.0, 1.1 or 1.2. Changing this forces a new resource to be created. - NOTE: Starting on June 30, 2020, Azure HDInsight will enforce TLS 1.2 or later versions for all HTTPS connections. For more information, see Azure HDInsight TLS 1.2 Enforcement. 
- clusterVersion String
- Specifies the Version of HDInsights which should be used for this Cluster. Changing this forces a new resource to be created.
- componentVersion Property Map
- A component_versionblock as defined below.
- gateway Property Map
- A gatewayblock as defined below.
- resourceGroup StringName 
- Specifies the name of the Resource Group in which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
- roles Property Map
- A rolesblock as defined below.
- tier String
- Specifies the Tier which should be used for this HDInsight Spark Cluster. Possible values are StandardorPremium. Changing this forces a new resource to be created.
- computeIsolation Property Map
- A compute_isolationblock as defined below.
- diskEncryptions List<Property Map>
- One or more disk_encryptionblock as defined below.
- encryptionIn BooleanTransit Enabled 
- Whether encryption in transit is enabled for this Cluster. Changing this forces a new resource to be created.
- extension Property Map
- An extensionblock as defined below.
- location String
- Specifies the Azure Region which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
- metastores Property Map
- A metastoresblock as defined below.
- monitor Property Map
- A monitorblock as defined below.
- name String
- Specifies the name for this HDInsight Spark Cluster. Changing this forces a new resource to be created.
- network Property Map
- A networkblock as defined below.
- privateLink Property MapConfiguration 
- A private_link_configurationblock as defined below.
- securityProfile Property Map
- A security_profileblock as defined below. Changing this forces a new resource to be created.
- storageAccount Property MapGen2 
- A storage_account_gen2block as defined below.
- storageAccounts List<Property Map>
- One or more storage_accountblock as defined below.
- Map<String>
- A map of Tags which should be assigned to this HDInsight Spark Cluster.
- tlsMin StringVersion 
- The minimal supported TLS version. Possible values are 1.0, 1.1 or 1.2. Changing this forces a new resource to be created. - NOTE: Starting on June 30, 2020, Azure HDInsight will enforce TLS 1.2 or later versions for all HTTPS connections. For more information, see Azure HDInsight TLS 1.2 Enforcement. 
Outputs
All input properties are implicitly available as output properties. Additionally, the SparkCluster resource produces the following output properties:
- HttpsEndpoint string
- The HTTPS Connectivity Endpoint for this HDInsight Spark Cluster.
- Id string
- The provider-assigned unique ID for this managed resource.
- SshEndpoint string
- The SSH Connectivity Endpoint for this HDInsight Spark Cluster.
- HttpsEndpoint string
- The HTTPS Connectivity Endpoint for this HDInsight Spark Cluster.
- Id string
- The provider-assigned unique ID for this managed resource.
- SshEndpoint string
- The SSH Connectivity Endpoint for this HDInsight Spark Cluster.
- httpsEndpoint String
- The HTTPS Connectivity Endpoint for this HDInsight Spark Cluster.
- id String
- The provider-assigned unique ID for this managed resource.
- sshEndpoint String
- The SSH Connectivity Endpoint for this HDInsight Spark Cluster.
- httpsEndpoint string
- The HTTPS Connectivity Endpoint for this HDInsight Spark Cluster.
- id string
- The provider-assigned unique ID for this managed resource.
- sshEndpoint string
- The SSH Connectivity Endpoint for this HDInsight Spark Cluster.
- https_endpoint str
- The HTTPS Connectivity Endpoint for this HDInsight Spark Cluster.
- id str
- The provider-assigned unique ID for this managed resource.
- ssh_endpoint str
- The SSH Connectivity Endpoint for this HDInsight Spark Cluster.
- httpsEndpoint String
- The HTTPS Connectivity Endpoint for this HDInsight Spark Cluster.
- id String
- The provider-assigned unique ID for this managed resource.
- sshEndpoint String
- The SSH Connectivity Endpoint for this HDInsight Spark Cluster.
Look up Existing SparkCluster Resource
Get an existing SparkCluster resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: SparkClusterState, opts?: CustomResourceOptions): SparkCluster
@staticmethod
def get(resource_name: str,
        id: str,
        opts: Optional[ResourceOptions] = None,
        cluster_version: Optional[str] = None,
        component_version: Optional[SparkClusterComponentVersionArgs] = None,
        compute_isolation: Optional[SparkClusterComputeIsolationArgs] = None,
        disk_encryptions: Optional[Sequence[SparkClusterDiskEncryptionArgs]] = None,
        encryption_in_transit_enabled: Optional[bool] = None,
        extension: Optional[SparkClusterExtensionArgs] = None,
        gateway: Optional[SparkClusterGatewayArgs] = None,
        https_endpoint: Optional[str] = None,
        location: Optional[str] = None,
        metastores: Optional[SparkClusterMetastoresArgs] = None,
        monitor: Optional[SparkClusterMonitorArgs] = None,
        name: Optional[str] = None,
        network: Optional[SparkClusterNetworkArgs] = None,
        private_link_configuration: Optional[SparkClusterPrivateLinkConfigurationArgs] = None,
        resource_group_name: Optional[str] = None,
        roles: Optional[SparkClusterRolesArgs] = None,
        security_profile: Optional[SparkClusterSecurityProfileArgs] = None,
        ssh_endpoint: Optional[str] = None,
        storage_account_gen2: Optional[SparkClusterStorageAccountGen2Args] = None,
        storage_accounts: Optional[Sequence[SparkClusterStorageAccountArgs]] = None,
        tags: Optional[Mapping[str, str]] = None,
        tier: Optional[str] = None,
        tls_min_version: Optional[str] = None) -> SparkCluster
func GetSparkCluster(ctx *Context, name string, id IDInput, state *SparkClusterState, opts ...ResourceOption) (*SparkCluster, error)
public static SparkCluster Get(string name, Input<string> id, SparkClusterState? state, CustomResourceOptions? opts = null)
public static SparkCluster get(String name, Output<String> id, SparkClusterState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- ClusterVersion string
- Specifies the Version of HDInsights which should be used for this Cluster. Changing this forces a new resource to be created.
- ComponentVersion SparkClusterComponentVersion
- A component_version block as defined below.
- ComputeIsolation SparkClusterComputeIsolation
- A compute_isolation block as defined below.
- DiskEncryptions List<SparkClusterDiskEncryption>
- One or more disk_encryption blocks as defined below.
- EncryptionInTransitEnabled bool
- Whether encryption in transit is enabled for this Cluster. Changing this forces a new resource to be created.
- Extension
SparkCluster Extension 
- An extensionblock as defined below.
- Gateway
SparkCluster Gateway 
- A gatewayblock as defined below.
- HttpsEndpoint string
- The HTTPS Connectivity Endpoint for this HDInsight Spark Cluster.
- Location string
- Specifies the Azure Region which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
- Metastores
SparkCluster Metastores 
- A metastoresblock as defined below.
- Monitor
SparkCluster Monitor 
- A monitorblock as defined below.
- Name string
- Specifies the name for this HDInsight Spark Cluster. Changing this forces a new resource to be created.
- Network
SparkCluster Network 
- A networkblock as defined below.
- PrivateLinkConfiguration SparkClusterPrivateLinkConfiguration
- A private_link_configuration block as defined below.
- ResourceGroupName string
- Specifies the name of the Resource Group in which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
- Roles
SparkCluster Roles 
- A rolesblock as defined below.
- SecurityProfile SparkCluster Security Profile 
- A security_profileblock as defined below. Changing this forces a new resource to be created.
- SshEndpoint string
- The SSH Connectivity Endpoint for this HDInsight Spark Cluster.
- StorageAccountGen2 SparkClusterStorageAccountGen2
- A storage_account_gen2 block as defined below.
- StorageAccounts List<SparkClusterStorageAccount>
- One or more storage_account blocks as defined below.
- Tags Dictionary<string, string>
- A map of Tags which should be assigned to this HDInsight Spark Cluster.
- Tier string
- Specifies the Tier which should be used for this HDInsight Spark Cluster. Possible values are Standard or Premium. Changing this forces a new resource to be created.
- TlsMinVersion string
- The minimal supported TLS version. Possible values are 1.0, 1.1 or 1.2. Changing this forces a new resource to be created. - NOTE: Starting on June 30, 2020, Azure HDInsight will enforce TLS 1.2 or later versions for all HTTPS connections. For more information, see Azure HDInsight TLS 1.2 Enforcement. 
- ClusterVersion string
- Specifies the Version of HDInsights which should be used for this Cluster. Changing this forces a new resource to be created.
- ComponentVersion SparkCluster Component Version Args 
- A component_versionblock as defined below.
- ComputeIsolation SparkCluster Compute Isolation Args 
- A compute_isolationblock as defined below.
- DiskEncryptions []SparkClusterDiskEncryptionArgs
- One or more disk_encryption blocks as defined below.
- EncryptionInTransitEnabled bool
- Whether encryption in transit is enabled for this Cluster. Changing this forces a new resource to be created.
- Extension
SparkCluster Extension Args 
- An extensionblock as defined below.
- Gateway
SparkCluster Gateway Args 
- A gatewayblock as defined below.
- HttpsEndpoint string
- The HTTPS Connectivity Endpoint for this HDInsight Spark Cluster.
- Location string
- Specifies the Azure Region which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
- Metastores
SparkCluster Metastores Args 
- A metastoresblock as defined below.
- Monitor
SparkCluster Monitor Args 
- A monitorblock as defined below.
- Name string
- Specifies the name for this HDInsight Spark Cluster. Changing this forces a new resource to be created.
- Network
SparkCluster Network Args 
- A networkblock as defined below.
- PrivateLinkConfiguration SparkClusterPrivateLinkConfigurationArgs
- A private_link_configuration block as defined below.
- ResourceGroupName string
- Specifies the name of the Resource Group in which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
- Roles
SparkCluster Roles Args 
- A rolesblock as defined below.
- SecurityProfile SparkCluster Security Profile Args 
- A security_profileblock as defined below. Changing this forces a new resource to be created.
- SshEndpoint string
- The SSH Connectivity Endpoint for this HDInsight Spark Cluster.
- StorageAccountGen2 SparkClusterStorageAccountGen2Args
- A storage_account_gen2 block as defined below.
- StorageAccounts []SparkClusterStorageAccountArgs
- One or more storage_account blocks as defined below.
- Tags map[string]string
- A map of Tags which should be assigned to this HDInsight Spark Cluster.
- Tier string
- Specifies the Tier which should be used for this HDInsight Spark Cluster. Possible values are Standard or Premium. Changing this forces a new resource to be created.
- TlsMinVersion string
- The minimal supported TLS version. Possible values are 1.0, 1.1 or 1.2. Changing this forces a new resource to be created. - NOTE: Starting on June 30, 2020, Azure HDInsight will enforce TLS 1.2 or later versions for all HTTPS connections. For more information, see Azure HDInsight TLS 1.2 Enforcement. 
- clusterVersion String
- Specifies the Version of HDInsights which should be used for this Cluster. Changing this forces a new resource to be created.
- componentVersion SparkCluster Component Version 
- A component_versionblock as defined below.
- computeIsolation SparkCluster Compute Isolation 
- A compute_isolationblock as defined below.
- diskEncryptions List<SparkCluster Disk Encryption> 
- One or more disk_encryptionblock as defined below.
- encryptionInTransitEnabled Boolean
- Whether encryption in transit is enabled for this Cluster. Changing this forces a new resource to be created.
- extension
SparkCluster Extension 
- An extensionblock as defined below.
- gateway
SparkCluster Gateway 
- A gatewayblock as defined below.
- httpsEndpoint String
- The HTTPS Connectivity Endpoint for this HDInsight Spark Cluster.
- location String
- Specifies the Azure Region which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
- metastores
SparkCluster Metastores 
- A metastoresblock as defined below.
- monitor
SparkCluster Monitor 
- A monitorblock as defined below.
- name String
- Specifies the name for this HDInsight Spark Cluster. Changing this forces a new resource to be created.
- network
SparkCluster Network 
- A networkblock as defined below.
- privateLinkConfiguration SparkClusterPrivateLinkConfiguration
- A private_link_configuration block as defined below.
- resourceGroupName String
- Specifies the name of the Resource Group in which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
- roles
SparkCluster Roles 
- A rolesblock as defined below.
- securityProfile SparkCluster Security Profile 
- A security_profileblock as defined below. Changing this forces a new resource to be created.
- sshEndpoint String
- The SSH Connectivity Endpoint for this HDInsight Spark Cluster.
- storageAccountGen2 SparkClusterStorageAccountGen2
- A storage_account_gen2 block as defined below.
- storageAccounts List<SparkClusterStorageAccount>
- One or more storage_account blocks as defined below.
- tags Map<String,String>
- A map of Tags which should be assigned to this HDInsight Spark Cluster.
- tier String
- Specifies the Tier which should be used for this HDInsight Spark Cluster. Possible values are Standard or Premium. Changing this forces a new resource to be created.
- tlsMinVersion String
- The minimal supported TLS version. Possible values are 1.0, 1.1 or 1.2. Changing this forces a new resource to be created. - NOTE: Starting on June 30, 2020, Azure HDInsight will enforce TLS 1.2 or later versions for all HTTPS connections. For more information, see Azure HDInsight TLS 1.2 Enforcement. 
- clusterVersion string
- Specifies the Version of HDInsights which should be used for this Cluster. Changing this forces a new resource to be created.
- componentVersion SparkCluster Component Version 
- A component_versionblock as defined below.
- computeIsolation SparkCluster Compute Isolation 
- A compute_isolationblock as defined below.
- diskEncryptions SparkCluster Disk Encryption[] 
- One or more disk_encryptionblock as defined below.
- encryptionInTransitEnabled boolean
- Whether encryption in transit is enabled for this Cluster. Changing this forces a new resource to be created.
- extension
SparkCluster Extension 
- An extensionblock as defined below.
- gateway
SparkCluster Gateway 
- A gatewayblock as defined below.
- httpsEndpoint string
- The HTTPS Connectivity Endpoint for this HDInsight Spark Cluster.
- location string
- Specifies the Azure Region which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
- metastores
SparkCluster Metastores 
- A metastoresblock as defined below.
- monitor
SparkCluster Monitor 
- A monitorblock as defined below.
- name string
- Specifies the name for this HDInsight Spark Cluster. Changing this forces a new resource to be created.
- network
SparkCluster Network 
- A networkblock as defined below.
- privateLinkConfiguration SparkClusterPrivateLinkConfiguration
- A private_link_configuration block as defined below.
- resourceGroupName string
- Specifies the name of the Resource Group in which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
- roles
SparkCluster Roles 
- A rolesblock as defined below.
- securityProfile SparkCluster Security Profile 
- A security_profileblock as defined below. Changing this forces a new resource to be created.
- sshEndpoint string
- The SSH Connectivity Endpoint for this HDInsight Spark Cluster.
- storageAccountGen2 SparkClusterStorageAccountGen2
- A storage_account_gen2 block as defined below.
- storageAccounts SparkClusterStorageAccount[]
- One or more storage_account blocks as defined below.
- tags {[key: string]: string}
- A map of Tags which should be assigned to this HDInsight Spark Cluster.
- tier string
- Specifies the Tier which should be used for this HDInsight Spark Cluster. Possible values are Standard or Premium. Changing this forces a new resource to be created.
- tlsMinVersion string
- The minimal supported TLS version. Possible values are 1.0, 1.1 or 1.2. Changing this forces a new resource to be created. - NOTE: Starting on June 30, 2020, Azure HDInsight will enforce TLS 1.2 or later versions for all HTTPS connections. For more information, see Azure HDInsight TLS 1.2 Enforcement. 
- cluster_version str
- Specifies the Version of HDInsights which should be used for this Cluster. Changing this forces a new resource to be created.
- component_version SparkCluster Component Version Args 
- A component_versionblock as defined below.
- compute_isolation SparkCluster Compute Isolation Args 
- A compute_isolationblock as defined below.
- disk_encryptions Sequence[SparkCluster Disk Encryption Args] 
- One or more disk_encryptionblock as defined below.
- encryption_in_transit_enabled bool
- Whether encryption in transit is enabled for this Cluster. Changing this forces a new resource to be created.
- extension
SparkCluster Extension Args 
- An extensionblock as defined below.
- gateway
SparkCluster Gateway Args 
- A gatewayblock as defined below.
- https_endpoint str
- The HTTPS Connectivity Endpoint for this HDInsight Spark Cluster.
- location str
- Specifies the Azure Region which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
- metastores
SparkCluster Metastores Args 
- A metastoresblock as defined below.
- monitor
SparkCluster Monitor Args 
- A monitorblock as defined below.
- name str
- Specifies the name for this HDInsight Spark Cluster. Changing this forces a new resource to be created.
- network
SparkCluster Network Args 
- A networkblock as defined below.
- private_link_configuration SparkClusterPrivateLinkConfigurationArgs
- A private_link_configuration block as defined below.
- resource_group_name str
- Specifies the name of the Resource Group in which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
- roles
SparkCluster Roles Args 
- A rolesblock as defined below.
- security_profile SparkCluster Security Profile Args 
- A security_profileblock as defined below. Changing this forces a new resource to be created.
- ssh_endpoint str
- The SSH Connectivity Endpoint for this HDInsight Spark Cluster.
- storage_account_gen2 SparkClusterStorageAccountGen2Args
- A storage_account_gen2 block as defined below.
- storage_accounts Sequence[SparkClusterStorageAccountArgs]
- One or more storage_account blocks as defined below.
- tags Mapping[str, str]
- A map of Tags which should be assigned to this HDInsight Spark Cluster.
- tier str
- Specifies the Tier which should be used for this HDInsight Spark Cluster. Possible values are Standard or Premium. Changing this forces a new resource to be created.
- tls_min_version str
- The minimal supported TLS version. Possible values are 1.0, 1.1 or 1.2. Changing this forces a new resource to be created. - NOTE: Starting on June 30, 2020, Azure HDInsight will enforce TLS 1.2 or later versions for all HTTPS connections. For more information, see Azure HDInsight TLS 1.2 Enforcement. 
- clusterVersion String
- Specifies the Version of HDInsights which should be used for this Cluster. Changing this forces a new resource to be created.
- componentVersion Property Map
- A component_versionblock as defined below.
- computeIsolation Property Map
- A compute_isolationblock as defined below.
- diskEncryptions List<Property Map>
- One or more disk_encryptionblock as defined below.
- encryptionInTransitEnabled Boolean
- Whether encryption in transit is enabled for this Cluster. Changing this forces a new resource to be created.
- extension Property Map
- An extensionblock as defined below.
- gateway Property Map
- A gatewayblock as defined below.
- httpsEndpoint String
- The HTTPS Connectivity Endpoint for this HDInsight Spark Cluster.
- location String
- Specifies the Azure Region which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
- metastores Property Map
- A metastoresblock as defined below.
- monitor Property Map
- A monitorblock as defined below.
- name String
- Specifies the name for this HDInsight Spark Cluster. Changing this forces a new resource to be created.
- network Property Map
- A networkblock as defined below.
- privateLinkConfiguration Property Map
- A private_link_configuration block as defined below.
- resourceGroupName String
- Specifies the name of the Resource Group in which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
- roles Property Map
- A rolesblock as defined below.
- securityProfile Property Map
- A security_profileblock as defined below. Changing this forces a new resource to be created.
- sshEndpoint String
- The SSH Connectivity Endpoint for this HDInsight Spark Cluster.
- storageAccountGen2 Property Map
- A storage_account_gen2 block as defined below.
- storageAccounts List<Property Map>
- One or more storage_account blocks as defined below.
- tags Map<String>
- A map of Tags which should be assigned to this HDInsight Spark Cluster.
- tier String
- Specifies the Tier which should be used for this HDInsight Spark Cluster. Possible values are Standard or Premium. Changing this forces a new resource to be created.
- tlsMinVersion String
- The minimal supported TLS version. Possible values are 1.0, 1.1 or 1.2. Changing this forces a new resource to be created. - NOTE: Starting on June 30, 2020, Azure HDInsight will enforce TLS 1.2 or later versions for all HTTPS connections. For more information, see Azure HDInsight TLS 1.2 Enforcement. 
Supporting Types
SparkClusterComponentVersion, SparkClusterComponentVersionArgs        
- Spark string
- The version of Spark which should be used for this HDInsight Spark Cluster. Changing this forces a new resource to be created.
- Spark string
- The version of Spark which should be used for this HDInsight Spark Cluster. Changing this forces a new resource to be created.
- spark String
- The version of Spark which should be used for this HDInsight Spark Cluster. Changing this forces a new resource to be created.
- spark string
- The version of Spark which should be used for this HDInsight Spark Cluster. Changing this forces a new resource to be created.
- spark str
- The version of Spark which should be used for this HDInsight Spark Cluster. Changing this forces a new resource to be created.
- spark String
- The version of Spark which should be used for this HDInsight Spark Cluster. Changing this forces a new resource to be created.
SparkClusterComputeIsolation, SparkClusterComputeIsolationArgs        
- ComputeIsolationEnabled bool
- This field indicates whether to enable compute isolation or not. Possible values are true or false.
- HostSku string
- The name of the host SKU.
- ComputeIsolationEnabled bool
- This field indicates whether to enable compute isolation or not. Possible values are true or false.
- HostSku string
- The name of the host SKU.
- computeIsolationEnabled Boolean
- This field indicates whether to enable compute isolation or not. Possible values are true or false.
- hostSku String
- The name of the host SKU.
- computeIsolationEnabled boolean
- This field indicates whether to enable compute isolation or not. Possible values are true or false.
- hostSku string
- The name of the host SKU.
- compute_isolation_enabled bool
- This field indicates whether to enable compute isolation or not. Possible values are true or false.
- host_sku str
- The name of the host SKU.
- computeIsolationEnabled Boolean
- This field indicates whether to enable compute isolation or not. Possible values are true or false.
- hostSku String
- The name of the host SKU.
SparkClusterDiskEncryption, SparkClusterDiskEncryptionArgs        
- EncryptionAlgorithm string
- This is an algorithm identifier for encryption. Possible values are RSA1_5, RSA-OAEP, RSA-OAEP-256.
- EncryptionAtHostEnabled bool
- This is an indicator to show whether resource disk encryption is enabled.
- KeyVaultKeyId string
- The ID of the key vault key.
- KeyVaultManagedIdentityId string
- This is the resource ID of Managed Identity used to access the key vault.
- EncryptionAlgorithm string
- This is an algorithm identifier for encryption. Possible values are RSA1_5, RSA-OAEP, RSA-OAEP-256.
- EncryptionAtHostEnabled bool
- This is an indicator to show whether resource disk encryption is enabled.
- KeyVaultKeyId string
- The ID of the key vault key.
- KeyVaultManagedIdentityId string
- This is the resource ID of Managed Identity used to access the key vault.
- encryptionAlgorithm String
- This is an algorithm identifier for encryption. Possible values are RSA1_5, RSA-OAEP, RSA-OAEP-256.
- encryptionAtHostEnabled Boolean
- This is an indicator to show whether resource disk encryption is enabled.
- keyVaultKeyId String
- The ID of the key vault key.
- keyVaultManagedIdentityId String
- This is the resource ID of Managed Identity used to access the key vault.
- encryptionAlgorithm string
- This is an algorithm identifier for encryption. Possible values are RSA1_5, RSA-OAEP, RSA-OAEP-256.
- encryptionAtHostEnabled boolean
- This is an indicator to show whether resource disk encryption is enabled.
- keyVaultKeyId string
- The ID of the key vault key.
- keyVaultManagedIdentityId string
- This is the resource ID of Managed Identity used to access the key vault.
- encryption_algorithm str
- This is an algorithm identifier for encryption. Possible values are RSA1_5, RSA-OAEP, RSA-OAEP-256.
- encryption_at_host_enabled bool
- This is an indicator to show whether resource disk encryption is enabled.
- key_vault_key_id str
- The ID of the key vault key.
- key_vault_managed_identity_id str
- This is the resource ID of Managed Identity used to access the key vault.
- encryptionAlgorithm String
- This is an algorithm identifier for encryption. Possible values are RSA1_5, RSA-OAEP, RSA-OAEP-256.
- encryptionAtHostEnabled Boolean
- This is an indicator to show whether resource disk encryption is enabled.
- keyVaultKeyId String
- The ID of the key vault key.
- keyVaultManagedIdentityId String
- This is the resource ID of Managed Identity used to access the key vault.
SparkClusterExtension, SparkClusterExtensionArgs      
- LogAnalyticsWorkspaceId string
- The workspace ID of the log analytics extension.
- PrimaryKey string
- The workspace key of the log analytics extension.
- LogAnalyticsWorkspaceId string
- The workspace ID of the log analytics extension.
- PrimaryKey string
- The workspace key of the log analytics extension.
- logAnalyticsWorkspaceId String
- The workspace ID of the log analytics extension.
- primaryKey String
- The workspace key of the log analytics extension.
- logAnalyticsWorkspaceId string
- The workspace ID of the log analytics extension.
- primaryKey string
- The workspace key of the log analytics extension.
- log_analytics_workspace_id str
- The workspace ID of the log analytics extension.
- primary_key str
- The workspace key of the log analytics extension.
- logAnalyticsWorkspaceId String
- The workspace ID of the log analytics extension.
- primaryKey String
- The workspace key of the log analytics extension.
SparkClusterGateway, SparkClusterGatewayArgs      
SparkClusterMetastores, SparkClusterMetastoresArgs      
- Ambari
SparkCluster Metastores Ambari 
- An ambariblock as defined below.
- Hive
SparkCluster Metastores Hive 
- A hiveblock as defined below.
- Oozie
SparkCluster Metastores Oozie 
- An oozieblock as defined below.
- Ambari
SparkCluster Metastores Ambari 
- An ambariblock as defined below.
- Hive
SparkCluster Metastores Hive 
- A hiveblock as defined below.
- Oozie
SparkCluster Metastores Oozie 
- An oozieblock as defined below.
- ambari
SparkCluster Metastores Ambari 
- An ambariblock as defined below.
- hive
SparkCluster Metastores Hive 
- A hiveblock as defined below.
- oozie
SparkCluster Metastores Oozie 
- An oozieblock as defined below.
- ambari
SparkCluster Metastores Ambari 
- An ambariblock as defined below.
- hive
SparkCluster Metastores Hive 
- A hiveblock as defined below.
- oozie
SparkCluster Metastores Oozie 
- An oozieblock as defined below.
- ambari
SparkCluster Metastores Ambari 
- An ambariblock as defined below.
- hive
SparkCluster Metastores Hive 
- A hiveblock as defined below.
- oozie
SparkCluster Metastores Oozie 
- An oozieblock as defined below.
- ambari Property Map
- An ambariblock as defined below.
- hive Property Map
- A hiveblock as defined below.
- oozie Property Map
- An oozieblock as defined below.
SparkClusterMetastoresAmbari, SparkClusterMetastoresAmbariArgs        
- DatabaseName string
- The external Hive metastore's existing SQL database. Changing this forces a new resource to be created.
- Password string
- The external Ambari metastore's existing SQL server admin password. Changing this forces a new resource to be created.
- Server string
- The fully-qualified domain name (FQDN) of the SQL server to use for the external Ambari metastore. Changing this forces a new resource to be created.
- Username string
- The external Ambari metastore's existing SQL server admin username. Changing this forces a new resource to be created.
- DatabaseName string
- The external Hive metastore's existing SQL database. Changing this forces a new resource to be created.
- Password string
- The external Ambari metastore's existing SQL server admin password. Changing this forces a new resource to be created.
- Server string
- The fully-qualified domain name (FQDN) of the SQL server to use for the external Ambari metastore. Changing this forces a new resource to be created.
- Username string
- The external Ambari metastore's existing SQL server admin username. Changing this forces a new resource to be created.
- databaseName String
- The external Hive metastore's existing SQL database. Changing this forces a new resource to be created.
- password String
- The external Ambari metastore's existing SQL server admin password. Changing this forces a new resource to be created.
- server String
- The fully-qualified domain name (FQDN) of the SQL server to use for the external Ambari metastore. Changing this forces a new resource to be created.
- username String
- The external Ambari metastore's existing SQL server admin username. Changing this forces a new resource to be created.
- databaseName string
- The external Hive metastore's existing SQL database. Changing this forces a new resource to be created.
- password string
- The external Ambari metastore's existing SQL server admin password. Changing this forces a new resource to be created.
- server string
- The fully-qualified domain name (FQDN) of the SQL server to use for the external Ambari metastore. Changing this forces a new resource to be created.
- username string
- The external Ambari metastore's existing SQL server admin username. Changing this forces a new resource to be created.
- database_name str
- The external Hive metastore's existing SQL database. Changing this forces a new resource to be created.
- password str
- The external Ambari metastore's existing SQL server admin password. Changing this forces a new resource to be created.
- server str
- The fully-qualified domain name (FQDN) of the SQL server to use for the external Ambari metastore. Changing this forces a new resource to be created.
- username str
- The external Ambari metastore's existing SQL server admin username. Changing this forces a new resource to be created.
- databaseName String
- The external Hive metastore's existing SQL database. Changing this forces a new resource to be created.
- password String
- The external Ambari metastore's existing SQL server admin password. Changing this forces a new resource to be created.
- server String
- The fully-qualified domain name (FQDN) of the SQL server to use for the external Ambari metastore. Changing this forces a new resource to be created.
- username String
- The external Ambari metastore's existing SQL server admin username. Changing this forces a new resource to be created.
SparkClusterMetastoresHive, SparkClusterMetastoresHiveArgs        
- DatabaseName string
- The external Hive metastore's existing SQL database. Changing this forces a new resource to be created.
- Password string
- The external Hive metastore's existing SQL server admin password. Changing this forces a new resource to be created.
- Server string
- The fully-qualified domain name (FQDN) of the SQL server to use for the external Hive metastore. Changing this forces a new resource to be created.
- Username string
- The external Hive metastore's existing SQL server admin username. Changing this forces a new resource to be created.
- DatabaseName string
- The external Hive metastore's existing SQL database. Changing this forces a new resource to be created.
- Password string
- The external Hive metastore's existing SQL server admin password. Changing this forces a new resource to be created.
- Server string
- The fully-qualified domain name (FQDN) of the SQL server to use for the external Hive metastore. Changing this forces a new resource to be created.
- Username string
- The external Hive metastore's existing SQL server admin username. Changing this forces a new resource to be created.
- databaseName String
- The external Hive metastore's existing SQL database. Changing this forces a new resource to be created.
- password String
- The external Hive metastore's existing SQL server admin password. Changing this forces a new resource to be created.
- server String
- The fully-qualified domain name (FQDN) of the SQL server to use for the external Hive metastore. Changing this forces a new resource to be created.
- username String
- The external Hive metastore's existing SQL server admin username. Changing this forces a new resource to be created.
- databaseName string
- The external Hive metastore's existing SQL database. Changing this forces a new resource to be created.
- password string
- The external Hive metastore's existing SQL server admin password. Changing this forces a new resource to be created.
- server string
- The fully-qualified domain name (FQDN) of the SQL server to use for the external Hive metastore. Changing this forces a new resource to be created.
- username string
- The external Hive metastore's existing SQL server admin username. Changing this forces a new resource to be created.
- database_name str
- The external Hive metastore's existing SQL database. Changing this forces a new resource to be created.
- password str
- The external Hive metastore's existing SQL server admin password. Changing this forces a new resource to be created.
- server str
- The fully-qualified domain name (FQDN) of the SQL server to use for the external Hive metastore. Changing this forces a new resource to be created.
- username str
- The external Hive metastore's existing SQL server admin username. Changing this forces a new resource to be created.
- databaseName String
- The external Hive metastore's existing SQL database. Changing this forces a new resource to be created.
- password String
- The external Hive metastore's existing SQL server admin password. Changing this forces a new resource to be created.
- server String
- The fully-qualified domain name (FQDN) of the SQL server to use for the external Hive metastore. Changing this forces a new resource to be created.
- username String
- The external Hive metastore's existing SQL server admin username. Changing this forces a new resource to be created.
SparkClusterMetastoresOozie, SparkClusterMetastoresOozieArgs        
- DatabaseName string
- The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created.
- Password string
- The external Oozie metastore's existing SQL server admin password. Changing this forces a new resource to be created.
- Server string
- The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created.
- Username string
- The external Oozie metastore's existing SQL server admin username. Changing this forces a new resource to be created.
- DatabaseName string
- The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created.
- Password string
- The external Oozie metastore's existing SQL server admin password. Changing this forces a new resource to be created.
- Server string
- The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created.
- Username string
- The external Oozie metastore's existing SQL server admin username. Changing this forces a new resource to be created.
- databaseName String
- The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created.
- password String
- The external Oozie metastore's existing SQL server admin password. Changing this forces a new resource to be created.
- server String
- The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created.
- username String
- The external Oozie metastore's existing SQL server admin username. Changing this forces a new resource to be created.
- databaseName string
- The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created.
- password string
- The external Oozie metastore's existing SQL server admin password. Changing this forces a new resource to be created.
- server string
- The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created.
- username string
- The external Oozie metastore's existing SQL server admin username. Changing this forces a new resource to be created.
- database_name str
- The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created.
- password str
- The external Oozie metastore's existing SQL server admin password. Changing this forces a new resource to be created.
- server str
- The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created.
- username str
- The external Oozie metastore's existing SQL server admin username. Changing this forces a new resource to be created.
- databaseName String
- The external Oozie metastore's existing SQL database. Changing this forces a new resource to be created.
- password String
- The external Oozie metastore's existing SQL server admin password. Changing this forces a new resource to be created.
- server String
- The fully-qualified domain name (FQDN) of the SQL server to use for the external Oozie metastore. Changing this forces a new resource to be created.
- username String
- The external Oozie metastore's existing SQL server admin username. Changing this forces a new resource to be created.
SparkClusterMonitor, SparkClusterMonitorArgs      
- LogAnalyticsWorkspaceId string
- The Operations Management Suite (OMS) workspace ID.
- PrimaryKey string
- The Operations Management Suite (OMS) workspace key.
- LogAnalyticsWorkspaceId string
- The Operations Management Suite (OMS) workspace ID.
- PrimaryKey string
- The Operations Management Suite (OMS) workspace key.
- logAnalyticsWorkspaceId String
- The Operations Management Suite (OMS) workspace ID.
- primaryKey String
- The Operations Management Suite (OMS) workspace key.
- logAnalyticsWorkspaceId string
- The Operations Management Suite (OMS) workspace ID.
- primaryKey string
- The Operations Management Suite (OMS) workspace key.
- log_analytics_workspace_id str
- The Operations Management Suite (OMS) workspace ID.
- primary_key str
- The Operations Management Suite (OMS) workspace key.
- logAnalyticsWorkspaceId String
- The Operations Management Suite (OMS) workspace ID.
- primaryKey String
- The Operations Management Suite (OMS) workspace key.
SparkClusterNetwork, SparkClusterNetworkArgs      
- ConnectionDirection string
- The direction of the resource provider connection. Possible values include `Inbound` or `Outbound`. Defaults to `Inbound`. Changing this forces a new resource to be created. NOTE: To enable private link, the `connection_direction` must be set to `Outbound`.
- PrivateLinkEnabled bool
- Is the private link enabled? Possible values include `true` or `false`. Defaults to `false`. Changing this forces a new resource to be created.
- ConnectionDirection string
- The direction of the resource provider connection. Possible values include `Inbound` or `Outbound`. Defaults to `Inbound`. Changing this forces a new resource to be created. NOTE: To enable private link, the `connection_direction` must be set to `Outbound`.
- PrivateLinkEnabled bool
- Is the private link enabled? Possible values include `true` or `false`. Defaults to `false`. Changing this forces a new resource to be created.
- connectionDirection String
- The direction of the resource provider connection. Possible values include `Inbound` or `Outbound`. Defaults to `Inbound`. Changing this forces a new resource to be created. NOTE: To enable private link, the `connection_direction` must be set to `Outbound`.
- privateLinkEnabled Boolean
- Is the private link enabled? Possible values include `true` or `false`. Defaults to `false`. Changing this forces a new resource to be created.
- connectionDirection string
- The direction of the resource provider connection. Possible values include `Inbound` or `Outbound`. Defaults to `Inbound`. Changing this forces a new resource to be created. NOTE: To enable private link, the `connection_direction` must be set to `Outbound`.
- privateLinkEnabled boolean
- Is the private link enabled? Possible values include `true` or `false`. Defaults to `false`. Changing this forces a new resource to be created.
- connection_direction str
- The direction of the resource provider connection. Possible values include `Inbound` or `Outbound`. Defaults to `Inbound`. Changing this forces a new resource to be created. NOTE: To enable private link, the `connection_direction` must be set to `Outbound`.
- private_link_enabled bool
- Is the private link enabled? Possible values include `true` or `false`. Defaults to `false`. Changing this forces a new resource to be created.
- connectionDirection String
- The direction of the resource provider connection. Possible values include `Inbound` or `Outbound`. Defaults to `Inbound`. Changing this forces a new resource to be created. NOTE: To enable private link, the `connection_direction` must be set to `Outbound`.
- privateLinkEnabled Boolean
- Is the private link enabled? Possible values include `true` or `false`. Defaults to `false`. Changing this forces a new resource to be created.
SparkClusterPrivateLinkConfiguration, SparkClusterPrivateLinkConfigurationArgs          
- GroupId string
- The ID of the private link service group.
- IpConfiguration SparkClusterPrivateLinkConfigurationIpConfiguration
- Name string
- The name of the private link configuration.
- GroupId string
- The ID of the private link service group.
- IpConfiguration SparkClusterPrivateLinkConfigurationIpConfiguration
- Name string
- The name of the private link configuration.
- groupId String
- The ID of the private link service group.
- ipConfiguration SparkClusterPrivateLinkConfigurationIpConfiguration
- name String
- The name of the private link configuration.
- groupId string
- The ID of the private link service group.
- ipConfiguration SparkClusterPrivateLinkConfigurationIpConfiguration
- name string
- The name of the private link configuration.
- group_id str
- The ID of the private link service group.
- ip_configuration SparkClusterPrivateLinkConfigurationIpConfiguration
- name str
- The name of the private link configuration.
- groupId String
- The ID of the private link service group.
- ipConfiguration Property Map
- name String
- The name of the private link configuration.
SparkClusterPrivateLinkConfigurationIpConfiguration, SparkClusterPrivateLinkConfigurationIpConfigurationArgs              
- Name string
- Specifies the name for this HDInsight Spark Cluster. Changing this forces a new resource to be created.
- Primary bool
- Indicates whether this IP configuration is primary.
- PrivateIpAddress string
- The private IP address of the IP configuration.
- PrivateIpAllocationMethod string
- The private IP allocation method. The only possible value now is `Dynamic`.
- SubnetId string
- Name string
- Specifies the name for this HDInsight Spark Cluster. Changing this forces a new resource to be created.
- Primary bool
- Indicates whether this IP configuration is primary.
- PrivateIpAddress string
- The private IP address of the IP configuration.
- PrivateIpAllocationMethod string
- The private IP allocation method. The only possible value now is `Dynamic`.
- SubnetId string
- name String
- Specifies the name for this HDInsight Spark Cluster. Changing this forces a new resource to be created.
- primary Boolean
- Indicates whether this IP configuration is primary.
- privateIpAddress String
- The private IP address of the IP configuration.
- privateIpAllocationMethod String
- The private IP allocation method. The only possible value now is `Dynamic`.
- subnetId String
- name string
- Specifies the name for this HDInsight Spark Cluster. Changing this forces a new resource to be created.
- primary boolean
- Indicates whether this IP configuration is primary.
- privateIpAddress string
- The private IP address of the IP configuration.
- privateIpAllocationMethod string
- The private IP allocation method. The only possible value now is `Dynamic`.
- subnetId string
- name str
- Specifies the name for this HDInsight Spark Cluster. Changing this forces a new resource to be created.
- primary bool
- Indicates whether this IP configuration is primary.
- private_ip_address str
- The private IP address of the IP configuration.
- private_ip_allocation_method str
- The private IP allocation method. The only possible value now is `Dynamic`.
- subnet_id str
- name String
- Specifies the name for this HDInsight Spark Cluster. Changing this forces a new resource to be created.
- primary Boolean
- Indicates whether this IP configuration is primary.
- privateIpAddress String
- The private IP address of the IP configuration.
- privateIpAllocationMethod String
- The private IP allocation method. The only possible value now is `Dynamic`.
- subnetId String
SparkClusterRoles, SparkClusterRolesArgs      
- HeadNode SparkClusterRolesHeadNode
- A `head_node` block as defined above.
- WorkerNode SparkClusterRolesWorkerNode
- A `worker_node` block as defined below.
- ZookeeperNode SparkClusterRolesZookeeperNode
- A `zookeeper_node` block as defined below.
- HeadNode SparkClusterRolesHeadNode
- A `head_node` block as defined above.
- WorkerNode SparkClusterRolesWorkerNode
- A `worker_node` block as defined below.
- ZookeeperNode SparkClusterRolesZookeeperNode
- A `zookeeper_node` block as defined below.
- headNode SparkClusterRolesHeadNode
- A `head_node` block as defined above.
- workerNode SparkClusterRolesWorkerNode
- A `worker_node` block as defined below.
- zookeeperNode SparkClusterRolesZookeeperNode
- A `zookeeper_node` block as defined below.
- headNode SparkClusterRolesHeadNode
- A `head_node` block as defined above.
- workerNode SparkClusterRolesWorkerNode
- A `worker_node` block as defined below.
- zookeeperNode SparkClusterRolesZookeeperNode
- A `zookeeper_node` block as defined below.
- head_node SparkClusterRolesHeadNode
- A `head_node` block as defined above.
- worker_node SparkClusterRolesWorkerNode
- A `worker_node` block as defined below.
- zookeeper_node SparkClusterRolesZookeeperNode
- A `zookeeper_node` block as defined below.
- headNode Property Map
- A `head_node` block as defined above.
- workerNode Property Map
- A `worker_node` block as defined below.
- zookeeperNode Property Map
- A `zookeeper_node` block as defined below.
SparkClusterRolesHeadNode, SparkClusterRolesHeadNodeArgs          
- Username string
- The Username of the local administrator for the Head Nodes. Changing this forces a new resource to be created.
- VmSize string
- The Size of the Virtual Machine which should be used as the Head Nodes. Possible values are ExtraSmall,Small,Medium,Large,ExtraLarge,A5,A6,A7,A8,A9,A10,A11,Standard_A1_V2,Standard_A2_V2,Standard_A2m_V2,Standard_A3,Standard_A4_V2,Standard_A4m_V2,Standard_A8_V2,Standard_A8m_V2,Standard_D1,Standard_D2,Standard_D3,Standard_D4,Standard_D11,Standard_D12,Standard_D13,Standard_D14,Standard_D1_V2,Standard_D2_V2,Standard_D3_V2,Standard_D4_V2,Standard_D5_V2,Standard_D11_V2,Standard_D12_V2,Standard_D13_V2,Standard_D14_V2,Standard_DS1_V2,Standard_DS2_V2,Standard_DS3_V2,Standard_DS4_V2,Standard_DS5_V2,Standard_DS11_V2,Standard_DS12_V2,Standard_DS13_V2,Standard_DS14_V2,Standard_E2_V3,Standard_E4_V3,Standard_E8_V3,Standard_E16_V3,Standard_E20_V3,Standard_E32_V3,Standard_E64_V3,Standard_E64i_V3,Standard_E2s_V3,Standard_E4s_V3,Standard_E8s_V3,Standard_E16s_V3,Standard_E20s_V3,Standard_E32s_V3,Standard_E64s_V3,Standard_E64is_V3,Standard_D2a_V4,Standard_D4a_V4,Standard_D8a_V4,Standard_D16a_V4,Standard_D32a_V4,Standard_D48a_V4,Standard_D64a_V4,Standard_D96a_V4,Standard_E2a_V4,Standard_E4a_V4,Standard_E8a_V4,Standard_E16a_V4,Standard_E20a_V4,Standard_E32a_V4,Standard_E48a_V4,Standard_E64a_V4,Standard_E96a_V4,Standard_D2ads_V5,Standard_D4ads_V5,Standard_D8ads_V5,Standard_D16ads_V5,Standard_D32ads_V5,Standard_D48ads_V5,Standard_D64ads_V5,Standard_D96ads_V5,Standard_E2ads_V5,Standard_E4ads_V5,Standard_E8ads_V5,Standard_E16ads_V5,Standard_E20ads_V5,Standard_E32ads_V5,Standard_E48ads_V5,Standard_E64ads_V5,Standard_E96ads_V5,Standard_D2ads_V5,Standard_D4ads_V5,Standard_D8ads_V5,Standard_D16ads_V5,Standard_D32ads_V5,Standard_D48ads_V5,Standard_D64ads_V5,Standard_D96ads_V5,Standard_E2ads_V5,Standard_E4ads_V5,Standard_E8ads_V5,Standard_E16ads_V5,Standard_E20ads_V5,Standard_E32ads_V5,Standard_E48ads_V5,Standard_E64ads_V5,Standard_E96ads_V5,Standard_G1,Standard_G2,Standard_G3,Standard_G4,Standard_G5,Standard_F2s_V2,Standard_F4s_V2,Standard_F8s_V2,Standard_F16s_V2,Standard_F32s_V2,Standard_F64s_V2
, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created.
- Password string
- The Password associated with the local administrator for the Head Nodes. Changing this forces a new resource to be created. - NOTE: If specified, this password must be at least 10 characters in length and must contain at least one digit, one uppercase and one lower case letter, one non-alphanumeric character (except characters ' " ` ). 
- ScriptActions List<SparkClusterRolesHeadNodeScriptAction>
- The script action which will run on the cluster. One or more `script_actions` blocks as defined below.
- SshKeys List<string>
- A list of SSH Keys which should be used for the local administrator on the Head Nodes. Changing this forces a new resource to be created. NOTE: Either a `password` or one or more `ssh_keys` must be specified - but not both.
- SubnetId string
- The ID of the Subnet within the Virtual Network where the Head Nodes should be provisioned within. Changing this forces a new resource to be created.
- VirtualNetworkId string
- The ID of the Virtual Network where the Head Nodes should be provisioned within. Changing this forces a new resource to be created.
- Username string
- The Username of the local administrator for the Head Nodes. Changing this forces a new resource to be created.
- VmSize string
- The Size of the Virtual Machine which should be used as the Head Nodes. Possible values are ExtraSmall,Small,Medium,Large,ExtraLarge,A5,A6,A7,A8,A9,A10,A11,Standard_A1_V2,Standard_A2_V2,Standard_A2m_V2,Standard_A3,Standard_A4_V2,Standard_A4m_V2,Standard_A8_V2,Standard_A8m_V2,Standard_D1,Standard_D2,Standard_D3,Standard_D4,Standard_D11,Standard_D12,Standard_D13,Standard_D14,Standard_D1_V2,Standard_D2_V2,Standard_D3_V2,Standard_D4_V2,Standard_D5_V2,Standard_D11_V2,Standard_D12_V2,Standard_D13_V2,Standard_D14_V2,Standard_DS1_V2,Standard_DS2_V2,Standard_DS3_V2,Standard_DS4_V2,Standard_DS5_V2,Standard_DS11_V2,Standard_DS12_V2,Standard_DS13_V2,Standard_DS14_V2,Standard_E2_V3,Standard_E4_V3,Standard_E8_V3,Standard_E16_V3,Standard_E20_V3,Standard_E32_V3,Standard_E64_V3,Standard_E64i_V3,Standard_E2s_V3,Standard_E4s_V3,Standard_E8s_V3,Standard_E16s_V3,Standard_E20s_V3,Standard_E32s_V3,Standard_E64s_V3,Standard_E64is_V3,Standard_D2a_V4,Standard_D4a_V4,Standard_D8a_V4,Standard_D16a_V4,Standard_D32a_V4,Standard_D48a_V4,Standard_D64a_V4,Standard_D96a_V4,Standard_E2a_V4,Standard_E4a_V4,Standard_E8a_V4,Standard_E16a_V4,Standard_E20a_V4,Standard_E32a_V4,Standard_E48a_V4,Standard_E64a_V4,Standard_E96a_V4,Standard_D2ads_V5,Standard_D4ads_V5,Standard_D8ads_V5,Standard_D16ads_V5,Standard_D32ads_V5,Standard_D48ads_V5,Standard_D64ads_V5,Standard_D96ads_V5,Standard_E2ads_V5,Standard_E4ads_V5,Standard_E8ads_V5,Standard_E16ads_V5,Standard_E20ads_V5,Standard_E32ads_V5,Standard_E48ads_V5,Standard_E64ads_V5,Standard_E96ads_V5,Standard_D2ads_V5,Standard_D4ads_V5,Standard_D8ads_V5,Standard_D16ads_V5,Standard_D32ads_V5,Standard_D48ads_V5,Standard_D64ads_V5,Standard_D96ads_V5,Standard_E2ads_V5,Standard_E4ads_V5,Standard_E8ads_V5,Standard_E16ads_V5,Standard_E20ads_V5,Standard_E32ads_V5,Standard_E48ads_V5,Standard_E64ads_V5,Standard_E96ads_V5,Standard_G1,Standard_G2,Standard_G3,Standard_G4,Standard_G5,Standard_F2s_V2,Standard_F4s_V2,Standard_F8s_V2,Standard_F16s_V2,Standard_F32s_V2,Standard_F64s_V2
, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created.
- Password string
- The Password associated with the local administrator for the Head Nodes. Changing this forces a new resource to be created. - NOTE: If specified, this password must be at least 10 characters in length and must contain at least one digit, one uppercase and one lower case letter, one non-alphanumeric character (except characters ' " ` ). 
- ScriptActions []SparkClusterRolesHeadNodeScriptAction
- The script action which will run on the cluster. One or more `script_actions` blocks as defined below.
- SshKeys []string
- A list of SSH Keys which should be used for the local administrator on the Head Nodes. Changing this forces a new resource to be created. NOTE: Either a `password` or one or more `ssh_keys` must be specified - but not both.
- SubnetId string
- The ID of the Subnet within the Virtual Network where the Head Nodes should be provisioned within. Changing this forces a new resource to be created.
- VirtualNetworkId string
- The ID of the Virtual Network where the Head Nodes should be provisioned within. Changing this forces a new resource to be created.
- username String
- The Username of the local administrator for the Head Nodes. Changing this forces a new resource to be created.
- vmSize String
- The Size of the Virtual Machine which should be used as the Head Nodes. Possible values are ExtraSmall,Small,Medium,Large,ExtraLarge,A5,A6,A7,A8,A9,A10,A11,Standard_A1_V2,Standard_A2_V2,Standard_A2m_V2,Standard_A3,Standard_A4_V2,Standard_A4m_V2,Standard_A8_V2,Standard_A8m_V2,Standard_D1,Standard_D2,Standard_D3,Standard_D4,Standard_D11,Standard_D12,Standard_D13,Standard_D14,Standard_D1_V2,Standard_D2_V2,Standard_D3_V2,Standard_D4_V2,Standard_D5_V2,Standard_D11_V2,Standard_D12_V2,Standard_D13_V2,Standard_D14_V2,Standard_DS1_V2,Standard_DS2_V2,Standard_DS3_V2,Standard_DS4_V2,Standard_DS5_V2,Standard_DS11_V2,Standard_DS12_V2,Standard_DS13_V2,Standard_DS14_V2,Standard_E2_V3,Standard_E4_V3,Standard_E8_V3,Standard_E16_V3,Standard_E20_V3,Standard_E32_V3,Standard_E64_V3,Standard_E64i_V3,Standard_E2s_V3,Standard_E4s_V3,Standard_E8s_V3,Standard_E16s_V3,Standard_E20s_V3,Standard_E32s_V3,Standard_E64s_V3,Standard_E64is_V3,Standard_D2a_V4,Standard_D4a_V4,Standard_D8a_V4,Standard_D16a_V4,Standard_D32a_V4,Standard_D48a_V4,Standard_D64a_V4,Standard_D96a_V4,Standard_E2a_V4,Standard_E4a_V4,Standard_E8a_V4,Standard_E16a_V4,Standard_E20a_V4,Standard_E32a_V4,Standard_E48a_V4,Standard_E64a_V4,Standard_E96a_V4,Standard_D2ads_V5,Standard_D4ads_V5,Standard_D8ads_V5,Standard_D16ads_V5,Standard_D32ads_V5,Standard_D48ads_V5,Standard_D64ads_V5,Standard_D96ads_V5,Standard_E2ads_V5,Standard_E4ads_V5,Standard_E8ads_V5,Standard_E16ads_V5,Standard_E20ads_V5,Standard_E32ads_V5,Standard_E48ads_V5,Standard_E64ads_V5,Standard_E96ads_V5,Standard_D2ads_V5,Standard_D4ads_V5,Standard_D8ads_V5,Standard_D16ads_V5,Standard_D32ads_V5,Standard_D48ads_V5,Standard_D64ads_V5,Standard_D96ads_V5,Standard_E2ads_V5,Standard_E4ads_V5,Standard_E8ads_V5,Standard_E16ads_V5,Standard_E20ads_V5,Standard_E32ads_V5,Standard_E48ads_V5,Standard_E64ads_V5,Standard_E96ads_V5,Standard_G1,Standard_G2,Standard_G3,Standard_G4,Standard_G5,Standard_F2s_V2,Standard_F4s_V2,Standard_F8s_V2,Standard_F16s_V2,Standard_F32s_V2,Standard_F64s_V2
, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created.
- password String
- The Password associated with the local administrator for the Head Nodes. Changing this forces a new resource to be created. - NOTE: If specified, this password must be at least 10 characters in length and must contain at least one digit, one uppercase and one lower case letter, one non-alphanumeric character (except characters ' " ` ). 
- scriptActions List<SparkClusterRolesHeadNodeScriptAction>
- The script action which will run on the cluster. One or more `script_actions` blocks as defined below.
- sshKeys List<String>
- A list of SSH Keys which should be used for the local administrator on the Head Nodes. Changing this forces a new resource to be created. NOTE: Either a `password` or one or more `ssh_keys` must be specified - but not both.
- subnetId String
- The ID of the Subnet within the Virtual Network where the Head Nodes should be provisioned within. Changing this forces a new resource to be created.
- virtualNetworkId String
- The ID of the Virtual Network where the Head Nodes should be provisioned within. Changing this forces a new resource to be created.
- username string
- The Username of the local administrator for the Head Nodes. Changing this forces a new resource to be created.
- vmSize string
- The Size of the Virtual Machine which should be used as the Head Nodes. Possible values are ExtraSmall,Small,Medium,Large,ExtraLarge,A5,A6,A7,A8,A9,A10,A11,Standard_A1_V2,Standard_A2_V2,Standard_A2m_V2,Standard_A3,Standard_A4_V2,Standard_A4m_V2,Standard_A8_V2,Standard_A8m_V2,Standard_D1,Standard_D2,Standard_D3,Standard_D4,Standard_D11,Standard_D12,Standard_D13,Standard_D14,Standard_D1_V2,Standard_D2_V2,Standard_D3_V2,Standard_D4_V2,Standard_D5_V2,Standard_D11_V2,Standard_D12_V2,Standard_D13_V2,Standard_D14_V2,Standard_DS1_V2,Standard_DS2_V2,Standard_DS3_V2,Standard_DS4_V2,Standard_DS5_V2,Standard_DS11_V2,Standard_DS12_V2,Standard_DS13_V2,Standard_DS14_V2,Standard_E2_V3,Standard_E4_V3,Standard_E8_V3,Standard_E16_V3,Standard_E20_V3,Standard_E32_V3,Standard_E64_V3,Standard_E64i_V3,Standard_E2s_V3,Standard_E4s_V3,Standard_E8s_V3,Standard_E16s_V3,Standard_E20s_V3,Standard_E32s_V3,Standard_E64s_V3,Standard_E64is_V3,Standard_D2a_V4,Standard_D4a_V4,Standard_D8a_V4,Standard_D16a_V4,Standard_D32a_V4,Standard_D48a_V4,Standard_D64a_V4,Standard_D96a_V4,Standard_E2a_V4,Standard_E4a_V4,Standard_E8a_V4,Standard_E16a_V4,Standard_E20a_V4,Standard_E32a_V4,Standard_E48a_V4,Standard_E64a_V4,Standard_E96a_V4,Standard_D2ads_V5,Standard_D4ads_V5,Standard_D8ads_V5,Standard_D16ads_V5,Standard_D32ads_V5,Standard_D48ads_V5,Standard_D64ads_V5,Standard_D96ads_V5,Standard_E2ads_V5,Standard_E4ads_V5,Standard_E8ads_V5,Standard_E16ads_V5,Standard_E20ads_V5,Standard_E32ads_V5,Standard_E48ads_V5,Standard_E64ads_V5,Standard_E96ads_V5,Standard_D2ads_V5,Standard_D4ads_V5,Standard_D8ads_V5,Standard_D16ads_V5,Standard_D32ads_V5,Standard_D48ads_V5,Standard_D64ads_V5,Standard_D96ads_V5,Standard_E2ads_V5,Standard_E4ads_V5,Standard_E8ads_V5,Standard_E16ads_V5,Standard_E20ads_V5,Standard_E32ads_V5,Standard_E48ads_V5,Standard_E64ads_V5,Standard_E96ads_V5,Standard_G1,Standard_G2,Standard_G3,Standard_G4,Standard_G5,Standard_F2s_V2,Standard_F4s_V2,Standard_F8s_V2,Standard_F16s_V2,Standard_F32s_V2,Standard_F64s_V2
, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created.
- password string
- The Password associated with the local administrator for the Head Nodes. Changing this forces a new resource to be created. - NOTE: If specified, this password must be at least 10 characters in length and must contain at least one digit, one uppercase and one lower case letter, one non-alphanumeric character (except characters ' " ` ). 
- scriptActions SparkClusterRolesHeadNodeScriptAction[]
- The script action which will run on the cluster. One or more `script_actions` blocks as defined below.
- sshKeys string[]
- A list of SSH Keys which should be used for the local administrator on the Head Nodes. Changing this forces a new resource to be created. NOTE: Either a `password` or one or more `ssh_keys` must be specified - but not both.
- subnetId string
- The ID of the Subnet within the Virtual Network where the Head Nodes should be provisioned within. Changing this forces a new resource to be created.
- virtualNetworkId string
- The ID of the Virtual Network where the Head Nodes should be provisioned within. Changing this forces a new resource to be created.
- username str
- The Username of the local administrator for the Head Nodes. Changing this forces a new resource to be created.
- vm_size str
- The Size of the Virtual Machine which should be used as the Head Nodes. Possible values are ExtraSmall,Small,Medium,Large,ExtraLarge,A5,A6,A7,A8,A9,A10,A11,Standard_A1_V2,Standard_A2_V2,Standard_A2m_V2,Standard_A3,Standard_A4_V2,Standard_A4m_V2,Standard_A8_V2,Standard_A8m_V2,Standard_D1,Standard_D2,Standard_D3,Standard_D4,Standard_D11,Standard_D12,Standard_D13,Standard_D14,Standard_D1_V2,Standard_D2_V2,Standard_D3_V2,Standard_D4_V2,Standard_D5_V2,Standard_D11_V2,Standard_D12_V2,Standard_D13_V2,Standard_D14_V2,Standard_DS1_V2,Standard_DS2_V2,Standard_DS3_V2,Standard_DS4_V2,Standard_DS5_V2,Standard_DS11_V2,Standard_DS12_V2,Standard_DS13_V2,Standard_DS14_V2,Standard_E2_V3,Standard_E4_V3,Standard_E8_V3,Standard_E16_V3,Standard_E20_V3,Standard_E32_V3,Standard_E64_V3,Standard_E64i_V3,Standard_E2s_V3,Standard_E4s_V3,Standard_E8s_V3,Standard_E16s_V3,Standard_E20s_V3,Standard_E32s_V3,Standard_E64s_V3,Standard_E64is_V3,Standard_D2a_V4,Standard_D4a_V4,Standard_D8a_V4,Standard_D16a_V4,Standard_D32a_V4,Standard_D48a_V4,Standard_D64a_V4,Standard_D96a_V4,Standard_E2a_V4,Standard_E4a_V4,Standard_E8a_V4,Standard_E16a_V4,Standard_E20a_V4,Standard_E32a_V4,Standard_E48a_V4,Standard_E64a_V4,Standard_E96a_V4,Standard_D2ads_V5,Standard_D4ads_V5,Standard_D8ads_V5,Standard_D16ads_V5,Standard_D32ads_V5,Standard_D48ads_V5,Standard_D64ads_V5,Standard_D96ads_V5,Standard_E2ads_V5,Standard_E4ads_V5,Standard_E8ads_V5,Standard_E16ads_V5,Standard_E20ads_V5,Standard_E32ads_V5,Standard_E48ads_V5,Standard_E64ads_V5,Standard_E96ads_V5,Standard_D2ads_V5,Standard_D4ads_V5,Standard_D8ads_V5,Standard_D16ads_V5,Standard_D32ads_V5,Standard_D48ads_V5,Standard_D64ads_V5,Standard_D96ads_V5,Standard_E2ads_V5,Standard_E4ads_V5,Standard_E8ads_V5,Standard_E16ads_V5,Standard_E20ads_V5,Standard_E32ads_V5,Standard_E48ads_V5,Standard_E64ads_V5,Standard_E96ads_V5,Standard_G1,Standard_G2,Standard_G3,Standard_G4,Standard_G5,Standard_F2s_V2,Standard_F4s_V2,Standard_F8s_V2,Standard_F16s_V2,Standard_F32s_V2,Standard_F64s_V2
, Standard_F72s_V2, Standard_GS1, Standard_GS2, Standard_GS3, Standard_GS4, Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created.
- password str
- The Password associated with the local administrator for the Head Nodes. Changing this forces a new resource to be created. - NOTE: If specified, this password must be at least 10 characters in length and must contain at least one digit, one uppercase and one lower case letter, one non-alphanumeric character (except characters ' " ` ). 
- script_actions Sequence[SparkClusterRolesHeadNodeScriptAction]
- The script action which will run on the cluster. One or more `script_actions` blocks as defined below.
- ssh_keys Sequence[str]
- A list of SSH Keys which should be used for the local administrator on the Head Nodes. Changing this forces a new resource to be created. NOTE: Either a `password` or one or more `ssh_keys` must be specified - but not both.
- subnet_id str
- The ID of the Subnet within the Virtual Network where the Head Nodes should be provisioned within. Changing this forces a new resource to be created.
- virtual_network_id str
- The ID of the Virtual Network where the Head Nodes should be provisioned within. Changing this forces a new resource to be created.
- username String
- The Username of the local administrator for the Head Nodes. Changing this forces a new resource to be created.
- vmSize String
- The Size of the Virtual Machine which should be used as the Head Nodes. Possible values are ExtraSmall,Small,Medium,Large,ExtraLarge,A5,A6,A7,A8,A9,A10,A11,Standard_A1_V2,Standard_A2_V2,Standard_A2m_V2,Standard_A3,Standard_A4_V2,Standard_A4m_V2,Standard_A8_V2,Standard_A8m_V2,Standard_D1,Standard_D2,Standard_D3,Standard_D4,Standard_D11,Standard_D12,Standard_D13,Standard_D14,Standard_D1_V2,Standard_D2_V2,Standard_D3_V2,Standard_D4_V2,Standard_D5_V2,Standard_D11_V2,Standard_D12_V2,Standard_D13_V2,Standard_D14_V2,Standard_DS1_V2,Standard_DS2_V2,Standard_DS3_V2,Standard_DS4_V2,Standard_DS5_V2,Standard_DS11_V2,Standard_DS12_V2,Standard_DS13_V2,Standard_DS14_V2,Standard_E2_V3,Standard_E4_V3,Standard_E8_V3,Standard_E16_V3,Standard_E20_V3,Standard_E32_V3,Standard_E64_V3,Standard_E64i_V3,Standard_E2s_V3,Standard_E4s_V3,Standard_E8s_V3,Standard_E16s_V3,Standard_E20s_V3,Standard_E32s_V3,Standard_E64s_V3,Standard_E64is_V3,Standard_D2a_V4,Standard_D4a_V4,Standard_D8a_V4,Standard_D16a_V4,Standard_D32a_V4,Standard_D48a_V4,Standard_D64a_V4,Standard_D96a_V4,Standard_E2a_V4,Standard_E4a_V4,Standard_E8a_V4,Standard_E16a_V4,Standard_E20a_V4,Standard_E32a_V4,Standard_E48a_V4,Standard_E64a_V4,Standard_E96a_V4,Standard_D2ads_V5,Standard_D4ads_V5,Standard_D8ads_V5,Standard_D16ads_V5,Standard_D32ads_V5,Standard_D48ads_V5,Standard_D64ads_V5,Standard_D96ads_V5,Standard_E2ads_V5,Standard_E4ads_V5,Standard_E8ads_V5,Standard_E16ads_V5,Standard_E20ads_V5,Standard_E32ads_V5,Standard_E48ads_V5,Standard_E64ads_V5,Standard_E96ads_V5,Standard_D2ads_V5,Standard_D4ads_V5,Standard_D8ads_V5,Standard_D16ads_V5,Standard_D32ads_V5,Standard_D48ads_V5,Standard_D64ads_V5,Standard_D96ads_V5,Standard_E2ads_V5,Standard_E4ads_V5,Standard_E8ads_V5,Standard_E16ads_V5,Standard_E20ads_V5,Standard_E32ads_V5,Standard_E48ads_V5,Standard_E64ads_V5,Standard_E96ads_V5,Standard_G1,Standard_G2,Standard_G3,Standard_G4,Standard_G5,Standard_F2s_V2,Standard_F4s_V2,Standard_F8s_V2,Standard_F16s_V2,Standard_F32s_V2,Standard_F64s_V2
,Standard_F72s_V2,Standard_GS1,Standard_GS2,Standard_GS3,Standard_GS4,Standard_GS5 and Standard_NC24. Changing this forces a new resource to be created.
- password String
- The Password associated with the local administrator for the Head Nodes. Changing this forces a new resource to be created. - NOTE: If specified, this password must be at least 10 characters in length and must contain at least one digit, one uppercase and one lower case letter, one non-alphanumeric character (except characters ' " ` ). 
- scriptActions List<Property Map>
- The script action which will run on the cluster. One or more script_actions blocks as defined below.
- sshKeys List<String>
- A list of SSH Keys which should be used for the local administrator on the Head Nodes. Changing this forces a new resource to be created. - NOTE: Either a password or one or more ssh_keys must be specified - but not both.
- subnetId String
- The ID of the Subnet within the Virtual Network where the Head Nodes should be provisioned within. Changing this forces a new resource to be created.
- virtualNetworkId String
- The ID of the Virtual Network where the Head Nodes should be provisioned within. Changing this forces a new resource to be created.
SparkClusterRolesHeadNodeScriptAction, SparkClusterRolesHeadNodeScriptActionArgs              
- Name string
- The name of the script action.
- Uri string
- The URI to the script.
- Parameters string
- The parameters for the script provided.
- Name string
- The name of the script action.
- Uri string
- The URI to the script.
- Parameters string
- The parameters for the script provided.
- name String
- The name of the script action.
- uri String
- The URI to the script.
- parameters String
- The parameters for the script provided.
- name string
- The name of the script action.
- uri string
- The URI to the script.
- parameters string
- The parameters for the script provided.
- name str
- The name of the script action.
- uri str
- The URI to the script.
- parameters str
- The parameters for the script provided.
- name String
- The name of the script action.
- uri String
- The URI to the script.
- parameters String
- The parameters for the script provided.
SparkClusterRolesWorkerNode, SparkClusterRolesWorkerNodeArgs          
- TargetInstanceCount int
- The number of instances which should be run for the Worker Nodes.
- Username string
- The Username of the local administrator for the Worker Nodes. Changing this forces a new resource to be created.
- VmSize string
- The Size of the Virtual Machine which should be used as the Worker Nodes. Possible values are ExtraSmall,Small,Medium,Large,ExtraLarge,A5,A6,A7,A8,A9,A10,A11,Standard_A1_V2,Standard_A2_V2,Standard_A2m_V2,Standard_A3,Standard_A4_V2,Standard_A4m_V2,Standard_A8_V2,Standard_A8m_V2,Standard_D1,Standard_D2,Standard_D3,Standard_D4,Standard_D11,Standard_D12,Standard_D13,Standard_D14,Standard_D1_V2,Standard_D2_V2,Standard_D3_V2,Standard_D4_V2,Standard_D5_V2,Standard_D11_V2,Standard_D12_V2,Standard_D13_V2,Standard_D14_V2,Standard_DS1_V2,Standard_DS2_V2,Standard_DS3_V2,Standard_DS4_V2,Standard_DS5_V2,Standard_DS11_V2,Standard_DS12_V2,Standard_DS13_V2,Standard_DS14_V2,Standard_E2_V3,Standard_E4_V3,Standard_E8_V3,Standard_E16_V3,Standard_E20_V3,Standard_E32_V3,Standard_E64_V3,Standard_E64i_V3,Standard_E2s_V3,Standard_E4s_V3,Standard_E8s_V3,Standard_E16s_V3,Standard_E20s_V3,Standard_E32s_V3,Standard_E64s_V3,Standard_E64is_V3,Standard_D2a_V4,Standard_D4a_V4,Standard_D8a_V4,Standard_D16a_V4,Standard_D32a_V4,Standard_D48a_V4,Standard_D64a_V4,Standard_D96a_V4,Standard_E2a_V4,Standard_E4a_V4,Standard_E8a_V4,Standard_E16a_V4,Standard_E20a_V4,Standard_E32a_V4,Standard_E48a_V4,Standard_E64a_V4,Standard_E96a_V4,Standard_D2ads_V5,Standard_D4ads_V5,Standard_D8ads_V5,Standard_D16ads_V5,Standard_D32ads_V5,Standard_D48ads_V5,Standard_D64ads_V5,Standard_D96ads_V5,Standard_E2ads_V5,Standard_E4ads_V5,Standard_E8ads_V5,Standard_E16ads_V5,Standard_E20ads_V5,Standard_E32ads_V5,Standard_E48ads_V5,Standard_E64ads_V5,Standard_E96ads_V5,Standard_G1,Standard_G2,Standard_G3,Standard_G4,Standard_G5,Standard_F2s_V2,Standard_F4s_V2,Standard_F8s_V2,Standard_F16s_V2,Standard_F32s_V2,Standard_F64s_V2,Standard_F72s_V2,Standard_GS1,Standard_GS2,Standard_GS3,Standard_GS4,Standard_GS5andStandard_NC24. Changing this forces a new resource to be created.
- Autoscale
SparkCluster Roles Worker Node Autoscale 
- An autoscale block as defined below.
- Password string
- The Password associated with the local administrator for the Worker Nodes. Changing this forces a new resource to be created. - NOTE: If specified, this password must be at least 10 characters in length and must contain at least one digit, one uppercase and one lower case letter, one non-alphanumeric character (except characters ' " ` ). 
- ScriptActions List<SparkCluster Roles Worker Node Script Action> 
- The script action which will run on the cluster. One or more script_actionsblocks as defined above.
- SshKeys List<string>
- A list of SSH Keys which should be used for the local administrator on the Worker Nodes. Changing this forces a new resource to be created. - NOTE: Either a - passwordor one or more- ssh_keysmust be specified - but not both.
- SubnetId string
- The ID of the Subnet within the Virtual Network where the Worker Nodes should be provisioned within. Changing this forces a new resource to be created.
- VirtualNetworkId string
- The ID of the Virtual Network where the Worker Nodes should be provisioned within. Changing this forces a new resource to be created.
- TargetInstanceCount int
- The number of instances which should be run for the Worker Nodes.
- Username string
- The Username of the local administrator for the Worker Nodes. Changing this forces a new resource to be created.
- VmSize string
- The Size of the Virtual Machine which should be used as the Worker Nodes. Possible values are ExtraSmall,Small,Medium,Large,ExtraLarge,A5,A6,A7,A8,A9,A10,A11,Standard_A1_V2,Standard_A2_V2,Standard_A2m_V2,Standard_A3,Standard_A4_V2,Standard_A4m_V2,Standard_A8_V2,Standard_A8m_V2,Standard_D1,Standard_D2,Standard_D3,Standard_D4,Standard_D11,Standard_D12,Standard_D13,Standard_D14,Standard_D1_V2,Standard_D2_V2,Standard_D3_V2,Standard_D4_V2,Standard_D5_V2,Standard_D11_V2,Standard_D12_V2,Standard_D13_V2,Standard_D14_V2,Standard_DS1_V2,Standard_DS2_V2,Standard_DS3_V2,Standard_DS4_V2,Standard_DS5_V2,Standard_DS11_V2,Standard_DS12_V2,Standard_DS13_V2,Standard_DS14_V2,Standard_E2_V3,Standard_E4_V3,Standard_E8_V3,Standard_E16_V3,Standard_E20_V3,Standard_E32_V3,Standard_E64_V3,Standard_E64i_V3,Standard_E2s_V3,Standard_E4s_V3,Standard_E8s_V3,Standard_E16s_V3,Standard_E20s_V3,Standard_E32s_V3,Standard_E64s_V3,Standard_E64is_V3,Standard_D2a_V4,Standard_D4a_V4,Standard_D8a_V4,Standard_D16a_V4,Standard_D32a_V4,Standard_D48a_V4,Standard_D64a_V4,Standard_D96a_V4,Standard_E2a_V4,Standard_E4a_V4,Standard_E8a_V4,Standard_E16a_V4,Standard_E20a_V4,Standard_E32a_V4,Standard_E48a_V4,Standard_E64a_V4,Standard_E96a_V4,Standard_D2ads_V5,Standard_D4ads_V5,Standard_D8ads_V5,Standard_D16ads_V5,Standard_D32ads_V5,Standard_D48ads_V5,Standard_D64ads_V5,Standard_D96ads_V5,Standard_E2ads_V5,Standard_E4ads_V5,Standard_E8ads_V5,Standard_E16ads_V5,Standard_E20ads_V5,Standard_E32ads_V5,Standard_E48ads_V5,Standard_E64ads_V5,Standard_E96ads_V5,Standard_G1,Standard_G2,Standard_G3,Standard_G4,Standard_G5,Standard_F2s_V2,Standard_F4s_V2,Standard_F8s_V2,Standard_F16s_V2,Standard_F32s_V2,Standard_F64s_V2,Standard_F72s_V2,Standard_GS1,Standard_GS2,Standard_GS3,Standard_GS4,Standard_GS5andStandard_NC24. Changing this forces a new resource to be created.
- Autoscale
SparkCluster Roles Worker Node Autoscale 
- A autoscaleblock as defined below.
- Password string
- The Password associated with the local administrator for the Worker Nodes. Changing this forces a new resource to be created. - NOTE: If specified, this password must be at least 10 characters in length and must contain at least one digit, one uppercase and one lower case letter, one non-alphanumeric character (except characters ' " ` ). 
- ScriptActions []SparkCluster Roles Worker Node Script Action 
- The script action which will run on the cluster. One or more script_actionsblocks as defined above.
- SshKeys []string
- A list of SSH Keys which should be used for the local administrator on the Worker Nodes. Changing this forces a new resource to be created. - NOTE: Either a - passwordor one or more- ssh_keysmust be specified - but not both.
- SubnetId string
- The ID of the Subnet within the Virtual Network where the Worker Nodes should be provisioned within. Changing this forces a new resource to be created.
- VirtualNetwork stringId 
- The ID of the Virtual Network where the Worker Nodes should be provisioned within. Changing this forces a new resource to be created.
- targetInstance IntegerCount 
- The number of instances which should be run for the Worker Nodes.
- username String
- The Username of the local administrator for the Worker Nodes. Changing this forces a new resource to be created.
- vmSize String
- The Size of the Virtual Machine which should be used as the Worker Nodes. Possible values are ExtraSmall,Small,Medium,Large,ExtraLarge,A5,A6,A7,A8,A9,A10,A11,Standard_A1_V2,Standard_A2_V2,Standard_A2m_V2,Standard_A3,Standard_A4_V2,Standard_A4m_V2,Standard_A8_V2,Standard_A8m_V2,Standard_D1,Standard_D2,Standard_D3,Standard_D4,Standard_D11,Standard_D12,Standard_D13,Standard_D14,Standard_D1_V2,Standard_D2_V2,Standard_D3_V2,Standard_D4_V2,Standard_D5_V2,Standard_D11_V2,Standard_D12_V2,Standard_D13_V2,Standard_D14_V2,Standard_DS1_V2,Standard_DS2_V2,Standard_DS3_V2,Standard_DS4_V2,Standard_DS5_V2,Standard_DS11_V2,Standard_DS12_V2,Standard_DS13_V2,Standard_DS14_V2,Standard_E2_V3,Standard_E4_V3,Standard_E8_V3,Standard_E16_V3,Standard_E20_V3,Standard_E32_V3,Standard_E64_V3,Standard_E64i_V3,Standard_E2s_V3,Standard_E4s_V3,Standard_E8s_V3,Standard_E16s_V3,Standard_E20s_V3,Standard_E32s_V3,Standard_E64s_V3,Standard_E64is_V3,Standard_D2a_V4,Standard_D4a_V4,Standard_D8a_V4,Standard_D16a_V4,Standard_D32a_V4,Standard_D48a_V4,Standard_D64a_V4,Standard_D96a_V4,Standard_E2a_V4,Standard_E4a_V4,Standard_E8a_V4,Standard_E16a_V4,Standard_E20a_V4,Standard_E32a_V4,Standard_E48a_V4,Standard_E64a_V4,Standard_E96a_V4,Standard_D2ads_V5,Standard_D4ads_V5,Standard_D8ads_V5,Standard_D16ads_V5,Standard_D32ads_V5,Standard_D48ads_V5,Standard_D64ads_V5,Standard_D96ads_V5,Standard_E2ads_V5,Standard_E4ads_V5,Standard_E8ads_V5,Standard_E16ads_V5,Standard_E20ads_V5,Standard_E32ads_V5,Standard_E48ads_V5,Standard_E64ads_V5,Standard_E96ads_V5,Standard_G1,Standard_G2,Standard_G3,Standard_G4,Standard_G5,Standard_F2s_V2,Standard_F4s_V2,Standard_F8s_V2,Standard_F16s_V2,Standard_F32s_V2,Standard_F64s_V2,Standard_F72s_V2,Standard_GS1,Standard_GS2,Standard_GS3,Standard_GS4,Standard_GS5andStandard_NC24. Changing this forces a new resource to be created.
- autoscale
SparkCluster Roles Worker Node Autoscale 
- A autoscaleblock as defined below.
- password String
- The Password associated with the local administrator for the Worker Nodes. Changing this forces a new resource to be created. - NOTE: If specified, this password must be at least 10 characters in length and must contain at least one digit, one uppercase and one lower case letter, one non-alphanumeric character (except characters ' " ` ). 
- scriptActions List<SparkCluster Roles Worker Node Script Action> 
- The script action which will run on the cluster. One or more script_actionsblocks as defined above.
- sshKeys List<String>
- A list of SSH Keys which should be used for the local administrator on the Worker Nodes. Changing this forces a new resource to be created. - NOTE: Either a - passwordor one or more- ssh_keysmust be specified - but not both.
- subnetId String
- The ID of the Subnet within the Virtual Network where the Worker Nodes should be provisioned within. Changing this forces a new resource to be created.
- virtualNetwork StringId 
- The ID of the Virtual Network where the Worker Nodes should be provisioned within. Changing this forces a new resource to be created.
- targetInstance numberCount 
- The number of instances which should be run for the Worker Nodes.
- username string
- The Username of the local administrator for the Worker Nodes. Changing this forces a new resource to be created.
- vmSize string
- The Size of the Virtual Machine which should be used as the Worker Nodes. Possible values are ExtraSmall,Small,Medium,Large,ExtraLarge,A5,A6,A7,A8,A9,A10,A11,Standard_A1_V2,Standard_A2_V2,Standard_A2m_V2,Standard_A3,Standard_A4_V2,Standard_A4m_V2,Standard_A8_V2,Standard_A8m_V2,Standard_D1,Standard_D2,Standard_D3,Standard_D4,Standard_D11,Standard_D12,Standard_D13,Standard_D14,Standard_D1_V2,Standard_D2_V2,Standard_D3_V2,Standard_D4_V2,Standard_D5_V2,Standard_D11_V2,Standard_D12_V2,Standard_D13_V2,Standard_D14_V2,Standard_DS1_V2,Standard_DS2_V2,Standard_DS3_V2,Standard_DS4_V2,Standard_DS5_V2,Standard_DS11_V2,Standard_DS12_V2,Standard_DS13_V2,Standard_DS14_V2,Standard_E2_V3,Standard_E4_V3,Standard_E8_V3,Standard_E16_V3,Standard_E20_V3,Standard_E32_V3,Standard_E64_V3,Standard_E64i_V3,Standard_E2s_V3,Standard_E4s_V3,Standard_E8s_V3,Standard_E16s_V3,Standard_E20s_V3,Standard_E32s_V3,Standard_E64s_V3,Standard_E64is_V3,Standard_D2a_V4,Standard_D4a_V4,Standard_D8a_V4,Standard_D16a_V4,Standard_D32a_V4,Standard_D48a_V4,Standard_D64a_V4,Standard_D96a_V4,Standard_E2a_V4,Standard_E4a_V4,Standard_E8a_V4,Standard_E16a_V4,Standard_E20a_V4,Standard_E32a_V4,Standard_E48a_V4,Standard_E64a_V4,Standard_E96a_V4,Standard_D2ads_V5,Standard_D4ads_V5,Standard_D8ads_V5,Standard_D16ads_V5,Standard_D32ads_V5,Standard_D48ads_V5,Standard_D64ads_V5,Standard_D96ads_V5,Standard_E2ads_V5,Standard_E4ads_V5,Standard_E8ads_V5,Standard_E16ads_V5,Standard_E20ads_V5,Standard_E32ads_V5,Standard_E48ads_V5,Standard_E64ads_V5,Standard_E96ads_V5,Standard_G1,Standard_G2,Standard_G3,Standard_G4,Standard_G5,Standard_F2s_V2,Standard_F4s_V2,Standard_F8s_V2,Standard_F16s_V2,Standard_F32s_V2,Standard_F64s_V2,Standard_F72s_V2,Standard_GS1,Standard_GS2,Standard_GS3,Standard_GS4,Standard_GS5andStandard_NC24. Changing this forces a new resource to be created.
- autoscale
SparkCluster Roles Worker Node Autoscale 
- A autoscaleblock as defined below.
- password string
- The Password associated with the local administrator for the Worker Nodes. Changing this forces a new resource to be created. - NOTE: If specified, this password must be at least 10 characters in length and must contain at least one digit, one uppercase and one lower case letter, one non-alphanumeric character (except characters ' " ` ). 
- scriptActions SparkCluster Roles Worker Node Script Action[] 
- The script action which will run on the cluster. One or more script_actionsblocks as defined above.
- sshKeys string[]
- A list of SSH Keys which should be used for the local administrator on the Worker Nodes. Changing this forces a new resource to be created. - NOTE: Either a - passwordor one or more- ssh_keysmust be specified - but not both.
- subnetId string
- The ID of the Subnet within the Virtual Network where the Worker Nodes should be provisioned within. Changing this forces a new resource to be created.
- virtualNetwork stringId 
- The ID of the Virtual Network where the Worker Nodes should be provisioned within. Changing this forces a new resource to be created.
- target_instance_count int
- The number of instances which should be run for the Worker Nodes.
- username str
- The Username of the local administrator for the Worker Nodes. Changing this forces a new resource to be created.
- vm_size str
- The Size of the Virtual Machine which should be used as the Worker Nodes. Possible values are ExtraSmall,Small,Medium,Large,ExtraLarge,A5,A6,A7,A8,A9,A10,A11,Standard_A1_V2,Standard_A2_V2,Standard_A2m_V2,Standard_A3,Standard_A4_V2,Standard_A4m_V2,Standard_A8_V2,Standard_A8m_V2,Standard_D1,Standard_D2,Standard_D3,Standard_D4,Standard_D11,Standard_D12,Standard_D13,Standard_D14,Standard_D1_V2,Standard_D2_V2,Standard_D3_V2,Standard_D4_V2,Standard_D5_V2,Standard_D11_V2,Standard_D12_V2,Standard_D13_V2,Standard_D14_V2,Standard_DS1_V2,Standard_DS2_V2,Standard_DS3_V2,Standard_DS4_V2,Standard_DS5_V2,Standard_DS11_V2,Standard_DS12_V2,Standard_DS13_V2,Standard_DS14_V2,Standard_E2_V3,Standard_E4_V3,Standard_E8_V3,Standard_E16_V3,Standard_E20_V3,Standard_E32_V3,Standard_E64_V3,Standard_E64i_V3,Standard_E2s_V3,Standard_E4s_V3,Standard_E8s_V3,Standard_E16s_V3,Standard_E20s_V3,Standard_E32s_V3,Standard_E64s_V3,Standard_E64is_V3,Standard_D2a_V4,Standard_D4a_V4,Standard_D8a_V4,Standard_D16a_V4,Standard_D32a_V4,Standard_D48a_V4,Standard_D64a_V4,Standard_D96a_V4,Standard_E2a_V4,Standard_E4a_V4,Standard_E8a_V4,Standard_E16a_V4,Standard_E20a_V4,Standard_E32a_V4,Standard_E48a_V4,Standard_E64a_V4,Standard_E96a_V4,Standard_D2ads_V5,Standard_D4ads_V5,Standard_D8ads_V5,Standard_D16ads_V5,Standard_D32ads_V5,Standard_D48ads_V5,Standard_D64ads_V5,Standard_D96ads_V5,Standard_E2ads_V5,Standard_E4ads_V5,Standard_E8ads_V5,Standard_E16ads_V5,Standard_E20ads_V5,Standard_E32ads_V5,Standard_E48ads_V5,Standard_E64ads_V5,Standard_E96ads_V5,Standard_G1,Standard_G2,Standard_G3,Standard_G4,Standard_G5,Standard_F2s_V2,Standard_F4s_V2,Standard_F8s_V2,Standard_F16s_V2,Standard_F32s_V2,Standard_F64s_V2,Standard_F72s_V2,Standard_GS1,Standard_GS2,Standard_GS3,Standard_GS4,Standard_GS5andStandard_NC24. Changing this forces a new resource to be created.
- autoscale
SparkCluster Roles Worker Node Autoscale 
- A autoscaleblock as defined below.
- password str
- The Password associated with the local administrator for the Worker Nodes. Changing this forces a new resource to be created. - NOTE: If specified, this password must be at least 10 characters in length and must contain at least one digit, one uppercase and one lower case letter, one non-alphanumeric character (except characters ' " ` ). 
- script_actions Sequence[SparkCluster Roles Worker Node Script Action] 
- The script action which will run on the cluster. One or more script_actionsblocks as defined above.
- ssh_keys Sequence[str]
- A list of SSH Keys which should be used for the local administrator on the Worker Nodes. Changing this forces a new resource to be created. - NOTE: Either a - passwordor one or more- ssh_keysmust be specified - but not both.
- subnet_id str
- The ID of the Subnet within the Virtual Network where the Worker Nodes should be provisioned within. Changing this forces a new resource to be created.
- virtual_network_id str
- The ID of the Virtual Network where the Worker Nodes should be provisioned within. Changing this forces a new resource to be created.
- targetInstance NumberCount 
- The number of instances which should be run for the Worker Nodes.
- username String
- The Username of the local administrator for the Worker Nodes. Changing this forces a new resource to be created.
- vmSize String
- The Size of the Virtual Machine which should be used as the Worker Nodes. Possible values are ExtraSmall,Small,Medium,Large,ExtraLarge,A5,A6,A7,A8,A9,A10,A11,Standard_A1_V2,Standard_A2_V2,Standard_A2m_V2,Standard_A3,Standard_A4_V2,Standard_A4m_V2,Standard_A8_V2,Standard_A8m_V2,Standard_D1,Standard_D2,Standard_D3,Standard_D4,Standard_D11,Standard_D12,Standard_D13,Standard_D14,Standard_D1_V2,Standard_D2_V2,Standard_D3_V2,Standard_D4_V2,Standard_D5_V2,Standard_D11_V2,Standard_D12_V2,Standard_D13_V2,Standard_D14_V2,Standard_DS1_V2,Standard_DS2_V2,Standard_DS3_V2,Standard_DS4_V2,Standard_DS5_V2,Standard_DS11_V2,Standard_DS12_V2,Standard_DS13_V2,Standard_DS14_V2,Standard_E2_V3,Standard_E4_V3,Standard_E8_V3,Standard_E16_V3,Standard_E20_V3,Standard_E32_V3,Standard_E64_V3,Standard_E64i_V3,Standard_E2s_V3,Standard_E4s_V3,Standard_E8s_V3,Standard_E16s_V3,Standard_E20s_V3,Standard_E32s_V3,Standard_E64s_V3,Standard_E64is_V3,Standard_D2a_V4,Standard_D4a_V4,Standard_D8a_V4,Standard_D16a_V4,Standard_D32a_V4,Standard_D48a_V4,Standard_D64a_V4,Standard_D96a_V4,Standard_E2a_V4,Standard_E4a_V4,Standard_E8a_V4,Standard_E16a_V4,Standard_E20a_V4,Standard_E32a_V4,Standard_E48a_V4,Standard_E64a_V4,Standard_E96a_V4,Standard_D2ads_V5,Standard_D4ads_V5,Standard_D8ads_V5,Standard_D16ads_V5,Standard_D32ads_V5,Standard_D48ads_V5,Standard_D64ads_V5,Standard_D96ads_V5,Standard_E2ads_V5,Standard_E4ads_V5,Standard_E8ads_V5,Standard_E16ads_V5,Standard_E20ads_V5,Standard_E32ads_V5,Standard_E48ads_V5,Standard_E64ads_V5,Standard_E96ads_V5,Standard_G1,Standard_G2,Standard_G3,Standard_G4,Standard_G5,Standard_F2s_V2,Standard_F4s_V2,Standard_F8s_V2,Standard_F16s_V2,Standard_F32s_V2,Standard_F64s_V2,Standard_F72s_V2,Standard_GS1,Standard_GS2,Standard_GS3,Standard_GS4,Standard_GS5andStandard_NC24. Changing this forces a new resource to be created.
- autoscale Property Map
- A autoscaleblock as defined below.
- password String
- The Password associated with the local administrator for the Worker Nodes. Changing this forces a new resource to be created. - NOTE: If specified, this password must be at least 10 characters in length and must contain at least one digit, one uppercase and one lower case letter, one non-alphanumeric character (except characters ' " ` ). 
- scriptActions List<Property Map>
- The script action which will run on the cluster. One or more script_actionsblocks as defined above.
- sshKeys List<String>
- A list of SSH Keys which should be used for the local administrator on the Worker Nodes. Changing this forces a new resource to be created. - NOTE: Either a - passwordor one or more- ssh_keysmust be specified - but not both.
- subnetId String
- The ID of the Subnet within the Virtual Network where the Worker Nodes should be provisioned within. Changing this forces a new resource to be created.
- virtualNetwork StringId 
- The ID of the Virtual Network where the Worker Nodes should be provisioned within. Changing this forces a new resource to be created.
SparkClusterRolesWorkerNodeAutoscale, SparkClusterRolesWorkerNodeAutoscaleArgs            
- Capacity
SparkCluster Roles Worker Node Autoscale Capacity 
- A capacity block as defined below.
- Recurrence
SparkCluster Roles Worker Node Autoscale Recurrence 
- A recurrence block as defined below. - NOTE: Either a capacity or a recurrence block must be specified - but not both.
- Capacity
SparkCluster Roles Worker Node Autoscale Capacity 
- A capacityblock as defined below.
- Recurrence
SparkCluster Roles Worker Node Autoscale Recurrence 
- A - recurrenceblock as defined below.- NOTE: Either a - capacityor- recurrenceblock must be specified - but not both.
- capacity
SparkCluster Roles Worker Node Autoscale Capacity 
- A capacityblock as defined below.
- recurrence
SparkCluster Roles Worker Node Autoscale Recurrence 
- A - recurrenceblock as defined below.- NOTE: Either a - capacityor- recurrenceblock must be specified - but not both.
- capacity
SparkCluster Roles Worker Node Autoscale Capacity 
- A capacityblock as defined below.
- recurrence
SparkCluster Roles Worker Node Autoscale Recurrence 
- A - recurrenceblock as defined below.- NOTE: Either a - capacityor- recurrenceblock must be specified - but not both.
- capacity
SparkCluster Roles Worker Node Autoscale Capacity 
- A capacityblock as defined below.
- recurrence
SparkCluster Roles Worker Node Autoscale Recurrence 
- A - recurrenceblock as defined below.- NOTE: Either a - capacityor- recurrenceblock must be specified - but not both.
- capacity Property Map
- A capacityblock as defined below.
- recurrence Property Map
- A - recurrenceblock as defined below.- NOTE: Either a - capacityor- recurrenceblock must be specified - but not both.
SparkClusterRolesWorkerNodeAutoscaleCapacity, SparkClusterRolesWorkerNodeAutoscaleCapacityArgs              
- MaxInstanceCount int
- The maximum number of worker nodes to autoscale to based on the cluster's activity.
- MinInstanceCount int
- The minimum number of worker nodes to autoscale to based on the cluster's activity.
- MaxInstance intCount 
- The maximum number of worker nodes to autoscale to based on the cluster's activity.
- MinInstance intCount 
- The minimum number of worker nodes to autoscale to based on the cluster's activity.
- maxInstance IntegerCount 
- The maximum number of worker nodes to autoscale to based on the cluster's activity.
- minInstance IntegerCount 
- The minimum number of worker nodes to autoscale to based on the cluster's activity.
- maxInstance numberCount 
- The maximum number of worker nodes to autoscale to based on the cluster's activity.
- minInstance numberCount 
- The minimum number of worker nodes to autoscale to based on the cluster's activity.
- max_instance_ intcount 
- The maximum number of worker nodes to autoscale to based on the cluster's activity.
- min_instance_ intcount 
- The minimum number of worker nodes to autoscale to based on the cluster's activity.
- maxInstance NumberCount 
- The maximum number of worker nodes to autoscale to based on the cluster's activity.
- minInstance NumberCount 
- The minimum number of worker nodes to autoscale to based on the cluster's activity.
SparkClusterRolesWorkerNodeAutoscaleRecurrence, SparkClusterRolesWorkerNodeAutoscaleRecurrenceArgs              
- Schedules
List<SparkCluster Roles Worker Node Autoscale Recurrence Schedule> 
- A list of scheduleblocks as defined below.
- Timezone string
- The time zone for the autoscale schedule times.
- Schedules
[]SparkCluster Roles Worker Node Autoscale Recurrence Schedule 
- A list of scheduleblocks as defined below.
- Timezone string
- The time zone for the autoscale schedule times.
- schedules
List<SparkCluster Roles Worker Node Autoscale Recurrence Schedule> 
- A list of scheduleblocks as defined below.
- timezone String
- The time zone for the autoscale schedule times.
- schedules
SparkCluster Roles Worker Node Autoscale Recurrence Schedule[] 
- A list of scheduleblocks as defined below.
- timezone string
- The time zone for the autoscale schedule times.
- schedules
Sequence[SparkCluster Roles Worker Node Autoscale Recurrence Schedule] 
- A list of scheduleblocks as defined below.
- timezone str
- The time zone for the autoscale schedule times.
- schedules List<Property Map>
- A list of scheduleblocks as defined below.
- timezone String
- The time zone for the autoscale schedule times.
SparkClusterRolesWorkerNodeAutoscaleRecurrenceSchedule, SparkClusterRolesWorkerNodeAutoscaleRecurrenceScheduleArgs                
- Days List<string>
- The days of the week to perform autoscale. Possible values are Monday, Tuesday, Wednesday, Thursday, Friday, Saturday and Sunday.
- TargetInstance intCount 
- The number of worker nodes to autoscale at the specified time.
- Time string
- The time of day to perform the autoscale in 24-hour format.
- Days []string
- The days of the week to perform autoscale. Possible values are Monday,Tuesday,Wednesday,Thursday,Friday,SaturdayandSunday.
- TargetInstance intCount 
- The number of worker nodes to autoscale at the specified time.
- Time string
- The time of day to perform the autoscale in 24hour format.
- days List<String>
- The days of the week to perform autoscale. Possible values are Monday,Tuesday,Wednesday,Thursday,Friday,SaturdayandSunday.
- targetInstance IntegerCount 
- The number of worker nodes to autoscale at the specified time.
- time String
- The time of day to perform the autoscale in 24hour format.
- days string[]
- The days of the week to perform autoscale. Possible values are Monday,Tuesday,Wednesday,Thursday,Friday,SaturdayandSunday.
- targetInstance numberCount 
- The number of worker nodes to autoscale at the specified time.
- time string
- The time of day to perform the autoscale in 24hour format.
- days Sequence[str]
- The days of the week to perform autoscale. Possible values are Monday,Tuesday,Wednesday,Thursday,Friday,SaturdayandSunday.
- target_instance_ intcount 
- The number of worker nodes to autoscale at the specified time.
- time str
- The time of day to perform the autoscale in 24hour format.
- days List<String>
- The days of the week to perform autoscale. Possible values are Monday,Tuesday,Wednesday,Thursday,Friday,SaturdayandSunday.
- targetInstance NumberCount 
- The number of worker nodes to autoscale at the specified time.
- time String
- The time of day to perform the autoscale in 24hour format.
SparkClusterRolesWorkerNodeScriptAction, SparkClusterRolesWorkerNodeScriptActionArgs              
- Name string
- The name of the script action.
- Uri string
- The URI to the script.
- Parameters string
- The parameters for the script provided.
- Name string
- The name of the script action.
- Uri string
- The URI to the script.
- Parameters string
- The parameters for the script provided.
- name String
- The name of the script action.
- uri String
- The URI to the script.
- parameters String
- The parameters for the script provided.
- name string
- The name of the script action.
- uri string
- The URI to the script.
- parameters string
- The parameters for the script provided.
- name str
- The name of the script action.
- uri str
- The URI to the script.
- parameters str
- The parameters for the script provided.
- name String
- The name of the script action.
- uri String
- The URI to the script.
- parameters String
- The parameters for the script provided.
SparkClusterRolesZookeeperNode, SparkClusterRolesZookeeperNodeArgs          
- Username string
- The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created.
- VmSize string
- The Size of the Virtual Machine which should be used as the Zookeeper Nodes. Possible values are ExtraSmall,Small,Medium,Large,ExtraLarge,A5,A6,A7,A8,A9,A10,A11,Standard_A1_V2,Standard_A2_V2,Standard_A2m_V2,Standard_A3,Standard_A4_V2,Standard_A4m_V2,Standard_A8_V2,Standard_A8m_V2,Standard_D1,Standard_D2,Standard_D3,Standard_D4,Standard_D11,Standard_D12,Standard_D13,Standard_D14,Standard_D1_V2,Standard_D2_V2,Standard_D3_V2,Standard_D4_V2,Standard_D5_V2,Standard_D11_V2,Standard_D12_V2,Standard_D13_V2,Standard_D14_V2,Standard_DS1_V2,Standard_DS2_V2,Standard_DS3_V2,Standard_DS4_V2,Standard_DS5_V2,Standard_DS11_V2,Standard_DS12_V2,Standard_DS13_V2,Standard_DS14_V2,Standard_E2_V3,Standard_E4_V3,Standard_E8_V3,Standard_E16_V3,Standard_E20_V3,Standard_E32_V3,Standard_E64_V3,Standard_E64i_V3,Standard_E2s_V3,Standard_E4s_V3,Standard_E8s_V3,Standard_E16s_V3,Standard_E20s_V3,Standard_E32s_V3,Standard_E64s_V3,Standard_E64is_V3,Standard_D2a_V4,Standard_D4a_V4,Standard_D8a_V4,Standard_D16a_V4,Standard_D32a_V4,Standard_D48a_V4,Standard_D64a_V4,Standard_D96a_V4,Standard_E2a_V4,Standard_E4a_V4,Standard_E8a_V4,Standard_E16a_V4,Standard_E20a_V4,Standard_E32a_V4,Standard_E48a_V4,Standard_E64a_V4,Standard_E96a_V4,Standard_D2ads_V5,Standard_D4ads_V5,Standard_D8ads_V5,Standard_D16ads_V5,Standard_D32ads_V5,Standard_D48ads_V5,Standard_D64ads_V5,Standard_D96ads_V5,Standard_E2ads_V5,Standard_E4ads_V5,Standard_E8ads_V5,Standard_E16ads_V5,Standard_E20ads_V5,Standard_E32ads_V5,Standard_E48ads_V5,Standard_E64ads_V5,Standard_E96ads_V5,Standard_G1,Standard_G2,Standard_G3,Standard_G4,Standard_G5,Standard_F2s_V2,Standard_F4s_V2,Standard_F8s_V2,Standard_F16s_V2,Standard_F32s_V2,Standard_F64s_V2,Standard_F72s_V2,Standard_GS1,Standard_GS2,Standard_GS3,Standard_GS4,Standard_GS5andStandard_NC24. Changing this forces a new resource to be created.
- Password string
- The Password associated with the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. - NOTE: If specified, this password must be at least 10 characters in length and must contain at least one digit, one uppercase and one lower case letter, one non-alphanumeric character (except characters ' " ` ). 
- ScriptActions List<SparkCluster Roles Zookeeper Node Script Action>
- The script action which will run on the cluster. One or more `script_actions` blocks as defined above.
- SshKeys List<string>
- A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created. NOTE: Either a `password` or one or more `ssh_keys` must be specified - but not both.
- SubnetId string
- The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created.
- VirtualNetworkId string
- The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created.
- Username string
- The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created.
- VmSize string
- The Size of the Virtual Machine which should be used as the Zookeeper Nodes. Possible values are ExtraSmall,Small,Medium,Large,ExtraLarge,A5,A6,A7,A8,A9,A10,A11,Standard_A1_V2,Standard_A2_V2,Standard_A2m_V2,Standard_A3,Standard_A4_V2,Standard_A4m_V2,Standard_A8_V2,Standard_A8m_V2,Standard_D1,Standard_D2,Standard_D3,Standard_D4,Standard_D11,Standard_D12,Standard_D13,Standard_D14,Standard_D1_V2,Standard_D2_V2,Standard_D3_V2,Standard_D4_V2,Standard_D5_V2,Standard_D11_V2,Standard_D12_V2,Standard_D13_V2,Standard_D14_V2,Standard_DS1_V2,Standard_DS2_V2,Standard_DS3_V2,Standard_DS4_V2,Standard_DS5_V2,Standard_DS11_V2,Standard_DS12_V2,Standard_DS13_V2,Standard_DS14_V2,Standard_E2_V3,Standard_E4_V3,Standard_E8_V3,Standard_E16_V3,Standard_E20_V3,Standard_E32_V3,Standard_E64_V3,Standard_E64i_V3,Standard_E2s_V3,Standard_E4s_V3,Standard_E8s_V3,Standard_E16s_V3,Standard_E20s_V3,Standard_E32s_V3,Standard_E64s_V3,Standard_E64is_V3,Standard_D2a_V4,Standard_D4a_V4,Standard_D8a_V4,Standard_D16a_V4,Standard_D32a_V4,Standard_D48a_V4,Standard_D64a_V4,Standard_D96a_V4,Standard_E2a_V4,Standard_E4a_V4,Standard_E8a_V4,Standard_E16a_V4,Standard_E20a_V4,Standard_E32a_V4,Standard_E48a_V4,Standard_E64a_V4,Standard_E96a_V4,Standard_D2ads_V5,Standard_D4ads_V5,Standard_D8ads_V5,Standard_D16ads_V5,Standard_D32ads_V5,Standard_D48ads_V5,Standard_D64ads_V5,Standard_D96ads_V5,Standard_E2ads_V5,Standard_E4ads_V5,Standard_E8ads_V5,Standard_E16ads_V5,Standard_E20ads_V5,Standard_E32ads_V5,Standard_E48ads_V5,Standard_E64ads_V5,Standard_E96ads_V5,Standard_G1,Standard_G2,Standard_G3,Standard_G4,Standard_G5,Standard_F2s_V2,Standard_F4s_V2,Standard_F8s_V2,Standard_F16s_V2,Standard_F32s_V2,Standard_F64s_V2,Standard_F72s_V2,Standard_GS1,Standard_GS2,Standard_GS3,Standard_GS4,Standard_GS5andStandard_NC24. Changing this forces a new resource to be created.
- Password string
- The Password associated with the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. - NOTE: If specified, this password must be at least 10 characters in length and must contain at least one digit, one uppercase and one lower case letter, one non-alphanumeric character (except characters ' " ` ). 
- ScriptActions []SparkCluster Roles Zookeeper Node Script Action
- The script action which will run on the cluster. One or more `script_actions` blocks as defined above.
- SshKeys []string
- A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created. NOTE: Either a `password` or one or more `ssh_keys` must be specified - but not both.
- SubnetId string
- The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created.
- VirtualNetworkId string
- The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created.
- username String
- The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created.
- vmSize String
- The Size of the Virtual Machine which should be used as the Zookeeper Nodes. Possible values are ExtraSmall,Small,Medium,Large,ExtraLarge,A5,A6,A7,A8,A9,A10,A11,Standard_A1_V2,Standard_A2_V2,Standard_A2m_V2,Standard_A3,Standard_A4_V2,Standard_A4m_V2,Standard_A8_V2,Standard_A8m_V2,Standard_D1,Standard_D2,Standard_D3,Standard_D4,Standard_D11,Standard_D12,Standard_D13,Standard_D14,Standard_D1_V2,Standard_D2_V2,Standard_D3_V2,Standard_D4_V2,Standard_D5_V2,Standard_D11_V2,Standard_D12_V2,Standard_D13_V2,Standard_D14_V2,Standard_DS1_V2,Standard_DS2_V2,Standard_DS3_V2,Standard_DS4_V2,Standard_DS5_V2,Standard_DS11_V2,Standard_DS12_V2,Standard_DS13_V2,Standard_DS14_V2,Standard_E2_V3,Standard_E4_V3,Standard_E8_V3,Standard_E16_V3,Standard_E20_V3,Standard_E32_V3,Standard_E64_V3,Standard_E64i_V3,Standard_E2s_V3,Standard_E4s_V3,Standard_E8s_V3,Standard_E16s_V3,Standard_E20s_V3,Standard_E32s_V3,Standard_E64s_V3,Standard_E64is_V3,Standard_D2a_V4,Standard_D4a_V4,Standard_D8a_V4,Standard_D16a_V4,Standard_D32a_V4,Standard_D48a_V4,Standard_D64a_V4,Standard_D96a_V4,Standard_E2a_V4,Standard_E4a_V4,Standard_E8a_V4,Standard_E16a_V4,Standard_E20a_V4,Standard_E32a_V4,Standard_E48a_V4,Standard_E64a_V4,Standard_E96a_V4,Standard_D2ads_V5,Standard_D4ads_V5,Standard_D8ads_V5,Standard_D16ads_V5,Standard_D32ads_V5,Standard_D48ads_V5,Standard_D64ads_V5,Standard_D96ads_V5,Standard_E2ads_V5,Standard_E4ads_V5,Standard_E8ads_V5,Standard_E16ads_V5,Standard_E20ads_V5,Standard_E32ads_V5,Standard_E48ads_V5,Standard_E64ads_V5,Standard_E96ads_V5,Standard_G1,Standard_G2,Standard_G3,Standard_G4,Standard_G5,Standard_F2s_V2,Standard_F4s_V2,Standard_F8s_V2,Standard_F16s_V2,Standard_F32s_V2,Standard_F64s_V2,Standard_F72s_V2,Standard_GS1,Standard_GS2,Standard_GS3,Standard_GS4,Standard_GS5andStandard_NC24. Changing this forces a new resource to be created.
- password String
- The Password associated with the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. - NOTE: If specified, this password must be at least 10 characters in length and must contain at least one digit, one uppercase and one lower case letter, one non-alphanumeric character (except characters ' " ` ). 
- scriptActions List<SparkCluster Roles Zookeeper Node Script Action>
- The script action which will run on the cluster. One or more `script_actions` blocks as defined above.
- sshKeys List<String>
- A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created. NOTE: Either a `password` or one or more `ssh_keys` must be specified - but not both.
- subnetId String
- The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created.
- virtualNetworkId String
- The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created.
- username string
- The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created.
- vmSize string
- The Size of the Virtual Machine which should be used as the Zookeeper Nodes. Possible values are ExtraSmall,Small,Medium,Large,ExtraLarge,A5,A6,A7,A8,A9,A10,A11,Standard_A1_V2,Standard_A2_V2,Standard_A2m_V2,Standard_A3,Standard_A4_V2,Standard_A4m_V2,Standard_A8_V2,Standard_A8m_V2,Standard_D1,Standard_D2,Standard_D3,Standard_D4,Standard_D11,Standard_D12,Standard_D13,Standard_D14,Standard_D1_V2,Standard_D2_V2,Standard_D3_V2,Standard_D4_V2,Standard_D5_V2,Standard_D11_V2,Standard_D12_V2,Standard_D13_V2,Standard_D14_V2,Standard_DS1_V2,Standard_DS2_V2,Standard_DS3_V2,Standard_DS4_V2,Standard_DS5_V2,Standard_DS11_V2,Standard_DS12_V2,Standard_DS13_V2,Standard_DS14_V2,Standard_E2_V3,Standard_E4_V3,Standard_E8_V3,Standard_E16_V3,Standard_E20_V3,Standard_E32_V3,Standard_E64_V3,Standard_E64i_V3,Standard_E2s_V3,Standard_E4s_V3,Standard_E8s_V3,Standard_E16s_V3,Standard_E20s_V3,Standard_E32s_V3,Standard_E64s_V3,Standard_E64is_V3,Standard_D2a_V4,Standard_D4a_V4,Standard_D8a_V4,Standard_D16a_V4,Standard_D32a_V4,Standard_D48a_V4,Standard_D64a_V4,Standard_D96a_V4,Standard_E2a_V4,Standard_E4a_V4,Standard_E8a_V4,Standard_E16a_V4,Standard_E20a_V4,Standard_E32a_V4,Standard_E48a_V4,Standard_E64a_V4,Standard_E96a_V4,Standard_D2ads_V5,Standard_D4ads_V5,Standard_D8ads_V5,Standard_D16ads_V5,Standard_D32ads_V5,Standard_D48ads_V5,Standard_D64ads_V5,Standard_D96ads_V5,Standard_E2ads_V5,Standard_E4ads_V5,Standard_E8ads_V5,Standard_E16ads_V5,Standard_E20ads_V5,Standard_E32ads_V5,Standard_E48ads_V5,Standard_E64ads_V5,Standard_E96ads_V5,Standard_G1,Standard_G2,Standard_G3,Standard_G4,Standard_G5,Standard_F2s_V2,Standard_F4s_V2,Standard_F8s_V2,Standard_F16s_V2,Standard_F32s_V2,Standard_F64s_V2,Standard_F72s_V2,Standard_GS1,Standard_GS2,Standard_GS3,Standard_GS4,Standard_GS5andStandard_NC24. Changing this forces a new resource to be created.
- password string
- The Password associated with the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. - NOTE: If specified, this password must be at least 10 characters in length and must contain at least one digit, one uppercase and one lower case letter, one non-alphanumeric character (except characters ' " ` ). 
- scriptActions SparkCluster Roles Zookeeper Node Script Action[]
- The script action which will run on the cluster. One or more `script_actions` blocks as defined above.
- sshKeys string[]
- A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created. NOTE: Either a `password` or one or more `ssh_keys` must be specified - but not both.
- subnetId string
- The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created.
- virtualNetworkId string
- The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created.
- username str
- The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created.
- vm_size str
- The Size of the Virtual Machine which should be used as the Zookeeper Nodes. Possible values are ExtraSmall,Small,Medium,Large,ExtraLarge,A5,A6,A7,A8,A9,A10,A11,Standard_A1_V2,Standard_A2_V2,Standard_A2m_V2,Standard_A3,Standard_A4_V2,Standard_A4m_V2,Standard_A8_V2,Standard_A8m_V2,Standard_D1,Standard_D2,Standard_D3,Standard_D4,Standard_D11,Standard_D12,Standard_D13,Standard_D14,Standard_D1_V2,Standard_D2_V2,Standard_D3_V2,Standard_D4_V2,Standard_D5_V2,Standard_D11_V2,Standard_D12_V2,Standard_D13_V2,Standard_D14_V2,Standard_DS1_V2,Standard_DS2_V2,Standard_DS3_V2,Standard_DS4_V2,Standard_DS5_V2,Standard_DS11_V2,Standard_DS12_V2,Standard_DS13_V2,Standard_DS14_V2,Standard_E2_V3,Standard_E4_V3,Standard_E8_V3,Standard_E16_V3,Standard_E20_V3,Standard_E32_V3,Standard_E64_V3,Standard_E64i_V3,Standard_E2s_V3,Standard_E4s_V3,Standard_E8s_V3,Standard_E16s_V3,Standard_E20s_V3,Standard_E32s_V3,Standard_E64s_V3,Standard_E64is_V3,Standard_D2a_V4,Standard_D4a_V4,Standard_D8a_V4,Standard_D16a_V4,Standard_D32a_V4,Standard_D48a_V4,Standard_D64a_V4,Standard_D96a_V4,Standard_E2a_V4,Standard_E4a_V4,Standard_E8a_V4,Standard_E16a_V4,Standard_E20a_V4,Standard_E32a_V4,Standard_E48a_V4,Standard_E64a_V4,Standard_E96a_V4,Standard_D2ads_V5,Standard_D4ads_V5,Standard_D8ads_V5,Standard_D16ads_V5,Standard_D32ads_V5,Standard_D48ads_V5,Standard_D64ads_V5,Standard_D96ads_V5,Standard_E2ads_V5,Standard_E4ads_V5,Standard_E8ads_V5,Standard_E16ads_V5,Standard_E20ads_V5,Standard_E32ads_V5,Standard_E48ads_V5,Standard_E64ads_V5,Standard_E96ads_V5,Standard_G1,Standard_G2,Standard_G3,Standard_G4,Standard_G5,Standard_F2s_V2,Standard_F4s_V2,Standard_F8s_V2,Standard_F16s_V2,Standard_F32s_V2,Standard_F64s_V2,Standard_F72s_V2,Standard_GS1,Standard_GS2,Standard_GS3,Standard_GS4,Standard_GS5andStandard_NC24. Changing this forces a new resource to be created.
- password str
- The Password associated with the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. - NOTE: If specified, this password must be at least 10 characters in length and must contain at least one digit, one uppercase and one lower case letter, one non-alphanumeric character (except characters ' " ` ). 
- script_actions Sequence[SparkCluster Roles Zookeeper Node Script Action]
- The script action which will run on the cluster. One or more `script_actions` blocks as defined above.
- ssh_keys Sequence[str]
- A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created. NOTE: Either a `password` or one or more `ssh_keys` must be specified - but not both.
- subnet_id str
- The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created.
- virtual_network_id str
- The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created.
- username String
- The Username of the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created.
- vmSize String
- The Size of the Virtual Machine which should be used as the Zookeeper Nodes. Possible values are ExtraSmall,Small,Medium,Large,ExtraLarge,A5,A6,A7,A8,A9,A10,A11,Standard_A1_V2,Standard_A2_V2,Standard_A2m_V2,Standard_A3,Standard_A4_V2,Standard_A4m_V2,Standard_A8_V2,Standard_A8m_V2,Standard_D1,Standard_D2,Standard_D3,Standard_D4,Standard_D11,Standard_D12,Standard_D13,Standard_D14,Standard_D1_V2,Standard_D2_V2,Standard_D3_V2,Standard_D4_V2,Standard_D5_V2,Standard_D11_V2,Standard_D12_V2,Standard_D13_V2,Standard_D14_V2,Standard_DS1_V2,Standard_DS2_V2,Standard_DS3_V2,Standard_DS4_V2,Standard_DS5_V2,Standard_DS11_V2,Standard_DS12_V2,Standard_DS13_V2,Standard_DS14_V2,Standard_E2_V3,Standard_E4_V3,Standard_E8_V3,Standard_E16_V3,Standard_E20_V3,Standard_E32_V3,Standard_E64_V3,Standard_E64i_V3,Standard_E2s_V3,Standard_E4s_V3,Standard_E8s_V3,Standard_E16s_V3,Standard_E20s_V3,Standard_E32s_V3,Standard_E64s_V3,Standard_E64is_V3,Standard_D2a_V4,Standard_D4a_V4,Standard_D8a_V4,Standard_D16a_V4,Standard_D32a_V4,Standard_D48a_V4,Standard_D64a_V4,Standard_D96a_V4,Standard_E2a_V4,Standard_E4a_V4,Standard_E8a_V4,Standard_E16a_V4,Standard_E20a_V4,Standard_E32a_V4,Standard_E48a_V4,Standard_E64a_V4,Standard_E96a_V4,Standard_D2ads_V5,Standard_D4ads_V5,Standard_D8ads_V5,Standard_D16ads_V5,Standard_D32ads_V5,Standard_D48ads_V5,Standard_D64ads_V5,Standard_D96ads_V5,Standard_E2ads_V5,Standard_E4ads_V5,Standard_E8ads_V5,Standard_E16ads_V5,Standard_E20ads_V5,Standard_E32ads_V5,Standard_E48ads_V5,Standard_E64ads_V5,Standard_E96ads_V5,Standard_G1,Standard_G2,Standard_G3,Standard_G4,Standard_G5,Standard_F2s_V2,Standard_F4s_V2,Standard_F8s_V2,Standard_F16s_V2,Standard_F32s_V2,Standard_F64s_V2,Standard_F72s_V2,Standard_GS1,Standard_GS2,Standard_GS3,Standard_GS4,Standard_GS5andStandard_NC24. Changing this forces a new resource to be created.
- password String
- The Password associated with the local administrator for the Zookeeper Nodes. Changing this forces a new resource to be created. - NOTE: If specified, this password must be at least 10 characters in length and must contain at least one digit, one uppercase and one lower case letter, one non-alphanumeric character (except characters ' " ` ). 
- scriptActions List<Property Map>
- The script action which will run on the cluster. One or more `script_actions` blocks as defined above.
- sshKeys List<String>
- A list of SSH Keys which should be used for the local administrator on the Zookeeper Nodes. Changing this forces a new resource to be created. NOTE: Either a `password` or one or more `ssh_keys` must be specified - but not both.
- subnetId String
- The ID of the Subnet within the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created.
- virtualNetworkId String
- The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created.
SparkClusterRolesZookeeperNodeScriptAction, SparkClusterRolesZookeeperNodeScriptActionArgs              
- Name string
- The name of the script action.
- Uri string
- The URI to the script.
- Parameters string
- The parameters for the script provided.
- Name string
- The name of the script action.
- Uri string
- The URI to the script.
- Parameters string
- The parameters for the script provided.
- name String
- The name of the script action.
- uri String
- The URI to the script.
- parameters String
- The parameters for the script provided.
- name string
- The name of the script action.
- uri string
- The URI to the script.
- parameters string
- The parameters for the script provided.
- name str
- The name of the script action.
- uri str
- The URI to the script.
- parameters str
- The parameters for the script provided.
- name String
- The name of the script action.
- uri String
- The URI to the script.
- parameters String
- The parameters for the script provided.
SparkClusterSecurityProfile, SparkClusterSecurityProfileArgs        
- AaddsResourceId string
- The resource ID of the Azure Active Directory Domain Service. Changing this forces a new resource to be created.
- DomainName string
- The name of the Azure Active Directory Domain. Changing this forces a new resource to be created.
- DomainUserPassword string
- The user password of the Azure Active Directory Domain. Changing this forces a new resource to be created.
- DomainUsername string
- The username of the Azure Active Directory Domain. Changing this forces a new resource to be created.
- LdapsUrls List<string>
- A list of the LDAPS URLs to communicate with the Azure Active Directory. Changing this forces a new resource to be created.
- MsiResourceId string
- The User Assigned Identity for the HDInsight Cluster. Changing this forces a new resource to be created.
- ClusterUsersGroupDns List<string>
- A list of the distinguished names for the cluster user groups. Changing this forces a new resource to be created.
- AaddsResourceId string
- The resource ID of the Azure Active Directory Domain Service. Changing this forces a new resource to be created.
- DomainName string
- The name of the Azure Active Directory Domain. Changing this forces a new resource to be created.
- DomainUserPassword string
- The user password of the Azure Active Directory Domain. Changing this forces a new resource to be created.
- DomainUsername string
- The username of the Azure Active Directory Domain. Changing this forces a new resource to be created.
- LdapsUrls []string
- A list of the LDAPS URLs to communicate with the Azure Active Directory. Changing this forces a new resource to be created.
- MsiResourceId string
- The User Assigned Identity for the HDInsight Cluster. Changing this forces a new resource to be created.
- ClusterUsersGroupDns []string
- A list of the distinguished names for the cluster user groups. Changing this forces a new resource to be created.
- aaddsResourceId String
- The resource ID of the Azure Active Directory Domain Service. Changing this forces a new resource to be created.
- domainName String
- The name of the Azure Active Directory Domain. Changing this forces a new resource to be created.
- domainUserPassword String
- The user password of the Azure Active Directory Domain. Changing this forces a new resource to be created.
- domainUsername String
- The username of the Azure Active Directory Domain. Changing this forces a new resource to be created.
- ldapsUrls List<String>
- A list of the LDAPS URLs to communicate with the Azure Active Directory. Changing this forces a new resource to be created.
- msiResourceId String
- The User Assigned Identity for the HDInsight Cluster. Changing this forces a new resource to be created.
- clusterUsersGroupDns List<String>
- A list of the distinguished names for the cluster user groups. Changing this forces a new resource to be created.
- aaddsResourceId string
- The resource ID of the Azure Active Directory Domain Service. Changing this forces a new resource to be created.
- domainName string
- The name of the Azure Active Directory Domain. Changing this forces a new resource to be created.
- domainUserPassword string
- The user password of the Azure Active Directory Domain. Changing this forces a new resource to be created.
- domainUsername string
- The username of the Azure Active Directory Domain. Changing this forces a new resource to be created.
- ldapsUrls string[]
- A list of the LDAPS URLs to communicate with the Azure Active Directory. Changing this forces a new resource to be created.
- msiResourceId string
- The User Assigned Identity for the HDInsight Cluster. Changing this forces a new resource to be created.
- clusterUsersGroupDns string[]
- A list of the distinguished names for the cluster user groups. Changing this forces a new resource to be created.
- aadds_resource_id str
- The resource ID of the Azure Active Directory Domain Service. Changing this forces a new resource to be created.
- domain_name str
- The name of the Azure Active Directory Domain. Changing this forces a new resource to be created.
- domain_user_password str
- The user password of the Azure Active Directory Domain. Changing this forces a new resource to be created.
- domain_username str
- The username of the Azure Active Directory Domain. Changing this forces a new resource to be created.
- ldaps_urls Sequence[str]
- A list of the LDAPS URLs to communicate with the Azure Active Directory. Changing this forces a new resource to be created.
- msi_resource_id str
- The User Assigned Identity for the HDInsight Cluster. Changing this forces a new resource to be created.
- cluster_users_group_dns Sequence[str]
- A list of the distinguished names for the cluster user groups. Changing this forces a new resource to be created.
- aaddsResourceId String
- The resource ID of the Azure Active Directory Domain Service. Changing this forces a new resource to be created.
- domainName String
- The name of the Azure Active Directory Domain. Changing this forces a new resource to be created.
- domainUserPassword String
- The user password of the Azure Active Directory Domain. Changing this forces a new resource to be created.
- domainUsername String
- The username of the Azure Active Directory Domain. Changing this forces a new resource to be created.
- ldapsUrls List<String>
- A list of the LDAPS URLs to communicate with the Azure Active Directory. Changing this forces a new resource to be created.
- msiResourceId String
- The User Assigned Identity for the HDInsight Cluster. Changing this forces a new resource to be created.
- clusterUsersGroupDns List<String>
- A list of the distinguished names for the cluster user groups. Changing this forces a new resource to be created.
SparkClusterStorageAccount, SparkClusterStorageAccountArgs        
- IsDefault bool
- Is this the Default Storage Account for the HDInsight Hadoop Cluster? Changing this forces a new resource to be created. NOTE: One of the `storage_account` or `storage_account_gen2` blocks must be marked as the default.
- StorageAccountKey string
- The Access Key which should be used to connect to the Storage Account. Changing this forces a new resource to be created.
- StorageContainerId string
- The ID of the Storage Container. Changing this forces a new resource to be created. NOTE: This can be obtained from the `id` of the `azure.storage.Container` resource.
- StorageResourceId string
- The ID of the Storage Account. Changing this forces a new resource to be created.
- IsDefault bool
- Is this the Default Storage Account for the HDInsight Hadoop Cluster? Changing this forces a new resource to be created. NOTE: One of the `storage_account` or `storage_account_gen2` blocks must be marked as the default.
- StorageAccountKey string
- The Access Key which should be used to connect to the Storage Account. Changing this forces a new resource to be created.
- StorageContainerId string
- The ID of the Storage Container. Changing this forces a new resource to be created. NOTE: This can be obtained from the `id` of the `azure.storage.Container` resource.
- StorageResourceId string
- The ID of the Storage Account. Changing this forces a new resource to be created.
- isDefault Boolean
- Is this the Default Storage Account for the HDInsight Hadoop Cluster? Changing this forces a new resource to be created. NOTE: One of the `storage_account` or `storage_account_gen2` blocks must be marked as the default.
- storageAccountKey String
- The Access Key which should be used to connect to the Storage Account. Changing this forces a new resource to be created.
- storageContainerId String
- The ID of the Storage Container. Changing this forces a new resource to be created. NOTE: This can be obtained from the `id` of the `azure.storage.Container` resource.
- storageResourceId String
- The ID of the Storage Account. Changing this forces a new resource to be created.
- isDefault boolean
- Is this the Default Storage Account for the HDInsight Hadoop Cluster? Changing this forces a new resource to be created. NOTE: One of the `storage_account` or `storage_account_gen2` blocks must be marked as the default.
- storageAccountKey string
- The Access Key which should be used to connect to the Storage Account. Changing this forces a new resource to be created.
- storageContainerId string
- The ID of the Storage Container. Changing this forces a new resource to be created. NOTE: This can be obtained from the `id` of the `azure.storage.Container` resource.
- storageResourceId string
- The ID of the Storage Account. Changing this forces a new resource to be created.
- is_default bool
- Is this the Default Storage Account for the HDInsight Hadoop Cluster? Changing this forces a new resource to be created. NOTE: One of the `storage_account` or `storage_account_gen2` blocks must be marked as the default.
- storage_account_key str
- The Access Key which should be used to connect to the Storage Account. Changing this forces a new resource to be created.
- storage_container_id str
- The ID of the Storage Container. Changing this forces a new resource to be created. NOTE: This can be obtained from the `id` of the `azure.storage.Container` resource.
- storage_resource_id str
- The ID of the Storage Account. Changing this forces a new resource to be created.
- isDefault Boolean
- Is this the Default Storage Account for the HDInsight Hadoop Cluster? Changing this forces a new resource to be created. NOTE: One of the `storage_account` or `storage_account_gen2` blocks must be marked as the default.
- storageAccountKey String
- The Access Key which should be used to connect to the Storage Account. Changing this forces a new resource to be created.
- storageContainerId String
- The ID of the Storage Container. Changing this forces a new resource to be created. NOTE: This can be obtained from the `id` of the `azure.storage.Container` resource.
- storageResourceId String
- The ID of the Storage Account. Changing this forces a new resource to be created.
SparkClusterStorageAccountGen2, SparkClusterStorageAccountGen2Args          
- FilesystemId string
- The ID of the Gen2 Filesystem. Changing this forces a new resource to be created.
- IsDefault bool
- Is this the Default Storage Account for the HDInsight Hadoop Cluster? Changing this forces a new resource to be created. NOTE: One of the `storage_account` or `storage_account_gen2` blocks must be marked as the default.
- ManagedIdentityResourceId string
- The ID of Managed Identity to use for accessing the Gen2 filesystem. Changing this forces a new resource to be created. NOTE: This can be obtained from the `id` of the `azure.storage.Container` resource.
- StorageResourceId string
- The ID of the Storage Account. Changing this forces a new resource to be created.
- FilesystemId string
- The ID of the Gen2 Filesystem. Changing this forces a new resource to be created.
- IsDefault bool
- Is this the Default Storage Account for the HDInsight Hadoop Cluster? Changing this forces a new resource to be created. NOTE: One of the `storage_account` or `storage_account_gen2` blocks must be marked as the default.
- ManagedIdentityResourceId string
- The ID of Managed Identity to use for accessing the Gen2 filesystem. Changing this forces a new resource to be created. NOTE: This can be obtained from the `id` of the `azure.storage.Container` resource.
- StorageResourceId string
- The ID of the Storage Account. Changing this forces a new resource to be created.
- filesystemId String
- The ID of the Gen2 Filesystem. Changing this forces a new resource to be created.
- isDefault Boolean
- Is this the Default Storage Account for the HDInsight Hadoop Cluster? Changing this forces a new resource to be created. NOTE: One of the `storage_account` or `storage_account_gen2` blocks must be marked as the default.
- managedIdentityResourceId String
- The ID of Managed Identity to use for accessing the Gen2 filesystem. Changing this forces a new resource to be created. NOTE: This can be obtained from the `id` of the `azure.storage.Container` resource.
- storageResourceId String
- The ID of the Storage Account. Changing this forces a new resource to be created.
- filesystemId string
- The ID of the Gen2 Filesystem. Changing this forces a new resource to be created.
- isDefault boolean
- Is this the Default Storage Account for the HDInsight Hadoop Cluster? Changing this forces a new resource to be created. NOTE: One of the `storage_account` or `storage_account_gen2` blocks must be marked as the default.
- managedIdentityResourceId string
- The ID of Managed Identity to use for accessing the Gen2 filesystem. Changing this forces a new resource to be created. NOTE: This can be obtained from the `id` of the `azure.storage.Container` resource.
- storageResourceId string
- The ID of the Storage Account. Changing this forces a new resource to be created.
- filesystem_id str
- The ID of the Gen2 Filesystem. Changing this forces a new resource to be created.
- is_default bool
- Is this the Default Storage Account for the HDInsight Hadoop Cluster? Changing this forces a new resource to be created. NOTE: One of the `storage_account` or `storage_account_gen2` blocks must be marked as the default.
- managed_identity_resource_id str
- The ID of Managed Identity to use for accessing the Gen2 filesystem. Changing this forces a new resource to be created. NOTE: This can be obtained from the `id` of the `azure.storage.Container` resource.
- storage_resource_id str
- The ID of the Storage Account. Changing this forces a new resource to be created.
- filesystemId String
- The ID of the Gen2 Filesystem. Changing this forces a new resource to be created.
- isDefault Boolean
- Is this the Default Storage Account for the HDInsight Hadoop Cluster? Changing this forces a new resource to be created. NOTE: One of the `storage_account` or `storage_account_gen2` blocks must be marked as the default.
- managedIdentityResourceId String
- The ID of Managed Identity to use for accessing the Gen2 filesystem. Changing this forces a new resource to be created. NOTE: This can be obtained from the `id` of the `azure.storage.Container` resource.
- storageResourceId String
- The ID of the Storage Account. Changing this forces a new resource to be created.
Import
HDInsight Spark Clusters can be imported using the resource id, e.g.
$ pulumi import azure:hdinsight/sparkCluster:SparkCluster example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.HDInsight/clusters/cluster1
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- Azure Classic pulumi/pulumi-azure
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the `azurerm` Terraform Provider.