
databricks.Mount

Databricks v1.50.2 published on Tuesday, Sep 24, 2024 by Pulumi

    Import

    Note: Importing this resource is not currently supported.

    Create Mount Resource

    Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

    Constructor syntax

    new Mount(name: string, args?: MountArgs, opts?: CustomResourceOptions);
    @overload
    def Mount(resource_name: str,
              args: Optional[MountArgs] = None,
              opts: Optional[ResourceOptions] = None)
    
    @overload
    def Mount(resource_name: str,
              opts: Optional[ResourceOptions] = None,
              abfs: Optional[MountAbfsArgs] = None,
              adl: Optional[MountAdlArgs] = None,
              cluster_id: Optional[str] = None,
              encryption_type: Optional[str] = None,
              extra_configs: Optional[Mapping[str, str]] = None,
              gs: Optional[MountGsArgs] = None,
              name: Optional[str] = None,
              resource_id: Optional[str] = None,
              s3: Optional[MountS3Args] = None,
              uri: Optional[str] = None,
              wasb: Optional[MountWasbArgs] = None)
    func NewMount(ctx *Context, name string, args *MountArgs, opts ...ResourceOption) (*Mount, error)
    public Mount(string name, MountArgs? args = null, CustomResourceOptions? opts = null)
    public Mount(String name, MountArgs args)
    public Mount(String name, MountArgs args, CustomResourceOptions options)
    
    type: databricks:Mount
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    

    Parameters

    The constructor parameters are the same in every language SDK; only the identifier style and option types differ:

    name / resource_name (string)
    The unique name of the resource.
    args (MountArgs)
    The arguments to resource properties.
    opts / options (CustomResourceOptions, ResourceOptions, or ResourceOption)
    Bag of options to control resource's behavior.
    ctx (Context, Go only)
    Context object for the current deployment.

    Constructor example

    The following reference example uses placeholder values for all input properties.

    var mountResource = new Databricks.Mount("mountResource", new()
    {
        Abfs = new Databricks.Inputs.MountAbfsArgs
        {
            ClientId = "string",
            ClientSecretKey = "string",
            ClientSecretScope = "string",
            InitializeFileSystem = false,
            ContainerName = "string",
            Directory = "string",
            StorageAccountName = "string",
            TenantId = "string",
        },
        Adl = new Databricks.Inputs.MountAdlArgs
        {
            ClientId = "string",
            ClientSecretKey = "string",
            ClientSecretScope = "string",
            Directory = "string",
            SparkConfPrefix = "string",
            StorageResourceName = "string",
            TenantId = "string",
        },
        ClusterId = "string",
        EncryptionType = "string",
        ExtraConfigs = 
        {
            { "string", "string" },
        },
        Gs = new Databricks.Inputs.MountGsArgs
        {
            BucketName = "string",
            ServiceAccount = "string",
        },
        Name = "string",
        ResourceId = "string",
        S3 = new Databricks.Inputs.MountS3Args
        {
            BucketName = "string",
            InstanceProfile = "string",
        },
        Uri = "string",
        Wasb = new Databricks.Inputs.MountWasbArgs
        {
            AuthType = "string",
            TokenSecretKey = "string",
            TokenSecretScope = "string",
            ContainerName = "string",
            Directory = "string",
            StorageAccountName = "string",
        },
    });
    
    example, err := databricks.NewMount(ctx, "mountResource", &databricks.MountArgs{
    	Abfs: &databricks.MountAbfsArgs{
    		ClientId:             pulumi.String("string"),
    		ClientSecretKey:      pulumi.String("string"),
    		ClientSecretScope:    pulumi.String("string"),
    		InitializeFileSystem: pulumi.Bool(false),
    		ContainerName:        pulumi.String("string"),
    		Directory:            pulumi.String("string"),
    		StorageAccountName:   pulumi.String("string"),
    		TenantId:             pulumi.String("string"),
    	},
    	Adl: &databricks.MountAdlArgs{
    		ClientId:            pulumi.String("string"),
    		ClientSecretKey:     pulumi.String("string"),
    		ClientSecretScope:   pulumi.String("string"),
    		Directory:           pulumi.String("string"),
    		SparkConfPrefix:     pulumi.String("string"),
    		StorageResourceName: pulumi.String("string"),
    		TenantId:            pulumi.String("string"),
    	},
    	ClusterId:      pulumi.String("string"),
    	EncryptionType: pulumi.String("string"),
    	ExtraConfigs: pulumi.StringMap{
    		"string": pulumi.String("string"),
    	},
    	Gs: &databricks.MountGsArgs{
    		BucketName:     pulumi.String("string"),
    		ServiceAccount: pulumi.String("string"),
    	},
    	Name:       pulumi.String("string"),
    	ResourceId: pulumi.String("string"),
    	S3: &databricks.MountS3Args{
    		BucketName:      pulumi.String("string"),
    		InstanceProfile: pulumi.String("string"),
    	},
    	Uri: pulumi.String("string"),
    	Wasb: &databricks.MountWasbArgs{
    		AuthType:           pulumi.String("string"),
    		TokenSecretKey:     pulumi.String("string"),
    		TokenSecretScope:   pulumi.String("string"),
    		ContainerName:      pulumi.String("string"),
    		Directory:          pulumi.String("string"),
    		StorageAccountName: pulumi.String("string"),
    	},
    })
    
    var mountResource = new Mount("mountResource", MountArgs.builder()
        .abfs(MountAbfsArgs.builder()
            .clientId("string")
            .clientSecretKey("string")
            .clientSecretScope("string")
            .initializeFileSystem(false)
            .containerName("string")
            .directory("string")
            .storageAccountName("string")
            .tenantId("string")
            .build())
        .adl(MountAdlArgs.builder()
            .clientId("string")
            .clientSecretKey("string")
            .clientSecretScope("string")
            .directory("string")
            .sparkConfPrefix("string")
            .storageResourceName("string")
            .tenantId("string")
            .build())
        .clusterId("string")
        .encryptionType("string")
        .extraConfigs(Map.of("string", "string"))
        .gs(MountGsArgs.builder()
            .bucketName("string")
            .serviceAccount("string")
            .build())
        .name("string")
        .resourceId("string")
        .s3(MountS3Args.builder()
            .bucketName("string")
            .instanceProfile("string")
            .build())
        .uri("string")
        .wasb(MountWasbArgs.builder()
            .authType("string")
            .tokenSecretKey("string")
            .tokenSecretScope("string")
            .containerName("string")
            .directory("string")
            .storageAccountName("string")
            .build())
        .build());
    
    mount_resource = databricks.Mount("mountResource",
        abfs=databricks.MountAbfsArgs(
            client_id="string",
            client_secret_key="string",
            client_secret_scope="string",
            initialize_file_system=False,
            container_name="string",
            directory="string",
            storage_account_name="string",
            tenant_id="string",
        ),
        adl=databricks.MountAdlArgs(
            client_id="string",
            client_secret_key="string",
            client_secret_scope="string",
            directory="string",
            spark_conf_prefix="string",
            storage_resource_name="string",
            tenant_id="string",
        ),
        cluster_id="string",
        encryption_type="string",
        extra_configs={
            "string": "string",
        },
        gs=databricks.MountGsArgs(
            bucket_name="string",
            service_account="string",
        ),
        name="string",
        resource_id="string",
        s3=databricks.MountS3Args(
            bucket_name="string",
            instance_profile="string",
        ),
        uri="string",
        wasb=databricks.MountWasbArgs(
            auth_type="string",
            token_secret_key="string",
            token_secret_scope="string",
            container_name="string",
            directory="string",
            storage_account_name="string",
        ))
    
    const mountResource = new databricks.Mount("mountResource", {
        abfs: {
            clientId: "string",
            clientSecretKey: "string",
            clientSecretScope: "string",
            initializeFileSystem: false,
            containerName: "string",
            directory: "string",
            storageAccountName: "string",
            tenantId: "string",
        },
        adl: {
            clientId: "string",
            clientSecretKey: "string",
            clientSecretScope: "string",
            directory: "string",
            sparkConfPrefix: "string",
            storageResourceName: "string",
            tenantId: "string",
        },
        clusterId: "string",
        encryptionType: "string",
        extraConfigs: {
            string: "string",
        },
        gs: {
            bucketName: "string",
            serviceAccount: "string",
        },
        name: "string",
        resourceId: "string",
        s3: {
            bucketName: "string",
            instanceProfile: "string",
        },
        uri: "string",
        wasb: {
            authType: "string",
            tokenSecretKey: "string",
            tokenSecretScope: "string",
            containerName: "string",
            directory: "string",
            storageAccountName: "string",
        },
    });
    
    type: databricks:Mount
    properties:
        abfs:
            clientId: string
            clientSecretKey: string
            clientSecretScope: string
            containerName: string
            directory: string
            initializeFileSystem: false
            storageAccountName: string
            tenantId: string
        adl:
            clientId: string
            clientSecretKey: string
            clientSecretScope: string
            directory: string
            sparkConfPrefix: string
            storageResourceName: string
            tenantId: string
        clusterId: string
        encryptionType: string
        extraConfigs:
            string: string
        gs:
            bucketName: string
            serviceAccount: string
        name: string
        resourceId: string
        s3:
            bucketName: string
            instanceProfile: string
        uri: string
        wasb:
            authType: string
            containerName: string
            directory: string
            storageAccountName: string
            tokenSecretKey: string
            tokenSecretScope: string
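
    Beyond the placeholder reference above, the sketch below shows what a concrete mount might look like in TypeScript. The cluster ID, bucket name, and instance profile ARN are hypothetical values, not taken from this page; substitute resources that exist in your own workspace and cloud account.

    import * as databricks from "@pulumi/databricks";

    // Mount an S3 bucket into DBFS using an instance profile that is already
    // registered in the workspace. All identifiers below are placeholders.
    const rawData = new databricks.Mount("raw-data", {
        clusterId: "0123-456789-abcde123", // existing cluster used to run the mount command
        s3: {
            bucketName: "my-company-raw-data",
            instanceProfile: "arn:aws:iam::123456789012:instance-profile/databricks-s3-access",
        },
    });

    // `source` is the resulting HDFS-compatible URL, e.g. an s3a:// path.
    export const rawDataSource = rawData.source;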
    

    Mount Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    The Mount resource accepts the following input properties: abfs, adl, clusterId, encryptionType, extraConfigs, gs, name, resourceId, s3, uri, and wasb. All of them are optional; see the constructor signatures above for the per-language names and types of each property.

    Outputs

    All input properties are implicitly available as output properties. Additionally, the Mount resource produces the following output properties:

    The same two output properties are exposed in every language SDK:

    Id / id (string)
    The provider-assigned unique ID for this managed resource.
    Source / source (string)
    (String) HDFS-compatible URL.
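
    Because the mount only exists after the deployment runs, both id and source are Pulumi outputs rather than plain strings. The sketch below (again with hypothetical cluster, bucket, and service-account values) shows how to read them, using pulumi.interpolate to build a derived path:

    import * as pulumi from "@pulumi/pulumi";
    import * as databricks from "@pulumi/databricks";

    // Placeholder GCS mount; adjust the cluster ID, bucket, and service account.
    const gcsMount = new databricks.Mount("gcs-mount", {
        clusterId: "0123-456789-abcde123",
        gs: {
            bucketName: "my-gcs-bucket",
            serviceAccount: "sa-name@my-project.iam.gserviceaccount.com",
        },
    });

    // Outputs resolve only after the resource is created, so compose them with interpolate.
    export const mountId = gcsMount.id;
    export const tablesPath = pulumi.interpolate`${gcsMount.source}/tables`;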

    Look up Existing Mount Resource

    Get an existing Mount resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: MountState, opts?: CustomResourceOptions): Mount
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            abfs: Optional[MountAbfsArgs] = None,
            adl: Optional[MountAdlArgs] = None,
            cluster_id: Optional[str] = None,
            encryption_type: Optional[str] = None,
            extra_configs: Optional[Mapping[str, str]] = None,
            gs: Optional[MountGsArgs] = None,
            name: Optional[str] = None,
            resource_id: Optional[str] = None,
            s3: Optional[MountS3Args] = None,
            source: Optional[str] = None,
            uri: Optional[str] = None,
            wasb: Optional[MountWasbArgs] = None) -> Mount
    func GetMount(ctx *Context, name string, id IDInput, state *MountState, opts ...ResourceOption) (*Mount, error)
    public static Mount Get(string name, Input<string> id, MountState? state, CustomResourceOptions? opts = null)
    public static Mount get(String name, Output<String> id, MountState state, CustomResourceOptions options)
    Resource lookup is not supported in YAML
    The lookup parameters are the same in every language SDK:

    name / resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts / options
    A bag of options that control this resource's behavior.
    The following state arguments are supported (shown here with the C# types; the other SDKs use their equivalent map and complex types, for example map[string]string in Go, Map<String,String> in Java, and {[key: string]: string} in TypeScript):

    Abfs MountAbfs
    Adl MountAdl
    ClusterId string
    EncryptionType string
    ExtraConfigs Dictionary<string, string>
    Gs MountGs
    Name string
    ResourceId string
    S3 MountS3
    Source string
    (String) HDFS-compatible URL
    Uri string
    Wasb MountWasb
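
    As a sketch of the lookup API above, an existing mount can be rehydrated from its logical name and provider ID, and its outputs read without redeclaring its inputs. The name and ID below are placeholders:

    import * as databricks from "@pulumi/databricks";

    // Rehydrate an existing Mount by its logical name and provider-assigned ID.
    // "existing-mount-id" is a placeholder; use the real ID from your stack's state.
    const existing = databricks.Mount.get("existing-mount", "existing-mount-id");

    export const existingSource = existing.source;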

    Supporting Types

    MountAbfs, MountAbfsArgs

    MountAdl, MountAdlArgs

    MountGs, MountGsArgs

    MountS3, MountS3Args

    MountWasb, MountWasbArgs

    Package Details

    Repository
    databricks pulumi/pulumi-databricks
    License
    Apache-2.0
    Notes
    This Pulumi package is based on the databricks Terraform Provider.