Databricks v1.50.2 published on Tuesday, Sep 24, 2024 by Pulumi
databricks.Mount
Import
Note: Importing this resource is not currently supported.
Create Mount Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
TypeScript
new Mount(name: string, args?: MountArgs, opts?: CustomResourceOptions);
Python
@overload
def Mount(resource_name: str,
          args: Optional[MountArgs] = None,
          opts: Optional[ResourceOptions] = None)
@overload
def Mount(resource_name: str,
          opts: Optional[ResourceOptions] = None,
          abfs: Optional[MountAbfsArgs] = None,
          adl: Optional[MountAdlArgs] = None,
          cluster_id: Optional[str] = None,
          encryption_type: Optional[str] = None,
          extra_configs: Optional[Mapping[str, str]] = None,
          gs: Optional[MountGsArgs] = None,
          name: Optional[str] = None,
          resource_id: Optional[str] = None,
          s3: Optional[MountS3Args] = None,
          uri: Optional[str] = None,
          wasb: Optional[MountWasbArgs] = None)
Go
func NewMount(ctx *Context, name string, args *MountArgs, opts ...ResourceOption) (*Mount, error)
C#
public Mount(string name, MountArgs? args = null, CustomResourceOptions? opts = null)
YAML
type: databricks:Mount
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
TypeScript
- name string: The unique name of the resource.
- args MountArgs: The arguments to resource properties.
- opts CustomResourceOptions: Bag of options to control resource's behavior.
Python
- resource_name str: The unique name of the resource.
- args MountArgs: The arguments to resource properties.
- opts ResourceOptions: Bag of options to control resource's behavior.
Go
- ctx Context: Context object for the current deployment.
- name string: The unique name of the resource.
- args MountArgs: The arguments to resource properties.
- opts ResourceOption: Bag of options to control resource's behavior.
C#
- name string: The unique name of the resource.
- args MountArgs: The arguments to resource properties.
- opts CustomResourceOptions: Bag of options to control resource's behavior.
Java
- name String: The unique name of the resource.
- args MountArgs: The arguments to resource properties.
- options CustomResourceOptions: Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
C#
var mountResource = new Databricks.Mount("mountResource", new()
{
Abfs = new Databricks.Inputs.MountAbfsArgs
{
ClientId = "string",
ClientSecretKey = "string",
ClientSecretScope = "string",
InitializeFileSystem = false,
ContainerName = "string",
Directory = "string",
StorageAccountName = "string",
TenantId = "string",
},
Adl = new Databricks.Inputs.MountAdlArgs
{
ClientId = "string",
ClientSecretKey = "string",
ClientSecretScope = "string",
Directory = "string",
SparkConfPrefix = "string",
StorageResourceName = "string",
TenantId = "string",
},
ClusterId = "string",
EncryptionType = "string",
ExtraConfigs =
{
{ "string", "string" },
},
Gs = new Databricks.Inputs.MountGsArgs
{
BucketName = "string",
ServiceAccount = "string",
},
Name = "string",
ResourceId = "string",
S3 = new Databricks.Inputs.MountS3Args
{
BucketName = "string",
InstanceProfile = "string",
},
Uri = "string",
Wasb = new Databricks.Inputs.MountWasbArgs
{
AuthType = "string",
TokenSecretKey = "string",
TokenSecretScope = "string",
ContainerName = "string",
Directory = "string",
StorageAccountName = "string",
},
});
Go
example, err := databricks.NewMount(ctx, "mountResource", &databricks.MountArgs{
Abfs: &databricks.MountAbfsArgs{
ClientId: pulumi.String("string"),
ClientSecretKey: pulumi.String("string"),
ClientSecretScope: pulumi.String("string"),
InitializeFileSystem: pulumi.Bool(false),
ContainerName: pulumi.String("string"),
Directory: pulumi.String("string"),
StorageAccountName: pulumi.String("string"),
TenantId: pulumi.String("string"),
},
Adl: &databricks.MountAdlArgs{
ClientId: pulumi.String("string"),
ClientSecretKey: pulumi.String("string"),
ClientSecretScope: pulumi.String("string"),
Directory: pulumi.String("string"),
SparkConfPrefix: pulumi.String("string"),
StorageResourceName: pulumi.String("string"),
TenantId: pulumi.String("string"),
},
ClusterId: pulumi.String("string"),
EncryptionType: pulumi.String("string"),
ExtraConfigs: pulumi.StringMap{
"string": pulumi.String("string"),
},
Gs: &databricks.MountGsArgs{
BucketName: pulumi.String("string"),
ServiceAccount: pulumi.String("string"),
},
Name: pulumi.String("string"),
ResourceId: pulumi.String("string"),
S3: &databricks.MountS3Args{
BucketName: pulumi.String("string"),
InstanceProfile: pulumi.String("string"),
},
Uri: pulumi.String("string"),
Wasb: &databricks.MountWasbArgs{
AuthType: pulumi.String("string"),
TokenSecretKey: pulumi.String("string"),
TokenSecretScope: pulumi.String("string"),
ContainerName: pulumi.String("string"),
Directory: pulumi.String("string"),
StorageAccountName: pulumi.String("string"),
},
})
Java
var mountResource = new Mount("mountResource", MountArgs.builder()
.abfs(MountAbfsArgs.builder()
.clientId("string")
.clientSecretKey("string")
.clientSecretScope("string")
.initializeFileSystem(false)
.containerName("string")
.directory("string")
.storageAccountName("string")
.tenantId("string")
.build())
.adl(MountAdlArgs.builder()
.clientId("string")
.clientSecretKey("string")
.clientSecretScope("string")
.directory("string")
.sparkConfPrefix("string")
.storageResourceName("string")
.tenantId("string")
.build())
.clusterId("string")
.encryptionType("string")
.extraConfigs(Map.of("string", "string"))
.gs(MountGsArgs.builder()
.bucketName("string")
.serviceAccount("string")
.build())
.name("string")
.resourceId("string")
.s3(MountS3Args.builder()
.bucketName("string")
.instanceProfile("string")
.build())
.uri("string")
.wasb(MountWasbArgs.builder()
.authType("string")
.tokenSecretKey("string")
.tokenSecretScope("string")
.containerName("string")
.directory("string")
.storageAccountName("string")
.build())
.build());
Python
mount_resource = databricks.Mount("mountResource",
abfs=databricks.MountAbfsArgs(
client_id="string",
client_secret_key="string",
client_secret_scope="string",
initialize_file_system=False,
container_name="string",
directory="string",
storage_account_name="string",
tenant_id="string",
),
adl=databricks.MountAdlArgs(
client_id="string",
client_secret_key="string",
client_secret_scope="string",
directory="string",
spark_conf_prefix="string",
storage_resource_name="string",
tenant_id="string",
),
cluster_id="string",
encryption_type="string",
extra_configs={
"string": "string",
},
gs=databricks.MountGsArgs(
bucket_name="string",
service_account="string",
),
name="string",
resource_id="string",
s3=databricks.MountS3Args(
bucket_name="string",
instance_profile="string",
),
uri="string",
wasb=databricks.MountWasbArgs(
auth_type="string",
token_secret_key="string",
token_secret_scope="string",
container_name="string",
directory="string",
storage_account_name="string",
))
TypeScript
const mountResource = new databricks.Mount("mountResource", {
abfs: {
clientId: "string",
clientSecretKey: "string",
clientSecretScope: "string",
initializeFileSystem: false,
containerName: "string",
directory: "string",
storageAccountName: "string",
tenantId: "string",
},
adl: {
clientId: "string",
clientSecretKey: "string",
clientSecretScope: "string",
directory: "string",
sparkConfPrefix: "string",
storageResourceName: "string",
tenantId: "string",
},
clusterId: "string",
encryptionType: "string",
extraConfigs: {
string: "string",
},
gs: {
bucketName: "string",
serviceAccount: "string",
},
name: "string",
resourceId: "string",
s3: {
bucketName: "string",
instanceProfile: "string",
},
uri: "string",
wasb: {
authType: "string",
tokenSecretKey: "string",
tokenSecretScope: "string",
containerName: "string",
directory: "string",
storageAccountName: "string",
},
});
YAML
type: databricks:Mount
properties:
abfs:
clientId: string
clientSecretKey: string
clientSecretScope: string
containerName: string
directory: string
initializeFileSystem: false
storageAccountName: string
tenantId: string
adl:
clientId: string
clientSecretKey: string
clientSecretScope: string
directory: string
sparkConfPrefix: string
storageResourceName: string
tenantId: string
clusterId: string
encryptionType: string
extraConfigs:
string: string
gs:
bucketName: string
serviceAccount: string
name: string
resourceId: string
s3:
bucketName: string
instanceProfile: string
uri: string
wasb:
authType: string
containerName: string
directory: string
storageAccountName: string
tokenSecretKey: string
tokenSecretScope: string
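The reference examples above set every input at once with placeholder values; a real mount targets exactly one storage backend. The following TypeScript sketch mounts an S3 bucket, assuming an existing cluster and instance profile (the cluster ID, bucket name, and ARN below are hypothetical):

import * as databricks from "@pulumi/databricks";

// Mounts s3://my-analytics-bucket at /mnt/analytics-data on the given cluster.
// All identifiers below are placeholders for illustration only.
const s3Mount = new databricks.Mount("s3-mount", {
    name: "analytics-data",                // mount point becomes /mnt/analytics-data
    clusterId: "0123-456789-abcdefgh",     // an existing cluster with S3 access
    s3: {
        bucketName: "my-analytics-bucket",
        instanceProfile: "arn:aws:iam::123456789012:instance-profile/databricks",
    },
});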
Mount Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
The Mount resource accepts the following input properties:
Go
- Abfs MountAbfsArgs
- Adl MountAdlArgs
- ClusterId string
- EncryptionType string
- ExtraConfigs map[string]string
- Gs MountGsArgs
- Name string
- ResourceId string
- S3 MountS3Args
- Uri string
- Wasb MountWasbArgs
Python
- abfs MountAbfsArgs
- adl MountAdlArgs
- cluster_id str
- encryption_type str
- extra_configs Mapping[str, str]
- gs MountGsArgs
- name str
- resource_id str
- s3 MountS3Args
- uri str
- wasb MountWasbArgs
YAML
- abfs Property Map
- adl Property Map
- clusterId String
- encryptionType String
- extraConfigs Map<String>
- gs Property Map
- name String
- resourceId String
- s3 Property Map
- uri String
- wasb Property Map
Outputs
All input properties are implicitly available as output properties. Additionally, the Mount resource produces the following output properties:
- Id string: The provider-assigned unique ID for this managed resource.
- Source string: (String) HDFS-compatible url
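Because source is computed during creation, it is only available as an Output. A minimal TypeScript sketch of consuming it (the cluster ID and bucket are hypothetical):

import * as databricks from "@pulumi/databricks";

const mount = new databricks.Mount("example", {
    clusterId: "0123-456789-abcdefgh",          // assumed existing cluster
    s3: { bucketName: "my-analytics-bucket" },  // assumed existing bucket
});

// `source` resolves to the HDFS-compatible URL once the mount exists.
export const mountSource = mount.source;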
Look up Existing Mount Resource
Get an existing Mount resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
TypeScript
public static get(name: string, id: Input<ID>, state?: MountState, opts?: CustomResourceOptions): Mount
Python
@staticmethod
def get(resource_name: str,
        id: str,
        opts: Optional[ResourceOptions] = None,
        abfs: Optional[MountAbfsArgs] = None,
        adl: Optional[MountAdlArgs] = None,
        cluster_id: Optional[str] = None,
        encryption_type: Optional[str] = None,
        extra_configs: Optional[Mapping[str, str]] = None,
        gs: Optional[MountGsArgs] = None,
        name: Optional[str] = None,
        resource_id: Optional[str] = None,
        s3: Optional[MountS3Args] = None,
        source: Optional[str] = None,
        uri: Optional[str] = None,
        wasb: Optional[MountWasbArgs] = None) -> Mount
Go
func GetMount(ctx *Context, name string, id IDInput, state *MountState, opts ...ResourceOption) (*Mount, error)
C#
public static Mount Get(string name, Input<string> id, MountState? state, CustomResourceOptions? opts = null)
Java
public static Mount get(String name, Output<String> id, MountState state, CustomResourceOptions options)
YAML
Resource lookup is not supported in YAML
TypeScript
- name: The unique name of the resulting resource.
- id: The unique provider ID of the resource to lookup.
- state: Any extra arguments used during the lookup.
- opts: A bag of options that control this resource's behavior.
Python
- resource_name: The unique name of the resulting resource.
- id: The unique provider ID of the resource to lookup.
Go
- name: The unique name of the resulting resource.
- id: The unique provider ID of the resource to lookup.
- state: Any extra arguments used during the lookup.
- opts: A bag of options that control this resource's behavior.
C#
- name: The unique name of the resulting resource.
- id: The unique provider ID of the resource to lookup.
- state: Any extra arguments used during the lookup.
- opts: A bag of options that control this resource's behavior.
Java
- name: The unique name of the resulting resource.
- id: The unique provider ID of the resource to lookup.
- state: Any extra arguments used during the lookup.
- opts: A bag of options that control this resource's behavior.
Go
- Abfs MountAbfsArgs
- Adl MountAdlArgs
- ClusterId string
- EncryptionType string
- ExtraConfigs map[string]string
- Gs MountGsArgs
- Name string
- ResourceId string
- S3 MountS3Args
- Source string: (String) HDFS-compatible url
- Uri string
- Wasb MountWasbArgs
Python
- abfs MountAbfsArgs
- adl MountAdlArgs
- cluster_id str
- encryption_type str
- extra_configs Mapping[str, str]
- gs MountGsArgs
- name str
- resource_id str
- s3 MountS3Args
- source str: (String) HDFS-compatible url
- uri str
- wasb MountWasbArgs
YAML
- abfs Property Map
- adl Property Map
- clusterId String
- encryptionType String
- extraConfigs Map<String>
- gs Property Map
- name String
- resourceId String
- s3 Property Map
- source String: (String) HDFS-compatible url
- uri String
- wasb Property Map
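As a TypeScript sketch of the lookup, assuming a Mount with provider ID "analytics-data" already exists in the stack's state (both names below are placeholders):

import * as databricks from "@pulumi/databricks";

// Rehydrates an existing Mount by logical name and provider ID; no new
// resource is created, and all outputs become available on the handle.
const existing = databricks.Mount.get("existing-mount", "analytics-data");
export const existingSource = existing.source;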
Supporting Types
MountAbfs, MountAbfsArgs
C#
- ClientId string
- ClientSecretKey string
- ClientSecretScope string
- InitializeFileSystem bool
- ContainerName string
- Directory string
- StorageAccountName string
- TenantId string
Go
- ClientId string
- ClientSecretKey string
- ClientSecretScope string
- InitializeFileSystem bool
- ContainerName string
- Directory string
- StorageAccountName string
- TenantId string
Java
- clientId String
- clientSecretKey String
- clientSecretScope String
- initializeFileSystem Boolean
- containerName String
- directory String
- storageAccountName String
- tenantId String
TypeScript
- clientId string
- clientSecretKey string
- clientSecretScope string
- initializeFileSystem boolean
- containerName string
- directory string
- storageAccountName string
- tenantId string
Python
- client_id str
- client_secret_key str
- client_secret_scope str
- initialize_file_system bool
- container_name str
- directory str
- storage_account_name str
- tenant_id str
YAML
- clientId String
- clientSecretKey String
- clientSecretScope String
- initializeFileSystem Boolean
- containerName String
- directory String
- storageAccountName String
- tenantId String
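A TypeScript sketch of an ABFS (ADLS Gen2) mount authenticated with a service principal whose secret lives in a Databricks secret scope; every identifier below is an assumption:

import * as databricks from "@pulumi/databricks";

const abfsMount = new databricks.Mount("abfs-mount", {
    clusterId: "0123-456789-abcdefgh",          // assumed existing cluster
    abfs: {
        tenantId: "00000000-0000-0000-0000-000000000000",
        clientId: "11111111-1111-1111-1111-111111111111",
        clientSecretScope: "sp-secrets",        // secret scope holding the client secret
        clientSecretKey: "sp-client-secret",    // key of the secret within that scope
        storageAccountName: "mystorageaccount",
        containerName: "raw",
        initializeFileSystem: false,            // do not create the filesystem on first use
    },
});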
MountAdl, MountAdlArgs
C#
- ClientId string
- ClientSecretKey string
- ClientSecretScope string
- Directory string
- SparkConfPrefix string
- StorageResourceName string
- TenantId string
Go
- ClientId string
- ClientSecretKey string
- ClientSecretScope string
- Directory string
- SparkConfPrefix string
- StorageResourceName string
- TenantId string
Java
- clientId String
- clientSecretKey String
- clientSecretScope String
- directory String
- sparkConfPrefix String
- storageResourceName String
- tenantId String
TypeScript
- clientId string
- clientSecretKey string
- clientSecretScope string
- directory string
- sparkConfPrefix string
- storageResourceName string
- tenantId string
Python
- client_id str
- client_secret_key str
- client_secret_scope str
- directory str
- spark_conf_prefix str
- storage_resource_name str
- tenant_id str
YAML
- clientId String
- clientSecretKey String
- clientSecretScope String
- directory String
- sparkConfPrefix String
- storageResourceName String
- tenantId String
MountGs, MountGsArgs
C#
- BucketName string
- ServiceAccount string
Go
- BucketName string
- ServiceAccount string
Java
- bucketName String
- serviceAccount String
TypeScript
- bucketName string
- serviceAccount string
Python
- bucket_name str
- service_account str
YAML
- bucketName String
- serviceAccount String
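A TypeScript sketch of a GCS mount; the cluster ID, bucket name, and service account email are assumptions:

import * as databricks from "@pulumi/databricks";

const gsMount = new databricks.Mount("gs-mount", {
    clusterId: "0123-456789-abcdefgh",          // assumed existing cluster
    gs: {
        bucketName: "my-gcs-bucket",
        serviceAccount: "mount-sa@my-project.iam.gserviceaccount.com",
    },
});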
MountS3, MountS3Args
C#
- BucketName string
- InstanceProfile string
Go
- BucketName string
- InstanceProfile string
Java
- bucketName String
- instanceProfile String
TypeScript
- bucketName string
- instanceProfile string
Python
- bucket_name str
- instance_profile str
YAML
- bucketName String
- instanceProfile String
MountWasb, MountWasbArgs
C#
- AuthType string
- TokenSecretKey string
- TokenSecretScope string
- ContainerName string
- Directory string
- StorageAccountName string
Go
- AuthType string
- TokenSecretKey string
- TokenSecretScope string
- ContainerName string
- Directory string
- StorageAccountName string
Java
- authType String
- tokenSecretKey String
- tokenSecretScope String
- containerName String
- directory String
- storageAccountName String
TypeScript
- authType string
- tokenSecretKey string
- tokenSecretScope string
- containerName string
- directory string
- storageAccountName string
Python
- auth_type str
- token_secret_key str
- token_secret_scope str
- container_name str
- directory str
- storage_account_name str
YAML
- authType String
- tokenSecretKey String
- tokenSecretScope String
- containerName String
- directory String
- storageAccountName String
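A TypeScript sketch of a WASB (Azure Blob Storage) mount using a token held in a secret scope; the authType value and all names below are assumptions:

import * as databricks from "@pulumi/databricks";

const wasbMount = new databricks.Mount("wasb-mount", {
    clusterId: "0123-456789-abcdefgh",          // assumed existing cluster
    wasb: {
        storageAccountName: "mystorageaccount",
        containerName: "landing",
        authType: "SAS",                        // assumption: SAS-token authentication
        tokenSecretScope: "storage-secrets",    // secret scope holding the token
        tokenSecretKey: "sas-token",            // key of the token within that scope
    },
});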
Package Details
- Repository
- databricks pulumi/pulumi-databricks
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the databricks Terraform Provider.