aws.fsx.DataRepositoryAssociation
Manages an Amazon FSx for Lustre Data Repository Association. See Linking your file system to an S3 bucket for more information.
NOTE: Data Repository Associations are only compatible with Amazon FSx for Lustre file systems that use the PERSISTENT_2 deployment type.
Example Usage
TypeScript:
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const example = new aws.s3.BucketV2("example", {bucket: "my-bucket"});
const exampleBucketAclV2 = new aws.s3.BucketAclV2("example", {
bucket: example.id,
acl: "private",
});
const exampleLustreFileSystem = new aws.fsx.LustreFileSystem("example", {
storageCapacity: 1200,
subnetIds: [exampleAwsSubnet.id],
deploymentType: "PERSISTENT_2",
perUnitStorageThroughput: 125,
});
const exampleDataRepositoryAssociation = new aws.fsx.DataRepositoryAssociation("example", {
fileSystemId: exampleLustreFileSystem.id,
dataRepositoryPath: pulumi.interpolate`s3://${example.id}`,
fileSystemPath: "/my-bucket",
s3: {
autoExportPolicy: {
events: [
"NEW",
"CHANGED",
"DELETED",
],
},
autoImportPolicy: {
events: [
"NEW",
"CHANGED",
"DELETED",
],
},
},
});
Python:
import pulumi
import pulumi_aws as aws
example = aws.s3.BucketV2("example", bucket="my-bucket")
example_bucket_acl_v2 = aws.s3.BucketAclV2("example",
bucket=example.id,
acl="private")
example_lustre_file_system = aws.fsx.LustreFileSystem("example",
storage_capacity=1200,
subnet_ids=[example_aws_subnet["id"]],
deployment_type="PERSISTENT_2",
per_unit_storage_throughput=125)
example_data_repository_association = aws.fsx.DataRepositoryAssociation("example",
file_system_id=example_lustre_file_system.id,
data_repository_path=example.id.apply(lambda id: f"s3://{id}"),
file_system_path="/my-bucket",
s3={
"auto_export_policy": {
"events": [
"NEW",
"CHANGED",
"DELETED",
],
},
"auto_import_policy": {
"events": [
"NEW",
"CHANGED",
"DELETED",
],
},
})
Go:
package main
import (
"fmt"
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/fsx"
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/s3"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
example, err := s3.NewBucketV2(ctx, "example", &s3.BucketV2Args{
Bucket: pulumi.String("my-bucket"),
})
if err != nil {
return err
}
_, err = s3.NewBucketAclV2(ctx, "example", &s3.BucketAclV2Args{
Bucket: example.ID(),
Acl: pulumi.String("private"),
})
if err != nil {
return err
}
exampleLustreFileSystem, err := fsx.NewLustreFileSystem(ctx, "example", &fsx.LustreFileSystemArgs{
StorageCapacity: pulumi.Int(1200),
SubnetIds: pulumi.StringArray{exampleAwsSubnet.Id},
DeploymentType: pulumi.String("PERSISTENT_2"),
PerUnitStorageThroughput: pulumi.Int(125),
})
if err != nil {
return err
}
_, err = fsx.NewDataRepositoryAssociation(ctx, "example", &fsx.DataRepositoryAssociationArgs{
FileSystemId: exampleLustreFileSystem.ID(),
DataRepositoryPath: example.ID().ApplyT(func(id string) (string, error) {
return fmt.Sprintf("s3://%v", id), nil
}).(pulumi.StringOutput),
FileSystemPath: pulumi.String("/my-bucket"),
S3: &fsx.DataRepositoryAssociationS3Args{
AutoExportPolicy: &fsx.DataRepositoryAssociationS3AutoExportPolicyArgs{
Events: pulumi.StringArray{
pulumi.String("NEW"),
pulumi.String("CHANGED"),
pulumi.String("DELETED"),
},
},
AutoImportPolicy: &fsx.DataRepositoryAssociationS3AutoImportPolicyArgs{
Events: pulumi.StringArray{
pulumi.String("NEW"),
pulumi.String("CHANGED"),
pulumi.String("DELETED"),
},
},
},
})
if err != nil {
return err
}
return nil
})
}
C#:
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
var example = new Aws.S3.BucketV2("example", new()
{
Bucket = "my-bucket",
});
var exampleBucketAclV2 = new Aws.S3.BucketAclV2("example", new()
{
Bucket = example.Id,
Acl = "private",
});
var exampleLustreFileSystem = new Aws.Fsx.LustreFileSystem("example", new()
{
StorageCapacity = 1200,
SubnetIds = new[] { exampleAwsSubnet.Id },
DeploymentType = "PERSISTENT_2",
PerUnitStorageThroughput = 125,
});
var exampleDataRepositoryAssociation = new Aws.Fsx.DataRepositoryAssociation("example", new()
{
FileSystemId = exampleLustreFileSystem.Id,
DataRepositoryPath = example.Id.Apply(id => $"s3://{id}"),
FileSystemPath = "/my-bucket",
S3 = new Aws.Fsx.Inputs.DataRepositoryAssociationS3Args
{
AutoExportPolicy = new Aws.Fsx.Inputs.DataRepositoryAssociationS3AutoExportPolicyArgs
{
Events = new[]
{
"NEW",
"CHANGED",
"DELETED",
},
},
AutoImportPolicy = new Aws.Fsx.Inputs.DataRepositoryAssociationS3AutoImportPolicyArgs
{
Events = new[]
{
"NEW",
"CHANGED",
"DELETED",
},
},
},
});
});
Java:
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.s3.BucketV2;
import com.pulumi.aws.s3.BucketV2Args;
import com.pulumi.aws.s3.BucketAclV2;
import com.pulumi.aws.s3.BucketAclV2Args;
import com.pulumi.aws.fsx.LustreFileSystem;
import com.pulumi.aws.fsx.LustreFileSystemArgs;
import com.pulumi.aws.fsx.DataRepositoryAssociation;
import com.pulumi.aws.fsx.DataRepositoryAssociationArgs;
import com.pulumi.aws.fsx.inputs.DataRepositoryAssociationS3Args;
import com.pulumi.aws.fsx.inputs.DataRepositoryAssociationS3AutoExportPolicyArgs;
import com.pulumi.aws.fsx.inputs.DataRepositoryAssociationS3AutoImportPolicyArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var example = new BucketV2("example", BucketV2Args.builder()
.bucket("my-bucket")
.build());
var exampleBucketAclV2 = new BucketAclV2("exampleBucketAclV2", BucketAclV2Args.builder()
.bucket(example.id())
.acl("private")
.build());
var exampleLustreFileSystem = new LustreFileSystem("exampleLustreFileSystem", LustreFileSystemArgs.builder()
.storageCapacity(1200)
.subnetIds(exampleAwsSubnet.id())
.deploymentType("PERSISTENT_2")
.perUnitStorageThroughput(125)
.build());
var exampleDataRepositoryAssociation = new DataRepositoryAssociation("exampleDataRepositoryAssociation", DataRepositoryAssociationArgs.builder()
.fileSystemId(exampleLustreFileSystem.id())
.dataRepositoryPath(example.id().applyValue(id -> String.format("s3://%s", id)))
.fileSystemPath("/my-bucket")
.s3(DataRepositoryAssociationS3Args.builder()
.autoExportPolicy(DataRepositoryAssociationS3AutoExportPolicyArgs.builder()
.events(
"NEW",
"CHANGED",
"DELETED")
.build())
.autoImportPolicy(DataRepositoryAssociationS3AutoImportPolicyArgs.builder()
.events(
"NEW",
"CHANGED",
"DELETED")
.build())
.build())
.build());
}
}
YAML:
resources:
example:
type: aws:s3:BucketV2
properties:
bucket: my-bucket
exampleBucketAclV2:
type: aws:s3:BucketAclV2
name: example
properties:
bucket: ${example.id}
acl: private
exampleLustreFileSystem:
type: aws:fsx:LustreFileSystem
name: example
properties:
storageCapacity: 1200
subnetIds: ${exampleAwsSubnet.id}
deploymentType: PERSISTENT_2
perUnitStorageThroughput: 125
exampleDataRepositoryAssociation:
type: aws:fsx:DataRepositoryAssociation
name: example
properties:
fileSystemId: ${exampleLustreFileSystem.id}
dataRepositoryPath: s3://${example.id}
fileSystemPath: /my-bucket
s3:
autoExportPolicy:
events:
- NEW
- CHANGED
- DELETED
autoImportPolicy:
events:
- NEW
- CHANGED
- DELETED
Create DataRepositoryAssociation Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new DataRepositoryAssociation(name: string, args: DataRepositoryAssociationArgs, opts?: CustomResourceOptions);
@overload
def DataRepositoryAssociation(resource_name: str,
args: DataRepositoryAssociationArgs,
opts: Optional[ResourceOptions] = None)
@overload
def DataRepositoryAssociation(resource_name: str,
opts: Optional[ResourceOptions] = None,
data_repository_path: Optional[str] = None,
file_system_id: Optional[str] = None,
file_system_path: Optional[str] = None,
batch_import_meta_data_on_create: Optional[bool] = None,
delete_data_in_filesystem: Optional[bool] = None,
imported_file_chunk_size: Optional[int] = None,
s3: Optional[DataRepositoryAssociationS3Args] = None,
tags: Optional[Mapping[str, str]] = None)
func NewDataRepositoryAssociation(ctx *Context, name string, args DataRepositoryAssociationArgs, opts ...ResourceOption) (*DataRepositoryAssociation, error)
public DataRepositoryAssociation(string name, DataRepositoryAssociationArgs args, CustomResourceOptions? opts = null)
public DataRepositoryAssociation(String name, DataRepositoryAssociationArgs args)
public DataRepositoryAssociation(String name, DataRepositoryAssociationArgs args, CustomResourceOptions options)
type: aws:fsx:DataRepositoryAssociation
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args DataRepositoryAssociationArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args DataRepositoryAssociationArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args DataRepositoryAssociationArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args DataRepositoryAssociationArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args DataRepositoryAssociationArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var dataRepositoryAssociationResource = new Aws.Fsx.DataRepositoryAssociation("dataRepositoryAssociationResource", new()
{
DataRepositoryPath = "string",
FileSystemId = "string",
FileSystemPath = "string",
BatchImportMetaDataOnCreate = false,
DeleteDataInFilesystem = false,
ImportedFileChunkSize = 0,
S3 = new Aws.Fsx.Inputs.DataRepositoryAssociationS3Args
{
AutoExportPolicy = new Aws.Fsx.Inputs.DataRepositoryAssociationS3AutoExportPolicyArgs
{
Events = new[]
{
"string",
},
},
AutoImportPolicy = new Aws.Fsx.Inputs.DataRepositoryAssociationS3AutoImportPolicyArgs
{
Events = new[]
{
"string",
},
},
},
Tags =
{
{ "string", "string" },
},
});
example, err := fsx.NewDataRepositoryAssociation(ctx, "dataRepositoryAssociationResource", &fsx.DataRepositoryAssociationArgs{
DataRepositoryPath: pulumi.String("string"),
FileSystemId: pulumi.String("string"),
FileSystemPath: pulumi.String("string"),
BatchImportMetaDataOnCreate: pulumi.Bool(false),
DeleteDataInFilesystem: pulumi.Bool(false),
ImportedFileChunkSize: pulumi.Int(0),
S3: &fsx.DataRepositoryAssociationS3Args{
AutoExportPolicy: &fsx.DataRepositoryAssociationS3AutoExportPolicyArgs{
Events: pulumi.StringArray{
pulumi.String("string"),
},
},
AutoImportPolicy: &fsx.DataRepositoryAssociationS3AutoImportPolicyArgs{
Events: pulumi.StringArray{
pulumi.String("string"),
},
},
},
Tags: pulumi.StringMap{
"string": pulumi.String("string"),
},
})
var dataRepositoryAssociationResource = new DataRepositoryAssociation("dataRepositoryAssociationResource", DataRepositoryAssociationArgs.builder()
.dataRepositoryPath("string")
.fileSystemId("string")
.fileSystemPath("string")
.batchImportMetaDataOnCreate(false)
.deleteDataInFilesystem(false)
.importedFileChunkSize(0)
.s3(DataRepositoryAssociationS3Args.builder()
.autoExportPolicy(DataRepositoryAssociationS3AutoExportPolicyArgs.builder()
.events("string")
.build())
.autoImportPolicy(DataRepositoryAssociationS3AutoImportPolicyArgs.builder()
.events("string")
.build())
.build())
.tags(Map.of("string", "string"))
.build());
data_repository_association_resource = aws.fsx.DataRepositoryAssociation("dataRepositoryAssociationResource",
data_repository_path="string",
file_system_id="string",
file_system_path="string",
batch_import_meta_data_on_create=False,
delete_data_in_filesystem=False,
imported_file_chunk_size=0,
s3={
"autoExportPolicy": {
"events": ["string"],
},
"autoImportPolicy": {
"events": ["string"],
},
},
tags={
"string": "string",
})
const dataRepositoryAssociationResource = new aws.fsx.DataRepositoryAssociation("dataRepositoryAssociationResource", {
dataRepositoryPath: "string",
fileSystemId: "string",
fileSystemPath: "string",
batchImportMetaDataOnCreate: false,
deleteDataInFilesystem: false,
importedFileChunkSize: 0,
s3: {
autoExportPolicy: {
events: ["string"],
},
autoImportPolicy: {
events: ["string"],
},
},
tags: {
string: "string",
},
});
type: aws:fsx:DataRepositoryAssociation
properties:
batchImportMetaDataOnCreate: false
dataRepositoryPath: string
deleteDataInFilesystem: false
fileSystemId: string
fileSystemPath: string
importedFileChunkSize: 0
s3:
autoExportPolicy:
events:
- string
autoImportPolicy:
events:
- string
tags:
string: string
DataRepositoryAssociation Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
The DataRepositoryAssociation resource accepts the following input properties:
- Data
Repository stringPath - The path to the Amazon S3 data repository that will be linked to the file system. The path must be an S3 bucket or prefix in the format s3://myBucket/myPrefix/. This path specifies where in the S3 data repository files will be imported from or exported to. The same S3 bucket cannot be linked more than once to the same file system.
- File
System stringId - The ID of the Amazon FSx file system on which to create a data repository association.
- File
System stringPath - A path on the file system that points to a high-level directory (such as
/ns1/
) or subdirectory (such as/ns1/subdir/
) that will be mapped 1-1 withdata_repository_path
. The leading forward slash in the name is required. Two data repository associations cannot have overlapping file system paths. For example, if a data repository is associated with file system path/ns1/
, then you cannot link another data repository with file system path/ns1/ns2
. This path specifies where in your file system files will be exported from or imported to. This file system directory can be linked to only one Amazon S3 bucket, and no other S3 bucket can be linked to the directory. - Batch
Import boolMeta Data On Create - Set to true to run an import data repository task to import metadata from the data repository to the file system after the data repository association is created. Defaults to
false
. - Delete
Data boolIn Filesystem - Set to true to delete files from the file system upon deleting this data repository association. Defaults to
false
. - Imported
File intChunk Size - For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system.
- S3
Pulumi.
Aws. Fsx. Inputs. Data Repository Association S3 - See the
s3
configuration block. Max of 1. The configuration for an Amazon S3 data repository linked to an Amazon FSx Lustre file system with a data repository association. The configuration defines which file events (new, changed, or deleted files or directories) are automatically imported from the linked data repository to the file system or automatically exported from the file system to the data repository. - Dictionary<string, string>
- A map of tags to assign to the data repository association. If configured with a provider
default_tags
configuration block present, tags with matching keys will overwrite those defined at the provider-level.
- Data
Repository stringPath - The path to the Amazon S3 data repository that will be linked to the file system. The path must be an S3 bucket or prefix in the format s3://myBucket/myPrefix/. This path specifies where in the S3 data repository files will be imported from or exported to. The same S3 bucket cannot be linked more than once to the same file system.
- File
System stringId - The ID of the Amazon FSx file system on which to create a data repository association.
- File
System stringPath - A path on the file system that points to a high-level directory (such as
/ns1/
) or subdirectory (such as/ns1/subdir/
) that will be mapped 1-1 withdata_repository_path
. The leading forward slash in the name is required. Two data repository associations cannot have overlapping file system paths. For example, if a data repository is associated with file system path/ns1/
, then you cannot link another data repository with file system path/ns1/ns2
. This path specifies where in your file system files will be exported from or imported to. This file system directory can be linked to only one Amazon S3 bucket, and no other S3 bucket can be linked to the directory. - Batch
Import boolMeta Data On Create - Set to true to run an import data repository task to import metadata from the data repository to the file system after the data repository association is created. Defaults to
false
. - Delete
Data boolIn Filesystem - Set to true to delete files from the file system upon deleting this data repository association. Defaults to
false
. - Imported
File intChunk Size - For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system.
- S3
Data
Repository Association S3Args - See the
s3
configuration block. Max of 1. The configuration for an Amazon S3 data repository linked to an Amazon FSx Lustre file system with a data repository association. The configuration defines which file events (new, changed, or deleted files or directories) are automatically imported from the linked data repository to the file system or automatically exported from the file system to the data repository. - map[string]string
- A map of tags to assign to the data repository association. If configured with a provider
default_tags
configuration block present, tags with matching keys will overwrite those defined at the provider-level.
- data
Repository StringPath - The path to the Amazon S3 data repository that will be linked to the file system. The path must be an S3 bucket or prefix in the format s3://myBucket/myPrefix/. This path specifies where in the S3 data repository files will be imported from or exported to. The same S3 bucket cannot be linked more than once to the same file system.
- file
System StringId - The ID of the Amazon FSx file system on which to create a data repository association.
- file
System StringPath - A path on the file system that points to a high-level directory (such as
/ns1/
) or subdirectory (such as/ns1/subdir/
) that will be mapped 1-1 withdata_repository_path
. The leading forward slash in the name is required. Two data repository associations cannot have overlapping file system paths. For example, if a data repository is associated with file system path/ns1/
, then you cannot link another data repository with file system path/ns1/ns2
. This path specifies where in your file system files will be exported from or imported to. This file system directory can be linked to only one Amazon S3 bucket, and no other S3 bucket can be linked to the directory. - batch
Import BooleanMeta Data On Create - Set to true to run an import data repository task to import metadata from the data repository to the file system after the data repository association is created. Defaults to
false
. - delete
Data BooleanIn Filesystem - Set to true to delete files from the file system upon deleting this data repository association. Defaults to
false
. - imported
File IntegerChunk Size - For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system.
- s3
Data
Repository Association S3 - See the
s3
configuration block. Max of 1. The configuration for an Amazon S3 data repository linked to an Amazon FSx Lustre file system with a data repository association. The configuration defines which file events (new, changed, or deleted files or directories) are automatically imported from the linked data repository to the file system or automatically exported from the file system to the data repository. - Map<String,String>
- A map of tags to assign to the data repository association. If configured with a provider
default_tags
configuration block present, tags with matching keys will overwrite those defined at the provider-level.
- data
Repository stringPath - The path to the Amazon S3 data repository that will be linked to the file system. The path must be an S3 bucket or prefix in the format s3://myBucket/myPrefix/. This path specifies where in the S3 data repository files will be imported from or exported to. The same S3 bucket cannot be linked more than once to the same file system.
- file
System stringId - The ID of the Amazon FSx file system on which to create a data repository association.
- file
System stringPath - A path on the file system that points to a high-level directory (such as
/ns1/
) or subdirectory (such as/ns1/subdir/
) that will be mapped 1-1 withdata_repository_path
. The leading forward slash in the name is required. Two data repository associations cannot have overlapping file system paths. For example, if a data repository is associated with file system path/ns1/
, then you cannot link another data repository with file system path/ns1/ns2
. This path specifies where in your file system files will be exported from or imported to. This file system directory can be linked to only one Amazon S3 bucket, and no other S3 bucket can be linked to the directory. - batch
Import booleanMeta Data On Create - Set to true to run an import data repository task to import metadata from the data repository to the file system after the data repository association is created. Defaults to
false
. - delete
Data booleanIn Filesystem - Set to true to delete files from the file system upon deleting this data repository association. Defaults to
false
. - imported
File numberChunk Size - For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system.
- s3
Data
Repository Association S3 - See the
s3
configuration block. Max of 1. The configuration for an Amazon S3 data repository linked to an Amazon FSx Lustre file system with a data repository association. The configuration defines which file events (new, changed, or deleted files or directories) are automatically imported from the linked data repository to the file system or automatically exported from the file system to the data repository. - {[key: string]: string}
- A map of tags to assign to the data repository association. If configured with a provider
default_tags
configuration block present, tags with matching keys will overwrite those defined at the provider-level.
- data_
repository_ strpath - The path to the Amazon S3 data repository that will be linked to the file system. The path must be an S3 bucket or prefix in the format s3://myBucket/myPrefix/. This path specifies where in the S3 data repository files will be imported from or exported to. The same S3 bucket cannot be linked more than once to the same file system.
- file_
system_ strid - The ID of the Amazon FSx file system on which to create a data repository association.
- file_
system_ strpath - A path on the file system that points to a high-level directory (such as
/ns1/
) or subdirectory (such as/ns1/subdir/
) that will be mapped 1-1 withdata_repository_path
. The leading forward slash in the name is required. Two data repository associations cannot have overlapping file system paths. For example, if a data repository is associated with file system path/ns1/
, then you cannot link another data repository with file system path/ns1/ns2
. This path specifies where in your file system files will be exported from or imported to. This file system directory can be linked to only one Amazon S3 bucket, and no other S3 bucket can be linked to the directory. - batch_
import_ boolmeta_ data_ on_ create - Set to true to run an import data repository task to import metadata from the data repository to the file system after the data repository association is created. Defaults to
false
. - delete_
data_ boolin_ filesystem - Set to true to delete files from the file system upon deleting this data repository association. Defaults to
false
. - imported_
file_ intchunk_ size - For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system.
- s3
Data
Repository Association S3Args - See the
s3
configuration block. Max of 1. The configuration for an Amazon S3 data repository linked to an Amazon FSx Lustre file system with a data repository association. The configuration defines which file events (new, changed, or deleted files or directories) are automatically imported from the linked data repository to the file system or automatically exported from the file system to the data repository. - Mapping[str, str]
- A map of tags to assign to the data repository association. If configured with a provider
default_tags
configuration block present, tags with matching keys will overwrite those defined at the provider-level.
- data
Repository StringPath - The path to the Amazon S3 data repository that will be linked to the file system. The path must be an S3 bucket or prefix in the format s3://myBucket/myPrefix/. This path specifies where in the S3 data repository files will be imported from or exported to. The same S3 bucket cannot be linked more than once to the same file system.
- file
System StringId - The ID of the Amazon FSx file system on which to create a data repository association.
- file
System StringPath - A path on the file system that points to a high-level directory (such as
/ns1/
) or subdirectory (such as/ns1/subdir/
) that will be mapped 1-1 withdata_repository_path
. The leading forward slash in the name is required. Two data repository associations cannot have overlapping file system paths. For example, if a data repository is associated with file system path/ns1/
, then you cannot link another data repository with file system path/ns1/ns2
. This path specifies where in your file system files will be exported from or imported to. This file system directory can be linked to only one Amazon S3 bucket, and no other S3 bucket can be linked to the directory. - batch
Import BooleanMeta Data On Create - Set to true to run an import data repository task to import metadata from the data repository to the file system after the data repository association is created. Defaults to
false
. - delete
Data BooleanIn Filesystem - Set to true to delete files from the file system upon deleting this data repository association. Defaults to
false
. - imported
File NumberChunk Size - For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system.
- s3 Property Map
- See the
s3
configuration block. Max of 1. The configuration for an Amazon S3 data repository linked to an Amazon FSx Lustre file system with a data repository association. The configuration defines which file events (new, changed, or deleted files or directories) are automatically imported from the linked data repository to the file system or automatically exported from the file system to the data repository. - Map<String>
- A map of tags to assign to the data repository association. If configured with a provider
default_tags
configuration block present, tags with matching keys will overwrite those defined at the provider-level.
Outputs
All input properties are implicitly available as output properties. Additionally, the DataRepositoryAssociation resource produces the following output properties:
- Arn string
- Amazon Resource Name of the file system.
- Association
Id string - Id string
- The provider-assigned unique ID for this managed resource.
- Dictionary<string, string>
- A map of tags assigned to the resource, including those inherited from the provider
default_tags
configuration block.
- Arn string
- Amazon Resource Name of the file system.
- Association
Id string - Id string
- The provider-assigned unique ID for this managed resource.
- map[string]string
- A map of tags assigned to the resource, including those inherited from the provider
default_tags
configuration block.
- arn String
- Amazon Resource Name of the file system.
- association
Id String - id String
- The provider-assigned unique ID for this managed resource.
- Map<String,String>
- A map of tags assigned to the resource, including those inherited from the provider
default_tags
configuration block.
- arn string
- Amazon Resource Name of the file system.
- association
Id string - id string
- The provider-assigned unique ID for this managed resource.
- {[key: string]: string}
- A map of tags assigned to the resource, including those inherited from the provider
default_tags
configuration block.
- arn str
- Amazon Resource Name of the file system.
- association_
id str - id str
- The provider-assigned unique ID for this managed resource.
- Mapping[str, str]
- A map of tags assigned to the resource, including those inherited from the provider
default_tags
configuration block.
- arn String
- Amazon Resource Name of the file system.
- association
Id String - id String
- The provider-assigned unique ID for this managed resource.
- Map<String>
- A map of tags assigned to the resource, including those inherited from the provider
default_tags
configuration block.
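Because every input is also exported as an output, the computed properties can be read directly off the resource. A minimal TypeScript sketch, assuming the exampleDataRepositoryAssociation resource from the Example Usage section above:
// Expose the computed ARN and association ID of the example resource as stack outputs.
export const associationArn = exampleDataRepositoryAssociation.arn;
export const associationId = exampleDataRepositoryAssociation.associationId;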
Look up Existing DataRepositoryAssociation Resource
Get an existing DataRepositoryAssociation resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: DataRepositoryAssociationState, opts?: CustomResourceOptions): DataRepositoryAssociation
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
arn: Optional[str] = None,
association_id: Optional[str] = None,
batch_import_meta_data_on_create: Optional[bool] = None,
data_repository_path: Optional[str] = None,
delete_data_in_filesystem: Optional[bool] = None,
file_system_id: Optional[str] = None,
file_system_path: Optional[str] = None,
imported_file_chunk_size: Optional[int] = None,
s3: Optional[DataRepositoryAssociationS3Args] = None,
tags: Optional[Mapping[str, str]] = None,
tags_all: Optional[Mapping[str, str]] = None) -> DataRepositoryAssociation
func GetDataRepositoryAssociation(ctx *Context, name string, id IDInput, state *DataRepositoryAssociationState, opts ...ResourceOption) (*DataRepositoryAssociation, error)
public static DataRepositoryAssociation Get(string name, Input<string> id, DataRepositoryAssociationState? state, CustomResourceOptions? opts = null)
public static DataRepositoryAssociation get(String name, Output<String> id, DataRepositoryAssociationState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
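For example, a minimal TypeScript sketch of the lookup, using the placeholder association ID from the import example below:
import * as aws from "@pulumi/aws";
// Look up an existing association by its provider-assigned ID; no new resource is created.
const existing = aws.fsx.DataRepositoryAssociation.get("existing", "dra-0b1cfaeca11088b10");
// State properties of the looked-up resource are available as outputs.
export const existingFileSystemPath = existing.fileSystemPath;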
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- Arn string
- Amazon Resource Name of the file system.
- Association
Id string - Batch
Import boolMeta Data On Create - Set to true to run an import data repository task to import metadata from the data repository to the file system after the data repository association is created. Defaults to
false
. - Data
Repository stringPath - The path to the Amazon S3 data repository that will be linked to the file system. The path must be an S3 bucket or prefix in the format s3://myBucket/myPrefix/. This path specifies where in the S3 data repository files will be imported from or exported to. The same S3 bucket cannot be linked more than once to the same file system.
- Delete
Data boolIn Filesystem - Set to true to delete files from the file system upon deleting this data repository association. Defaults to
false
. - File
System stringId - The ID of the Amazon FSx file system on which to create a data repository association.
- File
System stringPath - A path on the file system that points to a high-level directory (such as
/ns1/
) or subdirectory (such as/ns1/subdir/
) that will be mapped 1-1 withdata_repository_path
. The leading forward slash in the name is required. Two data repository associations cannot have overlapping file system paths. For example, if a data repository is associated with file system path/ns1/
, then you cannot link another data repository with file system path/ns1/ns2
. This path specifies where in your file system files will be exported from or imported to. This file system directory can be linked to only one Amazon S3 bucket, and no other S3 bucket can be linked to the directory. - Imported
File intChunk Size - For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system.
- S3
Pulumi.
Aws. Fsx. Inputs. Data Repository Association S3 - See the
s3
configuration block. Max of 1. The configuration for an Amazon S3 data repository linked to an Amazon FSx Lustre file system with a data repository association. The configuration defines which file events (new, changed, or deleted files or directories) are automatically imported from the linked data repository to the file system or automatically exported from the file system to the data repository. - Dictionary<string, string>
- A map of tags to assign to the data repository association. If configured with a provider
default_tags
configuration block present, tags with matching keys will overwrite those defined at the provider-level. - Dictionary<string, string>
- A map of tags assigned to the resource, including those inherited from the provider
default_tags
configuration block.
- Arn string
- Amazon Resource Name of the file system.
- Association
Id string - Batch
Import boolMeta Data On Create - Set to true to run an import data repository task to import metadata from the data repository to the file system after the data repository association is created. Defaults to
false
. - Data
Repository stringPath - The path to the Amazon S3 data repository that will be linked to the file system. The path must be an S3 bucket or prefix in the format s3://myBucket/myPrefix/. This path specifies where in the S3 data repository files will be imported from or exported to. The same S3 bucket cannot be linked more than once to the same file system.
- Delete
Data boolIn Filesystem - Set to true to delete files from the file system upon deleting this data repository association. Defaults to
false
. - File
System stringId - The ID of the Amazon FSx file system on which to create a data repository association.
- File
System stringPath - A path on the file system that points to a high-level directory (such as
/ns1/
) or subdirectory (such as/ns1/subdir/
) that will be mapped 1-1 withdata_repository_path
. The leading forward slash in the name is required. Two data repository associations cannot have overlapping file system paths. For example, if a data repository is associated with file system path/ns1/
, then you cannot link another data repository with file system path/ns1/ns2
. This path specifies where in your file system files will be exported from or imported to. This file system directory can be linked to only one Amazon S3 bucket, and no other S3 bucket can be linked to the directory. - Imported
File intChunk Size - For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system.
- S3
Data
Repository Association S3Args - See the
s3
configuration block. Max of 1. The configuration for an Amazon S3 data repository linked to an Amazon FSx Lustre file system with a data repository association. The configuration defines which file events (new, changed, or deleted files or directories) are automatically imported from the linked data repository to the file system or automatically exported from the file system to the data repository. - map[string]string
- A map of tags to assign to the data repository association. If configured with a provider
default_tags
configuration block present, tags with matching keys will overwrite those defined at the provider-level. - map[string]string
- A map of tags assigned to the resource, including those inherited from the provider
default_tags
configuration block.
- arn String
- Amazon Resource Name of the file system.
- association
Id String - batch
Import BooleanMeta Data On Create - Set to true to run an import data repository task to import metadata from the data repository to the file system after the data repository association is created. Defaults to
false
. - data
Repository StringPath - The path to the Amazon S3 data repository that will be linked to the file system. The path must be an S3 bucket or prefix in the format s3://myBucket/myPrefix/. This path specifies where in the S3 data repository files will be imported from or exported to. The same S3 bucket cannot be linked more than once to the same file system.
- delete
Data BooleanIn Filesystem - Set to true to delete files from the file system upon deleting this data repository association. Defaults to
false
. - file
System StringId - The ID of the Amazon FSx file system on which to create a data repository association.
- file
System StringPath - A path on the file system that points to a high-level directory (such as
/ns1/
) or subdirectory (such as/ns1/subdir/
) that will be mapped 1-1 withdata_repository_path
. The leading forward slash in the name is required. Two data repository associations cannot have overlapping file system paths. For example, if a data repository is associated with file system path/ns1/
, then you cannot link another data repository with file system path/ns1/ns2
. This path specifies where in your file system files will be exported from or imported to. This file system directory can be linked to only one Amazon S3 bucket, and no other S3 bucket can be linked to the directory. - imported
File IntegerChunk Size - For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system.
- s3
Data
Repository Association S3 - See the
s3
configuration block. Max of 1. The configuration for an Amazon S3 data repository linked to an Amazon FSx Lustre file system with a data repository association. The configuration defines which file events (new, changed, or deleted files or directories) are automatically imported from the linked data repository to the file system or automatically exported from the file system to the data repository. - Map<String,String>
- A map of tags to assign to the data repository association. If configured with a provider
default_tags
configuration block present, tags with matching keys will overwrite those defined at the provider-level. - Map<String,String>
- A map of tags assigned to the resource, including those inherited from the provider
default_tags
configuration block.
- arn string
- Amazon Resource Name of the file system.
- association
Id string - batch
Import booleanMeta Data On Create - Set to true to run an import data repository task to import metadata from the data repository to the file system after the data repository association is created. Defaults to
false
. - data
Repository stringPath - The path to the Amazon S3 data repository that will be linked to the file system. The path must be an S3 bucket or prefix in the format s3://myBucket/myPrefix/. This path specifies where in the S3 data repository files will be imported from or exported to. The same S3 bucket cannot be linked more than once to the same file system.
- delete
Data booleanIn Filesystem - Set to true to delete files from the file system upon deleting this data repository association. Defaults to
false
. - file
System stringId - The ID of the Amazon FSx file system on which to create a data repository association.
- file
System stringPath - A path on the file system that points to a high-level directory (such as
/ns1/
) or subdirectory (such as/ns1/subdir/
) that will be mapped 1-1 withdata_repository_path
. The leading forward slash in the name is required. Two data repository associations cannot have overlapping file system paths. For example, if a data repository is associated with file system path/ns1/
, then you cannot link another data repository with file system path/ns1/ns2
. This path specifies where in your file system files will be exported from or imported to. This file system directory can be linked to only one Amazon S3 bucket, and no other S3 bucket can be linked to the directory. - imported
File numberChunk Size - For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system.
- s3
Data
Repository Association S3 - See the
s3
configuration block. Max of 1. The configuration for an Amazon S3 data repository linked to an Amazon FSx Lustre file system with a data repository association. The configuration defines which file events (new, changed, or deleted files or directories) are automatically imported from the linked data repository to the file system or automatically exported from the file system to the data repository. - {[key: string]: string}
- A map of tags to assign to the data repository association. If configured with a provider
default_tags
configuration block present, tags with matching keys will overwrite those defined at the provider-level. - {[key: string]: string}
- A map of tags assigned to the resource, including those inherited from the provider
default_tags
configuration block.
- arn str
- Amazon Resource Name of the file system.
- association_
id str - batch_
import_ boolmeta_ data_ on_ create - Set to true to run an import data repository task to import metadata from the data repository to the file system after the data repository association is created. Defaults to
false
. - data_
repository_ strpath - The path to the Amazon S3 data repository that will be linked to the file system. The path must be an S3 bucket or prefix in the format s3://myBucket/myPrefix/. This path specifies where in the S3 data repository files will be imported from or exported to. The same S3 bucket cannot be linked more than once to the same file system.
- delete_
data_ boolin_ filesystem - Set to true to delete files from the file system upon deleting this data repository association. Defaults to
false
. - file_
system_ strid - The ID of the Amazon FSx file system on which to create a data repository association.
- file_
system_ strpath - A path on the file system that points to a high-level directory (such as
/ns1/
) or subdirectory (such as/ns1/subdir/
) that will be mapped 1-1 withdata_repository_path
. The leading forward slash in the name is required. Two data repository associations cannot have overlapping file system paths. For example, if a data repository is associated with file system path/ns1/
, then you cannot link another data repository with file system path/ns1/ns2
. This path specifies where in your file system files will be exported from or imported to. This file system directory can be linked to only one Amazon S3 bucket, and no other S3 bucket can be linked to the directory. - imported_
file_ intchunk_ size - For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system.
- s3
Data
Repository Association S3Args - See the
s3
configuration block. Max of 1. The configuration for an Amazon S3 data repository linked to an Amazon FSx Lustre file system with a data repository association. The configuration defines which file events (new, changed, or deleted files or directories) are automatically imported from the linked data repository to the file system or automatically exported from the file system to the data repository. - Mapping[str, str]
- A map of tags to assign to the data repository association. If configured with a provider
default_tags
configuration block present, tags with matching keys will overwrite those defined at the provider-level. - Mapping[str, str]
- A map of tags assigned to the resource, including those inherited from the provider
default_tags
configuration block.
- arn String
- Amazon Resource Name of the file system.
- association
Id String - batch
Import BooleanMeta Data On Create - Set to true to run an import data repository task to import metadata from the data repository to the file system after the data repository association is created. Defaults to
false
. - data
Repository StringPath - The path to the Amazon S3 data repository that will be linked to the file system. The path must be an S3 bucket or prefix in the format s3://myBucket/myPrefix/. This path specifies where in the S3 data repository files will be imported from or exported to. The same S3 bucket cannot be linked more than once to the same file system.
- delete
Data BooleanIn Filesystem - Set to true to delete files from the file system upon deleting this data repository association. Defaults to
false
. - file
System StringId - The ID of the Amazon FSx file system on which to create a data repository association.
- file
System StringPath - A path on the file system that points to a high-level directory (such as
/ns1/
) or subdirectory (such as/ns1/subdir/
) that will be mapped 1-1 withdata_repository_path
. The leading forward slash in the name is required. Two data repository associations cannot have overlapping file system paths. For example, if a data repository is associated with file system path/ns1/
, then you cannot link another data repository with file system path/ns1/ns2
. This path specifies where in your file system files will be exported from or imported to. This file system directory can be linked to only one Amazon S3 bucket, and no other S3 bucket can be linked to the directory. - imported
File NumberChunk Size - For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system.
- s3 Property Map
- See the
s3
configuration block. Max of 1. The configuration for an Amazon S3 data repository linked to an Amazon FSx Lustre file system with a data repository association. The configuration defines which file events (new, changed, or deleted files or directories) are automatically imported from the linked data repository to the file system or automatically exported from the file system to the data repository. - Map<String>
- A map of tags to assign to the data repository association. If configured with a provider
default_tags
configuration block present, tags with matching keys will overwrite those defined at the provider-level. - Map<String>
- A map of tags assigned to the resource, including those inherited from the provider
default_tags
configuration block.
Supporting Types
DataRepositoryAssociationS3, DataRepositoryAssociationS3Args
- Auto
Export Pulumi.Policy Aws. Fsx. Inputs. Data Repository Association S3Auto Export Policy - Specifies the type of updated objects that will be automatically exported from your file system to the linked S3 bucket. See the
events
configuration block. - Auto
Import Pulumi.Policy Aws. Fsx. Inputs. Data Repository Association S3Auto Import Policy - Specifies the type of updated objects that will be automatically imported from the linked S3 bucket to your file system. See the
events
configuration block.
- Auto
Export DataPolicy Repository Association S3Auto Export Policy - Specifies the type of updated objects that will be automatically exported from your file system to the linked S3 bucket. See the
events
configuration block. - Auto
Import DataPolicy Repository Association S3Auto Import Policy - Specifies the type of updated objects that will be automatically imported from the linked S3 bucket to your file system. See the
events
configuration block.
- auto
Export DataPolicy Repository Association S3Auto Export Policy - Specifies the type of updated objects that will be automatically exported from your file system to the linked S3 bucket. See the
events
configuration block. - auto
Import DataPolicy Repository Association S3Auto Import Policy - Specifies the type of updated objects that will be automatically imported from the linked S3 bucket to your file system. See the
events
configuration block.
- auto
Export DataPolicy Repository Association S3Auto Export Policy - Specifies the type of updated objects that will be automatically exported from your file system to the linked S3 bucket. See the
events
configuration block. - auto
Import DataPolicy Repository Association S3Auto Import Policy - Specifies the type of updated objects that will be automatically imported from the linked S3 bucket to your file system. See the
events
configuration block.
- auto_
export_ Datapolicy Repository Association S3Auto Export Policy - Specifies the type of updated objects that will be automatically exported from your file system to the linked S3 bucket. See the
events
configuration block. - auto_
import_ Datapolicy Repository Association S3Auto Import Policy - Specifies the type of updated objects that will be automatically imported from the linked S3 bucket to your file system. See the
events
configuration block.
- auto
Export Property MapPolicy - Specifies the type of updated objects that will be automatically exported from your file system to the linked S3 bucket. See the
events
configuration block. - auto
Import Property MapPolicy - Specifies the type of updated objects that will be automatically imported from the linked S3 bucket to your file system. See the
events
configuration block.
DataRepositoryAssociationS3AutoExportPolicy, DataRepositoryAssociationS3AutoExportPolicyArgs
- Events List<string>
- A list of file event types to automatically export to your linked S3 bucket or import from the linked S3 bucket. Valid values are
NEW
,CHANGED
,DELETED
. Max of 3.
- Events []string
- A list of file event types to automatically export to your linked S3 bucket or import from the linked S3 bucket. Valid values are
NEW
,CHANGED
,DELETED
. Max of 3.
- events List<String>
- A list of file event types to automatically export to your linked S3 bucket or import from the linked S3 bucket. Valid values are
NEW
,CHANGED
,DELETED
. Max of 3.
- events string[]
- A list of file event types to automatically export to your linked S3 bucket or import from the linked S3 bucket. Valid values are
NEW
,CHANGED
,DELETED
. Max of 3.
- events Sequence[str]
- A list of file event types to automatically export to your linked S3 bucket or import from the linked S3 bucket. Valid values are
NEW
,CHANGED
,DELETED
. Max of 3.
- events List<String>
- A list of file event types to automatically export to your linked S3 bucket or import from the linked S3 bucket. Valid values are
NEW
,CHANGED
,DELETED
. Max of 3.
DataRepositoryAssociationS3AutoImportPolicy, DataRepositoryAssociationS3AutoImportPolicyArgs
- Events List<string>
- A list of file event types to automatically export to your linked S3 bucket or import from the linked S3 bucket. Valid values are
NEW
,CHANGED
,DELETED
. Max of 3.
- Events []string
- A list of file event types to automatically export to your linked S3 bucket or import from the linked S3 bucket. Valid values are
NEW
,CHANGED
,DELETED
. Max of 3.
- events List<String>
- A list of file event types to automatically export to your linked S3 bucket or import from the linked S3 bucket. Valid values are
NEW
,CHANGED
,DELETED
. Max of 3.
- events string[]
- A list of file event types to automatically export to your linked S3 bucket or import from the linked S3 bucket. Valid values are
NEW
,CHANGED
,DELETED
. Max of 3.
- events Sequence[str]
- A list of file event types to automatically export to your linked S3 bucket or import from the linked S3 bucket. Valid values are
NEW
,CHANGED
,DELETED
. Max of 3.
- events List<String>
- A list of file event types to automatically export to your linked S3 bucket or import from the linked S3 bucket. Valid values are
NEW
,CHANGED
,DELETED
. Max of 3.
Import
Using pulumi import, import FSx Data Repository Associations using the id. For example:
$ pulumi import aws:fsx/dataRepositoryAssociation:DataRepositoryAssociation example dra-0b1cfaeca11088b10
To learn more about importing existing cloud resources, see Importing resources.
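As an alternative sketch (not part of the original docs), an existing association can also be adopted from program code by setting the import resource option in TypeScript; the file system ID and paths shown are hypothetical placeholders and must match the existing association's actual configuration:
import * as aws from "@pulumi/aws";
// Adopt the existing association into state instead of creating a new one.
const adopted = new aws.fsx.DataRepositoryAssociation("example", {
    fileSystemId: "fs-0123456789abcdef0",   // hypothetical placeholder
    dataRepositoryPath: "s3://my-bucket",   // hypothetical placeholder
    fileSystemPath: "/my-bucket",           // hypothetical placeholder
}, { import: "dra-0b1cfaeca11088b10" });
Once the import succeeds and the resource is tracked in state, the import option can be removed.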
Package Details
- Repository
- AWS Classic pulumi/pulumi-aws
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the aws Terraform Provider.