MongoDB Atlas v3.18.0 published on Thursday, Sep 12, 2024 by Pulumi
mongodbatlas.getDataLakePipeline
Explore with Pulumi AI
# Data Source: mongodbatlas.DataLakePipeline
mongodbatlas.DataLakePipeline
describes a Data Lake Pipeline.
NOTE: Groups and projects are synonymous terms. You may find
group_id
in the official documentation.
Example Usage
The following example creates a project, a backup-enabled cluster, and a Data Lake Pipeline, then reads the pipeline back with the data source.
Coming soon!
Coming soon!
Coming soon!
Coming soon!
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.mongodbatlas.AdvancedCluster;
import com.pulumi.mongodbatlas.AdvancedClusterArgs;
import com.pulumi.mongodbatlas.DataLakePipeline;
import com.pulumi.mongodbatlas.DataLakePipelineArgs;
import com.pulumi.mongodbatlas.MongodbatlasFunctions;
import com.pulumi.mongodbatlas.Project;
import com.pulumi.mongodbatlas.ProjectArgs;
import com.pulumi.mongodbatlas.inputs.AdvancedClusterReplicationSpecArgs;
import com.pulumi.mongodbatlas.inputs.AdvancedClusterReplicationSpecRegionConfigArgs;
import com.pulumi.mongodbatlas.inputs.AdvancedClusterReplicationSpecRegionConfigElectableSpecsArgs;
import com.pulumi.mongodbatlas.inputs.DataLakePipelineSinkArgs;
import com.pulumi.mongodbatlas.inputs.DataLakePipelineSinkPartitionFieldArgs;
import com.pulumi.mongodbatlas.inputs.DataLakePipelineSourceArgs;
import com.pulumi.mongodbatlas.inputs.DataLakePipelineTransformationArgs;
import com.pulumi.mongodbatlas.inputs.GetDataLakePipelineArgs;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
/**
 * Example program: creates an Atlas project, a backup-enabled replica-set
 * cluster, and a Data Lake Pipeline, then reads the pipeline back with the
 * {@code mongodbatlas.getDataLakePipeline} data source.
 */
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Project that owns the cluster and the Data Lake Pipeline.
        var projectTest = new Project("projectTest", ProjectArgs.builder()
            .name("NAME OF THE PROJECT")
            .orgId("ORGANIZATION ID")
            .build());

        // Backup-enabled cluster; its cloud backup snapshots feed the
        // pipeline's ON_DEMAND_CPS source below.
        var automatedBackupTest = new AdvancedCluster("automatedBackupTest", AdvancedClusterArgs.builder()
            // FIX: the original referenced an undefined local variable `projectId`;
            // use the project's resource ID output instead.
            .projectId(projectTest.id())
            .name("automated-backup-test")
            .clusterType("REPLICASET")
            .backupEnabled(true) // enable cloud backup snapshots
            .replicationSpecs(AdvancedClusterReplicationSpecArgs.builder()
                .regionConfigs(AdvancedClusterReplicationSpecRegionConfigArgs.builder()
                    .priority(7)
                    .providerName("GCP")
                    .regionName("US_EAST_4")
                    .electableSpecs(AdvancedClusterReplicationSpecRegionConfigElectableSpecsArgs.builder()
                        .instanceSize("M10")
                        .nodeCount(3)
                        .build())
                    .build())
                .build())
            .build());

        // Pipeline that exports snapshot data to Atlas Data Lake Storage (DLS).
        // NOTE(review): `projectTest.projectId()` follows the upstream example;
        // confirm the Project resource exposes this output (vs. `id()`).
        var pipeline = new DataLakePipeline("pipeline", DataLakePipelineArgs.builder()
            .projectId(projectTest.projectId())
            .name("DataLakePipelineName")
            .sink(DataLakePipelineSinkArgs.builder()
                .type("DLS")
                .partitionFields(DataLakePipelineSinkPartitionFieldArgs.builder()
                    .name("access")
                    .order(0)
                    .build())
                .build())
            .source(DataLakePipelineSourceArgs.builder()
                .type("ON_DEMAND_CPS") // on-demand cloud provider snapshots
                .clusterName(automatedBackupTest.name())
                .databaseName("sample_airbnb")
                .collectionName("listingsAndReviews")
                .build())
            // Fields excluded from the exported namespace.
            .transformations(
                DataLakePipelineTransformationArgs.builder()
                    .field("test")
                    .type("EXCLUDE")
                    .build(),
                DataLakePipelineTransformationArgs.builder()
                    .field("test22")
                    .type("EXCLUDE")
                    .build())
            .build());

        // Read the pipeline back through the data source.
        final var pipelineDataSource = MongodbatlasFunctions.getDataLakePipeline(GetDataLakePipelineArgs.builder()
            .projectId(pipeline.projectId())
            .name(pipeline.name())
            .build());
    }
}
resources:
projectTest:
type: mongodbatlas:Project
properties:
name: NAME OF THE PROJECT
orgId: ORGANIZATION ID
automatedBackupTest:
type: mongodbatlas:AdvancedCluster
name: automated_backup_test
properties:
projectId: ${projectTest.id}
name: automated-backup-test
clusterType: REPLICASET
backupEnabled: true # enable cloud backup snapshots
replicationSpecs:
- regionConfigs:
- priority: 7
providerName: GCP
regionName: US_EAST_4
electableSpecs:
instanceSize: M10
nodeCount: 3
pipeline:
type: mongodbatlas:DataLakePipeline
properties:
projectId: ${projectTest.projectId}
name: DataLakePipelineName
sink:
type: DLS
partitionFields:
- name: access
order: 0
source:
type: ON_DEMAND_CPS
clusterName: ${automatedBackupTest.name}
databaseName: sample_airbnb
collectionName: listingsAndReviews
transformations:
- field: test
type: EXCLUDE
- field: test22
type: EXCLUDE
variables:
pipelineDataSource:
fn::invoke:
Function: mongodbatlas:getDataLakePipeline
Arguments:
projectId: ${pipeline.projectId}
name: ${pipeline.name}
Using getDataLakePipeline
Two invocation forms are available. The direct form accepts plain arguments and either blocks until the result value is available, or returns a Promise-wrapped result. The output form accepts Input-wrapped arguments and returns an Output-wrapped result.
function getDataLakePipeline(args: GetDataLakePipelineArgs, opts?: InvokeOptions): Promise<GetDataLakePipelineResult>
function getDataLakePipelineOutput(args: GetDataLakePipelineOutputArgs, opts?: InvokeOptions): Output<GetDataLakePipelineResult>
def get_data_lake_pipeline(name: Optional[str] = None,
project_id: Optional[str] = None,
opts: Optional[InvokeOptions] = None) -> GetDataLakePipelineResult
def get_data_lake_pipeline_output(name: Optional[pulumi.Input[str]] = None,
project_id: Optional[pulumi.Input[str]] = None,
opts: Optional[InvokeOptions] = None) -> Output[GetDataLakePipelineResult]
func LookupDataLakePipeline(ctx *Context, args *LookupDataLakePipelineArgs, opts ...InvokeOption) (*LookupDataLakePipelineResult, error)
func LookupDataLakePipelineOutput(ctx *Context, args *LookupDataLakePipelineOutputArgs, opts ...InvokeOption) LookupDataLakePipelineResultOutput
> Note: This function is named LookupDataLakePipeline
in the Go SDK.
public static class GetDataLakePipeline
{
public static Task<GetDataLakePipelineResult> InvokeAsync(GetDataLakePipelineArgs args, InvokeOptions? opts = null)
public static Output<GetDataLakePipelineResult> Invoke(GetDataLakePipelineInvokeArgs args, InvokeOptions? opts = null)
}
public static CompletableFuture<GetDataLakePipelineResult> getDataLakePipeline(GetDataLakePipelineArgs args, InvokeOptions options)
// Output-based functions aren't available in Java yet
fn::invoke:
function: mongodbatlas:index/getDataLakePipeline:getDataLakePipeline
arguments:
# arguments dictionary
The following arguments are supported:
- name str
- Name of the Atlas Data Lake Pipeline.
- project_
id str - The unique ID for the project to create a Data Lake Pipeline.
getDataLakePipeline Result
The following output properties are available:
- Created
Date string - Timestamp that indicates when the Data Lake Pipeline was created.
- Id string
- Unique 24-hexadecimal digit string that identifies the Data Lake Pipeline.
- Ingestion
Schedules List<GetData Lake Pipeline Ingestion Schedule> - List of backup schedule policy items that you can use as a Data Lake Pipeline source.
ingestion_schedules.#.id
- Unique 24-hexadecimal digit string that identifies this backup policy item.ingestion_schedules.#.frequency_type
- Human-readable label that identifies the frequency type associated with the backup policy.ingestion_schedules.#.frequency_interval
- Number that indicates the frequency interval for a set of snapshots.ingestion_schedules.#.retention_unit
- Unit of time in which MongoDB Atlas measures snapshot retention.ingestion_schedules.#.retention_value
- Duration in days, weeks, or months that MongoDB Atlas retains the snapshot.
- Last
Updated Date string - Timestamp that indicates the last time that the Data Lake Pipeline was updated.
- Name string
- Project
Id string - Unique 24-hexadecimal character string that identifies the project.
policyItemId
- Unique 24-hexadecimal character string that identifies a policy item.
- Sinks
List<Get
Data Lake Pipeline Sink> - Snapshots
List<Get
Data Lake Pipeline Snapshot> - List of backup snapshots that you can use to trigger an on demand pipeline run.
snapshots.#.id
- Unique 24-hexadecimal digit string that identifies the snapshot.snapshots.#.provider
- Human-readable label that identifies the cloud provider that stores this snapshot.snapshots.#.created_at
- Date and time when MongoDB Atlas took the snapshot.snapshots.#.expires_at
- Date and time when MongoDB Atlas deletes the snapshot.snapshots.#.frequency_type
- Human-readable label that identifies how often this snapshot triggers.snapshots.#.master_key
- Unique string that identifies the Amazon Web Services (AWS) Key Management Service (KMS) Customer Master Key (CMK) used to encrypt the snapshot.snapshots.#.mongod_version
- Version of the MongoDB host that this snapshot backs up.snapshots.#.replica_set_name
- Human-readable label that identifies the replica set from which MongoDB Atlas took this snapshot.snapshots.#.type
- Human-readable label that categorizes the cluster as a replica set or sharded cluster.snapshots.#.snapshot_type
- Human-readable label that identifies when this snapshot triggers.snapshots.#.status
- Human-readable label that indicates the stage of the backup process for this snapshot.snapshots.#.size
- List of backup snapshots that you can use to trigger an on demand pipeline run.snapshots.#.copy_region
- List that identifies the regions to which MongoDB Atlas copies the snapshot.snapshots.#.policies
- List that contains unique identifiers for the policy items.
- Sources
List<Get
Data Lake Pipeline Source> - State string
- State of this Data Lake Pipeline.
- Transformations
List<Get
Data Lake Pipeline Transformation> - Fields to be excluded for this Data Lake Pipeline.
transformations.#.field
- Key in the document.transformations.#.type
- Type of transformation applied during the export of the namespace in a Data Lake Pipeline.
- Created
Date string - Timestamp that indicates when the Data Lake Pipeline was created.
- Id string
- Unique 24-hexadecimal digit string that identifies the Data Lake Pipeline.
- Ingestion
Schedules []GetData Lake Pipeline Ingestion Schedule - List of backup schedule policy items that you can use as a Data Lake Pipeline source.
ingestion_schedules.#.id
- Unique 24-hexadecimal digit string that identifies this backup policy item.ingestion_schedules.#.frequency_type
- Human-readable label that identifies the frequency type associated with the backup policy.ingestion_schedules.#.frequency_interval
- Number that indicates the frequency interval for a set of snapshots.ingestion_schedules.#.retention_unit
- Unit of time in which MongoDB Atlas measures snapshot retention.ingestion_schedules.#.retention_value
- Duration in days, weeks, or months that MongoDB Atlas retains the snapshot.
- Last
Updated Date string - Timestamp that indicates the last time that the Data Lake Pipeline was updated.
- Name string
- Project
Id string - Unique 24-hexadecimal character string that identifies the project.
policyItemId
- Unique 24-hexadecimal character string that identifies a policy item.
- Sinks
[]Get
Data Lake Pipeline Sink - Snapshots
[]Get
Data Lake Pipeline Snapshot - List of backup snapshots that you can use to trigger an on demand pipeline run.
snapshots.#.id
- Unique 24-hexadecimal digit string that identifies the snapshot.snapshots.#.provider
- Human-readable label that identifies the cloud provider that stores this snapshot.snapshots.#.created_at
- Date and time when MongoDB Atlas took the snapshot.snapshots.#.expires_at
- Date and time when MongoDB Atlas deletes the snapshot.snapshots.#.frequency_type
- Human-readable label that identifies how often this snapshot triggers.snapshots.#.master_key
- Unique string that identifies the Amazon Web Services (AWS) Key Management Service (KMS) Customer Master Key (CMK) used to encrypt the snapshot.snapshots.#.mongod_version
- Version of the MongoDB host that this snapshot backs up.snapshots.#.replica_set_name
- Human-readable label that identifies the replica set from which MongoDB Atlas took this snapshot.snapshots.#.type
- Human-readable label that categorizes the cluster as a replica set or sharded cluster.snapshots.#.snapshot_type
- Human-readable label that identifies when this snapshot triggers.snapshots.#.status
- Human-readable label that indicates the stage of the backup process for this snapshot.snapshots.#.size
- List of backup snapshots that you can use to trigger an on demand pipeline run.snapshots.#.copy_region
- List that identifies the regions to which MongoDB Atlas copies the snapshot.snapshots.#.policies
- List that contains unique identifiers for the policy items.
- Sources
[]Get
Data Lake Pipeline Source - State string
- State of this Data Lake Pipeline.
- Transformations
[]Get
Data Lake Pipeline Transformation - Fields to be excluded for this Data Lake Pipeline.
transformations.#.field
- Key in the document.transformations.#.type
- Type of transformation applied during the export of the namespace in a Data Lake Pipeline.
- created
Date String - Timestamp that indicates when the Data Lake Pipeline was created.
- id String
- Unique 24-hexadecimal digit string that identifies the Data Lake Pipeline.
- ingestion
Schedules List<GetData Lake Pipeline Ingestion Schedule> - List of backup schedule policy items that you can use as a Data Lake Pipeline source.
ingestion_schedules.#.id
- Unique 24-hexadecimal digit string that identifies this backup policy item.ingestion_schedules.#.frequency_type
- Human-readable label that identifies the frequency type associated with the backup policy.ingestion_schedules.#.frequency_interval
- Number that indicates the frequency interval for a set of snapshots.ingestion_schedules.#.retention_unit
- Unit of time in which MongoDB Atlas measures snapshot retention.ingestion_schedules.#.retention_value
- Duration in days, weeks, or months that MongoDB Atlas retains the snapshot.
- last
Updated Date String - Timestamp that indicates the last time that the Data Lake Pipeline was updated.
- name String
- project
Id String - Unique 24-hexadecimal character string that identifies the project.
policyItemId
- Unique 24-hexadecimal character string that identifies a policy item.
- sinks
List<Get
Data Lake Pipeline Sink> - snapshots
List<Get
Data Lake Pipeline Snapshot> - List of backup snapshots that you can use to trigger an on demand pipeline run.
snapshots.#.id
- Unique 24-hexadecimal digit string that identifies the snapshot.snapshots.#.provider
- Human-readable label that identifies the cloud provider that stores this snapshot.snapshots.#.created_at
- Date and time when MongoDB Atlas took the snapshot.snapshots.#.expires_at
- Date and time when MongoDB Atlas deletes the snapshot.snapshots.#.frequency_type
- Human-readable label that identifies how often this snapshot triggers.snapshots.#.master_key
- Unique string that identifies the Amazon Web Services (AWS) Key Management Service (KMS) Customer Master Key (CMK) used to encrypt the snapshot.snapshots.#.mongod_version
- Version of the MongoDB host that this snapshot backs up.snapshots.#.replica_set_name
- Human-readable label that identifies the replica set from which MongoDB Atlas took this snapshot.snapshots.#.type
- Human-readable label that categorizes the cluster as a replica set or sharded cluster.snapshots.#.snapshot_type
- Human-readable label that identifies when this snapshot triggers.snapshots.#.status
- Human-readable label that indicates the stage of the backup process for this snapshot.snapshots.#.size
- List of backup snapshots that you can use to trigger an on demand pipeline run.snapshots.#.copy_region
- List that identifies the regions to which MongoDB Atlas copies the snapshot.snapshots.#.policies
- List that contains unique identifiers for the policy items.
- sources
List<Get
Data Lake Pipeline Source> - state String
- State of this Data Lake Pipeline.
- transformations
List<Get
Data Lake Pipeline Transformation> - Fields to be excluded for this Data Lake Pipeline.
transformations.#.field
- Key in the document.transformations.#.type
- Type of transformation applied during the export of the namespace in a Data Lake Pipeline.
- created
Date string - Timestamp that indicates when the Data Lake Pipeline was created.
- id string
- Unique 24-hexadecimal digit string that identifies the Data Lake Pipeline.
- ingestion
Schedules GetData Lake Pipeline Ingestion Schedule[] - List of backup schedule policy items that you can use as a Data Lake Pipeline source.
ingestion_schedules.#.id
- Unique 24-hexadecimal digit string that identifies this backup policy item.ingestion_schedules.#.frequency_type
- Human-readable label that identifies the frequency type associated with the backup policy.ingestion_schedules.#.frequency_interval
- Number that indicates the frequency interval for a set of snapshots.ingestion_schedules.#.retention_unit
- Unit of time in which MongoDB Atlas measures snapshot retention.ingestion_schedules.#.retention_value
- Duration in days, weeks, or months that MongoDB Atlas retains the snapshot.
- last
Updated Date string - Timestamp that indicates the last time that the Data Lake Pipeline was updated.
- name string
- project
Id string - Unique 24-hexadecimal character string that identifies the project.
policyItemId
- Unique 24-hexadecimal character string that identifies a policy item.
- sinks
Get
Data Lake Pipeline Sink[] - snapshots
Get
Data Lake Pipeline Snapshot[] - List of backup snapshots that you can use to trigger an on demand pipeline run.
snapshots.#.id
- Unique 24-hexadecimal digit string that identifies the snapshot.snapshots.#.provider
- Human-readable label that identifies the cloud provider that stores this snapshot.snapshots.#.created_at
- Date and time when MongoDB Atlas took the snapshot.snapshots.#.expires_at
- Date and time when MongoDB Atlas deletes the snapshot.snapshots.#.frequency_type
- Human-readable label that identifies how often this snapshot triggers.snapshots.#.master_key
- Unique string that identifies the Amazon Web Services (AWS) Key Management Service (KMS) Customer Master Key (CMK) used to encrypt the snapshot.snapshots.#.mongod_version
- Version of the MongoDB host that this snapshot backs up.snapshots.#.replica_set_name
- Human-readable label that identifies the replica set from which MongoDB Atlas took this snapshot.snapshots.#.type
- Human-readable label that categorizes the cluster as a replica set or sharded cluster.snapshots.#.snapshot_type
- Human-readable label that identifies when this snapshot triggers.snapshots.#.status
- Human-readable label that indicates the stage of the backup process for this snapshot.snapshots.#.size
- List of backup snapshots that you can use to trigger an on demand pipeline run.snapshots.#.copy_region
- List that identifies the regions to which MongoDB Atlas copies the snapshot.snapshots.#.policies
- List that contains unique identifiers for the policy items.
- sources
Get
Data Lake Pipeline Source[] - state string
- State of this Data Lake Pipeline.
- transformations
Get
Data Lake Pipeline Transformation[] - Fields to be excluded for this Data Lake Pipeline.
transformations.#.field
- Key in the document.transformations.#.type
- Type of transformation applied during the export of the namespace in a Data Lake Pipeline.
- created_
date str - Timestamp that indicates when the Data Lake Pipeline was created.
- id str
- Unique 24-hexadecimal digit string that identifies the Data Lake Pipeline.
- ingestion_
schedules Sequence[GetData Lake Pipeline Ingestion Schedule] - List of backup schedule policy items that you can use as a Data Lake Pipeline source.
ingestion_schedules.#.id
- Unique 24-hexadecimal digit string that identifies this backup policy item.ingestion_schedules.#.frequency_type
- Human-readable label that identifies the frequency type associated with the backup policy.ingestion_schedules.#.frequency_interval
- Number that indicates the frequency interval for a set of snapshots.ingestion_schedules.#.retention_unit
- Unit of time in which MongoDB Atlas measures snapshot retention.ingestion_schedules.#.retention_value
- Duration in days, weeks, or months that MongoDB Atlas retains the snapshot.
- last_
updated_date str - Timestamp that indicates the last time that the Data Lake Pipeline was updated.
- name str
- project_
id str - Unique 24-hexadecimal character string that identifies the project.
policyItemId
- Unique 24-hexadecimal character string that identifies a policy item.
- sinks
Sequence[Get
Data Lake Pipeline Sink] - snapshots
Sequence[Get
Data Lake Pipeline Snapshot] - List of backup snapshots that you can use to trigger an on demand pipeline run.
snapshots.#.id
- Unique 24-hexadecimal digit string that identifies the snapshot.snapshots.#.provider
- Human-readable label that identifies the cloud provider that stores this snapshot.snapshots.#.created_at
- Date and time when MongoDB Atlas took the snapshot.snapshots.#.expires_at
- Date and time when MongoDB Atlas deletes the snapshot.snapshots.#.frequency_type
- Human-readable label that identifies how often this snapshot triggers.snapshots.#.master_key
- Unique string that identifies the Amazon Web Services (AWS) Key Management Service (KMS) Customer Master Key (CMK) used to encrypt the snapshot.snapshots.#.mongod_version
- Version of the MongoDB host that this snapshot backs up.snapshots.#.replica_set_name
- Human-readable label that identifies the replica set from which MongoDB Atlas took this snapshot.snapshots.#.type
- Human-readable label that categorizes the cluster as a replica set or sharded cluster.snapshots.#.snapshot_type
- Human-readable label that identifies when this snapshot triggers.snapshots.#.status
- Human-readable label that indicates the stage of the backup process for this snapshot.snapshots.#.size
- List of backup snapshots that you can use to trigger an on demand pipeline run.snapshots.#.copy_region
- List that identifies the regions to which MongoDB Atlas copies the snapshot.snapshots.#.policies
- List that contains unique identifiers for the policy items.
- sources
Sequence[Get
Data Lake Pipeline Source] - state str
- State of this Data Lake Pipeline.
- transformations
Sequence[Get
Data Lake Pipeline Transformation] - Fields to be excluded for this Data Lake Pipeline.
transformations.#.field
- Key in the document.transformations.#.type
- Type of transformation applied during the export of the namespace in a Data Lake Pipeline.
- created
Date String - Timestamp that indicates when the Data Lake Pipeline was created.
- id String
- Unique 24-hexadecimal digit string that identifies the Data Lake Pipeline.
- ingestion
Schedules List<Property Map> - List of backup schedule policy items that you can use as a Data Lake Pipeline source.
ingestion_schedules.#.id
- Unique 24-hexadecimal digit string that identifies this backup policy item.ingestion_schedules.#.frequency_type
- Human-readable label that identifies the frequency type associated with the backup policy.ingestion_schedules.#.frequency_interval
- Number that indicates the frequency interval for a set of snapshots.ingestion_schedules.#.retention_unit
- Unit of time in which MongoDB Atlas measures snapshot retention.ingestion_schedules.#.retention_value
- Duration in days, weeks, or months that MongoDB Atlas retains the snapshot.
- last
Updated Date String - Timestamp that indicates the last time that the Data Lake Pipeline was updated.
- name String
- project
Id String - Unique 24-hexadecimal character string that identifies the project.
policyItemId
- Unique 24-hexadecimal character string that identifies a policy item.
- sinks List<Property Map>
- snapshots List<Property Map>
- List of backup snapshots that you can use to trigger an on demand pipeline run.
snapshots.#.id
- Unique 24-hexadecimal digit string that identifies the snapshot.snapshots.#.provider
- Human-readable label that identifies the cloud provider that stores this snapshot.snapshots.#.created_at
- Date and time when MongoDB Atlas took the snapshot.snapshots.#.expires_at
- Date and time when MongoDB Atlas deletes the snapshot.snapshots.#.frequency_type
- Human-readable label that identifies how often this snapshot triggers.snapshots.#.master_key
- Unique string that identifies the Amazon Web Services (AWS) Key Management Service (KMS) Customer Master Key (CMK) used to encrypt the snapshot.snapshots.#.mongod_version
- Version of the MongoDB host that this snapshot backs up.snapshots.#.replica_set_name
- Human-readable label that identifies the replica set from which MongoDB Atlas took this snapshot.snapshots.#.type
- Human-readable label that categorizes the cluster as a replica set or sharded cluster.snapshots.#.snapshot_type
- Human-readable label that identifies when this snapshot triggers.snapshots.#.status
- Human-readable label that indicates the stage of the backup process for this snapshot.snapshots.#.size
- List of backup snapshots that you can use to trigger an on demand pipeline run.snapshots.#.copy_region
- List that identifies the regions to which MongoDB Atlas copies the snapshot.snapshots.#.policies
- List that contains unique identifiers for the policy items.
- sources List<Property Map>
- state String
- State of this Data Lake Pipeline.
- transformations List<Property Map>
- Fields to be excluded for this Data Lake Pipeline.
transformations.#.field
- Key in the document.transformations.#.type
- Type of transformation applied during the export of the namespace in a Data Lake Pipeline.
Supporting Types
GetDataLakePipelineIngestionSchedule
- Frequency
Interval int - Frequency
Type string - Id string
- Unique 24-hexadecimal digit string that identifies the Data Lake Pipeline.
- Retention
Unit string - Retention
Value int
- Frequency
Interval int - Frequency
Type string - Id string
- Unique 24-hexadecimal digit string that identifies the Data Lake Pipeline.
- Retention
Unit string - Retention
Value int
- frequency
Interval Integer - frequency
Type String - id String
- Unique 24-hexadecimal digit string that identifies the Data Lake Pipeline.
- retention
Unit String - retention
Value Integer
- frequency
Interval number - frequency
Type string - id string
- Unique 24-hexadecimal digit string that identifies the Data Lake Pipeline.
- retention
Unit string - retention
Value number
- frequency_
interval int - frequency_
type str - id str
- Unique 24-hexadecimal digit string that identifies the Data Lake Pipeline.
- retention_
unit str - retention_
value int
- frequency
Interval Number - frequency
Type String - id String
- Unique 24-hexadecimal digit string that identifies the Data Lake Pipeline.
- retention
Unit String - retention
Value Number
GetDataLakePipelineSink
- Partition
Fields List<GetData Lake Pipeline Sink Partition Field> - Ordered fields used to physically organize data in the destination.
partition_fields.#.field_name
- Human-readable label that identifies the field name used to partition data.partition_fields.#.order
- Sequence in which MongoDB Atlas slices the collection data to create partitions. The resource expresses this sequence starting with zero.
- Provider string
- Target cloud provider for this Data Lake Pipeline.
- Region string
- Target cloud provider region for this Data Lake Pipeline. Supported cloud provider regions.
- Type string
- Type of ingestion source of this Data Lake Pipeline.
- Partition
Fields []GetData Lake Pipeline Sink Partition Field - Ordered fields used to physically organize data in the destination.
partition_fields.#.field_name
- Human-readable label that identifies the field name used to partition data.partition_fields.#.order
- Sequence in which MongoDB Atlas slices the collection data to create partitions. The resource expresses this sequence starting with zero.
- Provider string
- Target cloud provider for this Data Lake Pipeline.
- Region string
- Target cloud provider region for this Data Lake Pipeline. Supported cloud provider regions.
- Type string
- Type of ingestion source of this Data Lake Pipeline.
- partition
Fields List<GetData Lake Pipeline Sink Partition Field> - Ordered fields used to physically organize data in the destination.
partition_fields.#.field_name
- Human-readable label that identifies the field name used to partition data.partition_fields.#.order
- Sequence in which MongoDB Atlas slices the collection data to create partitions. The resource expresses this sequence starting with zero.
- provider String
- Target cloud provider for this Data Lake Pipeline.
- region String
- Target cloud provider region for this Data Lake Pipeline. Supported cloud provider regions.
- type String
- Type of ingestion source of this Data Lake Pipeline.
- partition
Fields GetData Lake Pipeline Sink Partition Field[] - Ordered fields used to physically organize data in the destination.
partition_fields.#.field_name
- Human-readable label that identifies the field name used to partition data.partition_fields.#.order
- Sequence in which MongoDB Atlas slices the collection data to create partitions. The resource expresses this sequence starting with zero.
- provider string
- Target cloud provider for this Data Lake Pipeline.
- region string
- Target cloud provider region for this Data Lake Pipeline. Supported cloud provider regions.
- type string
- Type of ingestion source of this Data Lake Pipeline.
- partition_
fields Sequence[GetData Lake Pipeline Sink Partition Field] - Ordered fields used to physically organize data in the destination.
partition_fields.#.field_name
- Human-readable label that identifies the field name used to partition data.partition_fields.#.order
- Sequence in which MongoDB Atlas slices the collection data to create partitions. The resource expresses this sequence starting with zero.
- provider str
- Target cloud provider for this Data Lake Pipeline.
- region str
- Target cloud provider region for this Data Lake Pipeline. Supported cloud provider regions.
- type str
- Type of ingestion source of this Data Lake Pipeline.
- partition
Fields List<Property Map> - Ordered fields used to physically organize data in the destination.
partition_fields.#.field_name
- Human-readable label that identifies the field name used to partition data.partition_fields.#.order
- Sequence in which MongoDB Atlas slices the collection data to create partitions. The resource expresses this sequence starting with zero.
- provider String
- Target cloud provider for this Data Lake Pipeline.
- region String
- Target cloud provider region for this Data Lake Pipeline. Supported cloud provider regions.
- type String
- Type of ingestion source of this Data Lake Pipeline.
GetDataLakePipelineSinkPartitionField
- field_
name str - order int
GetDataLakePipelineSnapshot
- Copy
Region string - Created
At string - Expires
At string - Frequency
Type string - Id string
- Unique 24-hexadecimal digit string that identifies the Data Lake Pipeline.
- Master
Key string - Mongod
Version string - Policies List<string>
- Provider string
- Target cloud provider for this Data Lake Pipeline.
- Replica
Set Name string - Size int
- Status string
- Type string
- Type of ingestion source of this Data Lake Pipeline.
- Copy
Region string - Created
At string - Expires
At string - Frequency
Type string - Id string
- Unique 24-hexadecimal digit string that identifies the Data Lake Pipeline.
- Master
Key string - Mongod
Version string - Policies []string
- Provider string
- Target cloud provider for this Data Lake Pipeline.
- Replica
Set Name string - Size int
- Status string
- Type string
- Type of ingestion source of this Data Lake Pipeline.
- copy
Region String - created
At String - expires
At String - frequency
Type String - id String
- Unique 24-hexadecimal digit string that identifies the Data Lake Pipeline.
- master
Key String - mongod
Version String - policies List<String>
- provider String
- Target cloud provider for this Data Lake Pipeline.
- replica
Set Name String - size Integer
- status String
- type String
- Type of ingestion source of this Data Lake Pipeline.
- copy
Region string - created
At string - expires
At string - frequency
Type string - id string
- Unique 24-hexadecimal digit string that identifies the Data Lake Pipeline.
- master
Key string - mongod
Version string - policies string[]
- provider string
- Target cloud provider for this Data Lake Pipeline.
- replica
Set Name string - size number
- status string
- type string
- Type of ingestion source of this Data Lake Pipeline.
- copy_
region str - created_
at str - expires_
at str - frequency_
type str - id str
- Unique 24-hexadecimal digit string that identifies the Data Lake Pipeline.
- master_
key str - mongod_
version str - policies Sequence[str]
- provider str
- Target cloud provider for this Data Lake Pipeline.
- replica_
set_name str - size int
- status str
- type str
- Type of ingestion source of this Data Lake Pipeline.
- copy
Region String - created
At String - expires
At String - frequency
Type String - id String
- Unique 24-hexadecimal digit string that identifies the Data Lake Pipeline.
- master
Key String - mongod
Version String - policies List<String>
- provider String
- Target cloud provider for this Data Lake Pipeline.
- replica
Set Name String - size Number
- status String
- type String
- Type of ingestion source of this Data Lake Pipeline.
GetDataLakePipelineSource
- Cluster
Name string - Human-readable name that identifies the cluster.
- Collection
Name string - Human-readable name that identifies the collection.
- Database
Name string - Human-readable name that identifies the database.
- Project
Id string - The unique ID for the project to create a Data Lake Pipeline.
- Type string
- Type of ingestion source of this Data Lake Pipeline.
- Cluster
Name string - Human-readable name that identifies the cluster.
- Collection
Name string - Human-readable name that identifies the collection.
- Database
Name string - Human-readable name that identifies the database.
- Project
Id string - The unique ID for the project to create a Data Lake Pipeline.
- Type string
- Type of ingestion source of this Data Lake Pipeline.
- cluster
Name String - Human-readable name that identifies the cluster.
- collection
Name String - Human-readable name that identifies the collection.
- database
Name String - Human-readable name that identifies the database.
- project
Id String - The unique ID for the project to create a Data Lake Pipeline.
- type String
- Type of ingestion source of this Data Lake Pipeline.
- cluster
Name string - Human-readable name that identifies the cluster.
- collection
Name string - Human-readable name that identifies the collection.
- database
Name string - Human-readable name that identifies the database.
- project
Id string - The unique ID for the project to create a Data Lake Pipeline.
- type string
- Type of ingestion source of this Data Lake Pipeline.
- cluster_
name str - Human-readable name that identifies the cluster.
- collection_
name str - Human-readable name that identifies the collection.
- database_
name str - Human-readable name that identifies the database.
- project_
id str - The unique ID for the project to create a Data Lake Pipeline.
- type str
- Type of ingestion source of this Data Lake Pipeline.
- cluster
Name String - Human-readable name that identifies the cluster.
- collection
Name String - Human-readable name that identifies the collection.
- database
Name String - Human-readable name that identifies the database.
- project
Id String - The unique ID for the project to create a Data Lake Pipeline.
- type String
- Type of ingestion source of this Data Lake Pipeline.
GetDataLakePipelineTransformation
Package Details
- Repository
- MongoDB Atlas pulumi/pulumi-mongodbatlas
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the
mongodbatlas
Terraform Provider.