aws.lambda.EventSourceMapping
Explore with Pulumi AI
Provides a Lambda event source mapping. This allows Lambda functions to get events from Kinesis, DynamoDB, SQS, Amazon MQ and Managed Streaming for Apache Kafka (MSK).
For information about Lambda and how to use it, see What is AWS Lambda?. For information about event source mappings, see CreateEventSourceMapping in the API docs.
Example Usage
DynamoDB
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const example = new aws.lambda.EventSourceMapping("example", {
eventSourceArn: exampleAwsDynamodbTable.streamArn,
functionName: exampleAwsLambdaFunction.arn,
startingPosition: "LATEST",
});
import pulumi
import pulumi_aws as aws
example = aws.lambda_.EventSourceMapping("example",
event_source_arn=example_aws_dynamodb_table["streamArn"],
function_name=example_aws_lambda_function["arn"],
starting_position="LATEST")
package main
import (
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/lambda"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := lambda.NewEventSourceMapping(ctx, "example", &lambda.EventSourceMappingArgs{
EventSourceArn: pulumi.Any(exampleAwsDynamodbTable.StreamArn),
FunctionName: pulumi.Any(exampleAwsLambdaFunction.Arn),
StartingPosition: pulumi.String("LATEST"),
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
var example = new Aws.Lambda.EventSourceMapping("example", new()
{
EventSourceArn = exampleAwsDynamodbTable.StreamArn,
FunctionName = exampleAwsLambdaFunction.Arn,
StartingPosition = "LATEST",
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.lambda.EventSourceMapping;
import com.pulumi.aws.lambda.EventSourceMappingArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var example = new EventSourceMapping("example", EventSourceMappingArgs.builder()
.eventSourceArn(exampleAwsDynamodbTable.streamArn())
.functionName(exampleAwsLambdaFunction.arn())
.startingPosition("LATEST")
.build());
}
}
resources:
example:
type: aws:lambda:EventSourceMapping
properties:
eventSourceArn: ${exampleAwsDynamodbTable.streamArn}
functionName: ${exampleAwsLambdaFunction.arn}
startingPosition: LATEST
Kinesis
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const example = new aws.lambda.EventSourceMapping("example", {
eventSourceArn: exampleAwsKinesisStream.arn,
functionName: exampleAwsLambdaFunction.arn,
startingPosition: "LATEST",
});
import pulumi
import pulumi_aws as aws
example = aws.lambda_.EventSourceMapping("example",
event_source_arn=example_aws_kinesis_stream["arn"],
function_name=example_aws_lambda_function["arn"],
starting_position="LATEST")
package main
import (
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/lambda"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := lambda.NewEventSourceMapping(ctx, "example", &lambda.EventSourceMappingArgs{
EventSourceArn: pulumi.Any(exampleAwsKinesisStream.Arn),
FunctionName: pulumi.Any(exampleAwsLambdaFunction.Arn),
StartingPosition: pulumi.String("LATEST"),
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
var example = new Aws.Lambda.EventSourceMapping("example", new()
{
EventSourceArn = exampleAwsKinesisStream.Arn,
FunctionName = exampleAwsLambdaFunction.Arn,
StartingPosition = "LATEST",
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.lambda.EventSourceMapping;
import com.pulumi.aws.lambda.EventSourceMappingArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var example = new EventSourceMapping("example", EventSourceMappingArgs.builder()
.eventSourceArn(exampleAwsKinesisStream.arn())
.functionName(exampleAwsLambdaFunction.arn())
.startingPosition("LATEST")
.build());
}
}
resources:
example:
type: aws:lambda:EventSourceMapping
properties:
eventSourceArn: ${exampleAwsKinesisStream.arn}
functionName: ${exampleAwsLambdaFunction.arn}
startingPosition: LATEST
Managed Streaming for Apache Kafka (MSK)
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const example = new aws.lambda.EventSourceMapping("example", {
eventSourceArn: exampleAwsMskCluster.arn,
functionName: exampleAwsLambdaFunction.arn,
topics: ["Example"],
startingPosition: "TRIM_HORIZON",
});
import pulumi
import pulumi_aws as aws
example = aws.lambda_.EventSourceMapping("example",
event_source_arn=example_aws_msk_cluster["arn"],
function_name=example_aws_lambda_function["arn"],
topics=["Example"],
starting_position="TRIM_HORIZON")
package main
import (
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/lambda"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := lambda.NewEventSourceMapping(ctx, "example", &lambda.EventSourceMappingArgs{
EventSourceArn: pulumi.Any(exampleAwsMskCluster.Arn),
FunctionName: pulumi.Any(exampleAwsLambdaFunction.Arn),
Topics: pulumi.StringArray{
pulumi.String("Example"),
},
StartingPosition: pulumi.String("TRIM_HORIZON"),
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
var example = new Aws.Lambda.EventSourceMapping("example", new()
{
EventSourceArn = exampleAwsMskCluster.Arn,
FunctionName = exampleAwsLambdaFunction.Arn,
Topics = new[]
{
"Example",
},
StartingPosition = "TRIM_HORIZON",
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.lambda.EventSourceMapping;
import com.pulumi.aws.lambda.EventSourceMappingArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var example = new EventSourceMapping("example", EventSourceMappingArgs.builder()
.eventSourceArn(exampleAwsMskCluster.arn())
.functionName(exampleAwsLambdaFunction.arn())
.topics("Example")
.startingPosition("TRIM_HORIZON")
.build());
}
}
resources:
example:
type: aws:lambda:EventSourceMapping
properties:
eventSourceArn: ${exampleAwsMskCluster.arn}
functionName: ${exampleAwsLambdaFunction.arn}
topics:
- Example
startingPosition: TRIM_HORIZON
Self Managed Apache Kafka
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const example = new aws.lambda.EventSourceMapping("example", {
functionName: exampleAwsLambdaFunction.arn,
topics: ["Example"],
startingPosition: "TRIM_HORIZON",
selfManagedEventSource: {
endpoints: {
KAFKA_BOOTSTRAP_SERVERS: "kafka1.example.com:9092,kafka2.example.com:9092",
},
},
sourceAccessConfigurations: [
{
type: "VPC_SUBNET",
uri: "subnet:subnet-example1",
},
{
type: "VPC_SUBNET",
uri: "subnet:subnet-example2",
},
{
type: "VPC_SECURITY_GROUP",
uri: "security_group:sg-example",
},
],
});
import pulumi
import pulumi_aws as aws
example = aws.lambda_.EventSourceMapping("example",
function_name=example_aws_lambda_function["arn"],
topics=["Example"],
starting_position="TRIM_HORIZON",
self_managed_event_source={
"endpoints": {
"kafk_a__bootstra_p__servers": "kafka1.example.com:9092,kafka2.example.com:9092",
},
},
source_access_configurations=[
{
"type": "VPC_SUBNET",
"uri": "subnet:subnet-example1",
},
{
"type": "VPC_SUBNET",
"uri": "subnet:subnet-example2",
},
{
"type": "VPC_SECURITY_GROUP",
"uri": "security_group:sg-example",
},
])
package main
import (
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/lambda"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := lambda.NewEventSourceMapping(ctx, "example", &lambda.EventSourceMappingArgs{
FunctionName: pulumi.Any(exampleAwsLambdaFunction.Arn),
Topics: pulumi.StringArray{
pulumi.String("Example"),
},
StartingPosition: pulumi.String("TRIM_HORIZON"),
SelfManagedEventSource: &lambda.EventSourceMappingSelfManagedEventSourceArgs{
Endpoints: pulumi.StringMap{
"KAFKA_BOOTSTRAP_SERVERS": pulumi.String("kafka1.example.com:9092,kafka2.example.com:9092"),
},
},
SourceAccessConfigurations: lambda.EventSourceMappingSourceAccessConfigurationArray{
&lambda.EventSourceMappingSourceAccessConfigurationArgs{
Type: pulumi.String("VPC_SUBNET"),
Uri: pulumi.String("subnet:subnet-example1"),
},
&lambda.EventSourceMappingSourceAccessConfigurationArgs{
Type: pulumi.String("VPC_SUBNET"),
Uri: pulumi.String("subnet:subnet-example2"),
},
&lambda.EventSourceMappingSourceAccessConfigurationArgs{
Type: pulumi.String("VPC_SECURITY_GROUP"),
Uri: pulumi.String("security_group:sg-example"),
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
var example = new Aws.Lambda.EventSourceMapping("example", new()
{
FunctionName = exampleAwsLambdaFunction.Arn,
Topics = new[]
{
"Example",
},
StartingPosition = "TRIM_HORIZON",
SelfManagedEventSource = new Aws.Lambda.Inputs.EventSourceMappingSelfManagedEventSourceArgs
{
Endpoints =
{
{ "KAFKA_BOOTSTRAP_SERVERS", "kafka1.example.com:9092,kafka2.example.com:9092" },
},
},
SourceAccessConfigurations = new[]
{
new Aws.Lambda.Inputs.EventSourceMappingSourceAccessConfigurationArgs
{
Type = "VPC_SUBNET",
Uri = "subnet:subnet-example1",
},
new Aws.Lambda.Inputs.EventSourceMappingSourceAccessConfigurationArgs
{
Type = "VPC_SUBNET",
Uri = "subnet:subnet-example2",
},
new Aws.Lambda.Inputs.EventSourceMappingSourceAccessConfigurationArgs
{
Type = "VPC_SECURITY_GROUP",
Uri = "security_group:sg-example",
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.lambda.EventSourceMapping;
import com.pulumi.aws.lambda.EventSourceMappingArgs;
import com.pulumi.aws.lambda.inputs.EventSourceMappingSelfManagedEventSourceArgs;
import com.pulumi.aws.lambda.inputs.EventSourceMappingSourceAccessConfigurationArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var example = new EventSourceMapping("example", EventSourceMappingArgs.builder()
.functionName(exampleAwsLambdaFunction.arn())
.topics("Example")
.startingPosition("TRIM_HORIZON")
.selfManagedEventSource(EventSourceMappingSelfManagedEventSourceArgs.builder()
.endpoints(Map.of("KAFKA_BOOTSTRAP_SERVERS", "kafka1.example.com:9092,kafka2.example.com:9092"))
.build())
.sourceAccessConfigurations(
EventSourceMappingSourceAccessConfigurationArgs.builder()
.type("VPC_SUBNET")
.uri("subnet:subnet-example1")
.build(),
EventSourceMappingSourceAccessConfigurationArgs.builder()
.type("VPC_SUBNET")
.uri("subnet:subnet-example2")
.build(),
EventSourceMappingSourceAccessConfigurationArgs.builder()
.type("VPC_SECURITY_GROUP")
.uri("security_group:sg-example")
.build())
.build());
}
}
resources:
example:
type: aws:lambda:EventSourceMapping
properties:
functionName: ${exampleAwsLambdaFunction.arn}
topics:
- Example
startingPosition: TRIM_HORIZON
selfManagedEventSource:
endpoints:
KAFKA_BOOTSTRAP_SERVERS: kafka1.example.com:9092,kafka2.example.com:9092
sourceAccessConfigurations:
- type: VPC_SUBNET
uri: subnet:subnet-example1
- type: VPC_SUBNET
uri: subnet:subnet-example2
- type: VPC_SECURITY_GROUP
uri: security_group:sg-example
SQS
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const example = new aws.lambda.EventSourceMapping("example", {
eventSourceArn: sqsQueueTest.arn,
functionName: exampleAwsLambdaFunction.arn,
});
import pulumi
import pulumi_aws as aws
example = aws.lambda_.EventSourceMapping("example",
event_source_arn=sqs_queue_test["arn"],
function_name=example_aws_lambda_function["arn"])
package main
import (
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/lambda"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := lambda.NewEventSourceMapping(ctx, "example", &lambda.EventSourceMappingArgs{
EventSourceArn: pulumi.Any(sqsQueueTest.Arn),
FunctionName: pulumi.Any(exampleAwsLambdaFunction.Arn),
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
var example = new Aws.Lambda.EventSourceMapping("example", new()
{
EventSourceArn = sqsQueueTest.Arn,
FunctionName = exampleAwsLambdaFunction.Arn,
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.lambda.EventSourceMapping;
import com.pulumi.aws.lambda.EventSourceMappingArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var example = new EventSourceMapping("example", EventSourceMappingArgs.builder()
.eventSourceArn(sqsQueueTest.arn())
.functionName(exampleAwsLambdaFunction.arn())
.build());
}
}
resources:
example:
type: aws:lambda:EventSourceMapping
properties:
eventSourceArn: ${sqsQueueTest.arn}
functionName: ${exampleAwsLambdaFunction.arn}
SQS with event filter
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const example = new aws.lambda.EventSourceMapping("example", {
eventSourceArn: sqsQueueTest.arn,
functionName: exampleAwsLambdaFunction.arn,
filterCriteria: {
filters: [{
pattern: JSON.stringify({
body: {
Temperature: [{
numeric: [
">",
0,
"<=",
100,
],
}],
Location: ["New York"],
},
}),
}],
},
});
import pulumi
import json
import pulumi_aws as aws
example = aws.lambda_.EventSourceMapping("example",
event_source_arn=sqs_queue_test["arn"],
function_name=example_aws_lambda_function["arn"],
filter_criteria={
"filters": [{
"pattern": json.dumps({
"body": {
"temperature": [{
"numeric": [
">",
0,
"<=",
100,
],
}],
"location": ["New York"],
},
}),
}],
})
package main
import (
"encoding/json"
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/lambda"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
tmpJSON0, err := json.Marshal(map[string]interface{}{
"body": map[string]interface{}{
"Temperature": []map[string]interface{}{
map[string]interface{}{
"numeric": []interface{}{
">",
0,
"<=",
100,
},
},
},
"Location": []string{
"New York",
},
},
})
if err != nil {
return err
}
json0 := string(tmpJSON0)
_, err = lambda.NewEventSourceMapping(ctx, "example", &lambda.EventSourceMappingArgs{
EventSourceArn: pulumi.Any(sqsQueueTest.Arn),
FunctionName: pulumi.Any(exampleAwsLambdaFunction.Arn),
FilterCriteria: &lambda.EventSourceMappingFilterCriteriaArgs{
Filters: lambda.EventSourceMappingFilterCriteriaFilterArray{
&lambda.EventSourceMappingFilterCriteriaFilterArgs{
Pattern: pulumi.String(json0),
},
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using System.Text.Json;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
var example = new Aws.Lambda.EventSourceMapping("example", new()
{
EventSourceArn = sqsQueueTest.Arn,
FunctionName = exampleAwsLambdaFunction.Arn,
FilterCriteria = new Aws.Lambda.Inputs.EventSourceMappingFilterCriteriaArgs
{
Filters = new[]
{
new Aws.Lambda.Inputs.EventSourceMappingFilterCriteriaFilterArgs
{
Pattern = JsonSerializer.Serialize(new Dictionary<string, object?>
{
["body"] = new Dictionary<string, object?>
{
["Temperature"] = new[]
{
new Dictionary<string, object?>
{
["numeric"] = new object?[]
{
">",
0,
"<=",
100,
},
},
},
["Location"] = new[]
{
"New York",
},
},
}),
},
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.lambda.EventSourceMapping;
import com.pulumi.aws.lambda.EventSourceMappingArgs;
import com.pulumi.aws.lambda.inputs.EventSourceMappingFilterCriteriaArgs;
import static com.pulumi.codegen.internal.Serialization.*;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var example = new EventSourceMapping("example", EventSourceMappingArgs.builder()
.eventSourceArn(sqsQueueTest.arn())
.functionName(exampleAwsLambdaFunction.arn())
.filterCriteria(EventSourceMappingFilterCriteriaArgs.builder()
.filters(EventSourceMappingFilterCriteriaFilterArgs.builder()
.pattern(serializeJson(
jsonObject(
jsonProperty("body", jsonObject(
jsonProperty("Temperature", jsonArray(jsonObject(
jsonProperty("numeric", jsonArray(
">",
0,
"<=",
100
))
))),
jsonProperty("Location", jsonArray("New York"))
))
)))
.build())
.build())
.build());
}
}
resources:
example:
type: aws:lambda:EventSourceMapping
properties:
eventSourceArn: ${sqsQueueTest.arn}
functionName: ${exampleAwsLambdaFunction.arn}
filterCriteria:
filters:
- pattern:
fn::toJSON:
body:
Temperature:
- numeric:
- '>'
- 0
- <=
- 100
Location:
- New York
Amazon MQ (ActiveMQ)
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const example = new aws.lambda.EventSourceMapping("example", {
batchSize: 10,
eventSourceArn: exampleAwsMqBroker.arn,
enabled: true,
functionName: exampleAwsLambdaFunction.arn,
queues: "example",
sourceAccessConfigurations: [{
type: "BASIC_AUTH",
uri: exampleAwsSecretsmanagerSecretVersion.arn,
}],
});
import pulumi
import pulumi_aws as aws
example = aws.lambda_.EventSourceMapping("example",
batch_size=10,
event_source_arn=example_aws_mq_broker["arn"],
enabled=True,
function_name=example_aws_lambda_function["arn"],
queues="example",
source_access_configurations=[{
"type": "BASIC_AUTH",
"uri": example_aws_secretsmanager_secret_version["arn"],
}])
package main
import (
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/lambda"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := lambda.NewEventSourceMapping(ctx, "example", &lambda.EventSourceMappingArgs{
BatchSize: pulumi.Int(10),
EventSourceArn: pulumi.Any(exampleAwsMqBroker.Arn),
Enabled: pulumi.Bool(true),
FunctionName: pulumi.Any(exampleAwsLambdaFunction.Arn),
Queues: pulumi.String("example"),
SourceAccessConfigurations: lambda.EventSourceMappingSourceAccessConfigurationArray{
&lambda.EventSourceMappingSourceAccessConfigurationArgs{
Type: pulumi.String("BASIC_AUTH"),
Uri: pulumi.Any(exampleAwsSecretsmanagerSecretVersion.Arn),
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
var example = new Aws.Lambda.EventSourceMapping("example", new()
{
BatchSize = 10,
EventSourceArn = exampleAwsMqBroker.Arn,
Enabled = true,
FunctionName = exampleAwsLambdaFunction.Arn,
Queues = "example",
SourceAccessConfigurations = new[]
{
new Aws.Lambda.Inputs.EventSourceMappingSourceAccessConfigurationArgs
{
Type = "BASIC_AUTH",
Uri = exampleAwsSecretsmanagerSecretVersion.Arn,
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.lambda.EventSourceMapping;
import com.pulumi.aws.lambda.EventSourceMappingArgs;
import com.pulumi.aws.lambda.inputs.EventSourceMappingSourceAccessConfigurationArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var example = new EventSourceMapping("example", EventSourceMappingArgs.builder()
.batchSize(10)
.eventSourceArn(exampleAwsMqBroker.arn())
.enabled(true)
.functionName(exampleAwsLambdaFunction.arn())
.queues("example")
.sourceAccessConfigurations(EventSourceMappingSourceAccessConfigurationArgs.builder()
.type("BASIC_AUTH")
.uri(exampleAwsSecretsmanagerSecretVersion.arn())
.build())
.build());
}
}
resources:
example:
type: aws:lambda:EventSourceMapping
properties:
batchSize: 10
eventSourceArn: ${exampleAwsMqBroker.arn}
enabled: true
functionName: ${exampleAwsLambdaFunction.arn}
queues: example
sourceAccessConfigurations:
- type: BASIC_AUTH
uri: ${exampleAwsSecretsmanagerSecretVersion.arn}
Amazon MQ (RabbitMQ)
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const example = new aws.lambda.EventSourceMapping("example", {
batchSize: 1,
eventSourceArn: exampleAwsMqBroker.arn,
enabled: true,
functionName: exampleAwsLambdaFunction.arn,
queues: "example",
sourceAccessConfigurations: [
{
type: "VIRTUAL_HOST",
uri: "/example",
},
{
type: "BASIC_AUTH",
uri: exampleAwsSecretsmanagerSecretVersion.arn,
},
],
});
import pulumi
import pulumi_aws as aws
example = aws.lambda_.EventSourceMapping("example",
batch_size=1,
event_source_arn=example_aws_mq_broker["arn"],
enabled=True,
function_name=example_aws_lambda_function["arn"],
queues="example",
source_access_configurations=[
{
"type": "VIRTUAL_HOST",
"uri": "/example",
},
{
"type": "BASIC_AUTH",
"uri": example_aws_secretsmanager_secret_version["arn"],
},
])
package main
import (
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/lambda"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := lambda.NewEventSourceMapping(ctx, "example", &lambda.EventSourceMappingArgs{
BatchSize: pulumi.Int(1),
EventSourceArn: pulumi.Any(exampleAwsMqBroker.Arn),
Enabled: pulumi.Bool(true),
FunctionName: pulumi.Any(exampleAwsLambdaFunction.Arn),
Queues: pulumi.String("example"),
SourceAccessConfigurations: lambda.EventSourceMappingSourceAccessConfigurationArray{
&lambda.EventSourceMappingSourceAccessConfigurationArgs{
Type: pulumi.String("VIRTUAL_HOST"),
Uri: pulumi.String("/example"),
},
&lambda.EventSourceMappingSourceAccessConfigurationArgs{
Type: pulumi.String("BASIC_AUTH"),
Uri: pulumi.Any(exampleAwsSecretsmanagerSecretVersion.Arn),
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
var example = new Aws.Lambda.EventSourceMapping("example", new()
{
BatchSize = 1,
EventSourceArn = exampleAwsMqBroker.Arn,
Enabled = true,
FunctionName = exampleAwsLambdaFunction.Arn,
Queues = "example",
SourceAccessConfigurations = new[]
{
new Aws.Lambda.Inputs.EventSourceMappingSourceAccessConfigurationArgs
{
Type = "VIRTUAL_HOST",
Uri = "/example",
},
new Aws.Lambda.Inputs.EventSourceMappingSourceAccessConfigurationArgs
{
Type = "BASIC_AUTH",
Uri = exampleAwsSecretsmanagerSecretVersion.Arn,
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.lambda.EventSourceMapping;
import com.pulumi.aws.lambda.EventSourceMappingArgs;
import com.pulumi.aws.lambda.inputs.EventSourceMappingSourceAccessConfigurationArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var example = new EventSourceMapping("example", EventSourceMappingArgs.builder()
.batchSize(1)
.eventSourceArn(exampleAwsMqBroker.arn())
.enabled(true)
.functionName(exampleAwsLambdaFunction.arn())
.queues("example")
.sourceAccessConfigurations(
EventSourceMappingSourceAccessConfigurationArgs.builder()
.type("VIRTUAL_HOST")
.uri("/example")
.build(),
EventSourceMappingSourceAccessConfigurationArgs.builder()
.type("BASIC_AUTH")
.uri(exampleAwsSecretsmanagerSecretVersion.arn())
.build())
.build());
}
}
resources:
example:
type: aws:lambda:EventSourceMapping
properties:
batchSize: 1
eventSourceArn: ${exampleAwsMqBroker.arn}
enabled: true
functionName: ${exampleAwsLambdaFunction.arn}
queues: example
sourceAccessConfigurations:
- type: VIRTUAL_HOST
uri: /example
- type: BASIC_AUTH
uri: ${exampleAwsSecretsmanagerSecretVersion.arn}
Create EventSourceMapping Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new EventSourceMapping(name: string, args: EventSourceMappingArgs, opts?: CustomResourceOptions);
@overload
def EventSourceMapping(resource_name: str,
args: EventSourceMappingArgs,
opts: Optional[ResourceOptions] = None)
@overload
def EventSourceMapping(resource_name: str,
opts: Optional[ResourceOptions] = None,
function_name: Optional[str] = None,
maximum_record_age_in_seconds: Optional[int] = None,
tumbling_window_in_seconds: Optional[int] = None,
maximum_batching_window_in_seconds: Optional[int] = None,
document_db_event_source_config: Optional[_lambda_.EventSourceMappingDocumentDbEventSourceConfigArgs] = None,
enabled: Optional[bool] = None,
event_source_arn: Optional[str] = None,
filter_criteria: Optional[_lambda_.EventSourceMappingFilterCriteriaArgs] = None,
batch_size: Optional[int] = None,
function_response_types: Optional[Sequence[str]] = None,
maximum_retry_attempts: Optional[int] = None,
destination_config: Optional[_lambda_.EventSourceMappingDestinationConfigArgs] = None,
bisect_batch_on_function_error: Optional[bool] = None,
kms_key_arn: Optional[str] = None,
parallelization_factor: Optional[int] = None,
queues: Optional[str] = None,
scaling_config: Optional[_lambda_.EventSourceMappingScalingConfigArgs] = None,
self_managed_event_source: Optional[_lambda_.EventSourceMappingSelfManagedEventSourceArgs] = None,
self_managed_kafka_event_source_config: Optional[_lambda_.EventSourceMappingSelfManagedKafkaEventSourceConfigArgs] = None,
source_access_configurations: Optional[Sequence[_lambda_.EventSourceMappingSourceAccessConfigurationArgs]] = None,
starting_position: Optional[str] = None,
starting_position_timestamp: Optional[str] = None,
topics: Optional[Sequence[str]] = None,
amazon_managed_kafka_event_source_config: Optional[_lambda_.EventSourceMappingAmazonManagedKafkaEventSourceConfigArgs] = None)
func NewEventSourceMapping(ctx *Context, name string, args EventSourceMappingArgs, opts ...ResourceOption) (*EventSourceMapping, error)
public EventSourceMapping(string name, EventSourceMappingArgs args, CustomResourceOptions? opts = null)
public EventSourceMapping(String name, EventSourceMappingArgs args)
public EventSourceMapping(String name, EventSourceMappingArgs args, CustomResourceOptions options)
type: aws:lambda:EventSourceMapping
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args EventSourceMappingArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args EventSourceMappingArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args EventSourceMappingArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args EventSourceMappingArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args EventSourceMappingArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var eventSourceMappingResource = new Aws.Lambda.EventSourceMapping("eventSourceMappingResource", new()
{
FunctionName = "string",
MaximumRecordAgeInSeconds = 0,
TumblingWindowInSeconds = 0,
MaximumBatchingWindowInSeconds = 0,
DocumentDbEventSourceConfig = new Aws.Lambda.Inputs.EventSourceMappingDocumentDbEventSourceConfigArgs
{
DatabaseName = "string",
CollectionName = "string",
FullDocument = "string",
},
Enabled = false,
EventSourceArn = "string",
FilterCriteria = new Aws.Lambda.Inputs.EventSourceMappingFilterCriteriaArgs
{
Filters = new[]
{
new Aws.Lambda.Inputs.EventSourceMappingFilterCriteriaFilterArgs
{
Pattern = "string",
},
},
},
BatchSize = 0,
FunctionResponseTypes = new[]
{
"string",
},
MaximumRetryAttempts = 0,
DestinationConfig = new Aws.Lambda.Inputs.EventSourceMappingDestinationConfigArgs
{
OnFailure = new Aws.Lambda.Inputs.EventSourceMappingDestinationConfigOnFailureArgs
{
DestinationArn = "string",
},
},
BisectBatchOnFunctionError = false,
KmsKeyArn = "string",
ParallelizationFactor = 0,
Queues = "string",
ScalingConfig = new Aws.Lambda.Inputs.EventSourceMappingScalingConfigArgs
{
MaximumConcurrency = 0,
},
SelfManagedEventSource = new Aws.Lambda.Inputs.EventSourceMappingSelfManagedEventSourceArgs
{
Endpoints =
{
{ "string", "string" },
},
},
SelfManagedKafkaEventSourceConfig = new Aws.Lambda.Inputs.EventSourceMappingSelfManagedKafkaEventSourceConfigArgs
{
ConsumerGroupId = "string",
},
SourceAccessConfigurations = new[]
{
new Aws.Lambda.Inputs.EventSourceMappingSourceAccessConfigurationArgs
{
Type = "string",
Uri = "string",
},
},
StartingPosition = "string",
StartingPositionTimestamp = "string",
Topics = new[]
{
"string",
},
AmazonManagedKafkaEventSourceConfig = new Aws.Lambda.Inputs.EventSourceMappingAmazonManagedKafkaEventSourceConfigArgs
{
ConsumerGroupId = "string",
},
});
example, err := lambda.NewEventSourceMapping(ctx, "eventSourceMappingResource", &lambda.EventSourceMappingArgs{
FunctionName: pulumi.String("string"),
MaximumRecordAgeInSeconds: pulumi.Int(0),
TumblingWindowInSeconds: pulumi.Int(0),
MaximumBatchingWindowInSeconds: pulumi.Int(0),
DocumentDbEventSourceConfig: &lambda.EventSourceMappingDocumentDbEventSourceConfigArgs{
DatabaseName: pulumi.String("string"),
CollectionName: pulumi.String("string"),
FullDocument: pulumi.String("string"),
},
Enabled: pulumi.Bool(false),
EventSourceArn: pulumi.String("string"),
FilterCriteria: &lambda.EventSourceMappingFilterCriteriaArgs{
Filters: lambda.EventSourceMappingFilterCriteriaFilterArray{
&lambda.EventSourceMappingFilterCriteriaFilterArgs{
Pattern: pulumi.String("string"),
},
},
},
BatchSize: pulumi.Int(0),
FunctionResponseTypes: pulumi.StringArray{
pulumi.String("string"),
},
MaximumRetryAttempts: pulumi.Int(0),
DestinationConfig: &lambda.EventSourceMappingDestinationConfigArgs{
OnFailure: &lambda.EventSourceMappingDestinationConfigOnFailureArgs{
DestinationArn: pulumi.String("string"),
},
},
BisectBatchOnFunctionError: pulumi.Bool(false),
KmsKeyArn: pulumi.String("string"),
ParallelizationFactor: pulumi.Int(0),
Queues: pulumi.String("string"),
ScalingConfig: &lambda.EventSourceMappingScalingConfigArgs{
MaximumConcurrency: pulumi.Int(0),
},
SelfManagedEventSource: &lambda.EventSourceMappingSelfManagedEventSourceArgs{
Endpoints: pulumi.StringMap{
"string": pulumi.String("string"),
},
},
SelfManagedKafkaEventSourceConfig: &lambda.EventSourceMappingSelfManagedKafkaEventSourceConfigArgs{
ConsumerGroupId: pulumi.String("string"),
},
SourceAccessConfigurations: lambda.EventSourceMappingSourceAccessConfigurationArray{
&lambda.EventSourceMappingSourceAccessConfigurationArgs{
Type: pulumi.String("string"),
Uri: pulumi.String("string"),
},
},
StartingPosition: pulumi.String("string"),
StartingPositionTimestamp: pulumi.String("string"),
Topics: pulumi.StringArray{
pulumi.String("string"),
},
AmazonManagedKafkaEventSourceConfig: &lambda.EventSourceMappingAmazonManagedKafkaEventSourceConfigArgs{
ConsumerGroupId: pulumi.String("string"),
},
})
// Reference example (Java): constructs an EventSourceMapping with every supported
// input property set to a placeholder value ("string", 0, false). In a real program,
// only the properties relevant to your event source (Kinesis, DynamoDB, SQS, MQ,
// MSK, self-managed Kafka, or DocumentDB) would be set — several of these inputs
// are mutually incompatible (see the property descriptions below).
var eventSourceMappingResource = new EventSourceMapping("eventSourceMappingResource", EventSourceMappingArgs.builder()
    .functionName("string")
    .maximumRecordAgeInSeconds(0)
    .tumblingWindowInSeconds(0)
    .maximumBatchingWindowInSeconds(0)
    // DocumentDB change-stream source settings.
    .documentDbEventSourceConfig(EventSourceMappingDocumentDbEventSourceConfigArgs.builder()
        .databaseName("string")
        .collectionName("string")
        .fullDocument("string")
        .build())
    .enabled(false)
    .eventSourceArn("string")
    // Event-filtering criteria applied before the function is invoked.
    .filterCriteria(EventSourceMappingFilterCriteriaArgs.builder()
        .filters(EventSourceMappingFilterCriteriaFilterArgs.builder()
            .pattern("string")
            .build())
        .build())
    .batchSize(0)
    .functionResponseTypes("string")
    .maximumRetryAttempts(0)
    // On-failure destination for records that cannot be processed.
    .destinationConfig(EventSourceMappingDestinationConfigArgs.builder()
        .onFailure(EventSourceMappingDestinationConfigOnFailureArgs.builder()
            .destinationArn("string")
            .build())
        .build())
    .bisectBatchOnFunctionError(false)
    .kmsKeyArn("string")
    .parallelizationFactor(0)
    .queues("string")
    .scalingConfig(EventSourceMappingScalingConfigArgs.builder()
        .maximumConcurrency(0)
        .build())
    // Self-managed Kafka cluster location (incompatible with eventSourceArn).
    .selfManagedEventSource(EventSourceMappingSelfManagedEventSourceArgs.builder()
        .endpoints(Map.of("string", "string"))
        .build())
    .selfManagedKafkaEventSourceConfig(EventSourceMappingSelfManagedKafkaEventSourceConfigArgs.builder()
        .consumerGroupId("string")
        .build())
    .sourceAccessConfigurations(EventSourceMappingSourceAccessConfigurationArgs.builder()
        .type("string")
        .uri("string")
        .build())
    .startingPosition("string")
    .startingPositionTimestamp("string")
    .topics("string")
    .amazonManagedKafkaEventSourceConfig(EventSourceMappingAmazonManagedKafkaEventSourceConfigArgs.builder()
        .consumerGroupId("string")
        .build())
    .build());
# Reference example (Python): constructs an aws.lambda_.EventSourceMapping with
# every supported input property set to a placeholder value ("string", 0, False).
# In a real program, only the properties relevant to your event source would be
# set — several of these inputs are mutually incompatible (see the property
# descriptions below).
event_source_mapping_resource = aws.lambda_.EventSourceMapping("eventSourceMappingResource",
    function_name="string",
    maximum_record_age_in_seconds=0,
    tumbling_window_in_seconds=0,
    maximum_batching_window_in_seconds=0,
    # DocumentDB change-stream source settings.
    document_db_event_source_config={
        "databaseName": "string",
        "collectionName": "string",
        "fullDocument": "string",
    },
    enabled=False,
    event_source_arn="string",
    # Event-filtering criteria applied before the function is invoked.
    filter_criteria={
        "filters": [{
            "pattern": "string",
        }],
    },
    batch_size=0,
    function_response_types=["string"],
    maximum_retry_attempts=0,
    # On-failure destination for records that cannot be processed.
    destination_config={
        "onFailure": {
            "destinationArn": "string",
        },
    },
    bisect_batch_on_function_error=False,
    kms_key_arn="string",
    parallelization_factor=0,
    queues="string",
    scaling_config={
        "maximumConcurrency": 0,
    },
    # Self-managed Kafka cluster location (incompatible with event_source_arn).
    self_managed_event_source={
        "endpoints": {
            "string": "string",
        },
    },
    self_managed_kafka_event_source_config={
        "consumerGroupId": "string",
    },
    source_access_configurations=[{
        "type": "string",
        "uri": "string",
    }],
    starting_position="string",
    starting_position_timestamp="string",
    topics=["string"],
    amazon_managed_kafka_event_source_config={
        "consumerGroupId": "string",
    })
// Reference example (TypeScript): constructs an aws.lambda.EventSourceMapping with
// every supported input property set to a placeholder value ("string", 0, false).
// In a real program, only the properties relevant to your event source would be
// set — several of these inputs are mutually incompatible (see the property
// descriptions below).
const eventSourceMappingResource = new aws.lambda.EventSourceMapping("eventSourceMappingResource", {
    functionName: "string",
    maximumRecordAgeInSeconds: 0,
    tumblingWindowInSeconds: 0,
    maximumBatchingWindowInSeconds: 0,
    // DocumentDB change-stream source settings.
    documentDbEventSourceConfig: {
        databaseName: "string",
        collectionName: "string",
        fullDocument: "string",
    },
    enabled: false,
    eventSourceArn: "string",
    // Event-filtering criteria applied before the function is invoked.
    filterCriteria: {
        filters: [{
            pattern: "string",
        }],
    },
    batchSize: 0,
    functionResponseTypes: ["string"],
    maximumRetryAttempts: 0,
    // On-failure destination for records that cannot be processed.
    destinationConfig: {
        onFailure: {
            destinationArn: "string",
        },
    },
    bisectBatchOnFunctionError: false,
    kmsKeyArn: "string",
    parallelizationFactor: 0,
    queues: "string",
    scalingConfig: {
        maximumConcurrency: 0,
    },
    // Self-managed Kafka cluster location (incompatible with eventSourceArn).
    selfManagedEventSource: {
        endpoints: {
            string: "string",
        },
    },
    selfManagedKafkaEventSourceConfig: {
        consumerGroupId: "string",
    },
    sourceAccessConfigurations: [{
        type: "string",
        uri: "string",
    }],
    startingPosition: "string",
    startingPositionTimestamp: "string",
    topics: ["string"],
    amazonManagedKafkaEventSourceConfig: {
        consumerGroupId: "string",
    },
});
# Reference example (Pulumi YAML): an aws:lambda:EventSourceMapping resource with
# every supported input property set to a placeholder value (string, 0, false).
# In a real program, only the properties relevant to your event source would be
# set — several of these inputs are mutually incompatible (see the property
# descriptions below).
type: aws:lambda:EventSourceMapping
properties:
  amazonManagedKafkaEventSourceConfig:
    consumerGroupId: string
  batchSize: 0
  bisectBatchOnFunctionError: false
  # On-failure destination for records that cannot be processed.
  destinationConfig:
    onFailure:
      destinationArn: string
  # DocumentDB change-stream source settings.
  documentDbEventSourceConfig:
    collectionName: string
    databaseName: string
    fullDocument: string
  enabled: false
  eventSourceArn: string
  # Event-filtering criteria applied before the function is invoked.
  filterCriteria:
    filters:
      - pattern: string
  functionName: string
  functionResponseTypes:
    - string
  kmsKeyArn: string
  maximumBatchingWindowInSeconds: 0
  maximumRecordAgeInSeconds: 0
  maximumRetryAttempts: 0
  parallelizationFactor: 0
  queues: string
  scalingConfig:
    maximumConcurrency: 0
  # Self-managed Kafka cluster location (incompatible with eventSourceArn).
  selfManagedEventSource:
    endpoints:
      string: string
  selfManagedKafkaEventSourceConfig:
    consumerGroupId: string
  sourceAccessConfigurations:
    - type: string
      uri: string
  startingPosition: string
  startingPositionTimestamp: string
  topics:
    - string
  tumblingWindowInSeconds: 0
EventSourceMapping Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
The EventSourceMapping resource accepts the following input properties:
- Function
Name string - The name or the ARN of the Lambda function that will be subscribing to events.
- Amazon
Managed Kafka Event Source Config EventSourceMappingAmazonManagedKafkaEventSourceConfig - Additional configuration block for Amazon Managed Kafka sources. Incompatible with "self_managed_event_source" and "self_managed_kafka_event_source_config". Detailed below.
- Batch
Size int - The largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to
100
for DynamoDB, Kinesis, MQ and MSK,10
for SQS. - Bisect
Batch On Function Error bool - (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to
false
.
- Destination
Config EventSourceMappingDestinationConfig - (Optional) An Amazon SQS queue, Amazon SNS topic or Amazon S3 bucket (only available for Kafka sources) destination for failed records. Only available for stream sources (DynamoDB and Kinesis) and Kafka sources (Amazon MSK and Self-managed Apache Kafka). Detailed below.
- Document
Db Event Source Config EventSourceMappingDocumentDbEventSourceConfig - (Optional) Configuration settings for a DocumentDB event source. Detailed below.
- Enabled bool
- Determines if the mapping will be enabled on creation. Defaults to
true
. - Event
Source stringArn - The event source ARN - this is required for Kinesis stream, DynamoDB stream, SQS queue, MQ broker, MSK cluster or DocumentDB change stream. It is incompatible with a Self Managed Kafka source.
- Filter
Criteria EventSourceMappingFilterCriteria - The criteria to use for event filtering of Kinesis stream, DynamoDB stream, and SQS queue event sources. Detailed below.
- Function
Response List<string>Types - A list of current response type enums applied to the event source mapping for AWS Lambda checkpointing. Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values:
ReportBatchItemFailures
. - Kms
Key stringArn - The ARN of the Key Management Service (KMS) customer managed key that Lambda uses to encrypt your function's filter criteria.
- Maximum
Batching intWindow In Seconds - The maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records will continue to buffer (or accumulate in the case of an SQS queue event source) until either
maximum_batching_window_in_seconds
expires orbatch_size
has been met. For streaming event sources, defaults to as soon as records are available in the stream. If the batch it reads from the stream/queue only has one record in it, Lambda only sends one record to the function. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues. - Maximum
Record intAge In Seconds - (Optional) The maximum age of a record that Lambda sends to a function for processing. Only available for stream sources (DynamoDB and Kinesis). Must be either -1 (forever, and the default value) or between 60 and 604800 (inclusive).
- Maximum
Retry intAttempts - (Optional) The maximum number of times to retry when the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of -1 (forever), maximum of 10000.
- Parallelization
Factor int - (Optional) The number of batches to process from each shard concurrently. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of 1, maximum of 10.
- Queues string
- The name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. The list must contain exactly one queue name.
- Scaling
Config EventSource Mapping Scaling Config - Scaling configuration of the event source. Only available for SQS queues. Detailed below.
- Self
Managed Event Source EventSourceMappingSelfManagedEventSource - (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include
source_access_configuration
. Detailed below.
- Self
Managed Kafka Event Source Config EventSourceMappingSelfManagedKafkaEventSourceConfig - Additional configuration block for Self Managed Kafka sources. Incompatible with "event_source_arn" and "amazon_managed_kafka_event_source_config". Detailed below.
- Source
Access Configurations List<EventSourceMappingSourceAccessConfiguration> - For Self Managed Kafka sources, the access configuration for the source. If set, configuration must also include
self_managed_event_source
. Detailed below. - Starting
Position string - The position in the stream where AWS Lambda should start reading. Must be one of
AT_TIMESTAMP
(Kinesis only),LATEST
orTRIM_HORIZON
if getting events from Kinesis, DynamoDB, MSK or Self Managed Apache Kafka. Must not be provided if getting events from SQS. More information about these positions can be found in the AWS DynamoDB Streams API Reference and AWS Kinesis API Reference. - Starting
Position stringTimestamp - A timestamp in RFC3339 format of the data record which to start reading when using
starting_position
set toAT_TIMESTAMP
. If a record with this exact timestamp does not exist, the next later record is chosen. If the timestamp is older than the current trim horizon, the oldest available record is chosen. - Topics List<string>
- The name of the Kafka topics. Only available for MSK sources. A single topic name must be specified.
- Tumbling
Window intIn Seconds - The duration in seconds of a processing window for AWS Lambda streaming analytics. The range is between 1 second up to 900 seconds. Only available for stream sources (DynamoDB and Kinesis).
- Function
Name string - The name or the ARN of the Lambda function that will be subscribing to events.
- Amazon
Managed EventKafka Event Source Config Source Mapping Amazon Managed Kafka Event Source Config Args - Additional configuration block for Amazon Managed Kafka sources. Incompatible with "self_managed_event_source" and "self_managed_kafka_event_source_config". Detailed below.
- Batch
Size int - The largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to
100
for DynamoDB, Kinesis, MQ and MSK,10
for SQS. - Bisect
Batch boolOn Function Error - (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to
false
.
- (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to
- Destination
Config EventSource Mapping Destination Config Args - (Optional) An Amazon SQS queue, Amazon SNS topic or Amazon S3 bucket (only available for Kafka sources) destination for failed records. Only available for stream sources (DynamoDB and Kinesis) and Kafka sources (Amazon MSK and Self-managed Apache Kafka). Detailed below.
- Document
Db EventEvent Source Config Source Mapping Document Db Event Source Config Args - (Optional) Configuration settings for a DocumentDB event source. Detailed below.
- Enabled bool
- Determines if the mapping will be enabled on creation. Defaults to
true
. - Event
Source stringArn - The event source ARN - this is required for Kinesis stream, DynamoDB stream, SQS queue, MQ broker, MSK cluster or DocumentDB change stream. It is incompatible with a Self Managed Kafka source.
- Filter
Criteria EventSource Mapping Filter Criteria Args - The criteria to use for event filtering Kinesis stream, DynamoDB stream, SQS queue event sources. Detailed below.
- Function
Response []stringTypes - A list of current response type enums applied to the event source mapping for AWS Lambda checkpointing. Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values:
ReportBatchItemFailures
. - Kms
Key stringArn - The ARN of the Key Management Service (KMS) customer managed key that Lambda uses to encrypt your function's filter criteria.
- Maximum
Batching intWindow In Seconds - The maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records will continue to buffer (or accumulate in the case of an SQS queue event source) until either
maximum_batching_window_in_seconds
expires orbatch_size
has been met. For streaming event sources, defaults to as soon as records are available in the stream. If the batch it reads from the stream/queue only has one record in it, Lambda only sends one record to the function. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues. - Maximum
Record intAge In Seconds - (Optional) The maximum age of a record that Lambda sends to a function for processing. Only available for stream sources (DynamoDB and Kinesis). Must be either -1 (forever, and the default value) or between 60 and 604800 (inclusive).
- Maximum
Retry intAttempts - (Optional) The maximum number of times to retry when the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of -1 (forever), maximum of 10000.
- Parallelization
Factor int - (Optional) The number of batches to process from each shard concurrently. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of 1, maximum of 10.
- Queues string
- The name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. The list must contain exactly one queue name.
- Scaling
Config EventSource Mapping Scaling Config Args - Scaling configuration of the event source. Only available for SQS queues. Detailed below.
- Self
Managed EventEvent Source Source Mapping Self Managed Event Source Args - (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include
source_access_configuration
. Detailed below.
- (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include
- Self
Managed EventKafka Event Source Config Source Mapping Self Managed Kafka Event Source Config Args - Additional configuration block for Self Managed Kafka sources. Incompatible with "event_source_arn" and "amazon_managed_kafka_event_source_config". Detailed below.
- Source
Access []EventConfigurations Source Mapping Source Access Configuration Args - For Self Managed Kafka sources, the access configuration for the source. If set, configuration must also include
self_managed_event_source
. Detailed below. - Starting
Position string - The position in the stream where AWS Lambda should start reading. Must be one of
AT_TIMESTAMP
(Kinesis only),LATEST
orTRIM_HORIZON
if getting events from Kinesis, DynamoDB, MSK or Self Managed Apache Kafka. Must not be provided if getting events from SQS. More information about these positions can be found in the AWS DynamoDB Streams API Reference and AWS Kinesis API Reference. - Starting
Position stringTimestamp - A timestamp in RFC3339 format of the data record which to start reading when using
starting_position
set toAT_TIMESTAMP
. If a record with this exact timestamp does not exist, the next later record is chosen. If the timestamp is older than the current trim horizon, the oldest available record is chosen. - Topics []string
- The name of the Kafka topics. Only available for MSK sources. A single topic name must be specified.
- Tumbling
Window intIn Seconds - The duration in seconds of a processing window for AWS Lambda streaming analytics. The range is between 1 second up to 900 seconds. Only available for stream sources (DynamoDB and Kinesis).
- function
Name String - The name or the ARN of the Lambda function that will be subscribing to events.
- amazon
Managed EventKafka Event Source Config Source Mapping Amazon Managed Kafka Event Source Config - Additional configuration block for Amazon Managed Kafka sources. Incompatible with "self_managed_event_source" and "self_managed_kafka_event_source_config". Detailed below.
- batch
Size Integer - The largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to
100
for DynamoDB, Kinesis, MQ and MSK,10
for SQS. - bisect
Batch BooleanOn Function Error - (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to
false
.
- (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to
- destination
Config EventSource Mapping Destination Config - (Optional) An Amazon SQS queue, Amazon SNS topic or Amazon S3 bucket (only available for Kafka sources) destination for failed records. Only available for stream sources (DynamoDB and Kinesis) and Kafka sources (Amazon MSK and Self-managed Apache Kafka). Detailed below.
- document
Db EventEvent Source Config Source Mapping Document Db Event Source Config - (Optional) Configuration settings for a DocumentDB event source. Detailed below.
- enabled Boolean
- Determines if the mapping will be enabled on creation. Defaults to
true
. - event
Source StringArn - The event source ARN - this is required for Kinesis stream, DynamoDB stream, SQS queue, MQ broker, MSK cluster or DocumentDB change stream. It is incompatible with a Self Managed Kafka source.
- filter
Criteria EventSource Mapping Filter Criteria - The criteria to use for event filtering Kinesis stream, DynamoDB stream, SQS queue event sources. Detailed below.
- function
Response List<String>Types - A list of current response type enums applied to the event source mapping for AWS Lambda checkpointing. Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values:
ReportBatchItemFailures
. - kms
Key StringArn - The ARN of the Key Management Service (KMS) customer managed key that Lambda uses to encrypt your function's filter criteria.
- maximum
Batching IntegerWindow In Seconds - The maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records will continue to buffer (or accumulate in the case of an SQS queue event source) until either
maximum_batching_window_in_seconds
expires orbatch_size
has been met. For streaming event sources, defaults to as soon as records are available in the stream. If the batch it reads from the stream/queue only has one record in it, Lambda only sends one record to the function. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues. - maximum
Record IntegerAge In Seconds - (Optional) The maximum age of a record that Lambda sends to a function for processing. Only available for stream sources (DynamoDB and Kinesis). Must be either -1 (forever, and the default value) or between 60 and 604800 (inclusive).
- maximum
Retry IntegerAttempts - (Optional) The maximum number of times to retry when the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of -1 (forever), maximum of 10000.
- parallelization
Factor Integer - (Optional) The number of batches to process from each shard concurrently. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of 1, maximum of 10.
- queues String
- The name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. The list must contain exactly one queue name.
- scaling
Config EventSource Mapping Scaling Config - Scaling configuration of the event source. Only available for SQS queues. Detailed below.
- self
Managed EventEvent Source Source Mapping Self Managed Event Source - (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include
source_access_configuration
. Detailed below.
- (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include
- self
Managed EventKafka Event Source Config Source Mapping Self Managed Kafka Event Source Config - Additional configuration block for Self Managed Kafka sources. Incompatible with "event_source_arn" and "amazon_managed_kafka_event_source_config". Detailed below.
- source
Access List<EventConfigurations Source Mapping Source Access Configuration> - For Self Managed Kafka sources, the access configuration for the source. If set, configuration must also include
self_managed_event_source
. Detailed below. - starting
Position String - The position in the stream where AWS Lambda should start reading. Must be one of
AT_TIMESTAMP
(Kinesis only),LATEST
orTRIM_HORIZON
if getting events from Kinesis, DynamoDB, MSK or Self Managed Apache Kafka. Must not be provided if getting events from SQS. More information about these positions can be found in the AWS DynamoDB Streams API Reference and AWS Kinesis API Reference. - starting
Position StringTimestamp - A timestamp in RFC3339 format of the data record which to start reading when using
starting_position
set toAT_TIMESTAMP
. If a record with this exact timestamp does not exist, the next later record is chosen. If the timestamp is older than the current trim horizon, the oldest available record is chosen. - topics List<String>
- The name of the Kafka topics. Only available for MSK sources. A single topic name must be specified.
- tumbling
Window IntegerIn Seconds - The duration in seconds of a processing window for AWS Lambda streaming analytics. The range is between 1 second up to 900 seconds. Only available for stream sources (DynamoDB and Kinesis).
- function
Name string - The name or the ARN of the Lambda function that will be subscribing to events.
- amazon
Managed EventKafka Event Source Config Source Mapping Amazon Managed Kafka Event Source Config - Additional configuration block for Amazon Managed Kafka sources. Incompatible with "self_managed_event_source" and "self_managed_kafka_event_source_config". Detailed below.
- batch
Size number - The largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to
100
for DynamoDB, Kinesis, MQ and MSK,10
for SQS. - bisect
Batch booleanOn Function Error - (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to
false
.
- (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to
- destination
Config EventSource Mapping Destination Config - (Optional) An Amazon SQS queue, Amazon SNS topic or Amazon S3 bucket (only available for Kafka sources) destination for failed records. Only available for stream sources (DynamoDB and Kinesis) and Kafka sources (Amazon MSK and Self-managed Apache Kafka). Detailed below.
- document
Db EventEvent Source Config Source Mapping Document Db Event Source Config - (Optional) Configuration settings for a DocumentDB event source. Detailed below.
- enabled boolean
- Determines if the mapping will be enabled on creation. Defaults to
true
. - event
Source stringArn - The event source ARN - this is required for Kinesis stream, DynamoDB stream, SQS queue, MQ broker, MSK cluster or DocumentDB change stream. It is incompatible with a Self Managed Kafka source.
- filter
Criteria EventSource Mapping Filter Criteria - The criteria to use for event filtering Kinesis stream, DynamoDB stream, SQS queue event sources. Detailed below.
- function
Response string[]Types - A list of current response type enums applied to the event source mapping for AWS Lambda checkpointing. Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values:
ReportBatchItemFailures
. - kms
Key stringArn - The ARN of the Key Management Service (KMS) customer managed key that Lambda uses to encrypt your function's filter criteria.
- maximum
Batching numberWindow In Seconds - The maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records will continue to buffer (or accumulate in the case of an SQS queue event source) until either
maximum_batching_window_in_seconds
expires orbatch_size
has been met. For streaming event sources, defaults to as soon as records are available in the stream. If the batch it reads from the stream/queue only has one record in it, Lambda only sends one record to the function. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues. - maximum
Record numberAge In Seconds - (Optional) The maximum age of a record that Lambda sends to a function for processing. Only available for stream sources (DynamoDB and Kinesis). Must be either -1 (forever, and the default value) or between 60 and 604800 (inclusive).
- maximum
Retry numberAttempts - (Optional) The maximum number of times to retry when the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of -1 (forever), maximum of 10000.
- parallelization
Factor number - (Optional) The number of batches to process from each shard concurrently. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of 1, maximum of 10.
- queues string
- The name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. The list must contain exactly one queue name.
- scaling
Config EventSource Mapping Scaling Config - Scaling configuration of the event source. Only available for SQS queues. Detailed below.
- self
Managed EventEvent Source Source Mapping Self Managed Event Source - (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include
source_access_configuration
. Detailed below.
- (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include
- self
Managed EventKafka Event Source Config Source Mapping Self Managed Kafka Event Source Config - Additional configuration block for Self Managed Kafka sources. Incompatible with "event_source_arn" and "amazon_managed_kafka_event_source_config". Detailed below.
- source
Access EventConfigurations Source Mapping Source Access Configuration[] - For Self Managed Kafka sources, the access configuration for the source. If set, configuration must also include
self_managed_event_source
. Detailed below. - starting
Position string - The position in the stream where AWS Lambda should start reading. Must be one of
AT_TIMESTAMP
(Kinesis only),LATEST
orTRIM_HORIZON
if getting events from Kinesis, DynamoDB, MSK or Self Managed Apache Kafka. Must not be provided if getting events from SQS. More information about these positions can be found in the AWS DynamoDB Streams API Reference and AWS Kinesis API Reference. - starting
Position stringTimestamp - A timestamp in RFC3339 format of the data record which to start reading when using
starting_position
set toAT_TIMESTAMP
. If a record with this exact timestamp does not exist, the next later record is chosen. If the timestamp is older than the current trim horizon, the oldest available record is chosen. - topics string[]
- The name of the Kafka topics. Only available for MSK sources. A single topic name must be specified.
- tumbling
Window numberIn Seconds - The duration in seconds of a processing window for AWS Lambda streaming analytics. The range is between 1 second up to 900 seconds. Only available for stream sources (DynamoDB and Kinesis).
- function_
name str - The name or the ARN of the Lambda function that will be subscribing to events.
- amazon_
managed_kafka_event_source_config lambda_.EventSourceMappingAmazonManagedKafkaEventSourceConfigArgs - Additional configuration block for Amazon Managed Kafka sources. Incompatible with "self_managed_event_source" and "self_managed_kafka_event_source_config". Detailed below.
- batch_
size int - The largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to
100
for DynamoDB, Kinesis, MQ and MSK,10
for SQS. - bisect_
batch_ boolon_ function_ error - (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to
false
.
- (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to
- destination_
config lambda_.Event Source Mapping Destination Config Args - (Optional) An Amazon SQS queue, Amazon SNS topic or Amazon S3 bucket (only available for Kafka sources) destination for failed records. Only available for stream sources (DynamoDB and Kinesis) and Kafka sources (Amazon MSK and Self-managed Apache Kafka). Detailed below.
- document_
db_ lambda_.event_ source_ config Event Source Mapping Document Db Event Source Config Args - (Optional) Configuration settings for a DocumentDB event source. Detailed below.
- enabled bool
- Determines if the mapping will be enabled on creation. Defaults to
true
. - event_
source_ strarn - The event source ARN - this is required for Kinesis stream, DynamoDB stream, SQS queue, MQ broker, MSK cluster or DocumentDB change stream. It is incompatible with a Self Managed Kafka source.
- filter_
criteria lambda_.Event Source Mapping Filter Criteria Args - The criteria to use for event filtering Kinesis stream, DynamoDB stream, SQS queue event sources. Detailed below.
- function_
response_ Sequence[str]types - A list of current response type enums applied to the event source mapping for AWS Lambda checkpointing. Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values:
ReportBatchItemFailures
. - kms_
key_ strarn - The ARN of the Key Management Service (KMS) customer managed key that Lambda uses to encrypt your function's filter criteria.
- maximum_
batching_ intwindow_ in_ seconds - The maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records will continue to buffer (or accumulate in the case of an SQS queue event source) until either
maximum_batching_window_in_seconds
expires orbatch_size
has been met. For streaming event sources, defaults to as soon as records are available in the stream. If the batch it reads from the stream/queue only has one record in it, Lambda only sends one record to the function. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues. - maximum_
record_ intage_ in_ seconds - (Optional) The maximum age of a record that Lambda sends to a function for processing. Only available for stream sources (DynamoDB and Kinesis). Must be either -1 (forever, and the default value) or between 60 and 604800 (inclusive).
- maximum_
retry_ intattempts - (Optional) The maximum number of times to retry when the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of -1 (forever), maximum of 10000.
- parallelization_
factor int - (Optional) The number of batches to process from each shard concurrently. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of 1, maximum of 10.
- queues str
- The name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. The list must contain exactly one queue name.
- scaling_
config lambda_.EventSourceMappingScalingConfigArgs - Scaling configuration of the event source. Only available for SQS queues. Detailed below.
- self_
managed_event_source lambda_.EventSourceMappingSelfManagedEventSourceArgs - (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include
source_access_configuration
. Detailed below.
- (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include
- self_
managed_kafka_event_source_config lambda_.EventSourceMappingSelfManagedKafkaEventSourceConfigArgs - Additional configuration block for Self Managed Kafka sources. Incompatible with "event_source_arn" and "amazon_managed_kafka_event_source_config". Detailed below.
- source_
access_configurations Sequence[lambda_.EventSourceMappingSourceAccessConfigurationArgs] - For Self Managed Kafka sources, the access configuration for the source. If set, configuration must also include
self_managed_event_source
. Detailed below. - starting_
position str - The position in the stream where AWS Lambda should start reading. Must be one of
AT_TIMESTAMP
(Kinesis only), LATEST
or TRIM_HORIZON
if getting events from Kinesis, DynamoDB, MSK or Self Managed Apache Kafka. Must not be provided if getting events from SQS. More information about these positions can be found in the AWS DynamoDB Streams API Reference and AWS Kinesis API Reference. - starting_
position_timestamp str - A timestamp in RFC3339 format of the data record from which to start reading when using
starting_position
set to AT_TIMESTAMP
. If a record with this exact timestamp does not exist, the next later record is chosen. If the timestamp is older than the current trim horizon, the oldest available record is chosen. - topics Sequence[str]
- The name of the Kafka topics. Only available for MSK sources. A single topic name must be specified.
- tumbling_
window_in_seconds int - The duration in seconds of a processing window for AWS Lambda streaming analytics. The range is between 1 second and 900 seconds. Only available for stream sources (DynamoDB and Kinesis).
- function
Name String - The name or the ARN of the Lambda function that will be subscribing to events.
- amazon
Managed Property MapKafka Event Source Config - Additional configuration block for Amazon Managed Kafka sources. Incompatible with "self_managed_event_source" and "self_managed_kafka_event_source_config". Detailed below.
- batch
Size Number - The largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to
100
for DynamoDB, Kinesis, MQ and MSK,10
for SQS. - bisect
Batch BooleanOn Function Error - (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to
false
.
- (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to
- destination
Config Property Map - (Optional) An Amazon SQS queue, Amazon SNS topic or Amazon S3 bucket (only available for Kafka sources) destination for failed records. Only available for stream sources (DynamoDB and Kinesis) and Kafka sources (Amazon MSK and Self-managed Apache Kafka). Detailed below.
- document
Db Property MapEvent Source Config - (Optional) Configuration settings for a DocumentDB event source. Detailed below.
- enabled Boolean
- Determines if the mapping will be enabled on creation. Defaults to
true
. - event
Source StringArn - The event source ARN - this is required for Kinesis stream, DynamoDB stream, SQS queue, MQ broker, MSK cluster or DocumentDB change stream. It is incompatible with a Self Managed Kafka source.
- filter
Criteria Property Map - The criteria to use for event filtering Kinesis stream, DynamoDB stream, SQS queue event sources. Detailed below.
- function
Response List<String>Types - A list of current response type enums applied to the event source mapping for AWS Lambda checkpointing. Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values:
ReportBatchItemFailures
. - kms
Key StringArn - The ARN of the Key Management Service (KMS) customer managed key that Lambda uses to encrypt your function's filter criteria.
- maximum
Batching NumberWindow In Seconds - The maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records will continue to buffer (or accumulate in the case of an SQS queue event source) until either
maximum_batching_window_in_seconds
expires orbatch_size
has been met. For streaming event sources, defaults to as soon as records are available in the stream. If the batch it reads from the stream/queue only has one record in it, Lambda only sends one record to the function. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues. - maximum
Record NumberAge In Seconds - (Optional) The maximum age of a record that Lambda sends to a function for processing. Only available for stream sources (DynamoDB and Kinesis). Must be either -1 (forever, and the default value) or between 60 and 604800 (inclusive).
- maximum
Retry NumberAttempts - (Optional) The maximum number of times to retry when the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of -1 (forever), maximum of 10000.
- parallelization
Factor Number - (Optional) The number of batches to process from each shard concurrently. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of 1, maximum of 10.
- queues String
- The name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. The list must contain exactly one queue name.
- scaling
Config Property Map - Scaling configuration of the event source. Only available for SQS queues. Detailed below.
- self
Managed Property MapEvent Source - (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include
source_access_configuration
. Detailed below.
- (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include
- self
Managed Property MapKafka Event Source Config - Additional configuration block for Self Managed Kafka sources. Incompatible with "event_source_arn" and "amazon_managed_kafka_event_source_config". Detailed below.
- source
Access List<Property Map>Configurations - For Self Managed Kafka sources, the access configuration for the source. If set, configuration must also include
self_managed_event_source
. Detailed below. - starting
Position String - The position in the stream where AWS Lambda should start reading. Must be one of
AT_TIMESTAMP
(Kinesis only),LATEST
orTRIM_HORIZON
if getting events from Kinesis, DynamoDB, MSK or Self Managed Apache Kafka. Must not be provided if getting events from SQS. More information about these positions can be found in the AWS DynamoDB Streams API Reference and AWS Kinesis API Reference. - starting
Position Timestamp String - A timestamp in RFC3339 format of the data record from which to start reading when using
starting_position
set toAT_TIMESTAMP
. If a record with this exact timestamp does not exist, the next later record is chosen. If the timestamp is older than the current trim horizon, the oldest available record is chosen. - topics List<String>
- The name of the Kafka topics. Only available for MSK sources. A single topic name must be specified.
- tumbling
Window NumberIn Seconds - The duration in seconds of a processing window for AWS Lambda streaming analytics. The range is between 1 second up to 900 seconds. Only available for stream sources (DynamoDB and Kinesis).
Outputs
All input properties are implicitly available as output properties. Additionally, the EventSourceMapping resource produces the following output properties:
- Function
Arn string - The ARN of the Lambda function the event source mapping is sending events to. (Note: this is a computed value that differs from
function_name
above.) - Id string
- The provider-assigned unique ID for this managed resource.
- Last
Modified string - The date this resource was last modified.
- Last
Processing stringResult - The result of the last AWS Lambda invocation of your Lambda function.
- State string
- The state of the event source mapping.
- State
Transition stringReason - The reason the event source mapping is in its current state.
- Uuid string
- The UUID of the created event source mapping.
- Function
Arn string - The ARN of the Lambda function the event source mapping is sending events to. (Note: this is a computed value that differs from
function_name
above.) - Id string
- The provider-assigned unique ID for this managed resource.
- Last
Modified string - The date this resource was last modified.
- Last
Processing stringResult - The result of the last AWS Lambda invocation of your Lambda function.
- State string
- The state of the event source mapping.
- State
Transition stringReason - The reason the event source mapping is in its current state.
- Uuid string
- The UUID of the created event source mapping.
- function
Arn String - The ARN of the Lambda function the event source mapping is sending events to. (Note: this is a computed value that differs from
function_name
above.) - id String
- The provider-assigned unique ID for this managed resource.
- last
Modified String - The date this resource was last modified.
- last
Processing StringResult - The result of the last AWS Lambda invocation of your Lambda function.
- state String
- The state of the event source mapping.
- state
Transition StringReason - The reason the event source mapping is in its current state.
- uuid String
- The UUID of the created event source mapping.
- function
Arn string - The ARN of the Lambda function the event source mapping is sending events to. (Note: this is a computed value that differs from
function_name
above.) - id string
- The provider-assigned unique ID for this managed resource.
- last
Modified string - The date this resource was last modified.
- last
Processing stringResult - The result of the last AWS Lambda invocation of your Lambda function.
- state string
- The state of the event source mapping.
- state
Transition stringReason - The reason the event source mapping is in its current state.
- uuid string
- The UUID of the created event source mapping.
- function_
arn str - The ARN of the Lambda function the event source mapping is sending events to. (Note: this is a computed value that differs from
function_name
above.) - id str
- The provider-assigned unique ID for this managed resource.
- last_
modified str - The date this resource was last modified.
- last_
processing_ strresult - The result of the last AWS Lambda invocation of your Lambda function.
- state str
- The state of the event source mapping.
- state_
transition_ strreason - The reason the event source mapping is in its current state.
- uuid str
- The UUID of the created event source mapping.
- function
Arn String - The ARN of the Lambda function the event source mapping is sending events to. (Note: this is a computed value that differs from
function_name
above.) - id String
- The provider-assigned unique ID for this managed resource.
- last
Modified String - The date this resource was last modified.
- last
Processing StringResult - The result of the last AWS Lambda invocation of your Lambda function.
- state String
- The state of the event source mapping.
- state
Transition StringReason - The reason the event source mapping is in its current state.
- uuid String
- The UUID of the created event source mapping.
Look up Existing EventSourceMapping Resource
Get an existing EventSourceMapping resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: EventSourceMappingState, opts?: CustomResourceOptions): EventSourceMapping
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
amazon_managed_kafka_event_source_config: Optional[_lambda_.EventSourceMappingAmazonManagedKafkaEventSourceConfigArgs] = None,
batch_size: Optional[int] = None,
bisect_batch_on_function_error: Optional[bool] = None,
destination_config: Optional[_lambda_.EventSourceMappingDestinationConfigArgs] = None,
document_db_event_source_config: Optional[_lambda_.EventSourceMappingDocumentDbEventSourceConfigArgs] = None,
enabled: Optional[bool] = None,
event_source_arn: Optional[str] = None,
filter_criteria: Optional[_lambda_.EventSourceMappingFilterCriteriaArgs] = None,
function_arn: Optional[str] = None,
function_name: Optional[str] = None,
function_response_types: Optional[Sequence[str]] = None,
kms_key_arn: Optional[str] = None,
last_modified: Optional[str] = None,
last_processing_result: Optional[str] = None,
maximum_batching_window_in_seconds: Optional[int] = None,
maximum_record_age_in_seconds: Optional[int] = None,
maximum_retry_attempts: Optional[int] = None,
parallelization_factor: Optional[int] = None,
queues: Optional[str] = None,
scaling_config: Optional[_lambda_.EventSourceMappingScalingConfigArgs] = None,
self_managed_event_source: Optional[_lambda_.EventSourceMappingSelfManagedEventSourceArgs] = None,
self_managed_kafka_event_source_config: Optional[_lambda_.EventSourceMappingSelfManagedKafkaEventSourceConfigArgs] = None,
source_access_configurations: Optional[Sequence[_lambda_.EventSourceMappingSourceAccessConfigurationArgs]] = None,
starting_position: Optional[str] = None,
starting_position_timestamp: Optional[str] = None,
state: Optional[str] = None,
state_transition_reason: Optional[str] = None,
topics: Optional[Sequence[str]] = None,
tumbling_window_in_seconds: Optional[int] = None,
uuid: Optional[str] = None) -> EventSourceMapping
func GetEventSourceMapping(ctx *Context, name string, id IDInput, state *EventSourceMappingState, opts ...ResourceOption) (*EventSourceMapping, error)
public static EventSourceMapping Get(string name, Input<string> id, EventSourceMappingState? state, CustomResourceOptions? opts = null)
public static EventSourceMapping get(String name, Output<String> id, EventSourceMappingState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- Amazon
Managed EventKafka Event Source Config Source Mapping Amazon Managed Kafka Event Source Config - Additional configuration block for Amazon Managed Kafka sources. Incompatible with "self_managed_event_source" and "self_managed_kafka_event_source_config". Detailed below.
- Batch
Size int - The largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to
100
for DynamoDB, Kinesis, MQ and MSK,10
for SQS. - Bisect
Batch boolOn Function Error - (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to
false
.
- (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to
- Destination
Config EventSource Mapping Destination Config - (Optional) An Amazon SQS queue, Amazon SNS topic or Amazon S3 bucket (only available for Kafka sources) destination for failed records. Only available for stream sources (DynamoDB and Kinesis) and Kafka sources (Amazon MSK and Self-managed Apache Kafka). Detailed below.
- Document
Db EventEvent Source Config Source Mapping Document Db Event Source Config - (Optional) Configuration settings for a DocumentDB event source. Detailed below.
- Enabled bool
- Determines if the mapping will be enabled on creation. Defaults to
true
. - Event
Source stringArn - The event source ARN - this is required for Kinesis stream, DynamoDB stream, SQS queue, MQ broker, MSK cluster or DocumentDB change stream. It is incompatible with a Self Managed Kafka source.
- Filter
Criteria EventSource Mapping Filter Criteria - The criteria to use for event filtering Kinesis stream, DynamoDB stream, SQS queue event sources. Detailed below.
- Function
Arn string - The ARN of the Lambda function the event source mapping is sending events to. (Note: this is a computed value that differs from
function_name
above.) - Function
Name string - The name or the ARN of the Lambda function that will be subscribing to events.
- Function
Response List<string>Types - A list of current response type enums applied to the event source mapping for AWS Lambda checkpointing. Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values:
ReportBatchItemFailures
. - Kms
Key stringArn - The ARN of the Key Management Service (KMS) customer managed key that Lambda uses to encrypt your function's filter criteria.
- Last
Modified string - The date this resource was last modified.
- Last
Processing stringResult - The result of the last AWS Lambda invocation of your Lambda function.
- Maximum
Batching intWindow In Seconds - The maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records will continue to buffer (or accumulate in the case of an SQS queue event source) until either
maximum_batching_window_in_seconds
expires orbatch_size
has been met. For streaming event sources, defaults to as soon as records are available in the stream. If the batch it reads from the stream/queue only has one record in it, Lambda only sends one record to the function. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues. - Maximum
Record intAge In Seconds - (Optional) The maximum age of a record that Lambda sends to a function for processing. Only available for stream sources (DynamoDB and Kinesis). Must be either -1 (forever, and the default value) or between 60 and 604800 (inclusive).
- Maximum
Retry intAttempts - (Optional) The maximum number of times to retry when the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of -1 (forever), maximum of 10000.
- Parallelization
Factor int - (Optional) The number of batches to process from each shard concurrently. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of 1, maximum of 10.
- Queues string
- The name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. The list must contain exactly one queue name.
- Scaling
Config EventSource Mapping Scaling Config - Scaling configuration of the event source. Only available for SQS queues. Detailed below.
- Self
Managed EventEvent Source Source Mapping Self Managed Event Source - (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include
source_access_configuration
. Detailed below.
- (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include
- Self
Managed EventKafka Event Source Config Source Mapping Self Managed Kafka Event Source Config - Additional configuration block for Self Managed Kafka sources. Incompatible with "event_source_arn" and "amazon_managed_kafka_event_source_config". Detailed below.
- Source
Access List<EventConfigurations Source Mapping Source Access Configuration> - For Self Managed Kafka sources, the access configuration for the source. If set, configuration must also include
self_managed_event_source
. Detailed below. - Starting
Position string - The position in the stream where AWS Lambda should start reading. Must be one of
AT_TIMESTAMP
(Kinesis only),LATEST
orTRIM_HORIZON
if getting events from Kinesis, DynamoDB, MSK or Self Managed Apache Kafka. Must not be provided if getting events from SQS. More information about these positions can be found in the AWS DynamoDB Streams API Reference and AWS Kinesis API Reference. - Starting
Position Timestamp string - A timestamp in RFC3339 format of the data record from which to start reading when using
starting_position
set toAT_TIMESTAMP
. If a record with this exact timestamp does not exist, the next later record is chosen. If the timestamp is older than the current trim horizon, the oldest available record is chosen. - State string
- The state of the event source mapping.
- State
Transition stringReason - The reason the event source mapping is in its current state.
- Topics List<string>
- The name of the Kafka topics. Only available for MSK sources. A single topic name must be specified.
- Tumbling
Window intIn Seconds - The duration in seconds of a processing window for AWS Lambda streaming analytics. The range is between 1 second up to 900 seconds. Only available for stream sources (DynamoDB and Kinesis).
- Uuid string
- The UUID of the created event source mapping.
- Amazon
Managed EventKafka Event Source Config Source Mapping Amazon Managed Kafka Event Source Config Args - Additional configuration block for Amazon Managed Kafka sources. Incompatible with "self_managed_event_source" and "self_managed_kafka_event_source_config". Detailed below.
- Batch
Size int - The largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to
100
for DynamoDB, Kinesis, MQ and MSK,10
for SQS. - Bisect
Batch boolOn Function Error - (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to
false
.
- (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to
- Destination
Config EventSource Mapping Destination Config Args - (Optional) An Amazon SQS queue, Amazon SNS topic or Amazon S3 bucket (only available for Kafka sources) destination for failed records. Only available for stream sources (DynamoDB and Kinesis) and Kafka sources (Amazon MSK and Self-managed Apache Kafka). Detailed below.
- Document
Db EventEvent Source Config Source Mapping Document Db Event Source Config Args - (Optional) Configuration settings for a DocumentDB event source. Detailed below.
- Enabled bool
- Determines if the mapping will be enabled on creation. Defaults to
true
. - Event
Source stringArn - The event source ARN - this is required for Kinesis stream, DynamoDB stream, SQS queue, MQ broker, MSK cluster or DocumentDB change stream. It is incompatible with a Self Managed Kafka source.
- Filter
Criteria EventSource Mapping Filter Criteria Args - The criteria to use for event filtering Kinesis stream, DynamoDB stream, SQS queue event sources. Detailed below.
- Function
Arn string - The ARN of the Lambda function the event source mapping is sending events to. (Note: this is a computed value that differs from
function_name
above.) - Function
Name string - The name or the ARN of the Lambda function that will be subscribing to events.
- Function
Response []stringTypes - A list of current response type enums applied to the event source mapping for AWS Lambda checkpointing. Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values:
ReportBatchItemFailures
. - Kms
Key stringArn - The ARN of the Key Management Service (KMS) customer managed key that Lambda uses to encrypt your function's filter criteria.
- Last
Modified string - The date this resource was last modified.
- Last
Processing stringResult - The result of the last AWS Lambda invocation of your Lambda function.
- Maximum
Batching intWindow In Seconds - The maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records will continue to buffer (or accumulate in the case of an SQS queue event source) until either
maximum_batching_window_in_seconds
expires orbatch_size
has been met. For streaming event sources, defaults to as soon as records are available in the stream. If the batch it reads from the stream/queue only has one record in it, Lambda only sends one record to the function. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues. - Maximum
Record intAge In Seconds - (Optional) The maximum age of a record that Lambda sends to a function for processing. Only available for stream sources (DynamoDB and Kinesis). Must be either -1 (forever, and the default value) or between 60 and 604800 (inclusive).
- Maximum
Retry intAttempts - (Optional) The maximum number of times to retry when the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of -1 (forever), maximum of 10000.
- Parallelization
Factor int - (Optional) The number of batches to process from each shard concurrently. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of 1, maximum of 10.
- Queues string
- The name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. The list must contain exactly one queue name.
- Scaling
Config EventSource Mapping Scaling Config Args - Scaling configuration of the event source. Only available for SQS queues. Detailed below.
- Self
Managed EventEvent Source Source Mapping Self Managed Event Source Args - (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include
source_access_configuration
. Detailed below.
- (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include
- Self
Managed EventKafka Event Source Config Source Mapping Self Managed Kafka Event Source Config Args - Additional configuration block for Self Managed Kafka sources. Incompatible with "event_source_arn" and "amazon_managed_kafka_event_source_config". Detailed below.
- Source
Access []EventConfigurations Source Mapping Source Access Configuration Args - For Self Managed Kafka sources, the access configuration for the source. If set, configuration must also include
self_managed_event_source
. Detailed below. - Starting
Position string - The position in the stream where AWS Lambda should start reading. Must be one of
AT_TIMESTAMP
(Kinesis only),LATEST
orTRIM_HORIZON
if getting events from Kinesis, DynamoDB, MSK or Self Managed Apache Kafka. Must not be provided if getting events from SQS. More information about these positions can be found in the AWS DynamoDB Streams API Reference and AWS Kinesis API Reference. - Starting
Position Timestamp string - A timestamp in RFC3339 format of the data record from which to start reading when using
starting_position
set toAT_TIMESTAMP
. If a record with this exact timestamp does not exist, the next later record is chosen. If the timestamp is older than the current trim horizon, the oldest available record is chosen. - State string
- The state of the event source mapping.
- State
Transition stringReason - The reason the event source mapping is in its current state.
- Topics []string
- The name of the Kafka topics. Only available for MSK sources. A single topic name must be specified.
- Tumbling
Window intIn Seconds - The duration in seconds of a processing window for AWS Lambda streaming analytics. The range is between 1 second up to 900 seconds. Only available for stream sources (DynamoDB and Kinesis).
- Uuid string
- The UUID of the created event source mapping.
- amazon
Managed EventKafka Event Source Config Source Mapping Amazon Managed Kafka Event Source Config - Additional configuration block for Amazon Managed Kafka sources. Incompatible with "self_managed_event_source" and "self_managed_kafka_event_source_config". Detailed below.
- batch
Size Integer - The largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to
100
for DynamoDB, Kinesis, MQ and MSK,10
for SQS. - bisect
Batch BooleanOn Function Error - (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to
false
.
- (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to
- destination
Config EventSource Mapping Destination Config - (Optional) An Amazon SQS queue, Amazon SNS topic or Amazon S3 bucket (only available for Kafka sources) destination for failed records. Only available for stream sources (DynamoDB and Kinesis) and Kafka sources (Amazon MSK and Self-managed Apache Kafka). Detailed below.
- document
Db EventEvent Source Config Source Mapping Document Db Event Source Config - (Optional) Configuration settings for a DocumentDB event source. Detailed below.
- enabled Boolean
- Determines if the mapping will be enabled on creation. Defaults to
true
. - event
Source StringArn - The event source ARN - this is required for Kinesis stream, DynamoDB stream, SQS queue, MQ broker, MSK cluster or DocumentDB change stream. It is incompatible with a Self Managed Kafka source.
- filter
Criteria EventSource Mapping Filter Criteria - The criteria to use for event filtering Kinesis stream, DynamoDB stream, SQS queue event sources. Detailed below.
- function
Arn String - The ARN of the Lambda function the event source mapping is sending events to. (Note: this is a computed value that differs from
function_name
above.) - function
Name String - The name or the ARN of the Lambda function that will be subscribing to events.
- function
Response List<String>Types - A list of current response type enums applied to the event source mapping for AWS Lambda checkpointing. Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values:
ReportBatchItemFailures
. - kms
Key StringArn - The ARN of the Key Management Service (KMS) customer managed key that Lambda uses to encrypt your function's filter criteria.
- last
Modified String - The date this resource was last modified.
- last
Processing StringResult - The result of the last AWS Lambda invocation of your Lambda function.
- maximum
Batching IntegerWindow In Seconds - The maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records will continue to buffer (or accumulate in the case of an SQS queue event source) until either
maximum_batching_window_in_seconds
expires orbatch_size
has been met. For streaming event sources, defaults to as soon as records are available in the stream. If the batch it reads from the stream/queue only has one record in it, Lambda only sends one record to the function. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues. - maximum
Record IntegerAge In Seconds - (Optional) The maximum age of a record that Lambda sends to a function for processing. Only available for stream sources (DynamoDB and Kinesis). Must be either -1 (forever, and the default value) or between 60 and 604800 (inclusive).
- maximum
Retry IntegerAttempts - (Optional) The maximum number of times to retry when the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of -1 (forever), maximum of 10000.
- parallelization
Factor Integer - (Optional) The number of batches to process from each shard concurrently. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of 1, maximum of 10.
- queues String
- The name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. The list must contain exactly one queue name.
- scaling
Config EventSource Mapping Scaling Config - Scaling configuration of the event source. Only available for SQS queues. Detailed below.
- self
Managed EventEvent Source Source Mapping Self Managed Event Source - (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include
source_access_configuration
. Detailed below.
- (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include
- self
Managed EventKafka Event Source Config Source Mapping Self Managed Kafka Event Source Config - Additional configuration block for Self Managed Kafka sources. Incompatible with "event_source_arn" and "amazon_managed_kafka_event_source_config". Detailed below.
- source
Access List<EventConfigurations Source Mapping Source Access Configuration> - For Self Managed Kafka sources, the access configuration for the source. If set, configuration must also include
self_managed_event_source
. Detailed below. - starting
Position String - The position in the stream where AWS Lambda should start reading. Must be one of
AT_TIMESTAMP
(Kinesis only), LATEST
or TRIM_HORIZON
if getting events from Kinesis, DynamoDB, MSK or Self Managed Apache Kafka. Must not be provided if getting events from SQS. More information about these positions can be found in the AWS DynamoDB Streams API Reference and AWS Kinesis API Reference. - starting
Position StringTimestamp - A timestamp in RFC3339 format of the data record from which to start reading when using
starting_position
set to AT_TIMESTAMP
. If a record with this exact timestamp does not exist, the next later record is chosen. If the timestamp is older than the current trim horizon, the oldest available record is chosen. - state String
- The state of the event source mapping.
- state
Transition StringReason - The reason the event source mapping is in its current state.
- topics List<String>
- The name of the Kafka topics. Only available for MSK sources. A single topic name must be specified.
- tumbling
Window IntegerIn Seconds - The duration in seconds of a processing window for AWS Lambda streaming analytics. The range is between 1 and 900 seconds. Only available for stream sources (DynamoDB and Kinesis).
- uuid String
- The UUID of the created event source mapping.
- amazon
Managed EventKafka Event Source Config Source Mapping Amazon Managed Kafka Event Source Config - Additional configuration block for Amazon Managed Kafka sources. Incompatible with "self_managed_event_source" and "self_managed_kafka_event_source_config". Detailed below.
- batch
Size number - The largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to
100
for DynamoDB, Kinesis, MQ and MSK, 10
for SQS. - bisect
Batch booleanOn Function Error - (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to
false
.
- (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to
- destination
Config EventSource Mapping Destination Config - (Optional) An Amazon SQS queue, Amazon SNS topic or Amazon S3 bucket (only available for Kafka sources) destination for failed records. Only available for stream sources (DynamoDB and Kinesis) and Kafka sources (Amazon MSK and Self-managed Apache Kafka). Detailed below.
- document
Db EventEvent Source Config Source Mapping Document Db Event Source Config - (Optional) Configuration settings for a DocumentDB event source. Detailed below.
- enabled boolean
- Determines if the mapping will be enabled on creation. Defaults to
true
. - event
Source stringArn - The event source ARN - this is required for Kinesis stream, DynamoDB stream, SQS queue, MQ broker, MSK cluster or DocumentDB change stream. It is incompatible with a Self Managed Kafka source.
- filter
Criteria EventSource Mapping Filter Criteria - The criteria to use for event filtering of Kinesis stream, DynamoDB stream, and SQS queue event sources. Detailed below.
- function
Arn string - The ARN of the Lambda function the event source mapping is sending events to. (Note: this is a computed value that differs from
function_name
above.) - function
Name string - The name or the ARN of the Lambda function that will be subscribing to events.
- function
Response string[]Types - A list of current response type enums applied to the event source mapping for AWS Lambda checkpointing. Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values:
ReportBatchItemFailures
. - kms
Key stringArn - The ARN of the Key Management Service (KMS) customer managed key that Lambda uses to encrypt your function's filter criteria.
- last
Modified string - The date this resource was last modified.
- last
Processing stringResult - The result of the last AWS Lambda invocation of your Lambda function.
- maximum
Batching numberWindow In Seconds - The maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records will continue to buffer (or accumulate in the case of an SQS queue event source) until either
maximum_batching_window_in_seconds
expires or batch_size
has been met. For streaming event sources, defaults to as soon as records are available in the stream. If the batch it reads from the stream/queue only has one record in it, Lambda only sends one record to the function. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues. - maximum
Record numberAge In Seconds - (Optional) The maximum age of a record that Lambda sends to a function for processing. Only available for stream sources (DynamoDB and Kinesis). Must be either -1 (forever, and the default value) or between 60 and 604800 (inclusive).
- maximum
Retry numberAttempts - (Optional) The maximum number of times to retry when the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of -1 (forever), maximum of 10000.
- parallelization
Factor number - (Optional) The number of batches to process from each shard concurrently. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of 1, maximum of 10.
- queues string
- The name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. The list must contain exactly one queue name.
- scaling
Config EventSource Mapping Scaling Config - Scaling configuration of the event source. Only available for SQS queues. Detailed below.
- self
Managed EventEvent Source Source Mapping Self Managed Event Source - (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include
source_access_configuration
. Detailed below.
- (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include
- self
Managed EventKafka Event Source Config Source Mapping Self Managed Kafka Event Source Config - Additional configuration block for Self Managed Kafka sources. Incompatible with "event_source_arn" and "amazon_managed_kafka_event_source_config". Detailed below.
- source
Access EventConfigurations Source Mapping Source Access Configuration[] - For Self Managed Kafka sources, the access configuration for the source. If set, configuration must also include
self_managed_event_source
. Detailed below. - starting
Position string - The position in the stream where AWS Lambda should start reading. Must be one of
AT_TIMESTAMP
(Kinesis only), LATEST
or TRIM_HORIZON
if getting events from Kinesis, DynamoDB, MSK or Self Managed Apache Kafka. Must not be provided if getting events from SQS. More information about these positions can be found in the AWS DynamoDB Streams API Reference and AWS Kinesis API Reference. - starting
Position stringTimestamp - A timestamp in RFC3339 format of the data record from which to start reading when using
starting_position
set to AT_TIMESTAMP
. If a record with this exact timestamp does not exist, the next later record is chosen. If the timestamp is older than the current trim horizon, the oldest available record is chosen. - state string
- The state of the event source mapping.
- state
Transition stringReason - The reason the event source mapping is in its current state.
- topics string[]
- The name of the Kafka topics. Only available for MSK sources. A single topic name must be specified.
- tumbling
Window numberIn Seconds - The duration in seconds of a processing window for AWS Lambda streaming analytics. The range is between 1 and 900 seconds. Only available for stream sources (DynamoDB and Kinesis).
- uuid string
- The UUID of the created event source mapping.
- amazon_
managed_ lambda_.kafka_ event_ source_ config Event Source Mapping Amazon Managed Kafka Event Source Config Args - Additional configuration block for Amazon Managed Kafka sources. Incompatible with "self_managed_event_source" and "self_managed_kafka_event_source_config". Detailed below.
- batch_
size int - The largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to
100
for DynamoDB, Kinesis, MQ and MSK, 10
for SQS. - bisect_
batch_ boolon_ function_ error - (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to
false
.
- (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to
- destination_
config lambda_.Event Source Mapping Destination Config Args - (Optional) An Amazon SQS queue, Amazon SNS topic or Amazon S3 bucket (only available for Kafka sources) destination for failed records. Only available for stream sources (DynamoDB and Kinesis) and Kafka sources (Amazon MSK and Self-managed Apache Kafka). Detailed below.
- document_
db_ lambda_.event_ source_ config Event Source Mapping Document Db Event Source Config Args - (Optional) Configuration settings for a DocumentDB event source. Detailed below.
- enabled bool
- Determines if the mapping will be enabled on creation. Defaults to
true
. - event_
source_ strarn - The event source ARN - this is required for Kinesis stream, DynamoDB stream, SQS queue, MQ broker, MSK cluster or DocumentDB change stream. It is incompatible with a Self Managed Kafka source.
- filter_
criteria lambda_.Event Source Mapping Filter Criteria Args - The criteria to use for event filtering of Kinesis stream, DynamoDB stream, and SQS queue event sources. Detailed below.
- function_
arn str - The ARN of the Lambda function the event source mapping is sending events to. (Note: this is a computed value that differs from
function_name
above.) - function_
name str - The name or the ARN of the Lambda function that will be subscribing to events.
- function_
response_ Sequence[str]types - A list of current response type enums applied to the event source mapping for AWS Lambda checkpointing. Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values:
ReportBatchItemFailures
. - kms_
key_ strarn - The ARN of the Key Management Service (KMS) customer managed key that Lambda uses to encrypt your function's filter criteria.
- last_
modified str - The date this resource was last modified.
- last_
processing_ strresult - The result of the last AWS Lambda invocation of your Lambda function.
- maximum_
batching_ intwindow_ in_ seconds - The maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records will continue to buffer (or accumulate in the case of an SQS queue event source) until either
maximum_batching_window_in_seconds
expires or batch_size
has been met. For streaming event sources, defaults to as soon as records are available in the stream. If the batch it reads from the stream/queue only has one record in it, Lambda only sends one record to the function. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues. - maximum_
record_ intage_ in_ seconds - (Optional) The maximum age of a record that Lambda sends to a function for processing. Only available for stream sources (DynamoDB and Kinesis). Must be either -1 (forever, and the default value) or between 60 and 604800 (inclusive).
- maximum_
retry_ intattempts - (Optional) The maximum number of times to retry when the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of -1 (forever), maximum of 10000.
- parallelization_
factor int - (Optional) The number of batches to process from each shard concurrently. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of 1, maximum of 10.
- queues str
- The name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. The list must contain exactly one queue name.
- scaling_
config lambda_.Event Source Mapping Scaling Config Args - Scaling configuration of the event source. Only available for SQS queues. Detailed below.
- self_
managed_ lambda_.event_ source Event Source Mapping Self Managed Event Source Args - (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include
source_access_configuration
. Detailed below.
- (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include
- self_
managed_ lambda_.kafka_ event_ source_ config Event Source Mapping Self Managed Kafka Event Source Config Args - Additional configuration block for Self Managed Kafka sources. Incompatible with "event_source_arn" and "amazon_managed_kafka_event_source_config". Detailed below.
- source_
access_ Sequence[lambda_.configurations Event Source Mapping Source Access Configuration Args] - For Self Managed Kafka sources, the access configuration for the source. If set, configuration must also include
self_managed_event_source
. Detailed below. - starting_
position str - The position in the stream where AWS Lambda should start reading. Must be one of
AT_TIMESTAMP
(Kinesis only), LATEST
or TRIM_HORIZON
if getting events from Kinesis, DynamoDB, MSK or Self Managed Apache Kafka. Must not be provided if getting events from SQS. More information about these positions can be found in the AWS DynamoDB Streams API Reference and AWS Kinesis API Reference. - starting_
position_ strtimestamp - A timestamp in RFC3339 format of the data record from which to start reading when using
starting_position
set to AT_TIMESTAMP
. If a record with this exact timestamp does not exist, the next later record is chosen. If the timestamp is older than the current trim horizon, the oldest available record is chosen. - state str
- The state of the event source mapping.
- state_
transition_ strreason - The reason the event source mapping is in its current state.
- topics Sequence[str]
- The name of the Kafka topics. Only available for MSK sources. A single topic name must be specified.
- tumbling_
window_ intin_ seconds - The duration in seconds of a processing window for AWS Lambda streaming analytics. The range is between 1 and 900 seconds. Only available for stream sources (DynamoDB and Kinesis).
- uuid str
- The UUID of the created event source mapping.
- amazon
Managed Property MapKafka Event Source Config - Additional configuration block for Amazon Managed Kafka sources. Incompatible with "self_managed_event_source" and "self_managed_kafka_event_source_config". Detailed below.
- batch
Size Number - The largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to
100
for DynamoDB, Kinesis, MQ and MSK, 10
for SQS. - bisect
Batch BooleanOn Function Error - (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to
false
.
- (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to
- destination
Config Property Map - (Optional) An Amazon SQS queue, Amazon SNS topic or Amazon S3 bucket (only available for Kafka sources) destination for failed records. Only available for stream sources (DynamoDB and Kinesis) and Kafka sources (Amazon MSK and Self-managed Apache Kafka). Detailed below.
- document
Db Property MapEvent Source Config - (Optional) Configuration settings for a DocumentDB event source. Detailed below.
- enabled Boolean
- Determines if the mapping will be enabled on creation. Defaults to
true
. - event
Source StringArn - The event source ARN - this is required for Kinesis stream, DynamoDB stream, SQS queue, MQ broker, MSK cluster or DocumentDB change stream. It is incompatible with a Self Managed Kafka source.
- filter
Criteria Property Map - The criteria to use for event filtering of Kinesis stream, DynamoDB stream, and SQS queue event sources. Detailed below.
- function
Arn String - The ARN of the Lambda function the event source mapping is sending events to. (Note: this is a computed value that differs from
function_name
above.) - function
Name String - The name or the ARN of the Lambda function that will be subscribing to events.
- function
Response List<String>Types - A list of current response type enums applied to the event source mapping for AWS Lambda checkpointing. Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values:
ReportBatchItemFailures
. - kms
Key StringArn - The ARN of the Key Management Service (KMS) customer managed key that Lambda uses to encrypt your function's filter criteria.
- last
Modified String - The date this resource was last modified.
- last
Processing StringResult - The result of the last AWS Lambda invocation of your Lambda function.
- maximum
Batching NumberWindow In Seconds - The maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records will continue to buffer (or accumulate in the case of an SQS queue event source) until either
maximum_batching_window_in_seconds
expires or batch_size
has been met. For streaming event sources, defaults to as soon as records are available in the stream. If the batch it reads from the stream/queue only has one record in it, Lambda only sends one record to the function. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues. - maximum
Record NumberAge In Seconds - (Optional) The maximum age of a record that Lambda sends to a function for processing. Only available for stream sources (DynamoDB and Kinesis). Must be either -1 (forever, and the default value) or between 60 and 604800 (inclusive).
- maximum
Retry NumberAttempts - (Optional) The maximum number of times to retry when the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of -1 (forever), maximum of 10000.
- parallelization
Factor Number - (Optional) The number of batches to process from each shard concurrently. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of 1, maximum of 10.
- queues String
- The name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. The list must contain exactly one queue name.
- scaling
Config Property Map - Scaling configuration of the event source. Only available for SQS queues. Detailed below.
- self
Managed Property MapEvent Source - (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include
source_access_configuration
. Detailed below.
- (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include
- self
Managed Property MapKafka Event Source Config - Additional configuration block for Self Managed Kafka sources. Incompatible with "event_source_arn" and "amazon_managed_kafka_event_source_config". Detailed below.
- source
Access List<Property Map>Configurations - For Self Managed Kafka sources, the access configuration for the source. If set, configuration must also include
self_managed_event_source
. Detailed below. - starting
Position String - The position in the stream where AWS Lambda should start reading. Must be one of
AT_TIMESTAMP
(Kinesis only), LATEST
or TRIM_HORIZON
if getting events from Kinesis, DynamoDB, MSK or Self Managed Apache Kafka. Must not be provided if getting events from SQS. More information about these positions can be found in the AWS DynamoDB Streams API Reference and AWS Kinesis API Reference. - starting
Position StringTimestamp - A timestamp in RFC3339 format of the data record from which to start reading when using
starting_position
set to AT_TIMESTAMP
. If a record with this exact timestamp does not exist, the next later record is chosen. If the timestamp is older than the current trim horizon, the oldest available record is chosen. - state String
- The state of the event source mapping.
- state
Transition StringReason - The reason the event source mapping is in its current state.
- topics List<String>
- The name of the Kafka topics. Only available for MSK sources. A single topic name must be specified.
- tumbling
Window NumberIn Seconds - The duration in seconds of a processing window for AWS Lambda streaming analytics. The range is between 1 and 900 seconds. Only available for stream sources (DynamoDB and Kinesis).
- uuid String
- The UUID of the created event source mapping.
Supporting Types
EventSourceMappingAmazonManagedKafkaEventSourceConfig, EventSourceMappingAmazonManagedKafkaEventSourceConfigArgs
- Consumer
Group stringId - A Kafka consumer group ID between 1 and 200 characters for use when creating this event source mapping. If one is not specified, this value will be automatically generated. See AmazonManagedKafkaEventSourceConfig Syntax.
- Consumer
Group stringId - A Kafka consumer group ID between 1 and 200 characters for use when creating this event source mapping. If one is not specified, this value will be automatically generated. See AmazonManagedKafkaEventSourceConfig Syntax.
- consumer
Group StringId - A Kafka consumer group ID between 1 and 200 characters for use when creating this event source mapping. If one is not specified, this value will be automatically generated. See AmazonManagedKafkaEventSourceConfig Syntax.
- consumer
Group stringId - A Kafka consumer group ID between 1 and 200 characters for use when creating this event source mapping. If one is not specified, this value will be automatically generated. See AmazonManagedKafkaEventSourceConfig Syntax.
- consumer_
group_ strid - A Kafka consumer group ID between 1 and 200 characters for use when creating this event source mapping. If one is not specified, this value will be automatically generated. See AmazonManagedKafkaEventSourceConfig Syntax.
- consumer
Group StringId - A Kafka consumer group ID between 1 and 200 characters for use when creating this event source mapping. If one is not specified, this value will be automatically generated. See AmazonManagedKafkaEventSourceConfig Syntax.
EventSourceMappingDestinationConfig, EventSourceMappingDestinationConfigArgs
- On
Failure EventSource Mapping Destination Config On Failure - The destination configuration for failed invocations. Detailed below.
- On
Failure EventSource Mapping Destination Config On Failure - The destination configuration for failed invocations. Detailed below.
- on
Failure EventSource Mapping Destination Config On Failure - The destination configuration for failed invocations. Detailed below.
- on
Failure EventSource Mapping Destination Config On Failure - The destination configuration for failed invocations. Detailed below.
- on_
failure lambda_.Event Source Mapping Destination Config On Failure - The destination configuration for failed invocations. Detailed below.
- on
Failure Property Map - The destination configuration for failed invocations. Detailed below.
EventSourceMappingDestinationConfigOnFailure, EventSourceMappingDestinationConfigOnFailureArgs
- Destination
Arn string - The Amazon Resource Name (ARN) of the destination resource.
- Destination
Arn string - The Amazon Resource Name (ARN) of the destination resource.
- destination
Arn String - The Amazon Resource Name (ARN) of the destination resource.
- destination
Arn string - The Amazon Resource Name (ARN) of the destination resource.
- destination_
arn str - The Amazon Resource Name (ARN) of the destination resource.
- destination
Arn String - The Amazon Resource Name (ARN) of the destination resource.
EventSourceMappingDocumentDbEventSourceConfig, EventSourceMappingDocumentDbEventSourceConfigArgs
- Database
Name string - The name of the database to consume within the DocumentDB cluster.
- Collection
Name string - The name of the collection to consume within the database. If you do not specify a collection, Lambda consumes all collections.
- Full
Document string - Determines what DocumentDB sends to your event stream during document update operations. If set to
UpdateLookup
, DocumentDB sends a delta describing the changes, along with a copy of the entire document. Otherwise, DocumentDB sends only a partial document that contains the changes. Valid values: UpdateLookup
, Default
.
- Database
Name string - The name of the database to consume within the DocumentDB cluster.
- Collection
Name string - The name of the collection to consume within the database. If you do not specify a collection, Lambda consumes all collections.
- Full
Document string - Determines what DocumentDB sends to your event stream during document update operations. If set to
UpdateLookup
, DocumentDB sends a delta describing the changes, along with a copy of the entire document. Otherwise, DocumentDB sends only a partial document that contains the changes. Valid values: UpdateLookup
, Default
.
- database
Name String - The name of the database to consume within the DocumentDB cluster.
- collection
Name String - The name of the collection to consume within the database. If you do not specify a collection, Lambda consumes all collections.
- full
Document String - Determines what DocumentDB sends to your event stream during document update operations. If set to
UpdateLookup
, DocumentDB sends a delta describing the changes, along with a copy of the entire document. Otherwise, DocumentDB sends only a partial document that contains the changes. Valid values: UpdateLookup
, Default
.
- database
Name string - The name of the database to consume within the DocumentDB cluster.
- collection
Name string - The name of the collection to consume within the database. If you do not specify a collection, Lambda consumes all collections.
- full
Document string - Determines what DocumentDB sends to your event stream during document update operations. If set to
UpdateLookup
, DocumentDB sends a delta describing the changes, along with a copy of the entire document. Otherwise, DocumentDB sends only a partial document that contains the changes. Valid values: UpdateLookup
, Default
.
- database_
name str - The name of the database to consume within the DocumentDB cluster.
- collection_
name str - The name of the collection to consume within the database. If you do not specify a collection, Lambda consumes all collections.
- full_
document str - Determines what DocumentDB sends to your event stream during document update operations. If set to
UpdateLookup
, DocumentDB sends a delta describing the changes, along with a copy of the entire document. Otherwise, DocumentDB sends only a partial document that contains the changes. Valid values: UpdateLookup
, Default
.
- database
Name String - The name of the database to consume within the DocumentDB cluster.
- collection
Name String - The name of the collection to consume within the database. If you do not specify a collection, Lambda consumes all collections.
- full
Document String - Determines what DocumentDB sends to your event stream during document update operations. If set to
UpdateLookup
, DocumentDB sends a delta describing the changes, along with a copy of the entire document. Otherwise, DocumentDB sends only a partial document that contains the changes. Valid values: UpdateLookup
, Default
.
EventSourceMappingFilterCriteria, EventSourceMappingFilterCriteriaArgs
- Filters
List<Event
Source Mapping Filter Criteria Filter> - A set of up to 5 filters. If an event satisfies at least one, Lambda sends the event to the function or adds it to the next batch. Detailed below.
- Filters
[]Event
Source Mapping Filter Criteria Filter - A set of up to 5 filters. If an event satisfies at least one, Lambda sends the event to the function or adds it to the next batch. Detailed below.
- filters
List<Event
Source Mapping Filter Criteria Filter> - A set of up to 5 filters. If an event satisfies at least one, Lambda sends the event to the function or adds it to the next batch. Detailed below.
- filters
Event
Source Mapping Filter Criteria Filter[] - A set of up to 5 filters. If an event satisfies at least one, Lambda sends the event to the function or adds it to the next batch. Detailed below.
- filters
Sequence[lambda_.
Event Source Mapping Filter Criteria Filter] - A set of up to 5 filters. If an event satisfies at least one, Lambda sends the event to the function or adds it to the next batch. Detailed below.
- filters List<Property Map>
- A set of up to 5 filters. If an event satisfies at least one, Lambda sends the event to the function or adds it to the next batch. Detailed below.
EventSourceMappingFilterCriteriaFilter, EventSourceMappingFilterCriteriaFilterArgs
- Pattern string
- A filter pattern up to 4096 characters. See Filter Rule Syntax.
- Pattern string
- A filter pattern up to 4096 characters. See Filter Rule Syntax.
- pattern String
- A filter pattern up to 4096 characters. See Filter Rule Syntax.
- pattern string
- A filter pattern up to 4096 characters. See Filter Rule Syntax.
- pattern str
- A filter pattern up to 4096 characters. See Filter Rule Syntax.
- pattern String
- A filter pattern up to 4096 characters. See Filter Rule Syntax.
EventSourceMappingScalingConfig, EventSourceMappingScalingConfigArgs
- Maximum
Concurrency int - Limits the number of concurrent instances that the Amazon SQS event source can invoke. Must be greater than or equal to
2
. See Configuring maximum concurrency for Amazon SQS event sources. You need to raise a Service Quota Ticket to increase the concurrency beyond 1000.
- Maximum
Concurrency int - Limits the number of concurrent instances that the Amazon SQS event source can invoke. Must be greater than or equal to
2
. See Configuring maximum concurrency for Amazon SQS event sources. You need to raise a Service Quota Ticket to increase the concurrency beyond 1000.
- maximum
Concurrency Integer - Limits the number of concurrent instances that the Amazon SQS event source can invoke. Must be greater than or equal to
2
. See Configuring maximum concurrency for Amazon SQS event sources. You need to raise a Service Quota Ticket to increase the concurrency beyond 1000.
- maximum
Concurrency number - Limits the number of concurrent instances that the Amazon SQS event source can invoke. Must be greater than or equal to
2
. See Configuring maximum concurrency for Amazon SQS event sources. You need to raise a Service Quota Ticket to increase the concurrency beyond 1000.
- maximum_
concurrency int - Limits the number of concurrent instances that the Amazon SQS event source can invoke. Must be greater than or equal to
2
. See Configuring maximum concurrency for Amazon SQS event sources. You need to raise a Service Quota Ticket to increase the concurrency beyond 1000.
- maximum
Concurrency Number - Limits the number of concurrent instances that the Amazon SQS event source can invoke. Must be greater than or equal to
2
. See Configuring maximum concurrency for Amazon SQS event sources. You need to raise a Service Quota Ticket to increase the concurrency beyond 1000.
EventSourceMappingSelfManagedEventSource, EventSourceMappingSelfManagedEventSourceArgs
- Endpoints Dictionary<string, string>
- A map of endpoints for the self managed source. For Kafka self-managed sources, the key should be
KAFKA_BOOTSTRAP_SERVERS
and the value should be a string with a comma separated list of broker endpoints.
- Endpoints map[string]string
- A map of endpoints for the self managed source. For Kafka self-managed sources, the key should be
KAFKA_BOOTSTRAP_SERVERS
and the value should be a string with a comma separated list of broker endpoints.
- endpoints Map<String,String>
- A map of endpoints for the self managed source. For Kafka self-managed sources, the key should be
KAFKA_BOOTSTRAP_SERVERS
and the value should be a string with a comma separated list of broker endpoints.
- endpoints {[key: string]: string}
- A map of endpoints for the self managed source. For Kafka self-managed sources, the key should be
KAFKA_BOOTSTRAP_SERVERS
and the value should be a string with a comma separated list of broker endpoints.
- endpoints Mapping[str, str]
- A map of endpoints for the self managed source. For Kafka self-managed sources, the key should be
KAFKA_BOOTSTRAP_SERVERS
and the value should be a string with a comma separated list of broker endpoints.
- endpoints Map<String>
- A map of endpoints for the self managed source. For Kafka self-managed sources, the key should be
KAFKA_BOOTSTRAP_SERVERS
and the value should be a string with a comma separated list of broker endpoints.
EventSourceMappingSelfManagedKafkaEventSourceConfig, EventSourceMappingSelfManagedKafkaEventSourceConfigArgs
- Consumer
Group stringId - A Kafka consumer group ID between 1 and 200 characters for use when creating this event source mapping. If one is not specified, this value will be automatically generated. See SelfManagedKafkaEventSourceConfig Syntax.
- Consumer
Group stringId - A Kafka consumer group ID between 1 and 200 characters for use when creating this event source mapping. If one is not specified, this value will be automatically generated. See SelfManagedKafkaEventSourceConfig Syntax.
- consumer
Group StringId - A Kafka consumer group ID between 1 and 200 characters for use when creating this event source mapping. If one is not specified, this value will be automatically generated. See SelfManagedKafkaEventSourceConfig Syntax.
- consumer
Group stringId - A Kafka consumer group ID between 1 and 200 characters for use when creating this event source mapping. If one is not specified, this value will be automatically generated. See SelfManagedKafkaEventSourceConfig Syntax.
- consumer_
group_ strid - A Kafka consumer group ID between 1 and 200 characters for use when creating this event source mapping. If one is not specified, this value will be automatically generated. See SelfManagedKafkaEventSourceConfig Syntax.
- consumer
Group StringId - A Kafka consumer group ID between 1 and 200 characters for use when creating this event source mapping. If one is not specified, this value will be automatically generated. See SelfManagedKafkaEventSourceConfig Syntax.
EventSourceMappingSourceAccessConfiguration, EventSourceMappingSourceAccessConfigurationArgs
- Type string
- The type of authentication protocol, VPC components, or virtual host for your event source. For valid values, refer to the AWS documentation.
- Uri string
- The URI for this configuration. For type
VPC_SUBNET
the value should be subnet:subnet_id
where subnet_id
is the value you would find in an aws.ec2.Subnet resource's id attribute. For type VPC_SECURITY_GROUP
the value should be security_group:security_group_id
where security_group_id
is the value you would find in an aws.ec2.SecurityGroup resource's id attribute.
- Type string
- The type of authentication protocol, VPC components, or virtual host for your event source. For valid values, refer to the AWS documentation.
- Uri string
- The URI for this configuration. For type
VPC_SUBNET
the value should be subnet:subnet_id
where subnet_id
is the value you would find in an aws.ec2.Subnet resource's id attribute. For type VPC_SECURITY_GROUP
the value should be security_group:security_group_id
where security_group_id
is the value you would find in an aws.ec2.SecurityGroup resource's id attribute.
- type String
- The type of authentication protocol, VPC components, or virtual host for your event source. For valid values, refer to the AWS documentation.
- uri String
- The URI for this configuration. For type
VPC_SUBNET
the value should be subnet:subnet_id
where subnet_id
is the value you would find in an aws.ec2.Subnet resource's id attribute. For type VPC_SECURITY_GROUP
the value should be security_group:security_group_id
where security_group_id
is the value you would find in an aws.ec2.SecurityGroup resource's id attribute.
- type string
- The type of authentication protocol, VPC components, or virtual host for your event source. For valid values, refer to the AWS documentation.
- uri string
- The URI for this configuration. For type
VPC_SUBNET
the value should be subnet:subnet_id
where subnet_id
is the value you would find in an aws.ec2.Subnet resource's id attribute. For type VPC_SECURITY_GROUP
the value should be security_group:security_group_id
where security_group_id
is the value you would find in an aws.ec2.SecurityGroup resource's id attribute.
- type str
- The type of authentication protocol, VPC components, or virtual host for your event source. For valid values, refer to the AWS documentation.
- uri str
- The URI for this configuration. For type
VPC_SUBNET
the value should be subnet:subnet_id
where subnet_id
is the value you would find in an aws.ec2.Subnet resource's id attribute. For type VPC_SECURITY_GROUP
the value should be security_group:security_group_id
where security_group_id
is the value you would find in an aws.ec2.SecurityGroup resource's id attribute.
- type String
- The type of authentication protocol, VPC components, or virtual host for your event source. For valid values, refer to the AWS documentation.
- uri String
- The URI for this configuration. For type
VPC_SUBNET
the value should be subnet:subnet_id
where subnet_id
is the value you would find in an aws.ec2.Subnet resource's id attribute. For type VPC_SECURITY_GROUP
the value should be security_group:security_group_id
where security_group_id
is the value you would find in an aws.ec2.SecurityGroup resource's id attribute.
Import
Using pulumi import
, import Lambda event source mappings using the UUID
(event source mapping identifier). For example:
$ pulumi import aws:lambda/eventSourceMapping:EventSourceMapping event_source_mapping 12345kxodurf3443
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- AWS Classic pulumi/pulumi-aws
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the
aws
Terraform Provider.