yandex.MdbKafkaCluster
Manages a Kafka cluster within Yandex.Cloud. For more information, see the official documentation.
Example Usage
Example of creating a single-node Kafka cluster.
using Pulumi;
using Yandex = Pulumi.Yandex;
class MyStack : Stack
{
public MyStack()
{
var fooVpcNetwork = new Yandex.VpcNetwork("fooVpcNetwork", new Yandex.VpcNetworkArgs
{
});
var fooVpcSubnet = new Yandex.VpcSubnet("fooVpcSubnet", new Yandex.VpcSubnetArgs
{
NetworkId = fooVpcNetwork.Id,
V4CidrBlocks =
{
"10.5.0.0/24",
},
Zone = "ru-central1-a",
});
var fooMdbKafkaCluster = new Yandex.MdbKafkaCluster("fooMdbKafkaCluster", new Yandex.MdbKafkaClusterArgs
{
Config = new Yandex.Inputs.MdbKafkaClusterConfigArgs
{
AssignPublicIp = false,
BrokersCount = 1,
Kafka = new Yandex.Inputs.MdbKafkaClusterConfigKafkaArgs
{
KafkaConfig = new Yandex.Inputs.MdbKafkaClusterConfigKafkaKafkaConfigArgs
{
CompressionType = "COMPRESSION_TYPE_ZSTD",
DefaultReplicationFactor = "1",
LogFlushIntervalMessages = "1024",
LogFlushIntervalMs = "1000",
LogFlushSchedulerIntervalMs = "1000",
LogPreallocate = true,
LogRetentionBytes = "1073741824",
LogRetentionHours = "168",
LogRetentionMinutes = "10080",
LogRetentionMs = "86400000",
LogSegmentBytes = "134217728",
NumPartitions = "10",
},
Resources = new Yandex.Inputs.MdbKafkaClusterConfigKafkaResourcesArgs
{
DiskSize = 32,
DiskTypeId = "network-ssd",
ResourcePresetId = "s2.micro",
},
},
SchemaRegistry = false,
UnmanagedTopics = false,
Version = "2.8",
Zones =
{
"ru-central1-a",
},
},
Environment = "PRESTABLE",
NetworkId = fooVpcNetwork.Id,
SubnetIds =
{
fooVpcSubnet.Id,
},
Users =
{
new Yandex.Inputs.MdbKafkaClusterUserArgs
{
Name = "producer-application",
Password = "password",
Permissions =
{
new Yandex.Inputs.MdbKafkaClusterUserPermissionArgs
{
Role = "ACCESS_ROLE_PRODUCER",
TopicName = "input",
},
},
},
new Yandex.Inputs.MdbKafkaClusterUserArgs
{
Name = "worker",
Password = "password",
Permissions =
{
new Yandex.Inputs.MdbKafkaClusterUserPermissionArgs
{
Role = "ACCESS_ROLE_CONSUMER",
TopicName = "input",
},
new Yandex.Inputs.MdbKafkaClusterUserPermissionArgs
{
Role = "ACCESS_ROLE_PRODUCER",
TopicName = "output",
},
},
},
},
});
}
}
package main
import (
"github.com/pulumi/pulumi-yandex/sdk/go/yandex"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
fooVpcNetwork, err := yandex.NewVpcNetwork(ctx, "fooVpcNetwork", nil)
if err != nil {
return err
}
fooVpcSubnet, err := yandex.NewVpcSubnet(ctx, "fooVpcSubnet", &yandex.VpcSubnetArgs{
NetworkId: fooVpcNetwork.ID(),
V4CidrBlocks: pulumi.StringArray{
pulumi.String("10.5.0.0/24"),
},
Zone: pulumi.String("ru-central1-a"),
})
if err != nil {
return err
}
_, err = yandex.NewMdbKafkaCluster(ctx, "fooMdbKafkaCluster", &yandex.MdbKafkaClusterArgs{
Config: &yandex.MdbKafkaClusterConfigArgs{
AssignPublicIp: pulumi.Bool(false),
BrokersCount: pulumi.Int(1),
Kafka: &yandex.MdbKafkaClusterConfigKafkaArgs{
KafkaConfig: &yandex.MdbKafkaClusterConfigKafkaKafkaConfigArgs{
CompressionType: pulumi.String("COMPRESSION_TYPE_ZSTD"),
DefaultReplicationFactor: pulumi.String("1"),
LogFlushIntervalMessages: pulumi.String("1024"),
LogFlushIntervalMs: pulumi.String("1000"),
LogFlushSchedulerIntervalMs: pulumi.String("1000"),
LogPreallocate: pulumi.Bool(true),
LogRetentionBytes: pulumi.String("1073741824"),
LogRetentionHours: pulumi.String("168"),
LogRetentionMinutes: pulumi.String("10080"),
LogRetentionMs: pulumi.String("86400000"),
LogSegmentBytes: pulumi.String("134217728"),
NumPartitions: pulumi.String("10"),
},
Resources: &yandex.MdbKafkaClusterConfigKafkaResourcesArgs{
DiskSize: pulumi.Int(32),
DiskTypeId: pulumi.String("network-ssd"),
ResourcePresetId: pulumi.String("s2.micro"),
},
},
SchemaRegistry: pulumi.Bool(false),
UnmanagedTopics: pulumi.Bool(false),
Version: pulumi.String("2.8"),
Zones: pulumi.StringArray{
pulumi.String("ru-central1-a"),
},
},
Environment: pulumi.String("PRESTABLE"),
NetworkId: fooVpcNetwork.ID(),
SubnetIds: pulumi.StringArray{
fooVpcSubnet.ID(),
},
Users: yandex.MdbKafkaClusterUserArray{
&yandex.MdbKafkaClusterUserArgs{
Name: pulumi.String("producer-application"),
Password: pulumi.String("password"),
Permissions: yandex.MdbKafkaClusterUserPermissionArray{
&yandex.MdbKafkaClusterUserPermissionArgs{
Role: pulumi.String("ACCESS_ROLE_PRODUCER"),
TopicName: pulumi.String("input"),
},
},
},
&yandex.MdbKafkaClusterUserArgs{
Name: pulumi.String("worker"),
Password: pulumi.String("password"),
Permissions: yandex.MdbKafkaClusterUserPermissionArray{
&yandex.MdbKafkaClusterUserPermissionArgs{
Role: pulumi.String("ACCESS_ROLE_CONSUMER"),
TopicName: pulumi.String("input"),
},
&yandex.MdbKafkaClusterUserPermissionArgs{
Role: pulumi.String("ACCESS_ROLE_PRODUCER"),
TopicName: pulumi.String("output"),
},
},
},
},
})
if err != nil {
return err
}
return nil
})
}
import pulumi
import pulumi_yandex as yandex
foo_vpc_network = yandex.VpcNetwork("fooVpcNetwork")
foo_vpc_subnet = yandex.VpcSubnet("fooVpcSubnet",
network_id=foo_vpc_network.id,
v4_cidr_blocks=["10.5.0.0/24"],
zone="ru-central1-a")
foo_mdb_kafka_cluster = yandex.MdbKafkaCluster("fooMdbKafkaCluster",
config=yandex.MdbKafkaClusterConfigArgs(
assign_public_ip=False,
brokers_count=1,
kafka=yandex.MdbKafkaClusterConfigKafkaArgs(
kafka_config=yandex.MdbKafkaClusterConfigKafkaKafkaConfigArgs(
compression_type="COMPRESSION_TYPE_ZSTD",
default_replication_factor="1",
log_flush_interval_messages="1024",
log_flush_interval_ms="1000",
log_flush_scheduler_interval_ms="1000",
log_preallocate=True,
log_retention_bytes="1073741824",
log_retention_hours="168",
log_retention_minutes="10080",
log_retention_ms="86400000",
log_segment_bytes="134217728",
num_partitions="10",
),
resources=yandex.MdbKafkaClusterConfigKafkaResourcesArgs(
disk_size=32,
disk_type_id="network-ssd",
resource_preset_id="s2.micro",
),
),
schema_registry=False,
unmanaged_topics=False,
version="2.8",
zones=["ru-central1-a"],
),
environment="PRESTABLE",
network_id=foo_vpc_network.id,
subnet_ids=[foo_vpc_subnet.id],
users=[
yandex.MdbKafkaClusterUserArgs(
name="producer-application",
password="password",
permissions=[yandex.MdbKafkaClusterUserPermissionArgs(
role="ACCESS_ROLE_PRODUCER",
topic_name="input",
)],
),
yandex.MdbKafkaClusterUserArgs(
name="worker",
password="password",
permissions=[
yandex.MdbKafkaClusterUserPermissionArgs(
role="ACCESS_ROLE_CONSUMER",
topic_name="input",
),
yandex.MdbKafkaClusterUserPermissionArgs(
role="ACCESS_ROLE_PRODUCER",
topic_name="output",
),
],
),
])
import * as pulumi from "@pulumi/pulumi";
import * as yandex from "@pulumi/yandex";
const fooVpcNetwork = new yandex.VpcNetwork("foo", {});
const fooVpcSubnet = new yandex.VpcSubnet("foo", {
networkId: fooVpcNetwork.id,
v4CidrBlocks: ["10.5.0.0/24"],
zone: "ru-central1-a",
});
const fooMdbKafkaCluster = new yandex.MdbKafkaCluster("foo", {
config: {
assignPublicIp: false,
brokersCount: 1,
kafka: {
kafkaConfig: {
compressionType: "COMPRESSION_TYPE_ZSTD",
defaultReplicationFactor: "1",
logFlushIntervalMessages: "1024",
logFlushIntervalMs: "1000",
logFlushSchedulerIntervalMs: "1000",
logPreallocate: true,
logRetentionBytes: "1.073741824e+09",
logRetentionHours: "168",
logRetentionMinutes: "10080",
logRetentionMs: "8.64e+07",
logSegmentBytes: "1.34217728e+08",
numPartitions: "10",
},
resources: {
diskSize: 32,
diskTypeId: "network-ssd",
resourcePresetId: "s2.micro",
},
},
schemaRegistry: false,
unmanagedTopics: false,
version: "2.8",
zones: ["ru-central1-a"],
},
environment: "PRESTABLE",
networkId: fooVpcNetwork.id,
subnetIds: [fooVpcSubnet.id],
users: [
{
name: "producer-application",
password: "password",
permissions: [{
role: "ACCESS_ROLE_PRODUCER",
topicName: "input",
}],
},
{
name: "worker",
password: "password",
permissions: [
{
role: "ACCESS_ROLE_CONSUMER",
topicName: "input",
},
{
role: "ACCESS_ROLE_PRODUCER",
topicName: "output",
},
],
},
],
});
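The cluster resource manages users and configuration, while topics are best managed with the separate yandex.MdbKafkaTopic resource (see the topics input below). A minimal TypeScript sketch continuing the example above, with illustrative partition settings that are assumptions rather than recommendations:
// Hypothetical topic attached to the single-node cluster declared above.
const inputTopic = new yandex.MdbKafkaTopic("input", {
    clusterId: fooMdbKafkaCluster.id,
    name: "input",
    partitions: 10,        // illustrative value
    replicationFactor: 1,  // single broker, so no replication
});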
Example of creating a highly available (HA) Kafka cluster with two brokers per availability zone (6 brokers + 3 ZooKeeper hosts).
using Pulumi;
using Yandex = Pulumi.Yandex;
class MyStack : Stack
{
public MyStack()
{
var fooVpcNetwork = new Yandex.VpcNetwork("fooVpcNetwork", new Yandex.VpcNetworkArgs
{
});
var fooVpcSubnet = new Yandex.VpcSubnet("fooVpcSubnet", new Yandex.VpcSubnetArgs
{
NetworkId = fooVpcNetwork.Id,
V4CidrBlocks =
{
"10.1.0.0/24",
},
Zone = "ru-central1-a",
});
var bar = new Yandex.VpcSubnet("bar", new Yandex.VpcSubnetArgs
{
NetworkId = fooVpcNetwork.Id,
V4CidrBlocks =
{
"10.2.0.0/24",
},
Zone = "ru-central1-b",
});
var baz = new Yandex.VpcSubnet("baz", new Yandex.VpcSubnetArgs
{
NetworkId = fooVpcNetwork.Id,
V4CidrBlocks =
{
"10.3.0.0/24",
},
Zone = "ru-central1-c",
});
var fooMdbKafkaCluster = new Yandex.MdbKafkaCluster("fooMdbKafkaCluster", new Yandex.MdbKafkaClusterArgs
{
Config = new Yandex.Inputs.MdbKafkaClusterConfigArgs
{
AssignPublicIp = true,
BrokersCount = 2,
Kafka = new Yandex.Inputs.MdbKafkaClusterConfigKafkaArgs
{
KafkaConfig = new Yandex.Inputs.MdbKafkaClusterConfigKafkaKafkaConfigArgs
{
CompressionType = "COMPRESSION_TYPE_ZSTD",
DefaultReplicationFactor = "6",
LogFlushIntervalMessages = "1024",
LogFlushIntervalMs = "1000",
LogFlushSchedulerIntervalMs = "1000",
LogPreallocate = true,
LogRetentionBytes = "1073741824",
LogRetentionHours = "168",
LogRetentionMinutes = "10080",
LogRetentionMs = "86400000",
LogSegmentBytes = "134217728",
NumPartitions = "10",
},
Resources = new Yandex.Inputs.MdbKafkaClusterConfigKafkaResourcesArgs
{
DiskSize = 128,
DiskTypeId = "network-ssd",
ResourcePresetId = "s2.medium",
},
},
SchemaRegistry = false,
UnmanagedTopics = false,
Version = "2.8",
Zones =
{
"ru-central1-a",
"ru-central1-b",
"ru-central1-c",
},
Zookeeper = new Yandex.Inputs.MdbKafkaClusterConfigZookeeperArgs
{
Resources = new Yandex.Inputs.MdbKafkaClusterConfigZookeeperResourcesArgs
{
DiskSize = 20,
DiskTypeId = "network-ssd",
ResourcePresetId = "s2.micro",
},
},
},
Environment = "PRESTABLE",
NetworkId = fooVpcNetwork.Id,
SubnetIds =
{
fooVpcSubnet.Id,
bar.Id,
baz.Id,
},
Users =
{
new Yandex.Inputs.MdbKafkaClusterUserArgs
{
Name = "producer-application",
Password = "password",
Permissions =
{
new Yandex.Inputs.MdbKafkaClusterUserPermissionArgs
{
Role = "ACCESS_ROLE_PRODUCER",
TopicName = "input",
},
},
},
new Yandex.Inputs.MdbKafkaClusterUserArgs
{
Name = "worker",
Password = "password",
Permissions =
{
new Yandex.Inputs.MdbKafkaClusterUserPermissionArgs
{
Role = "ACCESS_ROLE_CONSUMER",
TopicName = "input",
},
new Yandex.Inputs.MdbKafkaClusterUserPermissionArgs
{
Role = "ACCESS_ROLE_PRODUCER",
TopicName = "output",
},
},
},
},
});
}
}
package main
import (
"github.com/pulumi/pulumi-yandex/sdk/go/yandex"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
fooVpcNetwork, err := yandex.NewVpcNetwork(ctx, "fooVpcNetwork", nil)
if err != nil {
return err
}
fooVpcSubnet, err := yandex.NewVpcSubnet(ctx, "fooVpcSubnet", &yandex.VpcSubnetArgs{
NetworkId: fooVpcNetwork.ID(),
V4CidrBlocks: pulumi.StringArray{
pulumi.String("10.1.0.0/24"),
},
Zone: pulumi.String("ru-central1-a"),
})
if err != nil {
return err
}
bar, err := yandex.NewVpcSubnet(ctx, "bar", &yandex.VpcSubnetArgs{
NetworkId: fooVpcNetwork.ID(),
V4CidrBlocks: pulumi.StringArray{
pulumi.String("10.2.0.0/24"),
},
Zone: pulumi.String("ru-central1-b"),
})
if err != nil {
return err
}
baz, err := yandex.NewVpcSubnet(ctx, "baz", &yandex.VpcSubnetArgs{
NetworkId: fooVpcNetwork.ID(),
V4CidrBlocks: pulumi.StringArray{
pulumi.String("10.3.0.0/24"),
},
Zone: pulumi.String("ru-central1-c"),
})
if err != nil {
return err
}
_, err = yandex.NewMdbKafkaCluster(ctx, "fooMdbKafkaCluster", &yandex.MdbKafkaClusterArgs{
Config: &yandex.MdbKafkaClusterConfigArgs{
AssignPublicIp: pulumi.Bool(true),
BrokersCount: pulumi.Int(2),
Kafka: &yandex.MdbKafkaClusterConfigKafkaArgs{
KafkaConfig: &yandex.MdbKafkaClusterConfigKafkaKafkaConfigArgs{
CompressionType: pulumi.String("COMPRESSION_TYPE_ZSTD"),
DefaultReplicationFactor: pulumi.String("6"),
LogFlushIntervalMessages: pulumi.String("1024"),
LogFlushIntervalMs: pulumi.String("1000"),
LogFlushSchedulerIntervalMs: pulumi.String("1000"),
LogPreallocate: pulumi.Bool(true),
LogRetentionBytes: pulumi.String("1073741824"),
LogRetentionHours: pulumi.String("168"),
LogRetentionMinutes: pulumi.String("10080"),
LogRetentionMs: pulumi.String("86400000"),
LogSegmentBytes: pulumi.String("134217728"),
NumPartitions: pulumi.String("10"),
},
Resources: &yandex.MdbKafkaClusterConfigKafkaResourcesArgs{
DiskSize: pulumi.Int(128),
DiskTypeId: pulumi.String("network-ssd"),
ResourcePresetId: pulumi.String("s2.medium"),
},
},
SchemaRegistry: pulumi.Bool(false),
UnmanagedTopics: pulumi.Bool(false),
Version: pulumi.String("2.8"),
Zones: pulumi.StringArray{
pulumi.String("ru-central1-a"),
pulumi.String("ru-central1-b"),
pulumi.String("ru-central1-c"),
},
Zookeeper: &yandex.MdbKafkaClusterConfigZookeeperArgs{
Resources: &yandex.MdbKafkaClusterConfigZookeeperResourcesArgs{
DiskSize: pulumi.Int(20),
DiskTypeId: pulumi.String("network-ssd"),
ResourcePresetId: pulumi.String("s2.micro"),
},
},
},
Environment: pulumi.String("PRESTABLE"),
NetworkId: fooVpcNetwork.ID(),
SubnetIds: pulumi.StringArray{
fooVpcSubnet.ID(),
bar.ID(),
baz.ID(),
},
Users: yandex.MdbKafkaClusterUserArray{
&yandex.MdbKafkaClusterUserArgs{
Name: pulumi.String("producer-application"),
Password: pulumi.String("password"),
Permissions: yandex.MdbKafkaClusterUserPermissionArray{
&yandex.MdbKafkaClusterUserPermissionArgs{
Role: pulumi.String("ACCESS_ROLE_PRODUCER"),
TopicName: pulumi.String("input"),
},
},
},
&yandex.MdbKafkaClusterUserArgs{
Name: pulumi.String("worker"),
Password: pulumi.String("password"),
Permissions: yandex.MdbKafkaClusterUserPermissionArray{
&yandex.MdbKafkaClusterUserPermissionArgs{
Role: pulumi.String("ACCESS_ROLE_CONSUMER"),
TopicName: pulumi.String("input"),
},
&yandex.MdbKafkaClusterUserPermissionArgs{
Role: pulumi.String("ACCESS_ROLE_PRODUCER"),
TopicName: pulumi.String("output"),
},
},
},
},
})
if err != nil {
return err
}
return nil
})
}
import pulumi
import pulumi_yandex as yandex
foo_vpc_network = yandex.VpcNetwork("fooVpcNetwork")
foo_vpc_subnet = yandex.VpcSubnet("fooVpcSubnet",
network_id=foo_vpc_network.id,
v4_cidr_blocks=["10.1.0.0/24"],
zone="ru-central1-a")
bar = yandex.VpcSubnet("bar",
network_id=foo_vpc_network.id,
v4_cidr_blocks=["10.2.0.0/24"],
zone="ru-central1-b")
baz = yandex.VpcSubnet("baz",
network_id=foo_vpc_network.id,
v4_cidr_blocks=["10.3.0.0/24"],
zone="ru-central1-c")
foo_mdb_kafka_cluster = yandex.MdbKafkaCluster("fooMdbKafkaCluster",
config=yandex.MdbKafkaClusterConfigArgs(
assign_public_ip=True,
brokers_count=2,
kafka=yandex.MdbKafkaClusterConfigKafkaArgs(
kafka_config=yandex.MdbKafkaClusterConfigKafkaKafkaConfigArgs(
compression_type="COMPRESSION_TYPE_ZSTD",
default_replication_factor="6",
log_flush_interval_messages="1024",
log_flush_interval_ms="1000",
log_flush_scheduler_interval_ms="1000",
log_preallocate=True,
log_retention_bytes="1073741824",
log_retention_hours="168",
log_retention_minutes="10080",
log_retention_ms="86400000",
log_segment_bytes="134217728",
num_partitions="10",
),
resources=yandex.MdbKafkaClusterConfigKafkaResourcesArgs(
disk_size=128,
disk_type_id="network-ssd",
resource_preset_id="s2.medium",
),
),
schema_registry=False,
unmanaged_topics=False,
version="2.8",
zones=[
"ru-central1-a",
"ru-central1-b",
"ru-central1-c",
],
zookeeper=yandex.MdbKafkaClusterConfigZookeeperArgs(
resources=yandex.MdbKafkaClusterConfigZookeeperResourcesArgs(
disk_size=20,
disk_type_id="network-ssd",
resource_preset_id="s2.micro",
),
),
),
environment="PRESTABLE",
network_id=foo_vpc_network.id,
subnet_ids=[
foo_vpc_subnet.id,
bar.id,
baz.id,
],
users=[
yandex.MdbKafkaClusterUserArgs(
name="producer-application",
password="password",
permissions=[yandex.MdbKafkaClusterUserPermissionArgs(
role="ACCESS_ROLE_PRODUCER",
topic_name="input",
)],
),
yandex.MdbKafkaClusterUserArgs(
name="worker",
password="password",
permissions=[
yandex.MdbKafkaClusterUserPermissionArgs(
role="ACCESS_ROLE_CONSUMER",
topic_name="input",
),
yandex.MdbKafkaClusterUserPermissionArgs(
role="ACCESS_ROLE_PRODUCER",
topic_name="output",
),
],
),
])
import * as pulumi from "@pulumi/pulumi";
import * as yandex from "@pulumi/yandex";
const fooVpcNetwork = new yandex.VpcNetwork("foo", {});
const fooVpcSubnet = new yandex.VpcSubnet("foo", {
networkId: fooVpcNetwork.id,
v4CidrBlocks: ["10.1.0.0/24"],
zone: "ru-central1-a",
});
const bar = new yandex.VpcSubnet("bar", {
networkId: fooVpcNetwork.id,
v4CidrBlocks: ["10.2.0.0/24"],
zone: "ru-central1-b",
});
const baz = new yandex.VpcSubnet("baz", {
networkId: fooVpcNetwork.id,
v4CidrBlocks: ["10.3.0.0/24"],
zone: "ru-central1-c",
});
const fooMdbKafkaCluster = new yandex.MdbKafkaCluster("foo", {
config: {
assignPublicIp: true,
brokersCount: 2,
kafka: {
kafkaConfig: {
compressionType: "COMPRESSION_TYPE_ZSTD",
defaultReplicationFactor: "6",
logFlushIntervalMessages: "1024",
logFlushIntervalMs: "1000",
logFlushSchedulerIntervalMs: "1000",
logPreallocate: true,
logRetentionBytes: "1.073741824e+09",
logRetentionHours: "168",
logRetentionMinutes: "10080",
logRetentionMs: "8.64e+07",
logSegmentBytes: "1.34217728e+08",
numPartitions: "10",
},
resources: {
diskSize: 128,
diskTypeId: "network-ssd",
resourcePresetId: "s2.medium",
},
},
schemaRegistry: false,
unmanagedTopics: false,
version: "2.8",
zones: [
"ru-central1-a",
"ru-central1-b",
"ru-central1-c",
],
zookeeper: {
resources: {
diskSize: 20,
diskTypeId: "network-ssd",
resourcePresetId: "s2.micro",
},
},
},
environment: "PRESTABLE",
networkId: fooVpcNetwork.id,
subnetIds: [
fooVpcSubnet.id,
bar.id,
baz.id,
],
users: [
{
name: "producer-application",
password: "password",
permissions: [{
role: "ACCESS_ROLE_PRODUCER",
topicName: "input",
}],
},
{
name: "worker",
password: "password",
permissions: [
{
role: "ACCESS_ROLE_CONSUMER",
topicName: "input",
},
{
role: "ACCESS_ROLE_PRODUCER",
topicName: "output",
},
],
},
],
});
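The generated cluster attributes, such as the ID and the broker hosts, are available as outputs (see Outputs below). A short TypeScript sketch continuing the HA example and exporting them, assuming each host entry exposes a name (FQDN) field:
// Export the cluster ID and host FQDNs so other stacks or tooling can consume them.
export const kafkaClusterId = fooMdbKafkaCluster.id;
export const kafkaHostNames = fooMdbKafkaCluster.hosts.apply(hosts => hosts.map(h => h.name));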
Create MdbKafkaCluster Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new MdbKafkaCluster(name: string, args: MdbKafkaClusterArgs, opts?: CustomResourceOptions);
@overload
def MdbKafkaCluster(resource_name: str,
args: MdbKafkaClusterArgs,
opts: Optional[ResourceOptions] = None)
@overload
def MdbKafkaCluster(resource_name: str,
opts: Optional[ResourceOptions] = None,
config: Optional[MdbKafkaClusterConfigArgs] = None,
network_id: Optional[str] = None,
environment: Optional[str] = None,
description: Optional[str] = None,
folder_id: Optional[str] = None,
host_group_ids: Optional[Sequence[str]] = None,
labels: Optional[Mapping[str, str]] = None,
maintenance_window: Optional[MdbKafkaClusterMaintenanceWindowArgs] = None,
name: Optional[str] = None,
deletion_protection: Optional[bool] = None,
security_group_ids: Optional[Sequence[str]] = None,
subnet_ids: Optional[Sequence[str]] = None,
topics: Optional[Sequence[MdbKafkaClusterTopicArgs]] = None,
users: Optional[Sequence[MdbKafkaClusterUserArgs]] = None)
func NewMdbKafkaCluster(ctx *Context, name string, args MdbKafkaClusterArgs, opts ...ResourceOption) (*MdbKafkaCluster, error)
public MdbKafkaCluster(string name, MdbKafkaClusterArgs args, CustomResourceOptions? opts = null)
public MdbKafkaCluster(String name, MdbKafkaClusterArgs args)
public MdbKafkaCluster(String name, MdbKafkaClusterArgs args, CustomResourceOptions options)
type: yandex:MdbKafkaCluster
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args MdbKafkaClusterArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args MdbKafkaClusterArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args MdbKafkaClusterArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args MdbKafkaClusterArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args MdbKafkaClusterArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var mdbKafkaClusterResource = new Yandex.MdbKafkaCluster("mdbKafkaClusterResource", new()
{
Config = new Yandex.Inputs.MdbKafkaClusterConfigArgs
{
Kafka = new Yandex.Inputs.MdbKafkaClusterConfigKafkaArgs
{
Resources = new Yandex.Inputs.MdbKafkaClusterConfigKafkaResourcesArgs
{
DiskSize = 0,
DiskTypeId = "string",
ResourcePresetId = "string",
},
KafkaConfig = new Yandex.Inputs.MdbKafkaClusterConfigKafkaKafkaConfigArgs
{
AutoCreateTopicsEnable = false,
CompressionType = "string",
DefaultReplicationFactor = "string",
LogFlushIntervalMessages = "string",
LogFlushIntervalMs = "string",
LogFlushSchedulerIntervalMs = "string",
LogPreallocate = false,
LogRetentionBytes = "string",
LogRetentionHours = "string",
LogRetentionMinutes = "string",
LogRetentionMs = "string",
LogSegmentBytes = "string",
NumPartitions = "string",
SocketReceiveBufferBytes = "string",
SocketSendBufferBytes = "string",
},
},
Version = "string",
Zones = new[]
{
"string",
},
AssignPublicIp = false,
BrokersCount = 0,
SchemaRegistry = false,
UnmanagedTopics = false,
Zookeeper = new Yandex.Inputs.MdbKafkaClusterConfigZookeeperArgs
{
Resources = new Yandex.Inputs.MdbKafkaClusterConfigZookeeperResourcesArgs
{
DiskSize = 0,
DiskTypeId = "string",
ResourcePresetId = "string",
},
},
},
NetworkId = "string",
Environment = "string",
Description = "string",
FolderId = "string",
HostGroupIds = new[]
{
"string",
},
Labels =
{
{ "string", "string" },
},
MaintenanceWindow = new Yandex.Inputs.MdbKafkaClusterMaintenanceWindowArgs
{
Type = "string",
Day = "string",
Hour = 0,
},
Name = "string",
DeletionProtection = false,
SecurityGroupIds = new[]
{
"string",
},
SubnetIds = new[]
{
"string",
},
Users = new[]
{
new Yandex.Inputs.MdbKafkaClusterUserArgs
{
Name = "string",
Password = "string",
Permissions = new[]
{
new Yandex.Inputs.MdbKafkaClusterUserPermissionArgs
{
Role = "string",
TopicName = "string",
},
},
},
},
});
example, err := yandex.NewMdbKafkaCluster(ctx, "mdbKafkaClusterResource", &yandex.MdbKafkaClusterArgs{
Config: &yandex.MdbKafkaClusterConfigArgs{
Kafka: &yandex.MdbKafkaClusterConfigKafkaArgs{
Resources: &yandex.MdbKafkaClusterConfigKafkaResourcesArgs{
DiskSize: pulumi.Int(0),
DiskTypeId: pulumi.String("string"),
ResourcePresetId: pulumi.String("string"),
},
KafkaConfig: &yandex.MdbKafkaClusterConfigKafkaKafkaConfigArgs{
AutoCreateTopicsEnable: pulumi.Bool(false),
CompressionType: pulumi.String("string"),
DefaultReplicationFactor: pulumi.String("string"),
LogFlushIntervalMessages: pulumi.String("string"),
LogFlushIntervalMs: pulumi.String("string"),
LogFlushSchedulerIntervalMs: pulumi.String("string"),
LogPreallocate: pulumi.Bool(false),
LogRetentionBytes: pulumi.String("string"),
LogRetentionHours: pulumi.String("string"),
LogRetentionMinutes: pulumi.String("string"),
LogRetentionMs: pulumi.String("string"),
LogSegmentBytes: pulumi.String("string"),
NumPartitions: pulumi.String("string"),
SocketReceiveBufferBytes: pulumi.String("string"),
SocketSendBufferBytes: pulumi.String("string"),
},
},
Version: pulumi.String("string"),
Zones: pulumi.StringArray{
pulumi.String("string"),
},
AssignPublicIp: pulumi.Bool(false),
BrokersCount: pulumi.Int(0),
SchemaRegistry: pulumi.Bool(false),
UnmanagedTopics: pulumi.Bool(false),
Zookeeper: &yandex.MdbKafkaClusterConfigZookeeperArgs{
Resources: &yandex.MdbKafkaClusterConfigZookeeperResourcesArgs{
DiskSize: pulumi.Int(0),
DiskTypeId: pulumi.String("string"),
ResourcePresetId: pulumi.String("string"),
},
},
},
NetworkId: pulumi.String("string"),
Environment: pulumi.String("string"),
Description: pulumi.String("string"),
FolderId: pulumi.String("string"),
HostGroupIds: pulumi.StringArray{
pulumi.String("string"),
},
Labels: pulumi.StringMap{
"string": pulumi.String("string"),
},
MaintenanceWindow: &yandex.MdbKafkaClusterMaintenanceWindowArgs{
Type: pulumi.String("string"),
Day: pulumi.String("string"),
Hour: pulumi.Int(0),
},
Name: pulumi.String("string"),
DeletionProtection: pulumi.Bool(false),
SecurityGroupIds: pulumi.StringArray{
pulumi.String("string"),
},
SubnetIds: pulumi.StringArray{
pulumi.String("string"),
},
Users: yandex.MdbKafkaClusterUserArray{
&yandex.MdbKafkaClusterUserArgs{
Name: pulumi.String("string"),
Password: pulumi.String("string"),
Permissions: yandex.MdbKafkaClusterUserPermissionArray{
&yandex.MdbKafkaClusterUserPermissionArgs{
Role: pulumi.String("string"),
TopicName: pulumi.String("string"),
},
},
},
},
})
var mdbKafkaClusterResource = new MdbKafkaCluster("mdbKafkaClusterResource", MdbKafkaClusterArgs.builder()
.config(MdbKafkaClusterConfigArgs.builder()
.kafka(MdbKafkaClusterConfigKafkaArgs.builder()
.resources(MdbKafkaClusterConfigKafkaResourcesArgs.builder()
.diskSize(0)
.diskTypeId("string")
.resourcePresetId("string")
.build())
.kafkaConfig(MdbKafkaClusterConfigKafkaKafkaConfigArgs.builder()
.autoCreateTopicsEnable(false)
.compressionType("string")
.defaultReplicationFactor("string")
.logFlushIntervalMessages("string")
.logFlushIntervalMs("string")
.logFlushSchedulerIntervalMs("string")
.logPreallocate(false)
.logRetentionBytes("string")
.logRetentionHours("string")
.logRetentionMinutes("string")
.logRetentionMs("string")
.logSegmentBytes("string")
.numPartitions("string")
.socketReceiveBufferBytes("string")
.socketSendBufferBytes("string")
.build())
.build())
.version("string")
.zones("string")
.assignPublicIp(false)
.brokersCount(0)
.schemaRegistry(false)
.unmanagedTopics(false)
.zookeeper(MdbKafkaClusterConfigZookeeperArgs.builder()
.resources(MdbKafkaClusterConfigZookeeperResourcesArgs.builder()
.diskSize(0)
.diskTypeId("string")
.resourcePresetId("string")
.build())
.build())
.build())
.networkId("string")
.environment("string")
.description("string")
.folderId("string")
.hostGroupIds("string")
.labels(Map.of("string", "string"))
.maintenanceWindow(MdbKafkaClusterMaintenanceWindowArgs.builder()
.type("string")
.day("string")
.hour(0)
.build())
.name("string")
.deletionProtection(false)
.securityGroupIds("string")
.subnetIds("string")
.users(MdbKafkaClusterUserArgs.builder()
.name("string")
.password("string")
.permissions(MdbKafkaClusterUserPermissionArgs.builder()
.role("string")
.topicName("string")
.build())
.build())
.build());
mdb_kafka_cluster_resource = yandex.MdbKafkaCluster("mdbKafkaClusterResource",
config=yandex.MdbKafkaClusterConfigArgs(
kafka=yandex.MdbKafkaClusterConfigKafkaArgs(
resources=yandex.MdbKafkaClusterConfigKafkaResourcesArgs(
disk_size=0,
disk_type_id="string",
resource_preset_id="string",
),
kafka_config=yandex.MdbKafkaClusterConfigKafkaKafkaConfigArgs(
auto_create_topics_enable=False,
compression_type="string",
default_replication_factor="string",
log_flush_interval_messages="string",
log_flush_interval_ms="string",
log_flush_scheduler_interval_ms="string",
log_preallocate=False,
log_retention_bytes="string",
log_retention_hours="string",
log_retention_minutes="string",
log_retention_ms="string",
log_segment_bytes="string",
num_partitions="string",
socket_receive_buffer_bytes="string",
socket_send_buffer_bytes="string",
),
),
version="string",
zones=["string"],
assign_public_ip=False,
brokers_count=0,
schema_registry=False,
unmanaged_topics=False,
zookeeper=yandex.MdbKafkaClusterConfigZookeeperArgs(
resources=yandex.MdbKafkaClusterConfigZookeeperResourcesArgs(
disk_size=0,
disk_type_id="string",
resource_preset_id="string",
),
),
),
network_id="string",
environment="string",
description="string",
folder_id="string",
host_group_ids=["string"],
labels={
"string": "string",
},
maintenance_window=yandex.MdbKafkaClusterMaintenanceWindowArgs(
type="string",
day="string",
hour=0,
),
name="string",
deletion_protection=False,
security_group_ids=["string"],
subnet_ids=["string"],
users=[yandex.MdbKafkaClusterUserArgs(
name="string",
password="string",
permissions=[yandex.MdbKafkaClusterUserPermissionArgs(
role="string",
topic_name="string",
)],
)])
const mdbKafkaClusterResource = new yandex.MdbKafkaCluster("mdbKafkaClusterResource", {
config: {
kafka: {
resources: {
diskSize: 0,
diskTypeId: "string",
resourcePresetId: "string",
},
kafkaConfig: {
autoCreateTopicsEnable: false,
compressionType: "string",
defaultReplicationFactor: "string",
logFlushIntervalMessages: "string",
logFlushIntervalMs: "string",
logFlushSchedulerIntervalMs: "string",
logPreallocate: false,
logRetentionBytes: "string",
logRetentionHours: "string",
logRetentionMinutes: "string",
logRetentionMs: "string",
logSegmentBytes: "string",
numPartitions: "string",
socketReceiveBufferBytes: "string",
socketSendBufferBytes: "string",
},
},
version: "string",
zones: ["string"],
assignPublicIp: false,
brokersCount: 0,
schemaRegistry: false,
unmanagedTopics: false,
zookeeper: {
resources: {
diskSize: 0,
diskTypeId: "string",
resourcePresetId: "string",
},
},
},
networkId: "string",
environment: "string",
description: "string",
folderId: "string",
hostGroupIds: ["string"],
labels: {
string: "string",
},
maintenanceWindow: {
type: "string",
day: "string",
hour: 0,
},
name: "string",
deletionProtection: false,
securityGroupIds: ["string"],
subnetIds: ["string"],
users: [{
name: "string",
password: "string",
permissions: [{
role: "string",
topicName: "string",
}],
}],
});
type: yandex:MdbKafkaCluster
properties:
config:
assignPublicIp: false
brokersCount: 0
kafka:
kafkaConfig:
autoCreateTopicsEnable: false
compressionType: string
defaultReplicationFactor: string
logFlushIntervalMessages: string
logFlushIntervalMs: string
logFlushSchedulerIntervalMs: string
logPreallocate: false
logRetentionBytes: string
logRetentionHours: string
logRetentionMinutes: string
logRetentionMs: string
logSegmentBytes: string
numPartitions: string
socketReceiveBufferBytes: string
socketSendBufferBytes: string
resources:
diskSize: 0
diskTypeId: string
resourcePresetId: string
schemaRegistry: false
unmanagedTopics: false
version: string
zones:
- string
zookeeper:
resources:
diskSize: 0
diskTypeId: string
resourcePresetId: string
deletionProtection: false
description: string
environment: string
folderId: string
hostGroupIds:
- string
labels:
string: string
maintenanceWindow:
day: string
hour: 0
type: string
name: string
networkId: string
securityGroupIds:
- string
subnetIds:
- string
users:
- name: string
password: string
permissions:
- role: string
topicName: string
MdbKafkaCluster Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
The MdbKafkaCluster resource accepts the following input properties:
- Config
Mdb
Kafka Cluster Config - Configuration of the Kafka cluster. The structure is documented below.
- Network
Id string - ID of the network, to which the Kafka cluster belongs.
- Deletion
Protection bool - Inhibits deletion of the cluster. Can be either
true
orfalse
. - Description string
- Description of the Kafka cluster.
- Environment string
- Deployment environment of the Kafka cluster. Can be either
PRESTABLE
orPRODUCTION
. The default isPRODUCTION
. - Folder
Id string - The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
- Host
Group List<string>Ids - A list of IDs of the host groups to place VMs of the cluster on.
- Labels Dictionary<string, string>
- A set of key/value label pairs to assign to the Kafka cluster.
- Maintenance
Window MdbKafka Cluster Maintenance Window - Maintenance policy of the Kafka cluster. The structure is documented below.
- Name string
- The name of the Kafka cluster.
- Security
Group List<string>Ids - Security group ids, to which the Kafka cluster belongs.
- Subnet
Ids List<string> - IDs of the subnets, to which the Kafka cluster belongs.
- Topics
List<Mdb
Kafka Cluster Topic> - To manage topics, please switch to using a separate resource type
yandex.MdbKafkaTopic
. - Users
List<Mdb
Kafka Cluster User> - A user of the Kafka cluster. The structure is documented below.
- Config
Mdb
Kafka Cluster Config Args - Configuration of the Kafka cluster. The structure is documented below.
- Network
Id string - ID of the network, to which the Kafka cluster belongs.
- Deletion
Protection bool - Inhibits deletion of the cluster. Can be either
true
orfalse
. - Description string
- Description of the Kafka cluster.
- Environment string
- Deployment environment of the Kafka cluster. Can be either
PRESTABLE
orPRODUCTION
. The default isPRODUCTION
. - Folder
Id string - The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
- Host
Group []stringIds - A list of IDs of the host groups to place VMs of the cluster on.
- Labels map[string]string
- A set of key/value label pairs to assign to the Kafka cluster.
- Maintenance
Window MdbKafka Cluster Maintenance Window Args - Maintenance policy of the Kafka cluster. The structure is documented below.
- Name string
- The name of the Kafka cluster.
- Security
Group []stringIds - Security group ids, to which the Kafka cluster belongs.
- Subnet
Ids []string - IDs of the subnets, to which the Kafka cluster belongs.
- Topics
[]Mdb
Kafka Cluster Topic Args - To manage topics, please switch to using a separate resource type
yandex.MdbKafkaTopic
. - Users
[]Mdb
Kafka Cluster User Args - A user of the Kafka cluster. The structure is documented below.
- config
Mdb
Kafka Cluster Config - Configuration of the Kafka cluster. The structure is documented below.
- network
Id String - ID of the network, to which the Kafka cluster belongs.
- deletion
Protection Boolean - Inhibits deletion of the cluster. Can be either
true
orfalse
. - description String
- Description of the Kafka cluster.
- environment String
- Deployment environment of the Kafka cluster. Can be either
PRESTABLE
orPRODUCTION
. The default isPRODUCTION
. - folder
Id String - The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
- host
Group List<String>Ids - A list of IDs of the host groups to place VMs of the cluster on.
- labels Map<String,String>
- A set of key/value label pairs to assign to the Kafka cluster.
- maintenance
Window MdbKafka Cluster Maintenance Window - Maintenance policy of the Kafka cluster. The structure is documented below.
- name String
- The name of the Kafka cluster.
- security
Group List<String>Ids - Security group ids, to which the Kafka cluster belongs.
- subnet
Ids List<String> - IDs of the subnets, to which the Kafka cluster belongs.
- topics
List<Mdb
Kafka Cluster Topic> - To manage topics, please switch to using a separate resource type
yandex.MdbKafkaTopic
. - users
List<Mdb
Kafka Cluster User> - A user of the Kafka cluster. The structure is documented below.
- config
Mdb
Kafka Cluster Config - Configuration of the Kafka cluster. The structure is documented below.
- network
Id string - ID of the network, to which the Kafka cluster belongs.
- deletion
Protection boolean - Inhibits deletion of the cluster. Can be either
true
orfalse
. - description string
- Description of the Kafka cluster.
- environment string
- Deployment environment of the Kafka cluster. Can be either
PRESTABLE
orPRODUCTION
. The default isPRODUCTION
. - folder
Id string - The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
- host
Group string[]Ids - A list of IDs of the host groups to place VMs of the cluster on.
- labels {[key: string]: string}
- A set of key/value label pairs to assign to the Kafka cluster.
- maintenance
Window MdbKafka Cluster Maintenance Window - Maintenance policy of the Kafka cluster. The structure is documented below.
- name string
- The name of the Kafka cluster.
- security
Group string[]Ids - Security group ids, to which the Kafka cluster belongs.
- subnet
Ids string[] - IDs of the subnets, to which the Kafka cluster belongs.
- topics
Mdb
Kafka Cluster Topic[] - To manage topics, please switch to using a separate resource type
yandex.MdbKafkaTopic
. - users
Mdb
Kafka Cluster User[] - A user of the Kafka cluster. The structure is documented below.
- config
Mdb
Kafka Cluster Config Args - Configuration of the Kafka cluster. The structure is documented below.
- network_
id str - ID of the network, to which the Kafka cluster belongs.
- deletion_
protection bool - Inhibits deletion of the cluster. Can be either
true
orfalse
. - description str
- Description of the Kafka cluster.
- environment str
- Deployment environment of the Kafka cluster. Can be either
PRESTABLE
orPRODUCTION
. The default isPRODUCTION
. - folder_
id str - The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
- host_
group_ Sequence[str]ids - A list of IDs of the host groups to place VMs of the cluster on.
- labels Mapping[str, str]
- A set of key/value label pairs to assign to the Kafka cluster.
- maintenance_
window MdbKafka Cluster Maintenance Window Args - Maintenance policy of the Kafka cluster. The structure is documented below.
- name str
- The name of the Kafka cluster.
- security_
group_ Sequence[str]ids - Security group ids, to which the Kafka cluster belongs.
- subnet_
ids Sequence[str] - IDs of the subnets, to which the Kafka cluster belongs.
- topics
Sequence[Mdb
Kafka Cluster Topic Args] - To manage topics, please switch to using a separate resource type
yandex.MdbKafkaTopic
. - users
Sequence[Mdb
Kafka Cluster User Args] - A user of the Kafka cluster. The structure is documented below.
- config Property Map
- Configuration of the Kafka cluster. The structure is documented below.
- network
Id String - ID of the network, to which the Kafka cluster belongs.
- deletion
Protection Boolean - Inhibits deletion of the cluster. Can be either
true
orfalse
. - description String
- Description of the Kafka cluster.
- environment String
- Deployment environment of the Kafka cluster. Can be either
PRESTABLE
orPRODUCTION
. The default isPRODUCTION
. - folder
Id String - The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
- host
Group List<String>Ids - A list of IDs of the host groups to place VMs of the cluster on.
- labels Map<String>
- A set of key/value label pairs to assign to the Kafka cluster.
- maintenance
Window Property Map - Maintenance policy of the Kafka cluster. The structure is documented below.
- name String
- The name of the Kafka cluster.
- security
Group List<String>Ids - Security group ids, to which the Kafka cluster belongs.
- subnet
Ids List<String> - IDs of the subnets, to which the Kafka cluster belongs.
- topics List<Property Map>
- To manage topics, please switch to using a separate resource type
yandex.MdbKafkaTopic
. - users List<Property Map>
- A user of the Kafka cluster. The structure is documented below.
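Beyond the core config and networkId inputs, the properties above are optional housekeeping settings. A hedged TypeScript sketch combining a few of them; clusterConfig, network, and subnet are placeholders for values defined elsewhere, and the maintenance window values are assumptions for a weekly policy:
const cluster = new yandex.MdbKafkaCluster("example", {
    config: clusterConfig,             // a cluster config block as in the examples above
    networkId: network.id,
    subnetIds: [subnet.id],
    environment: "PRODUCTION",
    deletionProtection: true,          // inhibit accidental deletion of the cluster
    labels: { team: "data-platform" }, // free-form key/value labels
    maintenanceWindow: {               // assumed weekly window; verify accepted values
        type: "WEEKLY",
        day: "SAT",
        hour: 3,
    },
});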
Outputs
All input properties are implicitly available as output properties. Additionally, the MdbKafkaCluster resource produces the following output properties:
- Created
At string - Timestamp of cluster creation.
- Health string
- Aggregated health of the cluster.
- Hosts
List<Mdb
Kafka Cluster Host> - A host of the Kafka cluster. The structure is documented below.
- Id string
- The provider-assigned unique ID for this managed resource.
- Status string
- Status of the cluster. Can be either
CREATING
,STARTING
,RUNNING
,UPDATING
,STOPPING
,STOPPED
,ERROR
orSTATUS_UNKNOWN
. For more information seestatus
field of JSON representation in the official documentation.
- Created
At string - Timestamp of cluster creation.
- Health string
- Aggregated health of the cluster.
- Hosts
[]Mdb
Kafka Cluster Host - A host of the Kafka cluster. The structure is documented below.
- Id string
- The provider-assigned unique ID for this managed resource.
- Status string
- Status of the cluster. Can be either
CREATING
,STARTING
,RUNNING
,UPDATING
,STOPPING
,STOPPED
,ERROR
orSTATUS_UNKNOWN
. For more information seestatus
field of JSON representation in the official documentation.
- created
At String - Timestamp of cluster creation.
- health String
- Aggregated health of the cluster.
- hosts
List<Mdb
Kafka Cluster Host> - A host of the Kafka cluster. The structure is documented below.
- id String
- The provider-assigned unique ID for this managed resource.
- status String
- Status of the cluster. Can be either
CREATING
,STARTING
,RUNNING
,UPDATING
,STOPPING
,STOPPED
,ERROR
orSTATUS_UNKNOWN
. For more information seestatus
field of JSON representation in the official documentation.
- created
At string - Timestamp of cluster creation.
- health string
- Aggregated health of the cluster.
- hosts
Mdb
Kafka Cluster Host[] - A host of the Kafka cluster. The structure is documented below.
- id string
- The provider-assigned unique ID for this managed resource.
- status string
- Status of the cluster. Can be either
CREATING
,STARTING
,RUNNING
,UPDATING
,STOPPING
,STOPPED
,ERROR
orSTATUS_UNKNOWN
. For more information seestatus
field of JSON representation in the official documentation.
- created_
at str - Timestamp of cluster creation.
- health str
- Aggregated health of the cluster.
- hosts
Sequence[Mdb
Kafka Cluster Host] - A host of the Kafka cluster. The structure is documented below.
- id str
- The provider-assigned unique ID for this managed resource.
- status str
- Status of the cluster. Can be either
CREATING
,STARTING
,RUNNING
,UPDATING
,STOPPING
,STOPPED
,ERROR
orSTATUS_UNKNOWN
. For more information seestatus
field of JSON representation in the official documentation.
- created
At String - Timestamp of cluster creation.
- health String
- Aggregated health of the cluster.
- hosts List<Property Map>
- A host of the Kafka cluster. The structure is documented below.
- id String
- The provider-assigned unique ID for this managed resource.
- status String
- Status of the cluster. Can be either
CREATING
,STARTING
,RUNNING
,UPDATING
,STOPPING
,STOPPED
,ERROR
orSTATUS_UNKNOWN
. For more information seestatus
field of JSON representation in the official documentation.
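Output properties are Pulumi Outputs, so values such as status, createdAt, and health can be exported directly or transformed with apply. A brief TypeScript sketch; cluster stands for an MdbKafkaCluster instance like the ones above, and the ALIVE comparison is an assumption about the health strings reported by the service:
export const kafkaStatus = cluster.status;        // e.g. RUNNING once provisioning completes
export const kafkaCreatedAt = cluster.createdAt;
export const kafkaIsAlive = cluster.health.apply(h => h === "ALIVE");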
Look up Existing MdbKafkaCluster Resource
Get an existing MdbKafkaCluster resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: MdbKafkaClusterState, opts?: CustomResourceOptions): MdbKafkaCluster
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
config: Optional[MdbKafkaClusterConfigArgs] = None,
created_at: Optional[str] = None,
deletion_protection: Optional[bool] = None,
description: Optional[str] = None,
environment: Optional[str] = None,
folder_id: Optional[str] = None,
health: Optional[str] = None,
host_group_ids: Optional[Sequence[str]] = None,
hosts: Optional[Sequence[MdbKafkaClusterHostArgs]] = None,
labels: Optional[Mapping[str, str]] = None,
maintenance_window: Optional[MdbKafkaClusterMaintenanceWindowArgs] = None,
name: Optional[str] = None,
network_id: Optional[str] = None,
security_group_ids: Optional[Sequence[str]] = None,
status: Optional[str] = None,
subnet_ids: Optional[Sequence[str]] = None,
topics: Optional[Sequence[MdbKafkaClusterTopicArgs]] = None,
users: Optional[Sequence[MdbKafkaClusterUserArgs]] = None) -> MdbKafkaCluster
func GetMdbKafkaCluster(ctx *Context, name string, id IDInput, state *MdbKafkaClusterState, opts ...ResourceOption) (*MdbKafkaCluster, error)
public static MdbKafkaCluster Get(string name, Input<string> id, MdbKafkaClusterState? state, CustomResourceOptions? opts = null)
public static MdbKafkaCluster get(String name, Output<String> id, MdbKafkaClusterState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
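In TypeScript, for example, the lookup takes a logical name and the ID of the existing cluster (the ID below is a placeholder):
// Adopt the state of an existing cluster without managing its lifecycle.
const existing = yandex.MdbKafkaCluster.get("imported-kafka", "existing-cluster-id");
export const existingClusterName = existing.name;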
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- Config
Mdb
Kafka Cluster Config - Configuration of the Kafka cluster. The structure is documented below.
- Created
At string - Timestamp of cluster creation.
- Deletion
Protection bool - Inhibits deletion of the cluster. Can be either
true
orfalse
. - Description string
- Description of the Kafka cluster.
- Environment string
- Deployment environment of the Kafka cluster. Can be either
PRESTABLE
orPRODUCTION
. The default isPRODUCTION
. - Folder
Id string - The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
- Health string
- Aggregated health of the cluster.
- Host
Group List<string>Ids - A list of IDs of the host groups to place VMs of the cluster on.
- Hosts
List<Mdb
Kafka Cluster Host> - A host of the Kafka cluster. The structure is documented below.
- Labels Dictionary<string, string>
- A set of key/value label pairs to assign to the Kafka cluster.
- Maintenance
Window MdbKafka Cluster Maintenance Window - Maintenance policy of the Kafka cluster. The structure is documented below.
- Name string
- The name of the Kafka cluster.
- Network
Id string - ID of the network, to which the Kafka cluster belongs.
- Security
Group List<string>Ids - Security group ids, to which the Kafka cluster belongs.
- Status string
- Status of the cluster. Can be either
CREATING
,STARTING
,RUNNING
,UPDATING
,STOPPING
,STOPPED
,ERROR
orSTATUS_UNKNOWN
. For more information seestatus
field of JSON representation in the official documentation. - Subnet
Ids List<string> - IDs of the subnets, to which the Kafka cluster belongs.
- Topics
List<Mdb
Kafka Cluster Topic> - To manage topics, please switch to using a separate resource type
yandex.MdbKafkaTopic
. - Users
List<Mdb
Kafka Cluster User> - A user of the Kafka cluster. The structure is documented below.
- Config
Mdb
Kafka Cluster Config Args - Configuration of the Kafka cluster. The structure is documented below.
- Created
At string - Timestamp of cluster creation.
- Deletion
Protection bool - Inhibits deletion of the cluster. Can be either
true
orfalse
. - Description string
- Description of the Kafka cluster.
- Environment string
- Deployment environment of the Kafka cluster. Can be either
PRESTABLE
orPRODUCTION
. The default isPRODUCTION
. - Folder
Id string - The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
- Health string
- Aggregated health of the cluster.
- Host
Group []stringIds - A list of IDs of the host groups to place VMs of the cluster on.
- Hosts
[]Mdb
Kafka Cluster Host Args - A host of the Kafka cluster. The structure is documented below.
- Labels map[string]string
- A set of key/value label pairs to assign to the Kafka cluster.
- Maintenance
Window MdbKafka Cluster Maintenance Window Args - Maintenance policy of the Kafka cluster. The structure is documented below.
- Name string
- The name of the Kafka cluster.
- Network
Id string - ID of the network, to which the Kafka cluster belongs.
- Security
Group []stringIds - Security group ids, to which the Kafka cluster belongs.
- Status string
- Status of the cluster. Can be either
CREATING
,STARTING
,RUNNING
,UPDATING
,STOPPING
,STOPPED
,ERROR
orSTATUS_UNKNOWN
. For more information seestatus
field of JSON representation in the official documentation. - Subnet
Ids []string - IDs of the subnets, to which the Kafka cluster belongs.
- Topics
[]Mdb
Kafka Cluster Topic Args - To manage topics, please switch to using a separate resource type
yandex.MdbKafkaTopic
. - Users
[]Mdb
Kafka Cluster User Args - A user of the Kafka cluster. The structure is documented below.
- config
Mdb
Kafka Cluster Config - Configuration of the Kafka cluster. The structure is documented below.
- created
At String - Timestamp of cluster creation.
- deletion
Protection Boolean - Inhibits deletion of the cluster. Can be either
true
orfalse
. - description String
- Description of the Kafka cluster.
- environment String
- Deployment environment of the Kafka cluster. Can be either
PRESTABLE
orPRODUCTION
. The default isPRODUCTION
. - folder
Id String - The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
- health String
- Aggregated health of the cluster.
- host
Group List<String>Ids - A list of IDs of the host groups to place VMs of the cluster on.
- hosts
List<Mdb
Kafka Cluster Host> - A host of the Kafka cluster. The structure is documented below.
- labels Map<String,String>
- A set of key/value label pairs to assign to the Kafka cluster.
- maintenance
Window MdbKafka Cluster Maintenance Window - Maintenance policy of the Kafka cluster. The structure is documented below.
- name String
- The name of the Kafka cluster.
- network
Id String - ID of the network, to which the Kafka cluster belongs.
- security
Group List<String>Ids - Security group ids, to which the Kafka cluster belongs.
- status String
- Status of the cluster. Can be either
CREATING
,STARTING
,RUNNING
,UPDATING
,STOPPING
,STOPPED
,ERROR
orSTATUS_UNKNOWN
. For more information seestatus
field of JSON representation in the official documentation. - subnet
Ids List<String> - IDs of the subnets, to which the Kafka cluster belongs.
- topics
List<Mdb
Kafka Cluster Topic> - To manage topics, please switch to using a separate resource type
yandex.MdbKafkaTopic
. - users
List<Mdb
Kafka Cluster User> - A user of the Kafka cluster. The structure is documented below.
- config
Mdb
Kafka Cluster Config - Configuration of the Kafka cluster. The structure is documented below.
- created
At string - Timestamp of cluster creation.
- deletion
Protection boolean - Inhibits deletion of the cluster. Can be either
true
orfalse
. - description string
- Description of the Kafka cluster.
- environment string
- Deployment environment of the Kafka cluster. Can be either
PRESTABLE
orPRODUCTION
. The default isPRODUCTION
. - folder
Id string - The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
- health string
- Aggregated health of the cluster.
- host
Group string[]Ids - A list of IDs of the host groups to place VMs of the cluster on.
- hosts
Mdb
Kafka Cluster Host[] - A host of the Kafka cluster. The structure is documented below.
- labels {[key: string]: string}
- A set of key/value label pairs to assign to the Kafka cluster.
- maintenance
Window MdbKafka Cluster Maintenance Window - Maintenance policy of the Kafka cluster. The structure is documented below.
- name string
- The name of the Kafka cluster.
- network
Id string - ID of the network, to which the Kafka cluster belongs.
- security
Group string[]Ids - Security group ids, to which the Kafka cluster belongs.
- status string
- Status of the cluster. Can be either
CREATING
,STARTING
,RUNNING
,UPDATING
,STOPPING
,STOPPED
,ERROR
orSTATUS_UNKNOWN
. For more information seestatus
field of JSON representation in the official documentation. - subnet
Ids string[] - IDs of the subnets, to which the Kafka cluster belongs.
- topics
Mdb
Kafka Cluster Topic[] - To manage topics, please switch to using a separate resource type
yandex.MdbKafkaTopic
. - users
Mdb
Kafka Cluster User[] - A user of the Kafka cluster. The structure is documented below.
- config
Mdb
Kafka Cluster Config Args - Configuration of the Kafka cluster. The structure is documented below.
- created_
at str - Timestamp of cluster creation.
- deletion_
protection bool - Inhibits deletion of the cluster. Can be either
true
orfalse
. - description str
- Description of the Kafka cluster.
- environment str
- Deployment environment of the Kafka cluster. Can be either
PRESTABLE
orPRODUCTION
. The default isPRODUCTION
. - folder_
id str - The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
- health str
- Aggregated health of the cluster.
- host_
group_ Sequence[str]ids - A list of IDs of the host groups to place VMs of the cluster on.
- hosts
Sequence[Mdb
Kafka Cluster Host Args] - A host of the Kafka cluster. The structure is documented below.
- labels Mapping[str, str]
- A set of key/value label pairs to assign to the Kafka cluster.
- maintenance_
window MdbKafka Cluster Maintenance Window Args - Maintenance policy of the Kafka cluster. The structure is documented below.
- name str
- The name of the Kafka cluster.
- network_
id str - ID of the network, to which the Kafka cluster belongs.
- security_
group_ Sequence[str]ids - Security group ids, to which the Kafka cluster belongs.
- status str
- Status of the cluster. Can be either
CREATING
,STARTING
,RUNNING
,UPDATING
,STOPPING
,STOPPED
,ERROR
orSTATUS_UNKNOWN
. For more information seestatus
field of JSON representation in the official documentation. - subnet_
ids Sequence[str] - IDs of the subnets, to which the Kafka cluster belongs.
- topics
Sequence[Mdb
Kafka Cluster Topic Args] - To manage topics, please switch to using a separate resource type
yandex.MdbKafkaTopic
. - users
Sequence[Mdb
Kafka Cluster User Args] - A user of the Kafka cluster. The structure is documented below.
- config Property Map
- Configuration of the Kafka cluster. The structure is documented below.
- created
At String - Timestamp of cluster creation.
- deletion
Protection Boolean - Inhibits deletion of the cluster. Can be either
true
orfalse
. - description String
- Description of the Kafka cluster.
- environment String
- Deployment environment of the Kafka cluster. Can be either
PRESTABLE
orPRODUCTION
. The default isPRODUCTION
. - folder
Id String - The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
- health String
- Aggregated health of the cluster.
- host
Group List<String>Ids - A list of IDs of the host groups to place VMs of the cluster on.
- hosts List<Property Map>
- A host of the Kafka cluster. The structure is documented below.
- labels Map<String>
- A set of key/value label pairs to assign to the Kafka cluster.
- maintenance
Window Property Map - Maintenance policy of the Kafka cluster. The structure is documented below.
- name String
- The name of the Kafka cluster.
- network
Id String - ID of the network, to which the Kafka cluster belongs.
- security
Group List<String>Ids - Security group ids, to which the Kafka cluster belongs.
- status String
- Status of the cluster. Can be either
CREATING
,STARTING
,RUNNING
,UPDATING
,STOPPING
,STOPPED
,ERROR
orSTATUS_UNKNOWN
. For more information seestatus
field of JSON representation in the official documentation. - subnet
Ids List<String> - IDs of the subnets, to which the Kafka cluster belongs.
- topics List<Property Map>
- To manage topics, please switch to using a separate resource type
yandex.MdbKafkaTopic
. - users List<Property Map>
- A user of the Kafka cluster. The structure is documented below.
Supporting Types
MdbKafkaClusterConfig, MdbKafkaClusterConfigArgs
- Kafka
Mdb
Kafka Cluster Config Kafka - Configuration of the Kafka subcluster. The structure is documented below.
- Version string
- Version of the Kafka server software.
- Zones List<string>
- List of availability zones.
- Assign
Public boolIp - Determines whether each broker will be assigned a public IP address. The default is
false
. - Brokers
Count int - Count of brokers per availability zone. The default is
1
. - Schema
Registry bool - Enables the managed Schema Registry on the cluster. The default is
false
. - Unmanaged
Topics bool - Allows topic management via the Kafka Admin API. The default is
false
. - Zookeeper
Mdb
Kafka Cluster Config Zookeeper - Configuration of the ZooKeeper subcluster. The structure is documented below.
- Kafka
Mdb
Kafka Cluster Config Kafka - Configuration of the Kafka subcluster. The structure is documented below.
- Version string
- Version of the Kafka server software.
- Zones []string
- List of availability zones.
- Assign
Public boolIp - Determines whether each broker will be assigned a public IP address. The default is
false
. - Brokers
Count int - Count of brokers per availability zone. The default is
1
. - Schema
Registry bool - Enables managed schema registry on cluster. The default is
false
. - Unmanaged
Topics bool - Allows to use Kafka AdminAPI to manage topics. The default is
false
. - Zookeeper
Mdb
Kafka Cluster Config Zookeeper - Configuration of the ZooKeeper subcluster. The structure is documented below.
- kafka
Mdb
Kafka Cluster Config Kafka - Configuration of the Kafka subcluster. The structure is documented below.
- version String
- Version of the Kafka server software.
- zones List<String>
- List of availability zones.
- assign
Public BooleanIp - Determines whether each broker will be assigned a public IP address. The default is
false
. - brokers
Count Integer - Count of brokers per availability zone. The default is
1
. - schema
Registry Boolean - Enables managed schema registry on cluster. The default is
false
. - unmanaged
Topics Boolean - Allows to use Kafka AdminAPI to manage topics. The default is
false
. - zookeeper
Mdb
Kafka Cluster Config Zookeeper - Configuration of the ZooKeeper subcluster. The structure is documented below.
- kafka
Mdb
Kafka Cluster Config Kafka - Configuration of the Kafka subcluster. The structure is documented below.
- version string
- Version of the Kafka server software.
- zones string[]
- List of availability zones.
- assign
Public booleanIp - Determines whether each broker will be assigned a public IP address. The default is
false
. - brokers
Count number - Count of brokers per availability zone. The default is
1
. - schema
Registry boolean - Enables managed schema registry on cluster. The default is
false
. - unmanaged
Topics boolean - Allows to use Kafka AdminAPI to manage topics. The default is
false
. - zookeeper
Mdb
Kafka Cluster Config Zookeeper - Configuration of the ZooKeeper subcluster. The structure is documented below.
- kafka
Mdb
Kafka Cluster Config Kafka - Configuration of the Kafka subcluster. The structure is documented below.
- version str
- Version of the Kafka server software.
- zones Sequence[str]
- List of availability zones.
- assign_
public_ boolip - Determines whether each broker will be assigned a public IP address. The default is
false
. - brokers_
count int - Count of brokers per availability zone. The default is
1
. - schema_
registry bool - Enables managed schema registry on cluster. The default is
false
. - unmanaged_
topics bool - Allows to use Kafka AdminAPI to manage topics. The default is
false
. - zookeeper
Mdb
Kafka Cluster Config Zookeeper - Configuration of the ZooKeeper subcluster. The structure is documented below.
- kafka Property Map
- Configuration of the Kafka subcluster. The structure is documented below.
- version String
- Version of the Kafka server software.
- zones List<String>
- List of availability zones.
- assign
Public BooleanIp - Determines whether each broker will be assigned a public IP address. The default is
false
. - brokers
Count Number - Count of brokers per availability zone. The default is
1
. - schema
Registry Boolean - Enables managed schema registry on cluster. The default is
false
. - unmanaged
Topics Boolean - Allows to use Kafka AdminAPI to manage topics. The default is
false
. - zookeeper Property Map
- Configuration of the ZooKeeper subcluster. The structure is documented below.
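To illustrate how these config fields combine for a multi-zone deployment, here is a hedged Go sketch of a highly available cluster with one broker per zone and an explicit ZooKeeper subcluster. Assumptions: the networkId, subnetA, subnetB and subnetC stack configuration keys are hypothetical stand-ins for real VPC resources, and the zone names, preset and disk values are illustrative only.

package main

import (
    "github.com/pulumi/pulumi-yandex/sdk/go/yandex"
    "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    "github.com/pulumi/pulumi/sdk/v3/go/pulumi/config"
)

func main() {
    pulumi.Run(func(ctx *pulumi.Context) error {
        cfg := config.New(ctx, "")
        // Network and subnet IDs are taken from stack configuration for brevity;
        // they could equally come from yandex.VpcNetwork / yandex.VpcSubnet resources.
        networkId := cfg.Require("networkId")
        subnetIds := pulumi.StringArray{
            pulumi.String(cfg.Require("subnetA")),
            pulumi.String(cfg.Require("subnetB")),
            pulumi.String(cfg.Require("subnetC")),
        }
        // Three availability zones, one broker per zone, plus a ZooKeeper subcluster.
        _, err := yandex.NewMdbKafkaCluster(ctx, "haKafka", &yandex.MdbKafkaClusterArgs{
            NetworkId: pulumi.String(networkId),
            SubnetIds: subnetIds,
            Config: &yandex.MdbKafkaClusterConfigArgs{
                Version:      pulumi.String("2.8"),
                BrokersCount: pulumi.Int(1),
                Zones: pulumi.StringArray{
                    pulumi.String("ru-central1-a"),
                    pulumi.String("ru-central1-b"),
                    pulumi.String("ru-central1-c"),
                },
                AssignPublicIp: pulumi.Bool(false),
                Kafka: &yandex.MdbKafkaClusterConfigKafkaArgs{
                    Resources: &yandex.MdbKafkaClusterConfigKafkaResourcesArgs{
                        ResourcePresetId: pulumi.String("s2.micro"),
                        DiskTypeId:       pulumi.String("network-ssd"),
                        DiskSize:         pulumi.Int(32),
                    },
                },
                Zookeeper: &yandex.MdbKafkaClusterConfigZookeeperArgs{
                    Resources: &yandex.MdbKafkaClusterConfigZookeeperResourcesArgs{
                        ResourcePresetId: pulumi.String("s2.micro"),
                        DiskTypeId:       pulumi.String("network-ssd"),
                        DiskSize:         pulumi.Int(20),
                    },
                },
            },
        })
        return err
    })
}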
MdbKafkaClusterConfigKafka, MdbKafkaClusterConfigKafkaArgs
- Resources MdbKafkaClusterConfigKafkaResources - Resources allocated to hosts of the Kafka subcluster. The structure is documented below.
- KafkaConfig MdbKafkaClusterConfigKafkaKafkaConfig - User-defined settings for the Kafka cluster. The structure is documented below.
- Resources MdbKafkaClusterConfigKafkaResources - Resources allocated to hosts of the Kafka subcluster. The structure is documented below.
- KafkaConfig MdbKafkaClusterConfigKafkaKafkaConfig - User-defined settings for the Kafka cluster. The structure is documented below.
- resources MdbKafkaClusterConfigKafkaResources - Resources allocated to hosts of the Kafka subcluster. The structure is documented below.
- kafkaConfig MdbKafkaClusterConfigKafkaKafkaConfig - User-defined settings for the Kafka cluster. The structure is documented below.
- resources MdbKafkaClusterConfigKafkaResources - Resources allocated to hosts of the Kafka subcluster. The structure is documented below.
- kafkaConfig MdbKafkaClusterConfigKafkaKafkaConfig - User-defined settings for the Kafka cluster. The structure is documented below.
- resources MdbKafkaClusterConfigKafkaResources - Resources allocated to hosts of the Kafka subcluster. The structure is documented below.
- kafka_config MdbKafkaClusterConfigKafkaKafkaConfig - User-defined settings for the Kafka cluster. The structure is documented below.
- resources Property Map - Resources allocated to hosts of the Kafka subcluster. The structure is documented below.
- kafkaConfig Property Map - User-defined settings for the Kafka cluster. The structure is documented below.
MdbKafkaClusterConfigKafkaKafkaConfig, MdbKafkaClusterConfigKafkaKafkaConfigArgs
- AutoCreateTopicsEnable bool
- CompressionType string
- DefaultReplicationFactor string
- LogFlushIntervalMessages string
- LogFlushIntervalMs string
- LogFlushSchedulerIntervalMs string
- LogPreallocate bool
- LogRetentionBytes string
- LogRetentionHours string
- LogRetentionMinutes string
- LogRetentionMs string
- LogSegmentBytes string
- NumPartitions string
- SocketReceiveBufferBytes string
- SocketSendBufferBytes string
- AutoCreateTopicsEnable bool
- CompressionType string
- DefaultReplicationFactor string
- LogFlushIntervalMessages string
- LogFlushIntervalMs string
- LogFlushSchedulerIntervalMs string
- LogPreallocate bool
- LogRetentionBytes string
- LogRetentionHours string
- LogRetentionMinutes string
- LogRetentionMs string
- LogSegmentBytes string
- NumPartitions string
- SocketReceiveBufferBytes string
- SocketSendBufferBytes string
- autoCreateTopicsEnable Boolean
- compressionType String
- defaultReplicationFactor String
- logFlushIntervalMessages String
- logFlushIntervalMs String
- logFlushSchedulerIntervalMs String
- logPreallocate Boolean
- logRetentionBytes String
- logRetentionHours String
- logRetentionMinutes String
- logRetentionMs String
- logSegmentBytes String
- numPartitions String
- socketReceiveBufferBytes String
- socketSendBufferBytes String
- autoCreateTopicsEnable boolean
- compressionType string
- defaultReplicationFactor string
- logFlushIntervalMessages string
- logFlushIntervalMs string
- logFlushSchedulerIntervalMs string
- logPreallocate boolean
- logRetentionBytes string
- logRetentionHours string
- logRetentionMinutes string
- logRetentionMs string
- logSegmentBytes string
- numPartitions string
- socketReceiveBufferBytes string
- socketSendBufferBytes string
- auto_create_topics_enable bool
- compression_type str
- default_replication_factor str
- log_flush_interval_messages str
- log_flush_interval_ms str
- log_flush_scheduler_interval_ms str
- log_preallocate bool
- log_retention_bytes str
- log_retention_hours str
- log_retention_minutes str
- log_retention_ms str
- log_segment_bytes str
- num_partitions str
- socket_receive_buffer_bytes str
- socket_send_buffer_bytes str
- autoCreateTopicsEnable Boolean
- compressionType String
- defaultReplicationFactor String
- logFlushIntervalMessages String
- logFlushIntervalMs String
- logFlushSchedulerIntervalMs String
- logPreallocate Boolean
- logRetentionBytes String
- logRetentionHours String
- logRetentionMinutes String
- logRetentionMs String
- logSegmentBytes String
- numPartitions String
- socketReceiveBufferBytes String
- socketSendBufferBytes String
MdbKafkaClusterConfigKafkaResources, MdbKafkaClusterConfigKafkaResourcesArgs
- DiskSize int - Volume of the storage available to a Kafka host, in gigabytes.
- DiskTypeId string - Type of the storage of Kafka hosts. For more information see the official documentation.
- ResourcePresetId string
- DiskSize int - Volume of the storage available to a Kafka host, in gigabytes.
- DiskTypeId string - Type of the storage of Kafka hosts. For more information see the official documentation.
- ResourcePresetId string
- diskSize Integer - Volume of the storage available to a Kafka host, in gigabytes.
- diskTypeId String - Type of the storage of Kafka hosts. For more information see the official documentation.
- resourcePresetId String
- diskSize number - Volume of the storage available to a Kafka host, in gigabytes.
- diskTypeId string - Type of the storage of Kafka hosts. For more information see the official documentation.
- resourcePresetId string
- disk_size int - Volume of the storage available to a Kafka host, in gigabytes.
- disk_type_id str - Type of the storage of Kafka hosts. For more information see the official documentation.
- resource_preset_id str
- diskSize Number - Volume of the storage available to a Kafka host, in gigabytes.
- diskTypeId String - Type of the storage of Kafka hosts. For more information see the official documentation.
- resourcePresetId String
MdbKafkaClusterConfigZookeeper, MdbKafkaClusterConfigZookeeperArgs
- Resources MdbKafkaClusterConfigZookeeperResources - Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
- Resources MdbKafkaClusterConfigZookeeperResources - Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
- resources MdbKafkaClusterConfigZookeeperResources - Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
- resources MdbKafkaClusterConfigZookeeperResources - Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
- resources MdbKafkaClusterConfigZookeeperResources - Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
- resources Property Map - Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
MdbKafkaClusterConfigZookeeperResources, MdbKafkaClusterConfigZookeeperResourcesArgs
- DiskSize int - Volume of the storage available to a ZooKeeper host, in gigabytes.
- DiskTypeId string - Type of the storage of ZooKeeper hosts. For more information see the official documentation.
- ResourcePresetId string
- DiskSize int - Volume of the storage available to a ZooKeeper host, in gigabytes.
- DiskTypeId string - Type of the storage of ZooKeeper hosts. For more information see the official documentation.
- ResourcePresetId string
- diskSize Integer - Volume of the storage available to a ZooKeeper host, in gigabytes.
- diskTypeId String - Type of the storage of ZooKeeper hosts. For more information see the official documentation.
- resourcePresetId String
- diskSize number - Volume of the storage available to a ZooKeeper host, in gigabytes.
- diskTypeId string - Type of the storage of ZooKeeper hosts. For more information see the official documentation.
- resourcePresetId string
- disk_size int - Volume of the storage available to a ZooKeeper host, in gigabytes.
- disk_type_id str - Type of the storage of ZooKeeper hosts. For more information see the official documentation.
- resource_preset_id str
- diskSize Number - Volume of the storage available to a ZooKeeper host, in gigabytes.
- diskTypeId String - Type of the storage of ZooKeeper hosts. For more information see the official documentation.
- resourcePresetId String
MdbKafkaClusterHost, MdbKafkaClusterHostArgs
- AssignPublicIp bool - Determines whether the host is assigned a public IP address.
- Health string - Health of the host.
- Name string - The fully qualified domain name of the host.
- Role string - Role of the host in the cluster.
- SubnetId string - The ID of the subnet, to which the host belongs.
- ZoneId string - The availability zone where the Kafka host was created.
- AssignPublicIp bool - Determines whether the host is assigned a public IP address.
- Health string - Health of the host.
- Name string - The fully qualified domain name of the host.
- Role string - Role of the host in the cluster.
- SubnetId string - The ID of the subnet, to which the host belongs.
- ZoneId string - The availability zone where the Kafka host was created.
- assignPublicIp Boolean - Determines whether the host is assigned a public IP address.
- health String - Health of the host.
- name String - The fully qualified domain name of the host.
- role String - Role of the host in the cluster.
- subnetId String - The ID of the subnet, to which the host belongs.
- zoneId String - The availability zone where the Kafka host was created.
- assignPublicIp boolean - Determines whether the host is assigned a public IP address.
- health string - Health of the host.
- name string - The fully qualified domain name of the host.
- role string - Role of the host in the cluster.
- subnetId string - The ID of the subnet, to which the host belongs.
- zoneId string - The availability zone where the Kafka host was created.
- assign_public_ip bool - Determines whether the host is assigned a public IP address.
- health str - Health of the host.
- name str - The fully qualified domain name of the host.
- role str - Role of the host in the cluster.
- subnet_id str - The ID of the subnet, to which the host belongs.
- zone_id str - The availability zone where the Kafka host was created.
- assignPublicIp Boolean - Determines whether the host is assigned a public IP address.
- health String - Health of the host.
- name String - The fully qualified domain name of the host.
- role String - Role of the host in the cluster.
- subnetId String - The ID of the subnet, to which the host belongs.
- zoneId String - The availability zone where the Kafka host was created.
MdbKafkaClusterMaintenanceWindow, MdbKafkaClusterMaintenanceWindowArgs
- Type string - Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of the window must be specified for a weekly window.
- Day string - Day of the week (in DDD format). Allowed values: "MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN".
- Hour int - Hour of the day in UTC (in HH format). Allowed value is between 1 and 24.
- Type string - Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of the window must be specified for a weekly window.
- Day string - Day of the week (in DDD format). Allowed values: "MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN".
- Hour int - Hour of the day in UTC (in HH format). Allowed value is between 1 and 24.
- type String - Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of the window must be specified for a weekly window.
- day String - Day of the week (in DDD format). Allowed values: "MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN".
- hour Integer - Hour of the day in UTC (in HH format). Allowed value is between 1 and 24.
- type string - Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of the window must be specified for a weekly window.
- day string - Day of the week (in DDD format). Allowed values: "MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN".
- hour number - Hour of the day in UTC (in HH format). Allowed value is between 1 and 24.
- type str - Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of the window must be specified for a weekly window.
- day str - Day of the week (in DDD format). Allowed values: "MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN".
- hour int - Hour of the day in UTC (in HH format). Allowed value is between 1 and 24.
- type String - Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of the window must be specified for a weekly window.
- day String - Day of the week (in DDD format). Allowed values: "MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN".
- hour Number - Hour of the day in UTC (in HH format). Allowed value is between 1 and 24.
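As a concrete illustration of the WEEKLY variant, the Go sketch below attaches a maintenance window to a minimal single-zone cluster. It is only a sketch: the networkId and subnetId stack configuration keys are hypothetical, and the remaining cluster values are illustrative.

package main

import (
    "github.com/pulumi/pulumi-yandex/sdk/go/yandex"
    "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    "github.com/pulumi/pulumi/sdk/v3/go/pulumi/config"
)

func main() {
    pulumi.Run(func(ctx *pulumi.Context) error {
        cfg := config.New(ctx, "")
        _, err := yandex.NewMdbKafkaCluster(ctx, "maintainedKafka", &yandex.MdbKafkaClusterArgs{
            NetworkId: pulumi.String(cfg.Require("networkId")),
            SubnetIds: pulumi.StringArray{pulumi.String(cfg.Require("subnetId"))},
            Config: &yandex.MdbKafkaClusterConfigArgs{
                Version: pulumi.String("2.8"),
                Zones:   pulumi.StringArray{pulumi.String("ru-central1-a")},
                Kafka: &yandex.MdbKafkaClusterConfigKafkaArgs{
                    Resources: &yandex.MdbKafkaClusterConfigKafkaResourcesArgs{
                        ResourcePresetId: pulumi.String("s2.micro"),
                        DiskTypeId:       pulumi.String("network-ssd"),
                        DiskSize:         pulumi.Int(32),
                    },
                },
            },
            // A WEEKLY window requires both Day and Hour; an ANYTIME window takes neither.
            MaintenanceWindow: &yandex.MdbKafkaClusterMaintenanceWindowArgs{
                Type: pulumi.String("WEEKLY"),
                Day:  pulumi.String("SAT"),
                Hour: pulumi.Int(12),
            },
        })
        return err
    })
}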
MdbKafkaClusterTopic, MdbKafkaClusterTopicArgs
- Name string - The name of the topic.
- Partitions int - The number of the topic's partitions.
- ReplicationFactor int - Amount of data copies (replicas) for the topic in the cluster.
- TopicConfig MdbKafkaClusterTopicTopicConfig - User-defined settings for the topic. The structure is documented below.
- Name string - The name of the topic.
- Partitions int - The number of the topic's partitions.
- ReplicationFactor int - Amount of data copies (replicas) for the topic in the cluster.
- TopicConfig MdbKafkaClusterTopicTopicConfig - User-defined settings for the topic. The structure is documented below.
- name String - The name of the topic.
- partitions Integer - The number of the topic's partitions.
- replicationFactor Integer - Amount of data copies (replicas) for the topic in the cluster.
- topicConfig MdbKafkaClusterTopicTopicConfig - User-defined settings for the topic. The structure is documented below.
- name string - The name of the topic.
- partitions number - The number of the topic's partitions.
- replicationFactor number - Amount of data copies (replicas) for the topic in the cluster.
- topicConfig MdbKafkaClusterTopicTopicConfig - User-defined settings for the topic. The structure is documented below.
- name str - The name of the topic.
- partitions int - The number of the topic's partitions.
- replication_factor int - Amount of data copies (replicas) for the topic in the cluster.
- topic_config MdbKafkaClusterTopicTopicConfig - User-defined settings for the topic. The structure is documented below.
- name String - The name of the topic.
- partitions Number - The number of the topic's partitions.
- replicationFactor Number - Amount of data copies (replicas) for the topic in the cluster.
- topicConfig Property Map - User-defined settings for the topic. The structure is documented below.
MdbKafkaClusterTopicTopicConfig, MdbKafkaClusterTopicTopicConfigArgs
- CleanupPolicy string
- CompressionType string
- DeleteRetentionMs string
- FileDeleteDelayMs string
- FlushMessages string
- FlushMs string
- MaxMessageBytes string
- MinCompactionLagMs string
- MinInsyncReplicas string
- Preallocate bool
- RetentionBytes string
- RetentionMs string
- SegmentBytes string
- CleanupPolicy string
- CompressionType string
- DeleteRetentionMs string
- FileDeleteDelayMs string
- FlushMessages string
- FlushMs string
- MaxMessageBytes string
- MinCompactionLagMs string
- MinInsyncReplicas string
- Preallocate bool
- RetentionBytes string
- RetentionMs string
- SegmentBytes string
- cleanupPolicy String
- compressionType String
- deleteRetentionMs String
- fileDeleteDelayMs String
- flushMessages String
- flushMs String
- maxMessageBytes String
- minCompactionLagMs String
- minInsyncReplicas String
- preallocate Boolean
- retentionBytes String
- retentionMs String
- segmentBytes String
- cleanupPolicy string
- compressionType string
- deleteRetentionMs string
- fileDeleteDelayMs string
- flushMessages string
- flushMs string
- maxMessageBytes string
- minCompactionLagMs string
- minInsyncReplicas string
- preallocate boolean
- retentionBytes string
- retentionMs string
- segmentBytes string
- cleanup_policy str
- compression_type str
- delete_retention_ms str
- file_delete_delay_ms str
- flush_messages str
- flush_ms str
- max_message_bytes str
- min_compaction_lag_ms str
- min_insync_replicas str
- preallocate bool
- retention_bytes str
- retention_ms str
- segment_bytes str
- cleanupPolicy String
- compressionType String
- deleteRetentionMs String
- fileDeleteDelayMs String
- flushMessages String
- flushMs String
- maxMessageBytes String
- minCompactionLagMs String
- minInsyncReplicas String
- preallocate Boolean
- retentionBytes String
- retentionMs String
- segmentBytes String
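These settings can also be set on the standalone yandex.MdbKafkaTopic resource through its topic config block; the hedged Go sketch below shows a compacted topic under that assumption. The kafkaClusterId configuration key, the topic name and every topic config value are illustrative, including the enum-style strings, which mirror the style used elsewhere on this page (for example COMPRESSION_TYPE_ZSTD).

package main

import (
    "github.com/pulumi/pulumi-yandex/sdk/go/yandex"
    "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    "github.com/pulumi/pulumi/sdk/v3/go/pulumi/config"
)

func main() {
    pulumi.Run(func(ctx *pulumi.Context) error {
        cfg := config.New(ctx, "")
        // Hypothetical compacted topic; all values are illustrative.
        _, err := yandex.NewMdbKafkaTopic(ctx, "compactedTopic", &yandex.MdbKafkaTopicArgs{
            ClusterId:         pulumi.String(cfg.Require("kafkaClusterId")),
            Name:              pulumi.String("compacted-topic"),
            Partitions:        pulumi.Int(6),
            ReplicationFactor: pulumi.Int(1),
            TopicConfig: &yandex.MdbKafkaTopicTopicConfigArgs{
                CleanupPolicy:   pulumi.String("CLEANUP_POLICY_COMPACT"),
                CompressionType: pulumi.String("COMPRESSION_TYPE_ZSTD"),
                MaxMessageBytes: pulumi.String("1048576"),
                RetentionBytes:  pulumi.String("1073741824"),
                RetentionMs:     pulumi.String("604800000"),
            },
        })
        return err
    })
}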
MdbKafkaClusterUser, MdbKafkaClusterUserArgs
- Name string - The name of the user.
- Password string - The password of the user.
- Permissions List<MdbKafkaClusterUserPermission> - Set of permissions granted to the user. The structure is documented below.
- Name string - The name of the user.
- Password string - The password of the user.
- Permissions []MdbKafkaClusterUserPermission - Set of permissions granted to the user. The structure is documented below.
- name String - The name of the user.
- password String - The password of the user.
- permissions List<MdbKafkaClusterUserPermission> - Set of permissions granted to the user. The structure is documented below.
- name string - The name of the user.
- password string - The password of the user.
- permissions MdbKafkaClusterUserPermission[] - Set of permissions granted to the user. The structure is documented below.
- name str - The name of the user.
- password str - The password of the user.
- permissions Sequence[MdbKafkaClusterUserPermission] - Set of permissions granted to the user. The structure is documented below.
- name String - The name of the user.
- password String - The password of the user.
- permissions List<Property Map> - Set of permissions granted to the user. The structure is documented below.
MdbKafkaClusterUserPermission, MdbKafkaClusterUserPermissionArgs
- role str - The role type to grant to the topic.
- topic_name str - The name of the topic that the permission grants access to.
Import
A cluster can be imported using the id
of the resource, e.g.
$ pulumi import yandex:index/mdbKafkaCluster:MdbKafkaCluster foo cluster_id
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- Yandex pulumi/pulumi-yandex
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the
yandex
Terraform Provider.