azure-native.containerservice.AgentPool
An AKS agent pool (node pool) resource. Azure REST API version: 2023-04-01. Prior API version in Azure Native 1.x: 2021-03-01.
Other available API versions: 2019-02-01, 2019-04-01, 2020-06-01, 2021-02-01, 2021-08-01, 2022-04-02-preview, 2023-05-02-preview, 2023-06-01, 2023-06-02-preview, 2023-07-01, 2023-07-02-preview, 2023-08-01, 2023-08-02-preview, 2023-09-01, 2023-09-02-preview, 2023-10-01, 2023-10-02-preview, 2023-11-01, 2023-11-02-preview, 2024-01-01, 2024-01-02-preview, 2024-02-01, 2024-02-02-preview, 2024-03-02-preview, 2024-04-02-preview, 2024-05-01, 2024-05-02-preview, 2024-06-02-preview, 2024-07-01, 2024-07-02-preview, 2024-08-01.
Example Usage
Create Agent Pool using an agent pool snapshot
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using AzureNative = Pulumi.AzureNative;
return await Deployment.RunAsync(() =>
{
var agentPool = new AzureNative.ContainerService.AgentPool("agentPool", new()
{
AgentPoolName = "agentpool1",
Count = 3,
CreationData = new AzureNative.ContainerService.Inputs.CreationDataArgs
{
SourceResourceId = "/subscriptions/subid1/resourceGroups/rg1/providers/Microsoft.ContainerService/snapshots/snapshot1",
},
EnableFIPS = true,
OrchestratorVersion = "",
OsType = AzureNative.ContainerService.OSType.Linux,
ResourceGroupName = "rg1",
ResourceName = "clustername1",
VmSize = "Standard_DS2_v2",
});
});
package main
import (
containerservice "github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := containerservice.NewAgentPool(ctx, "agentPool", &containerservice.AgentPoolArgs{
AgentPoolName: pulumi.String("agentpool1"),
Count: pulumi.Int(3),
CreationData: &containerservice.CreationDataArgs{
SourceResourceId: pulumi.String("/subscriptions/subid1/resourceGroups/rg1/providers/Microsoft.ContainerService/snapshots/snapshot1"),
},
EnableFIPS: pulumi.Bool(true),
OrchestratorVersion: pulumi.String(""),
OsType: pulumi.String(containerservice.OSTypeLinux),
ResourceGroupName: pulumi.String("rg1"),
ResourceName: pulumi.String("clustername1"),
VmSize: pulumi.String("Standard_DS2_v2"),
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.azurenative.containerservice.AgentPool;
import com.pulumi.azurenative.containerservice.AgentPoolArgs;
import com.pulumi.azurenative.containerservice.inputs.CreationDataArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var agentPool = new AgentPool("agentPool", AgentPoolArgs.builder()
.agentPoolName("agentpool1")
.count(3)
.creationData(CreationDataArgs.builder()
.sourceResourceId("/subscriptions/subid1/resourceGroups/rg1/providers/Microsoft.ContainerService/snapshots/snapshot1")
.build())
.enableFIPS(true)
.orchestratorVersion("")
.osType("Linux")
.resourceGroupName("rg1")
.resourceName("clustername1")
.vmSize("Standard_DS2_v2")
.build());
}
}
import pulumi
import pulumi_azure_native as azure_native
agent_pool = azure_native.containerservice.AgentPool("agentPool",
agent_pool_name="agentpool1",
count=3,
creation_data={
"source_resource_id": "/subscriptions/subid1/resourceGroups/rg1/providers/Microsoft.ContainerService/snapshots/snapshot1",
},
enable_fips=True,
orchestrator_version="",
os_type=azure_native.containerservice.OSType.LINUX,
resource_group_name="rg1",
resource_name_="clustername1",
vm_size="Standard_DS2_v2")
import * as pulumi from "@pulumi/pulumi";
import * as azure_native from "@pulumi/azure-native";
const agentPool = new azure_native.containerservice.AgentPool("agentPool", {
agentPoolName: "agentpool1",
count: 3,
creationData: {
sourceResourceId: "/subscriptions/subid1/resourceGroups/rg1/providers/Microsoft.ContainerService/snapshots/snapshot1",
},
enableFIPS: true,
orchestratorVersion: "",
osType: azure_native.containerservice.OSType.Linux,
resourceGroupName: "rg1",
resourceName: "clustername1",
vmSize: "Standard_DS2_v2",
});
resources:
agentPool:
type: azure-native:containerservice:AgentPool
properties:
agentPoolName: agentpool1
count: 3
creationData:
sourceResourceId: /subscriptions/subid1/resourceGroups/rg1/providers/Microsoft.ContainerService/snapshots/snapshot1
enableFIPS: true
orchestratorVersion: ""
osType: Linux
resourceGroupName: rg1
resourceName: clustername1
vmSize: Standard_DS2_v2
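If you also want the pool's resolved name and provisioning state available from pulumi stack output, a minimal sketch continuing the TypeScript program above (provisioningState is an output property of the AgentPool resource):

export const agentPoolName = agentPool.name;
export const agentPoolProvisioningState = agentPool.provisioningState;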
Create Agent Pool with Dedicated Host Group
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using AzureNative = Pulumi.AzureNative;
return await Deployment.RunAsync(() =>
{
var agentPool = new AzureNative.ContainerService.AgentPool("agentPool", new()
{
AgentPoolName = "agentpool1",
Count = 3,
HostGroupID = "/subscriptions/subid1/resourcegroups/rg/providers/Microsoft.Compute/hostGroups/hostgroup1",
OrchestratorVersion = "",
OsType = AzureNative.ContainerService.OSType.Linux,
ResourceGroupName = "rg1",
ResourceName = "clustername1",
VmSize = "Standard_DS2_v2",
});
});
package main
import (
containerservice "github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := containerservice.NewAgentPool(ctx, "agentPool", &containerservice.AgentPoolArgs{
AgentPoolName: pulumi.String("agentpool1"),
Count: pulumi.Int(3),
HostGroupID: pulumi.String("/subscriptions/subid1/resourcegroups/rg/providers/Microsoft.Compute/hostGroups/hostgroup1"),
OrchestratorVersion: pulumi.String(""),
OsType: pulumi.String(containerservice.OSTypeLinux),
ResourceGroupName: pulumi.String("rg1"),
ResourceName: pulumi.String("clustername1"),
VmSize: pulumi.String("Standard_DS2_v2"),
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.azurenative.containerservice.AgentPool;
import com.pulumi.azurenative.containerservice.AgentPoolArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var agentPool = new AgentPool("agentPool", AgentPoolArgs.builder()
.agentPoolName("agentpool1")
.count(3)
.hostGroupID("/subscriptions/subid1/resourcegroups/rg/providers/Microsoft.Compute/hostGroups/hostgroup1")
.orchestratorVersion("")
.osType("Linux")
.resourceGroupName("rg1")
.resourceName("clustername1")
.vmSize("Standard_DS2_v2")
.build());
}
}
import pulumi
import pulumi_azure_native as azure_native
agent_pool = azure_native.containerservice.AgentPool("agentPool",
agent_pool_name="agentpool1",
count=3,
host_group_id="/subscriptions/subid1/resourcegroups/rg/providers/Microsoft.Compute/hostGroups/hostgroup1",
orchestrator_version="",
os_type=azure_native.containerservice.OSType.LINUX,
resource_group_name="rg1",
resource_name_="clustername1",
vm_size="Standard_DS2_v2")
import * as pulumi from "@pulumi/pulumi";
import * as azure_native from "@pulumi/azure-native";
const agentPool = new azure_native.containerservice.AgentPool("agentPool", {
agentPoolName: "agentpool1",
count: 3,
hostGroupID: "/subscriptions/subid1/resourcegroups/rg/providers/Microsoft.Compute/hostGroups/hostgroup1",
orchestratorVersion: "",
osType: azure_native.containerservice.OSType.Linux,
resourceGroupName: "rg1",
resourceName: "clustername1",
vmSize: "Standard_DS2_v2",
});
resources:
agentPool:
type: azure-native:containerservice:AgentPool
properties:
agentPoolName: agentpool1
count: 3
hostGroupID: /subscriptions/subid1/resourcegroups/rg/providers/Microsoft.Compute/hostGroups/hostgroup1
orchestratorVersion: ""
osType: Linux
resourceGroupName: rg1
resourceName: clustername1
vmSize: Standard_DS2_v2
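The dedicated host group ID above is a literal string. A sketch, continuing the TypeScript program above, that assembles the same ID from stack configuration instead; the config keys (subscriptionId, hostGroupResourceGroup, hostGroupName) are hypothetical names chosen for illustration:

// Build the host group resource ID from configuration rather than hard-coding it.
const cfg = new pulumi.Config();
const hostGroupId = pulumi.interpolate`/subscriptions/${cfg.require("subscriptionId")}/resourcegroups/${cfg.require("hostGroupResourceGroup")}/providers/Microsoft.Compute/hostGroups/${cfg.require("hostGroupName")}`;
// Pass hostGroupId as the hostGroupID input in place of the literal string above.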
Create Agent Pool with EncryptionAtHost enabled
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using AzureNative = Pulumi.AzureNative;
return await Deployment.RunAsync(() =>
{
var agentPool = new AzureNative.ContainerService.AgentPool("agentPool", new()
{
AgentPoolName = "agentpool1",
Count = 3,
EnableEncryptionAtHost = true,
OrchestratorVersion = "",
OsType = AzureNative.ContainerService.OSType.Linux,
ResourceGroupName = "rg1",
ResourceName = "clustername1",
VmSize = "Standard_DS2_v2",
});
});
package main
import (
containerservice "github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := containerservice.NewAgentPool(ctx, "agentPool", &containerservice.AgentPoolArgs{
AgentPoolName: pulumi.String("agentpool1"),
Count: pulumi.Int(3),
EnableEncryptionAtHost: pulumi.Bool(true),
OrchestratorVersion: pulumi.String(""),
OsType: pulumi.String(containerservice.OSTypeLinux),
ResourceGroupName: pulumi.String("rg1"),
ResourceName: pulumi.String("clustername1"),
VmSize: pulumi.String("Standard_DS2_v2"),
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.azurenative.containerservice.AgentPool;
import com.pulumi.azurenative.containerservice.AgentPoolArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var agentPool = new AgentPool("agentPool", AgentPoolArgs.builder()
.agentPoolName("agentpool1")
.count(3)
.enableEncryptionAtHost(true)
.orchestratorVersion("")
.osType("Linux")
.resourceGroupName("rg1")
.resourceName("clustername1")
.vmSize("Standard_DS2_v2")
.build());
}
}
import pulumi
import pulumi_azure_native as azure_native
agent_pool = azure_native.containerservice.AgentPool("agentPool",
agent_pool_name="agentpool1",
count=3,
enable_encryption_at_host=True,
orchestrator_version="",
os_type=azure_native.containerservice.OSType.LINUX,
resource_group_name="rg1",
resource_name_="clustername1",
vm_size="Standard_DS2_v2")
import * as pulumi from "@pulumi/pulumi";
import * as azure_native from "@pulumi/azure-native";
const agentPool = new azure_native.containerservice.AgentPool("agentPool", {
agentPoolName: "agentpool1",
count: 3,
enableEncryptionAtHost: true,
orchestratorVersion: "",
osType: azure_native.containerservice.OSType.Linux,
resourceGroupName: "rg1",
resourceName: "clustername1",
vmSize: "Standard_DS2_v2",
});
resources:
agentPool:
type: azure-native:containerservice:AgentPool
properties:
agentPoolName: agentpool1
count: 3
enableEncryptionAtHost: true
orchestratorVersion: ""
osType: Linux
resourceGroupName: rg1
resourceName: clustername1
vmSize: Standard_DS2_v2
Create Agent Pool with Ephemeral OS Disk
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using AzureNative = Pulumi.AzureNative;
return await Deployment.RunAsync(() =>
{
var agentPool = new AzureNative.ContainerService.AgentPool("agentPool", new()
{
AgentPoolName = "agentpool1",
Count = 3,
OrchestratorVersion = "",
OsDiskSizeGB = 64,
OsDiskType = AzureNative.ContainerService.OSDiskType.Ephemeral,
OsType = AzureNative.ContainerService.OSType.Linux,
ResourceGroupName = "rg1",
ResourceName = "clustername1",
VmSize = "Standard_DS2_v2",
});
});
package main
import (
containerservice "github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := containerservice.NewAgentPool(ctx, "agentPool", &containerservice.AgentPoolArgs{
AgentPoolName: pulumi.String("agentpool1"),
Count: pulumi.Int(3),
OrchestratorVersion: pulumi.String(""),
OsDiskSizeGB: pulumi.Int(64),
OsDiskType: pulumi.String(containerservice.OSDiskTypeEphemeral),
OsType: pulumi.String(containerservice.OSTypeLinux),
ResourceGroupName: pulumi.String("rg1"),
ResourceName: pulumi.String("clustername1"),
VmSize: pulumi.String("Standard_DS2_v2"),
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.azurenative.containerservice.AgentPool;
import com.pulumi.azurenative.containerservice.AgentPoolArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var agentPool = new AgentPool("agentPool", AgentPoolArgs.builder()
.agentPoolName("agentpool1")
.count(3)
.orchestratorVersion("")
.osDiskSizeGB(64)
.osDiskType("Ephemeral")
.osType("Linux")
.resourceGroupName("rg1")
.resourceName("clustername1")
.vmSize("Standard_DS2_v2")
.build());
}
}
import pulumi
import pulumi_azure_native as azure_native
agent_pool = azure_native.containerservice.AgentPool("agentPool",
agent_pool_name="agentpool1",
count=3,
orchestrator_version="",
os_disk_size_gb=64,
os_disk_type=azure_native.containerservice.OSDiskType.EPHEMERAL,
os_type=azure_native.containerservice.OSType.LINUX,
resource_group_name="rg1",
resource_name_="clustername1",
vm_size="Standard_DS2_v2")
import * as pulumi from "@pulumi/pulumi";
import * as azure_native from "@pulumi/azure-native";
const agentPool = new azure_native.containerservice.AgentPool("agentPool", {
agentPoolName: "agentpool1",
count: 3,
orchestratorVersion: "",
osDiskSizeGB: 64,
osDiskType: azure_native.containerservice.OSDiskType.Ephemeral,
osType: azure_native.containerservice.OSType.Linux,
resourceGroupName: "rg1",
resourceName: "clustername1",
vmSize: "Standard_DS2_v2",
});
resources:
agentPool:
type: azure-native:containerservice:AgentPool
properties:
agentPoolName: agentpool1
count: 3
orchestratorVersion: ""
osDiskSizeGB: 64
osDiskType: Ephemeral
osType: Linux
resourceGroupName: rg1
resourceName: clustername1
vmSize: Standard_DS2_v2
Create Agent Pool with FIPS enabled OS
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using AzureNative = Pulumi.AzureNative;
return await Deployment.RunAsync(() =>
{
var agentPool = new AzureNative.ContainerService.AgentPool("agentPool", new()
{
AgentPoolName = "agentpool1",
Count = 3,
EnableFIPS = true,
OrchestratorVersion = "",
OsType = AzureNative.ContainerService.OSType.Linux,
ResourceGroupName = "rg1",
ResourceName = "clustername1",
VmSize = "Standard_DS2_v2",
});
});
package main
import (
containerservice "github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := containerservice.NewAgentPool(ctx, "agentPool", &containerservice.AgentPoolArgs{
AgentPoolName: pulumi.String("agentpool1"),
Count: pulumi.Int(3),
EnableFIPS: pulumi.Bool(true),
OrchestratorVersion: pulumi.String(""),
OsType: pulumi.String(containerservice.OSTypeLinux),
ResourceGroupName: pulumi.String("rg1"),
ResourceName: pulumi.String("clustername1"),
VmSize: pulumi.String("Standard_DS2_v2"),
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.azurenative.containerservice.AgentPool;
import com.pulumi.azurenative.containerservice.AgentPoolArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var agentPool = new AgentPool("agentPool", AgentPoolArgs.builder()
.agentPoolName("agentpool1")
.count(3)
.enableFIPS(true)
.orchestratorVersion("")
.osType("Linux")
.resourceGroupName("rg1")
.resourceName("clustername1")
.vmSize("Standard_DS2_v2")
.build());
}
}
import pulumi
import pulumi_azure_native as azure_native
agent_pool = azure_native.containerservice.AgentPool("agentPool",
agent_pool_name="agentpool1",
count=3,
enable_fips=True,
orchestrator_version="",
os_type=azure_native.containerservice.OSType.LINUX,
resource_group_name="rg1",
resource_name_="clustername1",
vm_size="Standard_DS2_v2")
import * as pulumi from "@pulumi/pulumi";
import * as azure_native from "@pulumi/azure-native";
const agentPool = new azure_native.containerservice.AgentPool("agentPool", {
agentPoolName: "agentpool1",
count: 3,
enableFIPS: true,
orchestratorVersion: "",
osType: azure_native.containerservice.OSType.Linux,
resourceGroupName: "rg1",
resourceName: "clustername1",
vmSize: "Standard_DS2_v2",
});
resources:
agentPool:
type: azure-native:containerservice:AgentPool
properties:
agentPoolName: agentpool1
count: 3
enableFIPS: true
orchestratorVersion: ""
osType: Linux
resourceGroupName: rg1
resourceName: clustername1
vmSize: Standard_DS2_v2
Create Agent Pool with GPUMIG
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using AzureNative = Pulumi.AzureNative;
return await Deployment.RunAsync(() =>
{
var agentPool = new AzureNative.ContainerService.AgentPool("agentPool", new()
{
AgentPoolName = "agentpool1",
Count = 3,
GpuInstanceProfile = AzureNative.ContainerService.GPUInstanceProfile.MIG2g,
KubeletConfig = new AzureNative.ContainerService.Inputs.KubeletConfigArgs
{
AllowedUnsafeSysctls = new[]
{
"kernel.msg*",
"net.core.somaxconn",
},
CpuCfsQuota = true,
CpuCfsQuotaPeriod = "200ms",
CpuManagerPolicy = "static",
FailSwapOn = false,
ImageGcHighThreshold = 90,
ImageGcLowThreshold = 70,
TopologyManagerPolicy = "best-effort",
},
LinuxOSConfig = new AzureNative.ContainerService.Inputs.LinuxOSConfigArgs
{
SwapFileSizeMB = 1500,
Sysctls = new AzureNative.ContainerService.Inputs.SysctlConfigArgs
{
KernelThreadsMax = 99999,
NetCoreWmemDefault = 12345,
NetIpv4IpLocalPortRange = "20000 60000",
NetIpv4TcpTwReuse = true,
},
TransparentHugePageDefrag = "madvise",
TransparentHugePageEnabled = "always",
},
OrchestratorVersion = "",
OsType = AzureNative.ContainerService.OSType.Linux,
ResourceGroupName = "rg1",
ResourceName = "clustername1",
VmSize = "Standard_ND96asr_v4",
});
});
package main
import (
containerservice "github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := containerservice.NewAgentPool(ctx, "agentPool", &containerservice.AgentPoolArgs{
AgentPoolName: pulumi.String("agentpool1"),
Count: pulumi.Int(3),
GpuInstanceProfile: pulumi.String(containerservice.GPUInstanceProfileMIG2g),
KubeletConfig: &containerservice.KubeletConfigArgs{
AllowedUnsafeSysctls: pulumi.StringArray{
pulumi.String("kernel.msg*"),
pulumi.String("net.core.somaxconn"),
},
CpuCfsQuota: pulumi.Bool(true),
CpuCfsQuotaPeriod: pulumi.String("200ms"),
CpuManagerPolicy: pulumi.String("static"),
FailSwapOn: pulumi.Bool(false),
ImageGcHighThreshold: pulumi.Int(90),
ImageGcLowThreshold: pulumi.Int(70),
TopologyManagerPolicy: pulumi.String("best-effort"),
},
LinuxOSConfig: &containerservice.LinuxOSConfigArgs{
SwapFileSizeMB: pulumi.Int(1500),
Sysctls: &containerservice.SysctlConfigArgs{
KernelThreadsMax: pulumi.Int(99999),
NetCoreWmemDefault: pulumi.Int(12345),
NetIpv4IpLocalPortRange: pulumi.String("20000 60000"),
NetIpv4TcpTwReuse: pulumi.Bool(true),
},
TransparentHugePageDefrag: pulumi.String("madvise"),
TransparentHugePageEnabled: pulumi.String("always"),
},
OrchestratorVersion: pulumi.String(""),
OsType: pulumi.String(containerservice.OSTypeLinux),
ResourceGroupName: pulumi.String("rg1"),
ResourceName: pulumi.String("clustername1"),
VmSize: pulumi.String("Standard_ND96asr_v4"),
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.azurenative.containerservice.AgentPool;
import com.pulumi.azurenative.containerservice.AgentPoolArgs;
import com.pulumi.azurenative.containerservice.inputs.KubeletConfigArgs;
import com.pulumi.azurenative.containerservice.inputs.LinuxOSConfigArgs;
import com.pulumi.azurenative.containerservice.inputs.SysctlConfigArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var agentPool = new AgentPool("agentPool", AgentPoolArgs.builder()
.agentPoolName("agentpool1")
.count(3)
.gpuInstanceProfile("MIG2g")
.kubeletConfig(KubeletConfigArgs.builder()
.allowedUnsafeSysctls(
"kernel.msg*",
"net.core.somaxconn")
.cpuCfsQuota(true)
.cpuCfsQuotaPeriod("200ms")
.cpuManagerPolicy("static")
.failSwapOn(false)
.imageGcHighThreshold(90)
.imageGcLowThreshold(70)
.topologyManagerPolicy("best-effort")
.build())
.linuxOSConfig(LinuxOSConfigArgs.builder()
.swapFileSizeMB(1500)
.sysctls(SysctlConfigArgs.builder()
.kernelThreadsMax(99999)
.netCoreWmemDefault(12345)
.netIpv4IpLocalPortRange("20000 60000")
.netIpv4TcpTwReuse(true)
.build())
.transparentHugePageDefrag("madvise")
.transparentHugePageEnabled("always")
.build())
.orchestratorVersion("")
.osType("Linux")
.resourceGroupName("rg1")
.resourceName("clustername1")
.vmSize("Standard_ND96asr_v4")
.build());
}
}
import pulumi
import pulumi_azure_native as azure_native
agent_pool = azure_native.containerservice.AgentPool("agentPool",
agent_pool_name="agentpool1",
count=3,
gpu_instance_profile=azure_native.containerservice.GPUInstanceProfile.MIG2G,
kubelet_config={
"allowed_unsafe_sysctls": [
"kernel.msg*",
"net.core.somaxconn",
],
"cpu_cfs_quota": True,
"cpu_cfs_quota_period": "200ms",
"cpu_manager_policy": "static",
"fail_swap_on": False,
"image_gc_high_threshold": 90,
"image_gc_low_threshold": 70,
"topology_manager_policy": "best-effort",
},
linux_os_config={
"swap_file_size_mb": 1500,
"sysctls": {
"kernel_threads_max": 99999,
"net_core_wmem_default": 12345,
"net_ipv4_ip_local_port_range": "20000 60000",
"net_ipv4_tcp_tw_reuse": True,
},
"transparent_huge_page_defrag": "madvise",
"transparent_huge_page_enabled": "always",
},
orchestrator_version="",
os_type=azure_native.containerservice.OSType.LINUX,
resource_group_name="rg1",
resource_name_="clustername1",
vm_size="Standard_ND96asr_v4")
import * as pulumi from "@pulumi/pulumi";
import * as azure_native from "@pulumi/azure-native";
const agentPool = new azure_native.containerservice.AgentPool("agentPool", {
agentPoolName: "agentpool1",
count: 3,
gpuInstanceProfile: azure_native.containerservice.GPUInstanceProfile.MIG2g,
kubeletConfig: {
allowedUnsafeSysctls: [
"kernel.msg*",
"net.core.somaxconn",
],
cpuCfsQuota: true,
cpuCfsQuotaPeriod: "200ms",
cpuManagerPolicy: "static",
failSwapOn: false,
imageGcHighThreshold: 90,
imageGcLowThreshold: 70,
topologyManagerPolicy: "best-effort",
},
linuxOSConfig: {
swapFileSizeMB: 1500,
sysctls: {
kernelThreadsMax: 99999,
netCoreWmemDefault: 12345,
netIpv4IpLocalPortRange: "20000 60000",
netIpv4TcpTwReuse: true,
},
transparentHugePageDefrag: "madvise",
transparentHugePageEnabled: "always",
},
orchestratorVersion: "",
osType: azure_native.containerservice.OSType.Linux,
resourceGroupName: "rg1",
resourceName: "clustername1",
vmSize: "Standard_ND96asr_v4",
});
resources:
agentPool:
type: azure-native:containerservice:AgentPool
properties:
agentPoolName: agentpool1
count: 3
gpuInstanceProfile: MIG2g
kubeletConfig:
allowedUnsafeSysctls:
- kernel.msg*
- net.core.somaxconn
cpuCfsQuota: true
cpuCfsQuotaPeriod: 200ms
cpuManagerPolicy: static
failSwapOn: false
imageGcHighThreshold: 90
imageGcLowThreshold: 70
topologyManagerPolicy: best-effort
linuxOSConfig:
swapFileSizeMB: 1500
sysctls:
kernelThreadsMax: 99999
netCoreWmemDefault: 12345
netIpv4IpLocalPortRange: 20000 60000
netIpv4TcpTwReuse: true
transparentHugePageDefrag: madvise
transparentHugePageEnabled: always
orchestratorVersion: ""
osType: Linux
resourceGroupName: rg1
resourceName: clustername1
vmSize: Standard_ND96asr_v4
Create Agent Pool with Krustlet and the WASI runtime
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using AzureNative = Pulumi.AzureNative;
return await Deployment.RunAsync(() =>
{
var agentPool = new AzureNative.ContainerService.AgentPool("agentPool", new()
{
AgentPoolName = "agentpool1",
Count = 3,
Mode = AzureNative.ContainerService.AgentPoolMode.User,
OrchestratorVersion = "",
OsDiskSizeGB = 64,
OsType = AzureNative.ContainerService.OSType.Linux,
ResourceGroupName = "rg1",
ResourceName = "clustername1",
VmSize = "Standard_DS2_v2",
WorkloadRuntime = AzureNative.ContainerService.WorkloadRuntime.WasmWasi,
});
});
package main
import (
containerservice "github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := containerservice.NewAgentPool(ctx, "agentPool", &containerservice.AgentPoolArgs{
AgentPoolName: pulumi.String("agentpool1"),
Count: pulumi.Int(3),
Mode: pulumi.String(containerservice.AgentPoolModeUser),
OrchestratorVersion: pulumi.String(""),
OsDiskSizeGB: pulumi.Int(64),
OsType: pulumi.String(containerservice.OSTypeLinux),
ResourceGroupName: pulumi.String("rg1"),
ResourceName: pulumi.String("clustername1"),
VmSize: pulumi.String("Standard_DS2_v2"),
WorkloadRuntime: pulumi.String(containerservice.WorkloadRuntimeWasmWasi),
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.azurenative.containerservice.AgentPool;
import com.pulumi.azurenative.containerservice.AgentPoolArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var agentPool = new AgentPool("agentPool", AgentPoolArgs.builder()
.agentPoolName("agentpool1")
.count(3)
.mode("User")
.orchestratorVersion("")
.osDiskSizeGB(64)
.osType("Linux")
.resourceGroupName("rg1")
.resourceName("clustername1")
.vmSize("Standard_DS2_v2")
.workloadRuntime("WasmWasi")
.build());
}
}
import pulumi
import pulumi_azure_native as azure_native
agent_pool = azure_native.containerservice.AgentPool("agentPool",
agent_pool_name="agentpool1",
count=3,
mode=azure_native.containerservice.AgentPoolMode.USER,
orchestrator_version="",
os_disk_size_gb=64,
os_type=azure_native.containerservice.OSType.LINUX,
resource_group_name="rg1",
resource_name_="clustername1",
vm_size="Standard_DS2_v2",
workload_runtime=azure_native.containerservice.WorkloadRuntime.WASM_WASI)
import * as pulumi from "@pulumi/pulumi";
import * as azure_native from "@pulumi/azure-native";
const agentPool = new azure_native.containerservice.AgentPool("agentPool", {
agentPoolName: "agentpool1",
count: 3,
mode: azure_native.containerservice.AgentPoolMode.User,
orchestratorVersion: "",
osDiskSizeGB: 64,
osType: azure_native.containerservice.OSType.Linux,
resourceGroupName: "rg1",
resourceName: "clustername1",
vmSize: "Standard_DS2_v2",
workloadRuntime: azure_native.containerservice.WorkloadRuntime.WasmWasi,
});
resources:
agentPool:
type: azure-native:containerservice:AgentPool
properties:
agentPoolName: agentpool1
count: 3
mode: User
orchestratorVersion: ""
osDiskSizeGB: 64
osType: Linux
resourceGroupName: rg1
resourceName: clustername1
vmSize: Standard_DS2_v2
workloadRuntime: WasmWasi
Create Agent Pool with KubeletConfig and LinuxOSConfig
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using AzureNative = Pulumi.AzureNative;
return await Deployment.RunAsync(() =>
{
var agentPool = new AzureNative.ContainerService.AgentPool("agentPool", new()
{
AgentPoolName = "agentpool1",
Count = 3,
KubeletConfig = new AzureNative.ContainerService.Inputs.KubeletConfigArgs
{
AllowedUnsafeSysctls = new[]
{
"kernel.msg*",
"net.core.somaxconn",
},
CpuCfsQuota = true,
CpuCfsQuotaPeriod = "200ms",
CpuManagerPolicy = "static",
FailSwapOn = false,
ImageGcHighThreshold = 90,
ImageGcLowThreshold = 70,
TopologyManagerPolicy = "best-effort",
},
LinuxOSConfig = new AzureNative.ContainerService.Inputs.LinuxOSConfigArgs
{
SwapFileSizeMB = 1500,
Sysctls = new AzureNative.ContainerService.Inputs.SysctlConfigArgs
{
KernelThreadsMax = 99999,
NetCoreWmemDefault = 12345,
NetIpv4IpLocalPortRange = "20000 60000",
NetIpv4TcpTwReuse = true,
},
TransparentHugePageDefrag = "madvise",
TransparentHugePageEnabled = "always",
},
OrchestratorVersion = "",
OsType = AzureNative.ContainerService.OSType.Linux,
ResourceGroupName = "rg1",
ResourceName = "clustername1",
VmSize = "Standard_DS2_v2",
});
});
package main
import (
containerservice "github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := containerservice.NewAgentPool(ctx, "agentPool", &containerservice.AgentPoolArgs{
AgentPoolName: pulumi.String("agentpool1"),
Count: pulumi.Int(3),
KubeletConfig: &containerservice.KubeletConfigArgs{
AllowedUnsafeSysctls: pulumi.StringArray{
pulumi.String("kernel.msg*"),
pulumi.String("net.core.somaxconn"),
},
CpuCfsQuota: pulumi.Bool(true),
CpuCfsQuotaPeriod: pulumi.String("200ms"),
CpuManagerPolicy: pulumi.String("static"),
FailSwapOn: pulumi.Bool(false),
ImageGcHighThreshold: pulumi.Int(90),
ImageGcLowThreshold: pulumi.Int(70),
TopologyManagerPolicy: pulumi.String("best-effort"),
},
LinuxOSConfig: &containerservice.LinuxOSConfigArgs{
SwapFileSizeMB: pulumi.Int(1500),
Sysctls: &containerservice.SysctlConfigArgs{
KernelThreadsMax: pulumi.Int(99999),
NetCoreWmemDefault: pulumi.Int(12345),
NetIpv4IpLocalPortRange: pulumi.String("20000 60000"),
NetIpv4TcpTwReuse: pulumi.Bool(true),
},
TransparentHugePageDefrag: pulumi.String("madvise"),
TransparentHugePageEnabled: pulumi.String("always"),
},
OrchestratorVersion: pulumi.String(""),
OsType: pulumi.String(containerservice.OSTypeLinux),
ResourceGroupName: pulumi.String("rg1"),
ResourceName: pulumi.String("clustername1"),
VmSize: pulumi.String("Standard_DS2_v2"),
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.azurenative.containerservice.AgentPool;
import com.pulumi.azurenative.containerservice.AgentPoolArgs;
import com.pulumi.azurenative.containerservice.inputs.KubeletConfigArgs;
import com.pulumi.azurenative.containerservice.inputs.LinuxOSConfigArgs;
import com.pulumi.azurenative.containerservice.inputs.SysctlConfigArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var agentPool = new AgentPool("agentPool", AgentPoolArgs.builder()
.agentPoolName("agentpool1")
.count(3)
.kubeletConfig(KubeletConfigArgs.builder()
.allowedUnsafeSysctls(
"kernel.msg*",
"net.core.somaxconn")
.cpuCfsQuota(true)
.cpuCfsQuotaPeriod("200ms")
.cpuManagerPolicy("static")
.failSwapOn(false)
.imageGcHighThreshold(90)
.imageGcLowThreshold(70)
.topologyManagerPolicy("best-effort")
.build())
.linuxOSConfig(LinuxOSConfigArgs.builder()
.swapFileSizeMB(1500)
.sysctls(SysctlConfigArgs.builder()
.kernelThreadsMax(99999)
.netCoreWmemDefault(12345)
.netIpv4IpLocalPortRange("20000 60000")
.netIpv4TcpTwReuse(true)
.build())
.transparentHugePageDefrag("madvise")
.transparentHugePageEnabled("always")
.build())
.orchestratorVersion("")
.osType("Linux")
.resourceGroupName("rg1")
.resourceName("clustername1")
.vmSize("Standard_DS2_v2")
.build());
}
}
import pulumi
import pulumi_azure_native as azure_native
agent_pool = azure_native.containerservice.AgentPool("agentPool",
agent_pool_name="agentpool1",
count=3,
kubelet_config={
"allowed_unsafe_sysctls": [
"kernel.msg*",
"net.core.somaxconn",
],
"cpu_cfs_quota": True,
"cpu_cfs_quota_period": "200ms",
"cpu_manager_policy": "static",
"fail_swap_on": False,
"image_gc_high_threshold": 90,
"image_gc_low_threshold": 70,
"topology_manager_policy": "best-effort",
},
linux_os_config={
"swap_file_size_mb": 1500,
"sysctls": {
"kernel_threads_max": 99999,
"net_core_wmem_default": 12345,
"net_ipv4_ip_local_port_range": "20000 60000",
"net_ipv4_tcp_tw_reuse": True,
},
"transparent_huge_page_defrag": "madvise",
"transparent_huge_page_enabled": "always",
},
orchestrator_version="",
os_type=azure_native.containerservice.OSType.LINUX,
resource_group_name="rg1",
resource_name_="clustername1",
vm_size="Standard_DS2_v2")
import * as pulumi from "@pulumi/pulumi";
import * as azure_native from "@pulumi/azure-native";
const agentPool = new azure_native.containerservice.AgentPool("agentPool", {
agentPoolName: "agentpool1",
count: 3,
kubeletConfig: {
allowedUnsafeSysctls: [
"kernel.msg*",
"net.core.somaxconn",
],
cpuCfsQuota: true,
cpuCfsQuotaPeriod: "200ms",
cpuManagerPolicy: "static",
failSwapOn: false,
imageGcHighThreshold: 90,
imageGcLowThreshold: 70,
topologyManagerPolicy: "best-effort",
},
linuxOSConfig: {
swapFileSizeMB: 1500,
sysctls: {
kernelThreadsMax: 99999,
netCoreWmemDefault: 12345,
netIpv4IpLocalPortRange: "20000 60000",
netIpv4TcpTwReuse: true,
},
transparentHugePageDefrag: "madvise",
transparentHugePageEnabled: "always",
},
orchestratorVersion: "",
osType: azure_native.containerservice.OSType.Linux,
resourceGroupName: "rg1",
resourceName: "clustername1",
vmSize: "Standard_DS2_v2",
});
resources:
agentPool:
type: azure-native:containerservice:AgentPool
properties:
agentPoolName: agentpool1
count: 3
kubeletConfig:
allowedUnsafeSysctls:
- kernel.msg*
- net.core.somaxconn
cpuCfsQuota: true
cpuCfsQuotaPeriod: 200ms
cpuManagerPolicy: static
failSwapOn: false
imageGcHighThreshold: 90
imageGcLowThreshold: 70
topologyManagerPolicy: best-effort
linuxOSConfig:
swapFileSizeMB: 1500
sysctls:
kernelThreadsMax: 99999
netCoreWmemDefault: 12345
netIpv4IpLocalPortRange: 20000 60000
netIpv4TcpTwReuse: true
transparentHugePageDefrag: madvise
transparentHugePageEnabled: always
orchestratorVersion: ""
osType: Linux
resourceGroupName: rg1
resourceName: clustername1
vmSize: Standard_DS2_v2
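The kubeletConfig and linuxOSConfig blocks in this example are repeated verbatim in the GPUMIG and OSSKU examples. In TypeScript they can be factored into shared constants and reused across several AgentPool resources; a sketch reusing the imports from the TypeScript example above (the constant and pool names are illustrative):

// Shared node configuration, defined once and passed to each pool.
const sharedKubeletConfig = {
    cpuManagerPolicy: "static",
    failSwapOn: false,
    topologyManagerPolicy: "best-effort",
};
const sharedLinuxOSConfig = {
    swapFileSizeMB: 1500,
    transparentHugePageEnabled: "always",
};
const secondPool = new azure_native.containerservice.AgentPool("secondPool", {
    agentPoolName: "agentpool2",
    count: 3,
    kubeletConfig: sharedKubeletConfig,
    linuxOSConfig: sharedLinuxOSConfig,
    resourceGroupName: "rg1",
    resourceName: "clustername1",
    vmSize: "Standard_DS2_v2",
});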
Create Agent Pool with OSSKU
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using AzureNative = Pulumi.AzureNative;
return await Deployment.RunAsync(() =>
{
var agentPool = new AzureNative.ContainerService.AgentPool("agentPool", new()
{
AgentPoolName = "agentpool1",
Count = 3,
KubeletConfig = new AzureNative.ContainerService.Inputs.KubeletConfigArgs
{
AllowedUnsafeSysctls = new[]
{
"kernel.msg*",
"net.core.somaxconn",
},
CpuCfsQuota = true,
CpuCfsQuotaPeriod = "200ms",
CpuManagerPolicy = "static",
FailSwapOn = false,
ImageGcHighThreshold = 90,
ImageGcLowThreshold = 70,
TopologyManagerPolicy = "best-effort",
},
LinuxOSConfig = new AzureNative.ContainerService.Inputs.LinuxOSConfigArgs
{
SwapFileSizeMB = 1500,
Sysctls = new AzureNative.ContainerService.Inputs.SysctlConfigArgs
{
KernelThreadsMax = 99999,
NetCoreWmemDefault = 12345,
NetIpv4IpLocalPortRange = "20000 60000",
NetIpv4TcpTwReuse = true,
},
TransparentHugePageDefrag = "madvise",
TransparentHugePageEnabled = "always",
},
OrchestratorVersion = "",
OsSKU = AzureNative.ContainerService.OSSKU.AzureLinux,
OsType = AzureNative.ContainerService.OSType.Linux,
ResourceGroupName = "rg1",
ResourceName = "clustername1",
VmSize = "Standard_DS2_v2",
});
});
package main
import (
containerservice "github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := containerservice.NewAgentPool(ctx, "agentPool", &containerservice.AgentPoolArgs{
AgentPoolName: pulumi.String("agentpool1"),
Count: pulumi.Int(3),
KubeletConfig: &containerservice.KubeletConfigArgs{
AllowedUnsafeSysctls: pulumi.StringArray{
pulumi.String("kernel.msg*"),
pulumi.String("net.core.somaxconn"),
},
CpuCfsQuota: pulumi.Bool(true),
CpuCfsQuotaPeriod: pulumi.String("200ms"),
CpuManagerPolicy: pulumi.String("static"),
FailSwapOn: pulumi.Bool(false),
ImageGcHighThreshold: pulumi.Int(90),
ImageGcLowThreshold: pulumi.Int(70),
TopologyManagerPolicy: pulumi.String("best-effort"),
},
LinuxOSConfig: &containerservice.LinuxOSConfigArgs{
SwapFileSizeMB: pulumi.Int(1500),
Sysctls: &containerservice.SysctlConfigArgs{
KernelThreadsMax: pulumi.Int(99999),
NetCoreWmemDefault: pulumi.Int(12345),
NetIpv4IpLocalPortRange: pulumi.String("20000 60000"),
NetIpv4TcpTwReuse: pulumi.Bool(true),
},
TransparentHugePageDefrag: pulumi.String("madvise"),
TransparentHugePageEnabled: pulumi.String("always"),
},
OrchestratorVersion: pulumi.String(""),
OsSKU: pulumi.String(containerservice.OSSKUAzureLinux),
OsType: pulumi.String(containerservice.OSTypeLinux),
ResourceGroupName: pulumi.String("rg1"),
ResourceName: pulumi.String("clustername1"),
VmSize: pulumi.String("Standard_DS2_v2"),
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.azurenative.containerservice.AgentPool;
import com.pulumi.azurenative.containerservice.AgentPoolArgs;
import com.pulumi.azurenative.containerservice.inputs.KubeletConfigArgs;
import com.pulumi.azurenative.containerservice.inputs.LinuxOSConfigArgs;
import com.pulumi.azurenative.containerservice.inputs.SysctlConfigArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var agentPool = new AgentPool("agentPool", AgentPoolArgs.builder()
.agentPoolName("agentpool1")
.count(3)
.kubeletConfig(KubeletConfigArgs.builder()
.allowedUnsafeSysctls(
"kernel.msg*",
"net.core.somaxconn")
.cpuCfsQuota(true)
.cpuCfsQuotaPeriod("200ms")
.cpuManagerPolicy("static")
.failSwapOn(false)
.imageGcHighThreshold(90)
.imageGcLowThreshold(70)
.topologyManagerPolicy("best-effort")
.build())
.linuxOSConfig(LinuxOSConfigArgs.builder()
.swapFileSizeMB(1500)
.sysctls(SysctlConfigArgs.builder()
.kernelThreadsMax(99999)
.netCoreWmemDefault(12345)
.netIpv4IpLocalPortRange("20000 60000")
.netIpv4TcpTwReuse(true)
.build())
.transparentHugePageDefrag("madvise")
.transparentHugePageEnabled("always")
.build())
.orchestratorVersion("")
.osSKU("AzureLinux")
.osType("Linux")
.resourceGroupName("rg1")
.resourceName("clustername1")
.vmSize("Standard_DS2_v2")
.build());
}
}
import pulumi
import pulumi_azure_native as azure_native
agent_pool = azure_native.containerservice.AgentPool("agentPool",
agent_pool_name="agentpool1",
count=3,
kubelet_config={
"allowed_unsafe_sysctls": [
"kernel.msg*",
"net.core.somaxconn",
],
"cpu_cfs_quota": True,
"cpu_cfs_quota_period": "200ms",
"cpu_manager_policy": "static",
"fail_swap_on": False,
"image_gc_high_threshold": 90,
"image_gc_low_threshold": 70,
"topology_manager_policy": "best-effort",
},
linux_os_config={
"swap_file_size_mb": 1500,
"sysctls": {
"kernel_threads_max": 99999,
"net_core_wmem_default": 12345,
"net_ipv4_ip_local_port_range": "20000 60000",
"net_ipv4_tcp_tw_reuse": True,
},
"transparent_huge_page_defrag": "madvise",
"transparent_huge_page_enabled": "always",
},
orchestrator_version="",
os_sku=azure_native.containerservice.OSSKU.AZURE_LINUX,
os_type=azure_native.containerservice.OSType.LINUX,
resource_group_name="rg1",
resource_name_="clustername1",
vm_size="Standard_DS2_v2")
import * as pulumi from "@pulumi/pulumi";
import * as azure_native from "@pulumi/azure-native";
const agentPool = new azure_native.containerservice.AgentPool("agentPool", {
agentPoolName: "agentpool1",
count: 3,
kubeletConfig: {
allowedUnsafeSysctls: [
"kernel.msg*",
"net.core.somaxconn",
],
cpuCfsQuota: true,
cpuCfsQuotaPeriod: "200ms",
cpuManagerPolicy: "static",
failSwapOn: false,
imageGcHighThreshold: 90,
imageGcLowThreshold: 70,
topologyManagerPolicy: "best-effort",
},
linuxOSConfig: {
swapFileSizeMB: 1500,
sysctls: {
kernelThreadsMax: 99999,
netCoreWmemDefault: 12345,
netIpv4IpLocalPortRange: "20000 60000",
netIpv4TcpTwReuse: true,
},
transparentHugePageDefrag: "madvise",
transparentHugePageEnabled: "always",
},
orchestratorVersion: "",
osSKU: azure_native.containerservice.OSSKU.AzureLinux,
osType: azure_native.containerservice.OSType.Linux,
resourceGroupName: "rg1",
resourceName: "clustername1",
vmSize: "Standard_DS2_v2",
});
resources:
agentPool:
type: azure-native:containerservice:AgentPool
properties:
agentPoolName: agentpool1
count: 3
kubeletConfig:
allowedUnsafeSysctls:
- kernel.msg*
- net.core.somaxconn
cpuCfsQuota: true
cpuCfsQuotaPeriod: 200ms
cpuManagerPolicy: static
failSwapOn: false
imageGcHighThreshold: 90
imageGcLowThreshold: 70
topologyManagerPolicy: best-effort
linuxOSConfig:
swapFileSizeMB: 1500
sysctls:
kernelThreadsMax: 99999
netCoreWmemDefault: 12345
netIpv4IpLocalPortRange: 20000 60000
netIpv4TcpTwReuse: true
transparentHugePageDefrag: madvise
transparentHugePageEnabled: always
orchestratorVersion: ""
osSKU: AzureLinux
osType: Linux
resourceGroupName: rg1
resourceName: clustername1
vmSize: Standard_DS2_v2
Create Agent Pool with PPG
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using AzureNative = Pulumi.AzureNative;
return await Deployment.RunAsync(() =>
{
var agentPool = new AzureNative.ContainerService.AgentPool("agentPool", new()
{
AgentPoolName = "agentpool1",
Count = 3,
OrchestratorVersion = "",
OsType = AzureNative.ContainerService.OSType.Linux,
ProximityPlacementGroupID = "/subscriptions/subid1/resourcegroups/rg1/providers/Microsoft.Compute/proximityPlacementGroups/ppg1",
ResourceGroupName = "rg1",
ResourceName = "clustername1",
VmSize = "Standard_DS2_v2",
});
});
package main
import (
containerservice "github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := containerservice.NewAgentPool(ctx, "agentPool", &containerservice.AgentPoolArgs{
AgentPoolName: pulumi.String("agentpool1"),
Count: pulumi.Int(3),
OrchestratorVersion: pulumi.String(""),
OsType: pulumi.String(containerservice.OSTypeLinux),
ProximityPlacementGroupID: pulumi.String("/subscriptions/subid1/resourcegroups/rg1/providers/Microsoft.Compute/proximityPlacementGroups/ppg1"),
ResourceGroupName: pulumi.String("rg1"),
ResourceName: pulumi.String("clustername1"),
VmSize: pulumi.String("Standard_DS2_v2"),
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.azurenative.containerservice.AgentPool;
import com.pulumi.azurenative.containerservice.AgentPoolArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var agentPool = new AgentPool("agentPool", AgentPoolArgs.builder()
.agentPoolName("agentpool1")
.count(3)
.orchestratorVersion("")
.osType("Linux")
.proximityPlacementGroupID("/subscriptions/subid1/resourcegroups/rg1/providers/Microsoft.Compute/proximityPlacementGroups/ppg1")
.resourceGroupName("rg1")
.resourceName("clustername1")
.vmSize("Standard_DS2_v2")
.build());
}
}
import pulumi
import pulumi_azure_native as azure_native
agent_pool = azure_native.containerservice.AgentPool("agentPool",
agent_pool_name="agentpool1",
count=3,
orchestrator_version="",
os_type=azure_native.containerservice.OSType.LINUX,
proximity_placement_group_id="/subscriptions/subid1/resourcegroups/rg1/providers/Microsoft.Compute/proximityPlacementGroups/ppg1",
resource_group_name="rg1",
resource_name_="clustername1",
vm_size="Standard_DS2_v2")
import * as pulumi from "@pulumi/pulumi";
import * as azure_native from "@pulumi/azure-native";
const agentPool = new azure_native.containerservice.AgentPool("agentPool", {
agentPoolName: "agentpool1",
count: 3,
orchestratorVersion: "",
osType: azure_native.containerservice.OSType.Linux,
proximityPlacementGroupID: "/subscriptions/subid1/resourcegroups/rg1/providers/Microsoft.Compute/proximityPlacementGroups/ppg1",
resourceGroupName: "rg1",
resourceName: "clustername1",
vmSize: "Standard_DS2_v2",
});
resources:
agentPool:
type: azure-native:containerservice:AgentPool
properties:
agentPoolName: agentpool1
count: 3
orchestratorVersion: ""
osType: Linux
proximityPlacementGroupID: /subscriptions/subid1/resourcegroups/rg1/providers/Microsoft.Compute/proximityPlacementGroups/ppg1
resourceGroupName: rg1
resourceName: clustername1
vmSize: Standard_DS2_v2
Create Agent Pool with UltraSSD enabled
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using AzureNative = Pulumi.AzureNative;
return await Deployment.RunAsync(() =>
{
var agentPool = new AzureNative.ContainerService.AgentPool("agentPool", new()
{
AgentPoolName = "agentpool1",
Count = 3,
EnableUltraSSD = true,
OrchestratorVersion = "",
OsType = AzureNative.ContainerService.OSType.Linux,
ResourceGroupName = "rg1",
ResourceName = "clustername1",
VmSize = "Standard_DS2_v2",
});
});
package main
import (
containerservice "github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := containerservice.NewAgentPool(ctx, "agentPool", &containerservice.AgentPoolArgs{
AgentPoolName: pulumi.String("agentpool1"),
Count: pulumi.Int(3),
EnableUltraSSD: pulumi.Bool(true),
OrchestratorVersion: pulumi.String(""),
OsType: pulumi.String(containerservice.OSTypeLinux),
ResourceGroupName: pulumi.String("rg1"),
ResourceName: pulumi.String("clustername1"),
VmSize: pulumi.String("Standard_DS2_v2"),
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.azurenative.containerservice.AgentPool;
import com.pulumi.azurenative.containerservice.AgentPoolArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var agentPool = new AgentPool("agentPool", AgentPoolArgs.builder()
.agentPoolName("agentpool1")
.count(3)
.enableUltraSSD(true)
.orchestratorVersion("")
.osType("Linux")
.resourceGroupName("rg1")
.resourceName("clustername1")
.vmSize("Standard_DS2_v2")
.build());
}
}
import pulumi
import pulumi_azure_native as azure_native
agent_pool = azure_native.containerservice.AgentPool("agentPool",
agent_pool_name="agentpool1",
count=3,
enable_ultra_ssd=True,
orchestrator_version="",
os_type=azure_native.containerservice.OSType.LINUX,
resource_group_name="rg1",
resource_name_="clustername1",
vm_size="Standard_DS2_v2")
import * as pulumi from "@pulumi/pulumi";
import * as azure_native from "@pulumi/azure-native";
const agentPool = new azure_native.containerservice.AgentPool("agentPool", {
agentPoolName: "agentpool1",
count: 3,
enableUltraSSD: true,
orchestratorVersion: "",
osType: azure_native.containerservice.OSType.Linux,
resourceGroupName: "rg1",
resourceName: "clustername1",
vmSize: "Standard_DS2_v2",
});
resources:
agentPool:
type: azure-native:containerservice:AgentPool
properties:
agentPoolName: agentpool1
count: 3
enableUltraSSD: true
orchestratorVersion: ""
osType: Linux
resourceGroupName: rg1
resourceName: clustername1
vmSize: Standard_DS2_v2
Create Agent Pool with Windows OSSKU
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using AzureNative = Pulumi.AzureNative;
return await Deployment.RunAsync(() =>
{
var agentPool = new AzureNative.ContainerService.AgentPool("agentPool", new()
{
AgentPoolName = "wnp2",
Count = 3,
OrchestratorVersion = "1.23.3",
OsSKU = AzureNative.ContainerService.OSSKU.Windows2022,
OsType = AzureNative.ContainerService.OSType.Windows,
ResourceGroupName = "rg1",
ResourceName = "clustername1",
VmSize = "Standard_D4s_v3",
});
});
package main
import (
containerservice "github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := containerservice.NewAgentPool(ctx, "agentPool", &containerservice.AgentPoolArgs{
AgentPoolName: pulumi.String("wnp2"),
Count: pulumi.Int(3),
OrchestratorVersion: pulumi.String("1.23.3"),
OsSKU: pulumi.String(containerservice.OSSKUWindows2022),
OsType: pulumi.String(containerservice.OSTypeWindows),
ResourceGroupName: pulumi.String("rg1"),
ResourceName: pulumi.String("clustername1"),
VmSize: pulumi.String("Standard_D4s_v3"),
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.azurenative.containerservice.AgentPool;
import com.pulumi.azurenative.containerservice.AgentPoolArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var agentPool = new AgentPool("agentPool", AgentPoolArgs.builder()
.agentPoolName("wnp2")
.count(3)
.orchestratorVersion("1.23.3")
.osSKU("Windows2022")
.osType("Windows")
.resourceGroupName("rg1")
.resourceName("clustername1")
.vmSize("Standard_D4s_v3")
.build());
}
}
import pulumi
import pulumi_azure_native as azure_native
agent_pool = azure_native.containerservice.AgentPool("agentPool",
agent_pool_name="wnp2",
count=3,
orchestrator_version="1.23.3",
os_sku=azure_native.containerservice.OSSKU.WINDOWS2022,
os_type=azure_native.containerservice.OSType.WINDOWS,
resource_group_name="rg1",
resource_name_="clustername1",
vm_size="Standard_D4s_v3")
import * as pulumi from "@pulumi/pulumi";
import * as azure_native from "@pulumi/azure-native";
const agentPool = new azure_native.containerservice.AgentPool("agentPool", {
agentPoolName: "wnp2",
count: 3,
orchestratorVersion: "1.23.3",
osSKU: azure_native.containerservice.OSSKU.Windows2022,
osType: azure_native.containerservice.OSType.Windows,
resourceGroupName: "rg1",
resourceName: "clustername1",
vmSize: "Standard_D4s_v3",
});
resources:
agentPool:
type: azure-native:containerservice:AgentPool
properties:
agentPoolName: wnp2
count: 3
orchestratorVersion: 1.23.3
osSKU: Windows2022
osType: Windows
resourceGroupName: rg1
resourceName: clustername1
vmSize: Standard_D4s_v3
Create Spot Agent Pool
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using AzureNative = Pulumi.AzureNative;
return await Deployment.RunAsync(() =>
{
var agentPool = new AzureNative.ContainerService.AgentPool("agentPool", new()
{
AgentPoolName = "agentpool1",
Count = 3,
NodeLabels =
{
{ "key1", "val1" },
},
NodeTaints = new[]
{
"Key1=Value1:NoSchedule",
},
OrchestratorVersion = "",
OsType = AzureNative.ContainerService.OSType.Linux,
ResourceGroupName = "rg1",
ResourceName = "clustername1",
ScaleSetEvictionPolicy = AzureNative.ContainerService.ScaleSetEvictionPolicy.Delete,
ScaleSetPriority = AzureNative.ContainerService.ScaleSetPriority.Spot,
Tags =
{
{ "name1", "val1" },
},
VmSize = "Standard_DS1_v2",
});
});
package main
import (
containerservice "github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := containerservice.NewAgentPool(ctx, "agentPool", &containerservice.AgentPoolArgs{
AgentPoolName: pulumi.String("agentpool1"),
Count: pulumi.Int(3),
NodeLabels: pulumi.StringMap{
"key1": pulumi.String("val1"),
},
NodeTaints: pulumi.StringArray{
pulumi.String("Key1=Value1:NoSchedule"),
},
OrchestratorVersion: pulumi.String(""),
OsType: pulumi.String(containerservice.OSTypeLinux),
ResourceGroupName: pulumi.String("rg1"),
ResourceName: pulumi.String("clustername1"),
ScaleSetEvictionPolicy: pulumi.String(containerservice.ScaleSetEvictionPolicyDelete),
ScaleSetPriority: pulumi.String(containerservice.ScaleSetPrioritySpot),
Tags: pulumi.StringMap{
"name1": pulumi.String("val1"),
},
VmSize: pulumi.String("Standard_DS1_v2"),
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.azurenative.containerservice.AgentPool;
import com.pulumi.azurenative.containerservice.AgentPoolArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var agentPool = new AgentPool("agentPool", AgentPoolArgs.builder()
.agentPoolName("agentpool1")
.count(3)
.nodeLabels(Map.of("key1", "val1"))
.nodeTaints("Key1=Value1:NoSchedule")
.orchestratorVersion("")
.osType("Linux")
.resourceGroupName("rg1")
.resourceName("clustername1")
.scaleSetEvictionPolicy("Delete")
.scaleSetPriority("Spot")
.tags(Map.of("name1", "val1"))
.vmSize("Standard_DS1_v2")
.build());
}
}
import pulumi
import pulumi_azure_native as azure_native
agent_pool = azure_native.containerservice.AgentPool("agentPool",
agent_pool_name="agentpool1",
count=3,
node_labels={
"key1": "val1",
},
node_taints=["Key1=Value1:NoSchedule"],
orchestrator_version="",
os_type=azure_native.containerservice.OSType.LINUX,
resource_group_name="rg1",
resource_name_="clustername1",
scale_set_eviction_policy=azure_native.containerservice.ScaleSetEvictionPolicy.DELETE,
scale_set_priority=azure_native.containerservice.ScaleSetPriority.SPOT,
tags={
"name1": "val1",
},
vm_size="Standard_DS1_v2")
import * as pulumi from "@pulumi/pulumi";
import * as azure_native from "@pulumi/azure-native";
const agentPool = new azure_native.containerservice.AgentPool("agentPool", {
agentPoolName: "agentpool1",
count: 3,
nodeLabels: {
key1: "val1",
},
nodeTaints: ["Key1=Value1:NoSchedule"],
orchestratorVersion: "",
osType: azure_native.containerservice.OSType.Linux,
resourceGroupName: "rg1",
resourceName: "clustername1",
scaleSetEvictionPolicy: azure_native.containerservice.ScaleSetEvictionPolicy.Delete,
scaleSetPriority: azure_native.containerservice.ScaleSetPriority.Spot,
tags: {
name1: "val1",
},
vmSize: "Standard_DS1_v2",
});
resources:
agentPool:
type: azure-native:containerservice:AgentPool
properties:
agentPoolName: agentpool1
count: 3
nodeLabels:
key1: val1
nodeTaints:
- Key1=Value1:NoSchedule
orchestratorVersion: ""
osType: Linux
resourceGroupName: rg1
resourceName: clustername1
scaleSetEvictionPolicy: Delete
scaleSetPriority: Spot
tags:
name1: val1
vmSize: Standard_DS1_v2
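The spot example above leaves the maximum spot price at its default. The AgentPool resource also exposes a spotMaxPrice input for capping the per-hour price; a sketch, not taken from the Azure sample above, where a value of -1 means the pool pays up to the current on-demand price:

import * as azure_native from "@pulumi/azure-native";

const cappedSpotPool = new azure_native.containerservice.AgentPool("cappedSpotPool", {
    agentPoolName: "spotpool2",
    count: 3,
    osType: azure_native.containerservice.OSType.Linux,
    resourceGroupName: "rg1",
    resourceName: "clustername1",
    scaleSetEvictionPolicy: azure_native.containerservice.ScaleSetEvictionPolicy.Delete,
    scaleSetPriority: azure_native.containerservice.ScaleSetPriority.Spot,
    spotMaxPrice: 0.05, // USD per VM-hour; -1 removes the cap and pays up to the on-demand price
    vmSize: "Standard_DS1_v2",
});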
Create/Update Agent Pool
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using AzureNative = Pulumi.AzureNative;
return await Deployment.RunAsync(() =>
{
var agentPool = new AzureNative.ContainerService.AgentPool("agentPool", new()
{
AgentPoolName = "agentpool1",
Count = 3,
Mode = AzureNative.ContainerService.AgentPoolMode.User,
NodeLabels =
{
{ "key1", "val1" },
},
NodeTaints = new[]
{
"Key1=Value1:NoSchedule",
},
OrchestratorVersion = "",
OsType = AzureNative.ContainerService.OSType.Linux,
ResourceGroupName = "rg1",
ResourceName = "clustername1",
ScaleSetEvictionPolicy = AzureNative.ContainerService.ScaleSetEvictionPolicy.Delete,
ScaleSetPriority = AzureNative.ContainerService.ScaleSetPriority.Spot,
Tags =
{
{ "name1", "val1" },
},
VmSize = "Standard_DS1_v2",
});
});
package main
import (
containerservice "github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := containerservice.NewAgentPool(ctx, "agentPool", &containerservice.AgentPoolArgs{
AgentPoolName: pulumi.String("agentpool1"),
Count: pulumi.Int(3),
Mode: pulumi.String(containerservice.AgentPoolModeUser),
NodeLabels: pulumi.StringMap{
"key1": pulumi.String("val1"),
},
NodeTaints: pulumi.StringArray{
pulumi.String("Key1=Value1:NoSchedule"),
},
OrchestratorVersion: pulumi.String(""),
OsType: pulumi.String(containerservice.OSTypeLinux),
ResourceGroupName: pulumi.String("rg1"),
ResourceName: pulumi.String("clustername1"),
ScaleSetEvictionPolicy: pulumi.String(containerservice.ScaleSetEvictionPolicyDelete),
ScaleSetPriority: pulumi.String(containerservice.ScaleSetPrioritySpot),
Tags: pulumi.StringMap{
"name1": pulumi.String("val1"),
},
VmSize: pulumi.String("Standard_DS1_v2"),
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.azurenative.containerservice.AgentPool;
import com.pulumi.azurenative.containerservice.AgentPoolArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var agentPool = new AgentPool("agentPool", AgentPoolArgs.builder()
.agentPoolName("agentpool1")
.count(3)
.mode("User")
.nodeLabels(Map.of("key1", "val1"))
.nodeTaints("Key1=Value1:NoSchedule")
.orchestratorVersion("")
.osType("Linux")
.resourceGroupName("rg1")
.resourceName("clustername1")
.scaleSetEvictionPolicy("Delete")
.scaleSetPriority("Spot")
.tags(Map.of("name1", "val1"))
.vmSize("Standard_DS1_v2")
.build());
}
}
import pulumi
import pulumi_azure_native as azure_native
agent_pool = azure_native.containerservice.AgentPool("agentPool",
agent_pool_name="agentpool1",
count=3,
mode=azure_native.containerservice.AgentPoolMode.USER,
node_labels={
"key1": "val1",
},
node_taints=["Key1=Value1:NoSchedule"],
orchestrator_version="",
os_type=azure_native.containerservice.OSType.LINUX,
resource_group_name="rg1",
resource_name_="clustername1",
scale_set_eviction_policy=azure_native.containerservice.ScaleSetEvictionPolicy.DELETE,
scale_set_priority=azure_native.containerservice.ScaleSetPriority.SPOT,
tags={
"name1": "val1",
},
vm_size="Standard_DS1_v2")
import * as pulumi from "@pulumi/pulumi";
import * as azure_native from "@pulumi/azure-native";
const agentPool = new azure_native.containerservice.AgentPool("agentPool", {
agentPoolName: "agentpool1",
count: 3,
mode: azure_native.containerservice.AgentPoolMode.User,
nodeLabels: {
key1: "val1",
},
nodeTaints: ["Key1=Value1:NoSchedule"],
orchestratorVersion: "",
osType: azure_native.containerservice.OSType.Linux,
resourceGroupName: "rg1",
resourceName: "clustername1",
scaleSetEvictionPolicy: azure_native.containerservice.ScaleSetEvictionPolicy.Delete,
scaleSetPriority: azure_native.containerservice.ScaleSetPriority.Spot,
tags: {
name1: "val1",
},
vmSize: "Standard_DS1_v2",
});
resources:
agentPool:
type: azure-native:containerservice:AgentPool
properties:
agentPoolName: agentpool1
count: 3
mode: User
nodeLabels:
key1: val1
nodeTaints:
- Key1=Value1:NoSchedule
orchestratorVersion: ""
osType: Linux
resourceGroupName: rg1
resourceName: clustername1
scaleSetEvictionPolicy: Delete
scaleSetPriority: Spot
tags:
name1: val1
vmSize: Standard_DS1_v2
Start Agent Pool
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using AzureNative = Pulumi.AzureNative;
return await Deployment.RunAsync(() =>
{
var agentPool = new AzureNative.ContainerService.AgentPool("agentPool", new()
{
AgentPoolName = "agentpool1",
PowerState = new AzureNative.ContainerService.Inputs.PowerStateArgs
{
Code = AzureNative.ContainerService.Code.Running,
},
ResourceGroupName = "rg1",
ResourceName = "clustername1",
});
});
package main
import (
containerservice "github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := containerservice.NewAgentPool(ctx, "agentPool", &containerservice.AgentPoolArgs{
AgentPoolName: pulumi.String("agentpool1"),
PowerState: &containerservice.PowerStateArgs{
Code: pulumi.String(containerservice.CodeRunning),
},
ResourceGroupName: pulumi.String("rg1"),
ResourceName: pulumi.String("clustername1"),
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.azurenative.containerservice.AgentPool;
import com.pulumi.azurenative.containerservice.AgentPoolArgs;
import com.pulumi.azurenative.containerservice.inputs.PowerStateArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var agentPool = new AgentPool("agentPool", AgentPoolArgs.builder()
.agentPoolName("agentpool1")
.powerState(PowerStateArgs.builder()
.code("Running")
.build())
.resourceGroupName("rg1")
.resourceName("clustername1")
.build());
}
}
import pulumi
import pulumi_azure_native as azure_native
agent_pool = azure_native.containerservice.AgentPool("agentPool",
agent_pool_name="agentpool1",
power_state={
"code": azure_native.containerservice.Code.RUNNING,
},
resource_group_name="rg1",
resource_name_="clustername1")
import * as pulumi from "@pulumi/pulumi";
import * as azure_native from "@pulumi/azure-native";
const agentPool = new azure_native.containerservice.AgentPool("agentPool", {
agentPoolName: "agentpool1",
powerState: {
code: azure_native.containerservice.Code.Running,
},
resourceGroupName: "rg1",
resourceName: "clustername1",
});
resources:
agentPool:
type: azure-native:containerservice:AgentPool
properties:
agentPoolName: agentpool1
powerState:
code: Running
resourceGroupName: rg1
resourceName: clustername1
Stop Agent Pool
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using AzureNative = Pulumi.AzureNative;
return await Deployment.RunAsync(() =>
{
var agentPool = new AzureNative.ContainerService.AgentPool("agentPool", new()
{
AgentPoolName = "agentpool1",
PowerState = new AzureNative.ContainerService.Inputs.PowerStateArgs
{
Code = AzureNative.ContainerService.Code.Stopped,
},
ResourceGroupName = "rg1",
ResourceName = "clustername1",
});
});
package main
import (
containerservice "github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := containerservice.NewAgentPool(ctx, "agentPool", &containerservice.AgentPoolArgs{
AgentPoolName: pulumi.String("agentpool1"),
PowerState: &containerservice.PowerStateArgs{
Code: pulumi.String(containerservice.CodeStopped),
},
ResourceGroupName: pulumi.String("rg1"),
ResourceName: pulumi.String("clustername1"),
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.azurenative.containerservice.AgentPool;
import com.pulumi.azurenative.containerservice.AgentPoolArgs;
import com.pulumi.azurenative.containerservice.inputs.PowerStateArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var agentPool = new AgentPool("agentPool", AgentPoolArgs.builder()
.agentPoolName("agentpool1")
.powerState(PowerStateArgs.builder()
.code("Stopped")
.build())
.resourceGroupName("rg1")
.resourceName("clustername1")
.build());
}
}
import pulumi
import pulumi_azure_native as azure_native
agent_pool = azure_native.containerservice.AgentPool("agentPool",
agent_pool_name="agentpool1",
power_state={
"code": azure_native.containerservice.Code.STOPPED,
},
resource_group_name="rg1",
resource_name_="clustername1")
import * as pulumi from "@pulumi/pulumi";
import * as azure_native from "@pulumi/azure-native";
const agentPool = new azure_native.containerservice.AgentPool("agentPool", {
agentPoolName: "agentpool1",
powerState: {
code: azure_native.containerservice.Code.Stopped,
},
resourceGroupName: "rg1",
resourceName: "clustername1",
});
resources:
agentPool:
type: azure-native:containerservice:AgentPool
properties:
agentPoolName: agentpool1
powerState:
code: Stopped
resourceGroupName: rg1
resourceName: clustername1
Update Agent Pool
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using AzureNative = Pulumi.AzureNative;
return await Deployment.RunAsync(() =>
{
var agentPool = new AzureNative.ContainerService.AgentPool("agentPool", new()
{
AgentPoolName = "agentpool1",
Count = 3,
EnableAutoScaling = true,
MaxCount = 2,
MinCount = 2,
NodeTaints = new[]
{
"Key1=Value1:NoSchedule",
},
OrchestratorVersion = "",
OsType = AzureNative.ContainerService.OSType.Linux,
ResourceGroupName = "rg1",
ResourceName = "clustername1",
ScaleSetEvictionPolicy = AzureNative.ContainerService.ScaleSetEvictionPolicy.Delete,
ScaleSetPriority = AzureNative.ContainerService.ScaleSetPriority.Spot,
VmSize = "Standard_DS1_v2",
});
});
package main
import (
containerservice "github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := containerservice.NewAgentPool(ctx, "agentPool", &containerservice.AgentPoolArgs{
AgentPoolName: pulumi.String("agentpool1"),
Count: pulumi.Int(3),
EnableAutoScaling: pulumi.Bool(true),
MaxCount: pulumi.Int(2),
MinCount: pulumi.Int(2),
NodeTaints: pulumi.StringArray{
pulumi.String("Key1=Value1:NoSchedule"),
},
OrchestratorVersion: pulumi.String(""),
OsType: pulumi.String(containerservice.OSTypeLinux),
ResourceGroupName: pulumi.String("rg1"),
ResourceName: pulumi.String("clustername1"),
ScaleSetEvictionPolicy: pulumi.String(containerservice.ScaleSetEvictionPolicyDelete),
ScaleSetPriority: pulumi.String(containerservice.ScaleSetPrioritySpot),
VmSize: pulumi.String("Standard_DS1_v2"),
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.azurenative.containerservice.AgentPool;
import com.pulumi.azurenative.containerservice.AgentPoolArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var agentPool = new AgentPool("agentPool", AgentPoolArgs.builder()
.agentPoolName("agentpool1")
.count(3)
.enableAutoScaling(true)
.maxCount(2)
.minCount(2)
.nodeTaints("Key1=Value1:NoSchedule")
.orchestratorVersion("")
.osType("Linux")
.resourceGroupName("rg1")
.resourceName("clustername1")
.scaleSetEvictionPolicy("Delete")
.scaleSetPriority("Spot")
.vmSize("Standard_DS1_v2")
.build());
}
}
import pulumi
import pulumi_azure_native as azure_native
agent_pool = azure_native.containerservice.AgentPool("agentPool",
agent_pool_name="agentpool1",
count=3,
enable_auto_scaling=True,
max_count=2,
min_count=2,
node_taints=["Key1=Value1:NoSchedule"],
orchestrator_version="",
os_type=azure_native.containerservice.OSType.LINUX,
resource_group_name="rg1",
resource_name_="clustername1",
scale_set_eviction_policy=azure_native.containerservice.ScaleSetEvictionPolicy.DELETE,
scale_set_priority=azure_native.containerservice.ScaleSetPriority.SPOT,
vm_size="Standard_DS1_v2")
import * as pulumi from "@pulumi/pulumi";
import * as azure_native from "@pulumi/azure-native";
const agentPool = new azure_native.containerservice.AgentPool("agentPool", {
agentPoolName: "agentpool1",
count: 3,
enableAutoScaling: true,
maxCount: 2,
minCount: 2,
nodeTaints: ["Key1=Value1:NoSchedule"],
orchestratorVersion: "",
osType: azure_native.containerservice.OSType.Linux,
resourceGroupName: "rg1",
resourceName: "clustername1",
scaleSetEvictionPolicy: azure_native.containerservice.ScaleSetEvictionPolicy.Delete,
scaleSetPriority: azure_native.containerservice.ScaleSetPriority.Spot,
vmSize: "Standard_DS1_v2",
});
resources:
agentPool:
type: azure-native:containerservice:AgentPool
properties:
agentPoolName: agentpool1
count: 3
enableAutoScaling: true
maxCount: 2
minCount: 2
nodeTaints:
- Key1=Value1:NoSchedule
orchestratorVersion: ""
osType: Linux
resourceGroupName: rg1
resourceName: clustername1
scaleSetEvictionPolicy: Delete
scaleSetPriority: Spot
vmSize: Standard_DS1_v2
Create AgentPool Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new AgentPool(name: string, args: AgentPoolArgs, opts?: CustomResourceOptions);
@overload
def AgentPool(resource_name: str,
args: AgentPoolArgs,
opts: Optional[ResourceOptions] = None)
@overload
def AgentPool(resource_name: str,
opts: Optional[ResourceOptions] = None,
resource_group_name: Optional[str] = None,
resource_name_: Optional[str] = None,
host_group_id: Optional[str] = None,
tags: Optional[Mapping[str, str]] = None,
enable_auto_scaling: Optional[bool] = None,
enable_encryption_at_host: Optional[bool] = None,
enable_fips: Optional[bool] = None,
enable_node_public_ip: Optional[bool] = None,
enable_ultra_ssd: Optional[bool] = None,
gpu_instance_profile: Optional[Union[str, GPUInstanceProfile]] = None,
agent_pool_name: Optional[str] = None,
kubelet_config: Optional[KubeletConfigArgs] = None,
kubelet_disk_type: Optional[Union[str, KubeletDiskType]] = None,
linux_os_config: Optional[LinuxOSConfigArgs] = None,
max_count: Optional[int] = None,
os_disk_size_gb: Optional[int] = None,
min_count: Optional[int] = None,
mode: Optional[Union[str, AgentPoolMode]] = None,
node_labels: Optional[Mapping[str, str]] = None,
node_public_ip_prefix_id: Optional[str] = None,
vnet_subnet_id: Optional[str] = None,
creation_data: Optional[CreationDataArgs] = None,
max_pods: Optional[int] = None,
os_disk_type: Optional[Union[str, OSDiskType]] = None,
os_sku: Optional[Union[str, OSSKU]] = None,
os_type: Optional[Union[str, OSType]] = None,
pod_subnet_id: Optional[str] = None,
power_state: Optional[PowerStateArgs] = None,
proximity_placement_group_id: Optional[str] = None,
count: Optional[int] = None,
availability_zones: Optional[Sequence[str]] = None,
scale_down_mode: Optional[Union[str, ScaleDownMode]] = None,
scale_set_eviction_policy: Optional[Union[str, ScaleSetEvictionPolicy]] = None,
scale_set_priority: Optional[Union[str, ScaleSetPriority]] = None,
spot_max_price: Optional[float] = None,
orchestrator_version: Optional[str] = None,
type: Optional[Union[str, AgentPoolType]] = None,
upgrade_settings: Optional[AgentPoolUpgradeSettingsArgs] = None,
vm_size: Optional[str] = None,
node_taints: Optional[Sequence[str]] = None,
workload_runtime: Optional[Union[str, WorkloadRuntime]] = None)
func NewAgentPool(ctx *Context, name string, args AgentPoolArgs, opts ...ResourceOption) (*AgentPool, error)
public AgentPool(string name, AgentPoolArgs args, CustomResourceOptions? opts = null)
public AgentPool(String name, AgentPoolArgs args)
public AgentPool(String name, AgentPoolArgs args, CustomResourceOptions options)
type: azure-native:containerservice:AgentPool
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
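As a quick orientation, here is a minimal TypeScript sketch of the constructor signature above. The resource group and cluster names are placeholder values, and the trailing options bag shows standard, optional CustomResourceOptions (not anything specific to AgentPool):
import * as azure_native from "@pulumi/azure-native";
// Minimal sketch: new AgentPool(name, args, opts?) with placeholder values.
const examplePool = new azure_native.containerservice.AgentPool("examplePool", {
    resourceGroupName: "rg1",
    resourceName: "clustername1",
    vmSize: "Standard_DS2_v2",
    count: 3,
}, {
    protect: false,            // standard option: guards against accidental deletion when true
    ignoreChanges: ["count"],  // standard option: useful when an autoscaler manages node count
});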
Parameters
- name string
- The unique name of the resource.
- args AgentPoolArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args AgentPoolArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args AgentPoolArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args AgentPoolArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args AgentPoolArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var azure_nativeAgentPoolResource = new AzureNative.ContainerService.AgentPool("azure-nativeAgentPoolResource", new()
{
ResourceGroupName = "string",
ResourceName = "string",
HostGroupID = "string",
Tags =
{
{ "string", "string" },
},
EnableAutoScaling = false,
EnableEncryptionAtHost = false,
EnableFIPS = false,
EnableNodePublicIP = false,
EnableUltraSSD = false,
GpuInstanceProfile = "string",
AgentPoolName = "string",
KubeletConfig = new AzureNative.ContainerService.Inputs.KubeletConfigArgs
{
AllowedUnsafeSysctls = new[]
{
"string",
},
ContainerLogMaxFiles = 0,
ContainerLogMaxSizeMB = 0,
CpuCfsQuota = false,
CpuCfsQuotaPeriod = "string",
CpuManagerPolicy = "string",
FailSwapOn = false,
ImageGcHighThreshold = 0,
ImageGcLowThreshold = 0,
PodMaxPids = 0,
TopologyManagerPolicy = "string",
},
KubeletDiskType = "string",
LinuxOSConfig = new AzureNative.ContainerService.Inputs.LinuxOSConfigArgs
{
SwapFileSizeMB = 0,
Sysctls = new AzureNative.ContainerService.Inputs.SysctlConfigArgs
{
FsAioMaxNr = 0,
FsFileMax = 0,
FsInotifyMaxUserWatches = 0,
FsNrOpen = 0,
KernelThreadsMax = 0,
NetCoreNetdevMaxBacklog = 0,
NetCoreOptmemMax = 0,
NetCoreRmemDefault = 0,
NetCoreRmemMax = 0,
NetCoreSomaxconn = 0,
NetCoreWmemDefault = 0,
NetCoreWmemMax = 0,
NetIpv4IpLocalPortRange = "string",
NetIpv4NeighDefaultGcThresh1 = 0,
NetIpv4NeighDefaultGcThresh2 = 0,
NetIpv4NeighDefaultGcThresh3 = 0,
NetIpv4TcpFinTimeout = 0,
NetIpv4TcpKeepaliveProbes = 0,
NetIpv4TcpKeepaliveTime = 0,
NetIpv4TcpMaxSynBacklog = 0,
NetIpv4TcpMaxTwBuckets = 0,
NetIpv4TcpTwReuse = false,
NetIpv4TcpkeepaliveIntvl = 0,
NetNetfilterNfConntrackBuckets = 0,
NetNetfilterNfConntrackMax = 0,
VmMaxMapCount = 0,
VmSwappiness = 0,
VmVfsCachePressure = 0,
},
TransparentHugePageDefrag = "string",
TransparentHugePageEnabled = "string",
},
MaxCount = 0,
OsDiskSizeGB = 0,
MinCount = 0,
Mode = "string",
NodeLabels =
{
{ "string", "string" },
},
NodePublicIPPrefixID = "string",
VnetSubnetID = "string",
CreationData = new AzureNative.ContainerService.Inputs.CreationDataArgs
{
SourceResourceId = "string",
},
MaxPods = 0,
OsDiskType = "string",
OsSKU = "string",
OsType = "string",
PodSubnetID = "string",
PowerState = new AzureNative.ContainerService.Inputs.PowerStateArgs
{
Code = "string",
},
ProximityPlacementGroupID = "string",
Count = 0,
AvailabilityZones = new[]
{
"string",
},
ScaleDownMode = "string",
ScaleSetEvictionPolicy = "string",
ScaleSetPriority = "string",
SpotMaxPrice = 0,
OrchestratorVersion = "string",
Type = "string",
UpgradeSettings = new AzureNative.ContainerService.Inputs.AgentPoolUpgradeSettingsArgs
{
MaxSurge = "string",
},
VmSize = "string",
NodeTaints = new[]
{
"string",
},
WorkloadRuntime = "string",
});
example, err := containerservice.NewAgentPool(ctx, "azure-nativeAgentPoolResource", &containerservice.AgentPoolArgs{
ResourceGroupName: pulumi.String("string"),
ResourceName: pulumi.String("string"),
HostGroupID: pulumi.String("string"),
Tags: pulumi.StringMap{
"string": pulumi.String("string"),
},
EnableAutoScaling: pulumi.Bool(false),
EnableEncryptionAtHost: pulumi.Bool(false),
EnableFIPS: pulumi.Bool(false),
EnableNodePublicIP: pulumi.Bool(false),
EnableUltraSSD: pulumi.Bool(false),
GpuInstanceProfile: pulumi.String("string"),
AgentPoolName: pulumi.String("string"),
KubeletConfig: &containerservice.KubeletConfigArgs{
AllowedUnsafeSysctls: pulumi.StringArray{
pulumi.String("string"),
},
ContainerLogMaxFiles: pulumi.Int(0),
ContainerLogMaxSizeMB: pulumi.Int(0),
CpuCfsQuota: pulumi.Bool(false),
CpuCfsQuotaPeriod: pulumi.String("string"),
CpuManagerPolicy: pulumi.String("string"),
FailSwapOn: pulumi.Bool(false),
ImageGcHighThreshold: pulumi.Int(0),
ImageGcLowThreshold: pulumi.Int(0),
PodMaxPids: pulumi.Int(0),
TopologyManagerPolicy: pulumi.String("string"),
},
KubeletDiskType: pulumi.String("string"),
LinuxOSConfig: &containerservice.LinuxOSConfigArgs{
SwapFileSizeMB: pulumi.Int(0),
Sysctls: &containerservice.SysctlConfigArgs{
FsAioMaxNr: pulumi.Int(0),
FsFileMax: pulumi.Int(0),
FsInotifyMaxUserWatches: pulumi.Int(0),
FsNrOpen: pulumi.Int(0),
KernelThreadsMax: pulumi.Int(0),
NetCoreNetdevMaxBacklog: pulumi.Int(0),
NetCoreOptmemMax: pulumi.Int(0),
NetCoreRmemDefault: pulumi.Int(0),
NetCoreRmemMax: pulumi.Int(0),
NetCoreSomaxconn: pulumi.Int(0),
NetCoreWmemDefault: pulumi.Int(0),
NetCoreWmemMax: pulumi.Int(0),
NetIpv4IpLocalPortRange: pulumi.String("string"),
NetIpv4NeighDefaultGcThresh1: pulumi.Int(0),
NetIpv4NeighDefaultGcThresh2: pulumi.Int(0),
NetIpv4NeighDefaultGcThresh3: pulumi.Int(0),
NetIpv4TcpFinTimeout: pulumi.Int(0),
NetIpv4TcpKeepaliveProbes: pulumi.Int(0),
NetIpv4TcpKeepaliveTime: pulumi.Int(0),
NetIpv4TcpMaxSynBacklog: pulumi.Int(0),
NetIpv4TcpMaxTwBuckets: pulumi.Int(0),
NetIpv4TcpTwReuse: pulumi.Bool(false),
NetIpv4TcpkeepaliveIntvl: pulumi.Int(0),
NetNetfilterNfConntrackBuckets: pulumi.Int(0),
NetNetfilterNfConntrackMax: pulumi.Int(0),
VmMaxMapCount: pulumi.Int(0),
VmSwappiness: pulumi.Int(0),
VmVfsCachePressure: pulumi.Int(0),
},
TransparentHugePageDefrag: pulumi.String("string"),
TransparentHugePageEnabled: pulumi.String("string"),
},
MaxCount: pulumi.Int(0),
OsDiskSizeGB: pulumi.Int(0),
MinCount: pulumi.Int(0),
Mode: pulumi.String("string"),
NodeLabels: pulumi.StringMap{
"string": pulumi.String("string"),
},
NodePublicIPPrefixID: pulumi.String("string"),
VnetSubnetID: pulumi.String("string"),
CreationData: &containerservice.CreationDataArgs{
SourceResourceId: pulumi.String("string"),
},
MaxPods: pulumi.Int(0),
OsDiskType: pulumi.String("string"),
OsSKU: pulumi.String("string"),
OsType: pulumi.String("string"),
PodSubnetID: pulumi.String("string"),
PowerState: &containerservice.PowerStateArgs{
Code: pulumi.String("string"),
},
ProximityPlacementGroupID: pulumi.String("string"),
Count: pulumi.Int(0),
AvailabilityZones: pulumi.StringArray{
pulumi.String("string"),
},
ScaleDownMode: pulumi.String("string"),
ScaleSetEvictionPolicy: pulumi.String("string"),
ScaleSetPriority: pulumi.String("string"),
SpotMaxPrice: pulumi.Float64(0),
OrchestratorVersion: pulumi.String("string"),
Type: pulumi.String("string"),
UpgradeSettings: &containerservice.AgentPoolUpgradeSettingsArgs{
MaxSurge: pulumi.String("string"),
},
VmSize: pulumi.String("string"),
NodeTaints: pulumi.StringArray{
pulumi.String("string"),
},
WorkloadRuntime: pulumi.String("string"),
})
var azure_nativeAgentPoolResource = new AgentPool("azure-nativeAgentPoolResource", AgentPoolArgs.builder()
.resourceGroupName("string")
.resourceName("string")
.hostGroupID("string")
.tags(Map.of("string", "string"))
.enableAutoScaling(false)
.enableEncryptionAtHost(false)
.enableFIPS(false)
.enableNodePublicIP(false)
.enableUltraSSD(false)
.gpuInstanceProfile("string")
.agentPoolName("string")
.kubeletConfig(KubeletConfigArgs.builder()
.allowedUnsafeSysctls("string")
.containerLogMaxFiles(0)
.containerLogMaxSizeMB(0)
.cpuCfsQuota(false)
.cpuCfsQuotaPeriod("string")
.cpuManagerPolicy("string")
.failSwapOn(false)
.imageGcHighThreshold(0)
.imageGcLowThreshold(0)
.podMaxPids(0)
.topologyManagerPolicy("string")
.build())
.kubeletDiskType("string")
.linuxOSConfig(LinuxOSConfigArgs.builder()
.swapFileSizeMB(0)
.sysctls(SysctlConfigArgs.builder()
.fsAioMaxNr(0)
.fsFileMax(0)
.fsInotifyMaxUserWatches(0)
.fsNrOpen(0)
.kernelThreadsMax(0)
.netCoreNetdevMaxBacklog(0)
.netCoreOptmemMax(0)
.netCoreRmemDefault(0)
.netCoreRmemMax(0)
.netCoreSomaxconn(0)
.netCoreWmemDefault(0)
.netCoreWmemMax(0)
.netIpv4IpLocalPortRange("string")
.netIpv4NeighDefaultGcThresh1(0)
.netIpv4NeighDefaultGcThresh2(0)
.netIpv4NeighDefaultGcThresh3(0)
.netIpv4TcpFinTimeout(0)
.netIpv4TcpKeepaliveProbes(0)
.netIpv4TcpKeepaliveTime(0)
.netIpv4TcpMaxSynBacklog(0)
.netIpv4TcpMaxTwBuckets(0)
.netIpv4TcpTwReuse(false)
.netIpv4TcpkeepaliveIntvl(0)
.netNetfilterNfConntrackBuckets(0)
.netNetfilterNfConntrackMax(0)
.vmMaxMapCount(0)
.vmSwappiness(0)
.vmVfsCachePressure(0)
.build())
.transparentHugePageDefrag("string")
.transparentHugePageEnabled("string")
.build())
.maxCount(0)
.osDiskSizeGB(0)
.minCount(0)
.mode("string")
.nodeLabels(Map.of("string", "string"))
.nodePublicIPPrefixID("string")
.vnetSubnetID("string")
.creationData(CreationDataArgs.builder()
.sourceResourceId("string")
.build())
.maxPods(0)
.osDiskType("string")
.osSKU("string")
.osType("string")
.podSubnetID("string")
.powerState(PowerStateArgs.builder()
.code("string")
.build())
.proximityPlacementGroupID("string")
.count(0)
.availabilityZones("string")
.scaleDownMode("string")
.scaleSetEvictionPolicy("string")
.scaleSetPriority("string")
.spotMaxPrice(0)
.orchestratorVersion("string")
.type("string")
.upgradeSettings(AgentPoolUpgradeSettingsArgs.builder()
.maxSurge("string")
.build())
.vmSize("string")
.nodeTaints("string")
.workloadRuntime("string")
.build());
azure_native_agent_pool_resource = azure_native.containerservice.AgentPool("azure-nativeAgentPoolResource",
resource_group_name="string",
resource_name_="string",
host_group_id="string",
tags={
"string": "string",
},
enable_auto_scaling=False,
enable_encryption_at_host=False,
enable_fips=False,
enable_node_public_ip=False,
enable_ultra_ssd=False,
gpu_instance_profile="string",
agent_pool_name="string",
kubelet_config={
"allowedUnsafeSysctls": ["string"],
"containerLogMaxFiles": 0,
"containerLogMaxSizeMB": 0,
"cpuCfsQuota": False,
"cpuCfsQuotaPeriod": "string",
"cpuManagerPolicy": "string",
"failSwapOn": False,
"imageGcHighThreshold": 0,
"imageGcLowThreshold": 0,
"podMaxPids": 0,
"topologyManagerPolicy": "string",
},
kubelet_disk_type="string",
linux_os_config={
"swapFileSizeMB": 0,
"sysctls": {
"fsAioMaxNr": 0,
"fsFileMax": 0,
"fsInotifyMaxUserWatches": 0,
"fsNrOpen": 0,
"kernelThreadsMax": 0,
"netCoreNetdevMaxBacklog": 0,
"netCoreOptmemMax": 0,
"netCoreRmemDefault": 0,
"netCoreRmemMax": 0,
"netCoreSomaxconn": 0,
"netCoreWmemDefault": 0,
"netCoreWmemMax": 0,
"netIpv4IpLocalPortRange": "string",
"netIpv4NeighDefaultGcThresh1": 0,
"netIpv4NeighDefaultGcThresh2": 0,
"netIpv4NeighDefaultGcThresh3": 0,
"netIpv4TcpFinTimeout": 0,
"netIpv4TcpKeepaliveProbes": 0,
"netIpv4TcpKeepaliveTime": 0,
"netIpv4TcpMaxSynBacklog": 0,
"netIpv4TcpMaxTwBuckets": 0,
"netIpv4TcpTwReuse": False,
"netIpv4TcpkeepaliveIntvl": 0,
"netNetfilterNfConntrackBuckets": 0,
"netNetfilterNfConntrackMax": 0,
"vmMaxMapCount": 0,
"vmSwappiness": 0,
"vmVfsCachePressure": 0,
},
"transparentHugePageDefrag": "string",
"transparentHugePageEnabled": "string",
},
max_count=0,
os_disk_size_gb=0,
min_count=0,
mode="string",
node_labels={
"string": "string",
},
node_public_ip_prefix_id="string",
vnet_subnet_id="string",
creation_data={
"sourceResourceId": "string",
},
max_pods=0,
os_disk_type="string",
os_sku="string",
os_type="string",
pod_subnet_id="string",
power_state={
"code": "string",
},
proximity_placement_group_id="string",
count=0,
availability_zones=["string"],
scale_down_mode="string",
scale_set_eviction_policy="string",
scale_set_priority="string",
spot_max_price=0,
orchestrator_version="string",
type="string",
upgrade_settings={
"maxSurge": "string",
},
vm_size="string",
node_taints=["string"],
workload_runtime="string")
const azure_nativeAgentPoolResource = new azure_native.containerservice.AgentPool("azure-nativeAgentPoolResource", {
resourceGroupName: "string",
resourceName: "string",
hostGroupID: "string",
tags: {
string: "string",
},
enableAutoScaling: false,
enableEncryptionAtHost: false,
enableFIPS: false,
enableNodePublicIP: false,
enableUltraSSD: false,
gpuInstanceProfile: "string",
agentPoolName: "string",
kubeletConfig: {
allowedUnsafeSysctls: ["string"],
containerLogMaxFiles: 0,
containerLogMaxSizeMB: 0,
cpuCfsQuota: false,
cpuCfsQuotaPeriod: "string",
cpuManagerPolicy: "string",
failSwapOn: false,
imageGcHighThreshold: 0,
imageGcLowThreshold: 0,
podMaxPids: 0,
topologyManagerPolicy: "string",
},
kubeletDiskType: "string",
linuxOSConfig: {
swapFileSizeMB: 0,
sysctls: {
fsAioMaxNr: 0,
fsFileMax: 0,
fsInotifyMaxUserWatches: 0,
fsNrOpen: 0,
kernelThreadsMax: 0,
netCoreNetdevMaxBacklog: 0,
netCoreOptmemMax: 0,
netCoreRmemDefault: 0,
netCoreRmemMax: 0,
netCoreSomaxconn: 0,
netCoreWmemDefault: 0,
netCoreWmemMax: 0,
netIpv4IpLocalPortRange: "string",
netIpv4NeighDefaultGcThresh1: 0,
netIpv4NeighDefaultGcThresh2: 0,
netIpv4NeighDefaultGcThresh3: 0,
netIpv4TcpFinTimeout: 0,
netIpv4TcpKeepaliveProbes: 0,
netIpv4TcpKeepaliveTime: 0,
netIpv4TcpMaxSynBacklog: 0,
netIpv4TcpMaxTwBuckets: 0,
netIpv4TcpTwReuse: false,
netIpv4TcpkeepaliveIntvl: 0,
netNetfilterNfConntrackBuckets: 0,
netNetfilterNfConntrackMax: 0,
vmMaxMapCount: 0,
vmSwappiness: 0,
vmVfsCachePressure: 0,
},
transparentHugePageDefrag: "string",
transparentHugePageEnabled: "string",
},
maxCount: 0,
osDiskSizeGB: 0,
minCount: 0,
mode: "string",
nodeLabels: {
string: "string",
},
nodePublicIPPrefixID: "string",
vnetSubnetID: "string",
creationData: {
sourceResourceId: "string",
},
maxPods: 0,
osDiskType: "string",
osSKU: "string",
osType: "string",
podSubnetID: "string",
powerState: {
code: "string",
},
proximityPlacementGroupID: "string",
count: 0,
availabilityZones: ["string"],
scaleDownMode: "string",
scaleSetEvictionPolicy: "string",
scaleSetPriority: "string",
spotMaxPrice: 0,
orchestratorVersion: "string",
type: "string",
upgradeSettings: {
maxSurge: "string",
},
vmSize: "string",
nodeTaints: ["string"],
workloadRuntime: "string",
});
type: azure-native:containerservice:AgentPool
properties:
agentPoolName: string
availabilityZones:
- string
count: 0
creationData:
sourceResourceId: string
enableAutoScaling: false
enableEncryptionAtHost: false
enableFIPS: false
enableNodePublicIP: false
enableUltraSSD: false
gpuInstanceProfile: string
hostGroupID: string
kubeletConfig:
allowedUnsafeSysctls:
- string
containerLogMaxFiles: 0
containerLogMaxSizeMB: 0
cpuCfsQuota: false
cpuCfsQuotaPeriod: string
cpuManagerPolicy: string
failSwapOn: false
imageGcHighThreshold: 0
imageGcLowThreshold: 0
podMaxPids: 0
topologyManagerPolicy: string
kubeletDiskType: string
linuxOSConfig:
swapFileSizeMB: 0
sysctls:
fsAioMaxNr: 0
fsFileMax: 0
fsInotifyMaxUserWatches: 0
fsNrOpen: 0
kernelThreadsMax: 0
netCoreNetdevMaxBacklog: 0
netCoreOptmemMax: 0
netCoreRmemDefault: 0
netCoreRmemMax: 0
netCoreSomaxconn: 0
netCoreWmemDefault: 0
netCoreWmemMax: 0
netIpv4IpLocalPortRange: string
netIpv4NeighDefaultGcThresh1: 0
netIpv4NeighDefaultGcThresh2: 0
netIpv4NeighDefaultGcThresh3: 0
netIpv4TcpFinTimeout: 0
netIpv4TcpKeepaliveProbes: 0
netIpv4TcpKeepaliveTime: 0
netIpv4TcpMaxSynBacklog: 0
netIpv4TcpMaxTwBuckets: 0
netIpv4TcpTwReuse: false
netIpv4TcpkeepaliveIntvl: 0
netNetfilterNfConntrackBuckets: 0
netNetfilterNfConntrackMax: 0
vmMaxMapCount: 0
vmSwappiness: 0
vmVfsCachePressure: 0
transparentHugePageDefrag: string
transparentHugePageEnabled: string
maxCount: 0
maxPods: 0
minCount: 0
mode: string
nodeLabels:
string: string
nodePublicIPPrefixID: string
nodeTaints:
- string
orchestratorVersion: string
osDiskSizeGB: 0
osDiskType: string
osSKU: string
osType: string
podSubnetID: string
powerState:
code: string
proximityPlacementGroupID: string
resourceGroupName: string
resourceName: string
scaleDownMode: string
scaleSetEvictionPolicy: string
scaleSetPriority: string
spotMaxPrice: 0
tags:
string: string
type: string
upgradeSettings:
maxSurge: string
vmSize: string
vnetSubnetID: string
workloadRuntime: string
AgentPool Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
The AgentPool resource accepts the following input properties:
- ResourceGroupName string - The name of the resource group. The name is case insensitive.
- ResourceName string - The name of the managed cluster resource.
- AgentPoolName string - The name of the agent pool.
- AvailabilityZones List<string> - The list of Availability zones to use for nodes. This can only be specified if the AgentPoolType property is 'VirtualMachineScaleSets'.
- Count int - Number of agents (VMs) to host docker containers. Allowed values must be in the range of 0 to 1000 (inclusive) for user pools and in the range of 1 to 1000 (inclusive) for system pools. The default value is 1.
- CreationData Pulumi.AzureNative.ContainerService.Inputs.CreationData - CreationData to be used to specify the source Snapshot ID if the node pool will be created/upgraded using a snapshot.
- EnableAutoScaling bool - Whether to enable the auto-scaler (a short sketch combining the auto-scaling inputs follows this list).
- EnableEncryptionAtHost bool - This is only supported on certain VM sizes and in certain Azure regions. For more information, see: https://docs.microsoft.com/azure/aks/enable-host-encryption
- EnableFIPS bool - See Add a FIPS-enabled node pool for more details.
- EnableNodePublicIP bool - Some scenarios may require nodes in a node pool to receive their own dedicated public IP addresses. A common scenario is for gaming workloads, where a console needs to make a direct connection to a cloud virtual machine to minimize hops. For more information see assigning a public IP per node. The default is false.
- EnableUltraSSD bool - Whether to enable UltraSSD.
- GpuInstanceProfile string | Pulumi.AzureNative.ContainerService.GPUInstanceProfile - GPUInstanceProfile to be used to specify GPU MIG instance profile for supported GPU VM SKU.
- HostGroupID string - This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}. For more information see Azure dedicated hosts.
- KubeletConfig Pulumi.AzureNative.ContainerService.Inputs.KubeletConfig - The Kubelet configuration on the agent pool nodes.
- KubeletDiskType string | Pulumi.AzureNative.ContainerService.KubeletDiskType - Determines the placement of emptyDir volumes, container runtime data root, and Kubelet ephemeral storage.
- LinuxOSConfig Pulumi.AzureNative.ContainerService.Inputs.LinuxOSConfig - The OS configuration of Linux agent nodes.
- MaxCount int - The maximum number of nodes for auto-scaling.
- MaxPods int - The maximum number of pods that can run on a node.
- MinCount int - The minimum number of nodes for auto-scaling.
- Mode string | Pulumi.AzureNative.ContainerService.AgentPoolMode - A cluster must have at least one 'System' Agent Pool at all times. For additional information on agent pool restrictions and best practices, see: https://docs.microsoft.com/azure/aks/use-system-pools
- NodeLabels Dictionary<string, string> - The node labels to be persisted across all nodes in the agent pool.
- NodePublicIPPrefixID string - This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIPPrefixName}
- NodeTaints List<string> - The taints added to new nodes during node pool create and scale. For example, key=value:NoSchedule.
- OrchestratorVersion string - Both patch version <major.minor.patch> (e.g. 1.20.13) and <major.minor> (e.g. 1.20) are supported. When <major.minor> is specified, the latest supported GA patch version is chosen automatically. Updating the cluster with the same <major.minor> once it has been created (e.g. 1.14.x -> 1.14) will not trigger an upgrade, even if a newer patch version is available. As a best practice, you should upgrade all node pools in an AKS cluster to the same Kubernetes version. The node pool version must have the same major version as the control plane. The node pool minor version must be within two minor versions of the control plane version. The node pool version cannot be greater than the control plane version. For more information see upgrading a node pool.
- OsDiskSizeGB int - OS Disk Size in GB to be used to specify the disk size for every machine in the master/agent pool. If you specify 0, it will apply the default osDisk size according to the vmSize specified.
- OsDiskType string | Pulumi.AzureNative.ContainerService.OSDiskType - The default is 'Ephemeral' if the VM supports it and has a cache disk larger than the requested OSDiskSizeGB. Otherwise, defaults to 'Managed'. May not be changed after creation. For more information see Ephemeral OS.
- OsSKU string | Pulumi.AzureNative.ContainerService.OSSKU - Specifies the OS SKU used by the agent pool. The default is Ubuntu if OSType is Linux. The default is Windows2019 when Kubernetes <= 1.24 or Windows2022 when Kubernetes >= 1.25 if OSType is Windows.
- OsType string | Pulumi.AzureNative.ContainerService.OSType - The operating system type. The default is Linux.
- PodSubnetID string - If omitted, pod IPs are statically assigned on the node subnet (see vnetSubnetID for more details). This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}
- PowerState Pulumi.AzureNative.ContainerService.Inputs.PowerState - When an Agent Pool is first created it is initially Running. The Agent Pool can be stopped by setting this field to Stopped. A stopped Agent Pool stops all of its VMs and does not accrue billing charges. An Agent Pool can only be stopped if it is Running and its provisioning state is Succeeded.
- ProximityPlacementGroupID string - The ID for Proximity Placement Group.
- ScaleDownMode string | Pulumi.AzureNative.ContainerService.ScaleDownMode - This also affects the cluster autoscaler behavior. If not specified, it defaults to Delete.
- ScaleSetEvictionPolicy string | Pulumi.AzureNative.ContainerService.ScaleSetEvictionPolicy - This cannot be specified unless the scaleSetPriority is 'Spot'. If not specified, the default is 'Delete'.
- ScaleSetPriority string | Pulumi.AzureNative.ContainerService.ScaleSetPriority - The Virtual Machine Scale Set priority. If not specified, the default is 'Regular'.
- SpotMaxPrice double - Possible values are any decimal value greater than zero or -1 which indicates the willingness to pay any on-demand price. For more details on spot pricing, see spot VMs pricing.
- Tags Dictionary<string, string> - The tags to be persisted on the agent pool virtual machine scale set.
- Type string | Pulumi.AzureNative.ContainerService.AgentPoolType - The type of Agent Pool.
- UpgradeSettings Pulumi.AzureNative.ContainerService.Inputs.AgentPoolUpgradeSettings - Settings for upgrading the agent pool.
- VmSize string - VM size availability varies by region. If a node contains insufficient compute resources (memory, cpu, etc) pods might fail to run correctly. For more details on restricted VM sizes, see: https://docs.microsoft.com/azure/aks/quotas-skus-regions
- VnetSubnetID string - If this is not specified, a VNET and subnet will be generated and used. If no podSubnetID is specified, this applies to nodes and pods, otherwise it applies to just nodes. This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}
- WorkloadRuntime string | Pulumi.AzureNative.ContainerService.WorkloadRuntime - Determines the type of workload a node can run.
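As a rough illustration of how the auto-scaling inputs above (enableAutoScaling, minCount, maxCount, count, and mode) fit together, the following is a minimal TypeScript sketch of a zonal User-mode pool managed by the cluster autoscaler; the names and sizes are placeholders, not values taken from this page:
import * as azure_native from "@pulumi/azure-native";
// Hypothetical user pool that the cluster autoscaler can scale between 1 and 5 nodes.
const userPool = new azure_native.containerservice.AgentPool("userPool", {
    resourceGroupName: "rg1",
    resourceName: "clustername1",
    agentPoolName: "userpool1",
    mode: azure_native.containerservice.AgentPoolMode.User,
    vmSize: "Standard_DS2_v2",
    availabilityZones: ["1", "2", "3"],
    enableAutoScaling: true,
    minCount: 1,
    maxCount: 5,
    count: 1, // initial size; kept within [minCount, maxCount] once auto-scaling is on
});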
- ResourceGroupName string - The name of the resource group. The name is case insensitive.
- ResourceName string - The name of the managed cluster resource.
- AgentPoolName string - The name of the agent pool.
- AvailabilityZones []string - The list of Availability zones to use for nodes. This can only be specified if the AgentPoolType property is 'VirtualMachineScaleSets'.
- Count int - Number of agents (VMs) to host docker containers. Allowed values must be in the range of 0 to 1000 (inclusive) for user pools and in the range of 1 to 1000 (inclusive) for system pools. The default value is 1.
- CreationData CreationDataArgs - CreationData to be used to specify the source Snapshot ID if the node pool will be created/upgraded using a snapshot.
- EnableAutoScaling bool - Whether to enable the auto-scaler.
- EnableEncryptionAtHost bool - This is only supported on certain VM sizes and in certain Azure regions. For more information, see: https://docs.microsoft.com/azure/aks/enable-host-encryption
- EnableFIPS bool - See Add a FIPS-enabled node pool for more details.
- EnableNodePublicIP bool - Some scenarios may require nodes in a node pool to receive their own dedicated public IP addresses. A common scenario is for gaming workloads, where a console needs to make a direct connection to a cloud virtual machine to minimize hops. For more information see assigning a public IP per node. The default is false.
- EnableUltraSSD bool - Whether to enable UltraSSD.
- GpuInstanceProfile string | GPUInstanceProfile - GPUInstanceProfile to be used to specify GPU MIG instance profile for supported GPU VM SKU.
- HostGroupID string - This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}. For more information see Azure dedicated hosts.
- KubeletConfig KubeletConfigArgs - The Kubelet configuration on the agent pool nodes.
- KubeletDiskType string | KubeletDiskType - Determines the placement of emptyDir volumes, container runtime data root, and Kubelet ephemeral storage.
- LinuxOSConfig LinuxOSConfigArgs - The OS configuration of Linux agent nodes.
- MaxCount int - The maximum number of nodes for auto-scaling.
- MaxPods int - The maximum number of pods that can run on a node.
- MinCount int - The minimum number of nodes for auto-scaling.
- Mode string | AgentPoolMode - A cluster must have at least one 'System' Agent Pool at all times. For additional information on agent pool restrictions and best practices, see: https://docs.microsoft.com/azure/aks/use-system-pools
- NodeLabels map[string]string - The node labels to be persisted across all nodes in the agent pool.
- NodePublicIPPrefixID string - This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIPPrefixName}
- NodeTaints []string - The taints added to new nodes during node pool create and scale. For example, key=value:NoSchedule.
- OrchestratorVersion string - Both patch version <major.minor.patch> (e.g. 1.20.13) and <major.minor> (e.g. 1.20) are supported. When <major.minor> is specified, the latest supported GA patch version is chosen automatically. Updating the cluster with the same <major.minor> once it has been created (e.g. 1.14.x -> 1.14) will not trigger an upgrade, even if a newer patch version is available. As a best practice, you should upgrade all node pools in an AKS cluster to the same Kubernetes version. The node pool version must have the same major version as the control plane. The node pool minor version must be within two minor versions of the control plane version. The node pool version cannot be greater than the control plane version. For more information see upgrading a node pool.
- OsDiskSizeGB int - OS Disk Size in GB to be used to specify the disk size for every machine in the master/agent pool. If you specify 0, it will apply the default osDisk size according to the vmSize specified.
- OsDiskType string | OSDiskType - The default is 'Ephemeral' if the VM supports it and has a cache disk larger than the requested OSDiskSizeGB. Otherwise, defaults to 'Managed'. May not be changed after creation. For more information see Ephemeral OS.
- OsSKU string | OSSKU - Specifies the OS SKU used by the agent pool. The default is Ubuntu if OSType is Linux. The default is Windows2019 when Kubernetes <= 1.24 or Windows2022 when Kubernetes >= 1.25 if OSType is Windows.
- OsType string | OSType - The operating system type. The default is Linux.
- PodSubnetID string - If omitted, pod IPs are statically assigned on the node subnet (see vnetSubnetID for more details). This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}
- PowerState PowerStateArgs - When an Agent Pool is first created it is initially Running. The Agent Pool can be stopped by setting this field to Stopped. A stopped Agent Pool stops all of its VMs and does not accrue billing charges. An Agent Pool can only be stopped if it is Running and its provisioning state is Succeeded.
- ProximityPlacementGroupID string - The ID for Proximity Placement Group.
- ScaleDownMode string | ScaleDownMode - This also affects the cluster autoscaler behavior. If not specified, it defaults to Delete.
- ScaleSetEvictionPolicy string | ScaleSetEvictionPolicy - This cannot be specified unless the scaleSetPriority is 'Spot'. If not specified, the default is 'Delete'.
- ScaleSetPriority string | ScaleSetPriority - The Virtual Machine Scale Set priority. If not specified, the default is 'Regular'.
- SpotMaxPrice float64 - Possible values are any decimal value greater than zero or -1 which indicates the willingness to pay any on-demand price. For more details on spot pricing, see spot VMs pricing.
- Tags map[string]string - The tags to be persisted on the agent pool virtual machine scale set.
- Type string | AgentPoolType - The type of Agent Pool.
- UpgradeSettings AgentPoolUpgradeSettingsArgs - Settings for upgrading the agent pool.
- VmSize string - VM size availability varies by region. If a node contains insufficient compute resources (memory, cpu, etc) pods might fail to run correctly. For more details on restricted VM sizes, see: https://docs.microsoft.com/azure/aks/quotas-skus-regions
- VnetSubnetID string - If this is not specified, a VNET and subnet will be generated and used. If no podSubnetID is specified, this applies to nodes and pods, otherwise it applies to just nodes. This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}
- WorkloadRuntime string | WorkloadRuntime - Determines the type of workload a node can run.
- resourceGroupName String - The name of the resource group. The name is case insensitive.
- resourceName String - The name of the managed cluster resource.
- agentPoolName String - The name of the agent pool.
- availabilityZones List<String> - The list of Availability zones to use for nodes. This can only be specified if the AgentPoolType property is 'VirtualMachineScaleSets'.
- count Integer - Number of agents (VMs) to host docker containers. Allowed values must be in the range of 0 to 1000 (inclusive) for user pools and in the range of 1 to 1000 (inclusive) for system pools. The default value is 1.
- creationData CreationData - CreationData to be used to specify the source Snapshot ID if the node pool will be created/upgraded using a snapshot.
- enableAutoScaling Boolean - Whether to enable the auto-scaler.
- enableEncryptionAtHost Boolean - This is only supported on certain VM sizes and in certain Azure regions. For more information, see: https://docs.microsoft.com/azure/aks/enable-host-encryption
- enableFIPS Boolean - See Add a FIPS-enabled node pool for more details.
- enableNodePublicIP Boolean - Some scenarios may require nodes in a node pool to receive their own dedicated public IP addresses. A common scenario is for gaming workloads, where a console needs to make a direct connection to a cloud virtual machine to minimize hops. For more information see assigning a public IP per node. The default is false.
- enableUltraSSD Boolean - Whether to enable UltraSSD.
- gpuInstanceProfile String | GPUInstanceProfile - GPUInstanceProfile to be used to specify GPU MIG instance profile for supported GPU VM SKU.
- hostGroupID String - This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}. For more information see Azure dedicated hosts.
- kubeletConfig KubeletConfig - The Kubelet configuration on the agent pool nodes.
- kubeletDiskType String | KubeletDiskType - Determines the placement of emptyDir volumes, container runtime data root, and Kubelet ephemeral storage.
- linuxOSConfig LinuxOSConfig - The OS configuration of Linux agent nodes.
- maxCount Integer - The maximum number of nodes for auto-scaling.
- maxPods Integer - The maximum number of pods that can run on a node.
- minCount Integer - The minimum number of nodes for auto-scaling.
- mode String | AgentPoolMode - A cluster must have at least one 'System' Agent Pool at all times. For additional information on agent pool restrictions and best practices, see: https://docs.microsoft.com/azure/aks/use-system-pools
- nodeLabels Map<String,String> - The node labels to be persisted across all nodes in the agent pool.
- nodePublicIPPrefixID String - This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIPPrefixName}
- nodeTaints List<String> - The taints added to new nodes during node pool create and scale. For example, key=value:NoSchedule.
- orchestratorVersion String - Both patch version <major.minor.patch> (e.g. 1.20.13) and <major.minor> (e.g. 1.20) are supported. When <major.minor> is specified, the latest supported GA patch version is chosen automatically. Updating the cluster with the same <major.minor> once it has been created (e.g. 1.14.x -> 1.14) will not trigger an upgrade, even if a newer patch version is available. As a best practice, you should upgrade all node pools in an AKS cluster to the same Kubernetes version. The node pool version must have the same major version as the control plane. The node pool minor version must be within two minor versions of the control plane version. The node pool version cannot be greater than the control plane version. For more information see upgrading a node pool.
- osDiskSizeGB Integer - OS Disk Size in GB to be used to specify the disk size for every machine in the master/agent pool. If you specify 0, it will apply the default osDisk size according to the vmSize specified.
- osDiskType String | OSDiskType - The default is 'Ephemeral' if the VM supports it and has a cache disk larger than the requested OSDiskSizeGB. Otherwise, defaults to 'Managed'. May not be changed after creation. For more information see Ephemeral OS.
- osSKU String | OSSKU - Specifies the OS SKU used by the agent pool. The default is Ubuntu if OSType is Linux. The default is Windows2019 when Kubernetes <= 1.24 or Windows2022 when Kubernetes >= 1.25 if OSType is Windows.
- osType String | OSType - The operating system type. The default is Linux.
- podSubnetID String - If omitted, pod IPs are statically assigned on the node subnet (see vnetSubnetID for more details). This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}
- powerState PowerState - When an Agent Pool is first created it is initially Running. The Agent Pool can be stopped by setting this field to Stopped. A stopped Agent Pool stops all of its VMs and does not accrue billing charges. An Agent Pool can only be stopped if it is Running and its provisioning state is Succeeded.
- proximityPlacementGroupID String - The ID for Proximity Placement Group.
- scaleDownMode String | ScaleDownMode - This also affects the cluster autoscaler behavior. If not specified, it defaults to Delete.
- scaleSetEvictionPolicy String | ScaleSetEvictionPolicy - This cannot be specified unless the scaleSetPriority is 'Spot'. If not specified, the default is 'Delete'.
- scaleSetPriority String | ScaleSetPriority - The Virtual Machine Scale Set priority. If not specified, the default is 'Regular'.
- spotMaxPrice Double - Possible values are any decimal value greater than zero or -1 which indicates the willingness to pay any on-demand price. For more details on spot pricing, see spot VMs pricing.
- tags Map<String,String> - The tags to be persisted on the agent pool virtual machine scale set.
- type String | AgentPoolType - The type of Agent Pool.
- upgradeSettings AgentPoolUpgradeSettings - Settings for upgrading the agent pool.
- vmSize String - VM size availability varies by region. If a node contains insufficient compute resources (memory, cpu, etc) pods might fail to run correctly. For more details on restricted VM sizes, see: https://docs.microsoft.com/azure/aks/quotas-skus-regions
- vnetSubnetID String - If this is not specified, a VNET and subnet will be generated and used. If no podSubnetID is specified, this applies to nodes and pods, otherwise it applies to just nodes. This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}
- workloadRuntime String | WorkloadRuntime - Determines the type of workload a node can run.
- resourceGroupName string - The name of the resource group. The name is case insensitive.
- resourceName string - The name of the managed cluster resource.
- agentPoolName string - The name of the agent pool.
- availabilityZones string[] - The list of Availability zones to use for nodes. This can only be specified if the AgentPoolType property is 'VirtualMachineScaleSets'.
- count number - Number of agents (VMs) to host docker containers. Allowed values must be in the range of 0 to 1000 (inclusive) for user pools and in the range of 1 to 1000 (inclusive) for system pools. The default value is 1.
- creationData CreationData - CreationData to be used to specify the source Snapshot ID if the node pool will be created/upgraded using a snapshot.
- enableAutoScaling boolean - Whether to enable the auto-scaler.
- enableEncryptionAtHost boolean - This is only supported on certain VM sizes and in certain Azure regions. For more information, see: https://docs.microsoft.com/azure/aks/enable-host-encryption
- enableFIPS boolean - See Add a FIPS-enabled node pool for more details.
- enableNodePublicIP boolean - Some scenarios may require nodes in a node pool to receive their own dedicated public IP addresses. A common scenario is for gaming workloads, where a console needs to make a direct connection to a cloud virtual machine to minimize hops. For more information see assigning a public IP per node. The default is false.
- enableUltraSSD boolean - Whether to enable UltraSSD.
- gpuInstanceProfile string | GPUInstanceProfile - GPUInstanceProfile to be used to specify GPU MIG instance profile for supported GPU VM SKU.
- hostGroupID string - This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}. For more information see Azure dedicated hosts.
- kubeletConfig KubeletConfig - The Kubelet configuration on the agent pool nodes.
- kubeletDiskType string | KubeletDiskType - Determines the placement of emptyDir volumes, container runtime data root, and Kubelet ephemeral storage.
- linuxOSConfig LinuxOSConfig - The OS configuration of Linux agent nodes.
- maxCount number - The maximum number of nodes for auto-scaling.
- maxPods number - The maximum number of pods that can run on a node.
- minCount number - The minimum number of nodes for auto-scaling.
- mode string | AgentPoolMode - A cluster must have at least one 'System' Agent Pool at all times. For additional information on agent pool restrictions and best practices, see: https://docs.microsoft.com/azure/aks/use-system-pools
- nodeLabels {[key: string]: string} - The node labels to be persisted across all nodes in the agent pool.
- nodePublicIPPrefixID string - This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIPPrefixName}
- nodeTaints string[] - The taints added to new nodes during node pool create and scale. For example, key=value:NoSchedule.
- orchestratorVersion string - Both patch version <major.minor.patch> (e.g. 1.20.13) and <major.minor> (e.g. 1.20) are supported. When <major.minor> is specified, the latest supported GA patch version is chosen automatically. Updating the cluster with the same <major.minor> once it has been created (e.g. 1.14.x -> 1.14) will not trigger an upgrade, even if a newer patch version is available. As a best practice, you should upgrade all node pools in an AKS cluster to the same Kubernetes version. The node pool version must have the same major version as the control plane. The node pool minor version must be within two minor versions of the control plane version. The node pool version cannot be greater than the control plane version. For more information see upgrading a node pool.
- osDiskSizeGB number - OS Disk Size in GB to be used to specify the disk size for every machine in the master/agent pool. If you specify 0, it will apply the default osDisk size according to the vmSize specified.
- osDiskType string | OSDiskType - The default is 'Ephemeral' if the VM supports it and has a cache disk larger than the requested OSDiskSizeGB. Otherwise, defaults to 'Managed'. May not be changed after creation. For more information see Ephemeral OS.
- osSKU string | OSSKU - Specifies the OS SKU used by the agent pool. The default is Ubuntu if OSType is Linux. The default is Windows2019 when Kubernetes <= 1.24 or Windows2022 when Kubernetes >= 1.25 if OSType is Windows.
- osType string | OSType - The operating system type. The default is Linux.
- podSubnetID string - If omitted, pod IPs are statically assigned on the node subnet (see vnetSubnetID for more details). This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}
- powerState PowerState - When an Agent Pool is first created it is initially Running. The Agent Pool can be stopped by setting this field to Stopped. A stopped Agent Pool stops all of its VMs and does not accrue billing charges. An Agent Pool can only be stopped if it is Running and its provisioning state is Succeeded.
- proximityPlacementGroupID string - The ID for Proximity Placement Group.
- scaleDownMode string | ScaleDownMode - This also affects the cluster autoscaler behavior. If not specified, it defaults to Delete.
- scaleSetEvictionPolicy string | ScaleSetEvictionPolicy - This cannot be specified unless the scaleSetPriority is 'Spot'. If not specified, the default is 'Delete'.
- scaleSetPriority string | ScaleSetPriority - The Virtual Machine Scale Set priority. If not specified, the default is 'Regular'.
- spotMaxPrice number - Possible values are any decimal value greater than zero or -1 which indicates the willingness to pay any on-demand price. For more details on spot pricing, see spot VMs pricing.
- tags {[key: string]: string} - The tags to be persisted on the agent pool virtual machine scale set.
- type string | AgentPoolType - The type of Agent Pool.
- upgradeSettings AgentPoolUpgradeSettings - Settings for upgrading the agent pool.
- vmSize string - VM size availability varies by region. If a node contains insufficient compute resources (memory, cpu, etc) pods might fail to run correctly. For more details on restricted VM sizes, see: https://docs.microsoft.com/azure/aks/quotas-skus-regions
- vnetSubnetID string - If this is not specified, a VNET and subnet will be generated and used. If no podSubnetID is specified, this applies to nodes and pods, otherwise it applies to just nodes. This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}
- workloadRuntime string | WorkloadRuntime - Determines the type of workload a node can run.
- resource_group_name str - The name of the resource group. The name is case insensitive.
- resource_name str - The name of the managed cluster resource.
- agent_pool_name str - The name of the agent pool.
- availability_zones Sequence[str] - The list of Availability zones to use for nodes. This can only be specified if the AgentPoolType property is 'VirtualMachineScaleSets'.
- count int - Number of agents (VMs) to host docker containers. Allowed values must be in the range of 0 to 1000 (inclusive) for user pools and in the range of 1 to 1000 (inclusive) for system pools. The default value is 1.
- creation_data CreationDataArgs - CreationData to be used to specify the source Snapshot ID if the node pool will be created/upgraded using a snapshot.
- enable_auto_scaling bool - Whether to enable auto-scaler
- enable_encryption_at_host bool - This is only supported on certain VM sizes and in certain Azure regions. For more information, see: https://docs.microsoft.com/azure/aks/enable-host-encryption
- enable_fips bool - See Add a FIPS-enabled node pool for more details.
- enable_node_public_ip bool - Some scenarios may require nodes in a node pool to receive their own dedicated public IP addresses. A common scenario is for gaming workloads, where a console needs to make a direct connection to a cloud virtual machine to minimize hops. For more information see assigning a public IP per node. The default is false.
- enable_ultra_ssd bool - Whether to enable UltraSSD
- gpu_instance_profile str | GPUInstanceProfile - GPUInstanceProfile to be used to specify GPU MIG instance profile for supported GPU VM SKU.
- host_group_id str - This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}. For more information see Azure dedicated hosts.
- kubelet_config KubeletConfigArgs - The Kubelet configuration on the agent pool nodes.
- kubelet_disk_type str | KubeletDiskType - Determines the placement of emptyDir volumes, container runtime data root, and Kubelet ephemeral storage.
- linux_os_config LinuxOSConfigArgs - The OS configuration of Linux agent nodes.
- max_count int - The maximum number of nodes for auto-scaling
- max_pods int - The maximum number of pods that can run on a node.
- min_count int - The minimum number of nodes for auto-scaling
- mode str | AgentPoolMode - A cluster must have at least one 'System' Agent Pool at all times. For additional information on agent pool restrictions and best practices, see: https://docs.microsoft.com/azure/aks/use-system-pools
- node_labels Mapping[str, str] - The node labels to be persisted across all nodes in agent pool.
- node_public_ip_prefix_id str - This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIPPrefixName}
- node_taints Sequence[str] - The taints added to new nodes during node pool create and scale. For example, key=value:NoSchedule.
- orchestrator_version str - Both patch version <major.minor.patch> (e.g. 1.20.13) and <major.minor> (e.g. 1.20) are supported. When <major.minor> is specified, the latest supported GA patch version is chosen automatically. Updating the cluster with the same <major.minor> once it has been created (e.g. 1.14.x -> 1.14) will not trigger an upgrade, even if a newer patch version is available. As a best practice, you should upgrade all node pools in an AKS cluster to the same Kubernetes version. The node pool version must have the same major version as the control plane. The node pool minor version must be within two minor versions of the control plane version. The node pool version cannot be greater than the control plane version. For more information see upgrading a node pool.
- os_disk_size_gb int - OS Disk Size in GB to be used to specify the disk size for every machine in the master/agent pool. If you specify 0, it will apply the default osDisk size according to the vmSize specified.
- os_disk_type str | OSDiskType - The default is 'Ephemeral' if the VM supports it and has a cache disk larger than the requested OSDiskSizeGB. Otherwise, defaults to 'Managed'. May not be changed after creation. For more information see Ephemeral OS.
- os_sku str | OSSKU - Specifies the OS SKU used by the agent pool. The default is Ubuntu if OSType is Linux. The default is Windows2019 when Kubernetes <= 1.24 or Windows2022 when Kubernetes >= 1.25 if OSType is Windows.
- os_type str | OSType - The operating system type. The default is Linux.
- pod_subnet_id str - If omitted, pod IPs are statically assigned on the node subnet (see vnetSubnetID for more details). This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}
- power_state PowerStateArgs - When an Agent Pool is first created it is initially Running. The Agent Pool can be stopped by setting this field to Stopped. A stopped Agent Pool stops all of its VMs and does not accrue billing charges. An Agent Pool can only be stopped if it is Running and provisioning state is Succeeded
- proximity_placement_group_id str - The ID for Proximity Placement Group.
- scale_down_mode str | ScaleDownMode - This also affects the cluster autoscaler behavior. If not specified, it defaults to Delete.
- scale_set_eviction_policy str | ScaleSetEvictionPolicy - This cannot be specified unless the scaleSetPriority is 'Spot'. If not specified, the default is 'Delete'.
- scale_set_priority str | ScaleSetPriority - The Virtual Machine Scale Set priority. If not specified, the default is 'Regular'.
- spot_max_price float - Possible values are any decimal value greater than zero or -1 which indicates the willingness to pay any on-demand price. For more details on spot pricing, see spot VMs pricing
- tags Mapping[str, str] - The tags to be persisted on the agent pool virtual machine scale set.
- type str | AgentPoolType - The type of Agent Pool.
- upgrade_settings AgentPoolUpgradeSettingsArgs - Settings for upgrading the agentpool
- vm_size str - VM size availability varies by region. If a node contains insufficient compute resources (memory, cpu, etc) pods might fail to run correctly. For more details on restricted VM sizes, see: https://docs.microsoft.com/azure/aks/quotas-skus-regions
- vnet_subnet_id str - If this is not specified, a VNET and subnet will be generated and used. If no podSubnetID is specified, this applies to nodes and pods, otherwise it applies to just nodes. This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}
- workload_runtime str | WorkloadRuntime - Determines the type of workload a node can run.
- resourceGroupName String - The name of the resource group. The name is case insensitive.
- resourceName String - The name of the managed cluster resource.
- agentPoolName String - The name of the agent pool.
- availabilityZones List<String> - The list of Availability zones to use for nodes. This can only be specified if the AgentPoolType property is 'VirtualMachineScaleSets'.
- count Number - Number of agents (VMs) to host docker containers. Allowed values must be in the range of 0 to 1000 (inclusive) for user pools and in the range of 1 to 1000 (inclusive) for system pools. The default value is 1.
- creationData Property Map - CreationData to be used to specify the source Snapshot ID if the node pool will be created/upgraded using a snapshot.
- enableAutoScaling Boolean - Whether to enable auto-scaler
- enableEncryptionAtHost Boolean - This is only supported on certain VM sizes and in certain Azure regions. For more information, see: https://docs.microsoft.com/azure/aks/enable-host-encryption
- enableFIPS Boolean - See Add a FIPS-enabled node pool for more details.
- enableNodePublicIP Boolean - Some scenarios may require nodes in a node pool to receive their own dedicated public IP addresses. A common scenario is for gaming workloads, where a console needs to make a direct connection to a cloud virtual machine to minimize hops. For more information see assigning a public IP per node. The default is false.
- enableUltraSSD Boolean - Whether to enable UltraSSD
- gpuInstanceProfile String | "MIG1g" | "MIG2g" | "MIG3g" | "MIG4g" | "MIG7g" - GPUInstanceProfile to be used to specify GPU MIG instance profile for supported GPU VM SKU.
- hostGroupID String - This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}. For more information see Azure dedicated hosts.
- kubeletConfig Property Map - The Kubelet configuration on the agent pool nodes.
- kubeletDiskType String | "OS" | "Temporary" - Determines the placement of emptyDir volumes, container runtime data root, and Kubelet ephemeral storage.
- linuxOSConfig Property Map - The OS configuration of Linux agent nodes.
- maxCount Number - The maximum number of nodes for auto-scaling
- maxPods Number - The maximum number of pods that can run on a node.
- minCount Number - The minimum number of nodes for auto-scaling
- mode String | "System" | "User" - A cluster must have at least one 'System' Agent Pool at all times. For additional information on agent pool restrictions and best practices, see: https://docs.microsoft.com/azure/aks/use-system-pools
- nodeLabels Map<String> - The node labels to be persisted across all nodes in agent pool.
- nodePublicIPPrefixID String - This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIPPrefixName}
- nodeTaints List<String> - The taints added to new nodes during node pool create and scale. For example, key=value:NoSchedule.
- orchestratorVersion String - Both patch version <major.minor.patch> (e.g. 1.20.13) and <major.minor> (e.g. 1.20) are supported. When <major.minor> is specified, the latest supported GA patch version is chosen automatically. Updating the cluster with the same <major.minor> once it has been created (e.g. 1.14.x -> 1.14) will not trigger an upgrade, even if a newer patch version is available. As a best practice, you should upgrade all node pools in an AKS cluster to the same Kubernetes version. The node pool version must have the same major version as the control plane. The node pool minor version must be within two minor versions of the control plane version. The node pool version cannot be greater than the control plane version. For more information see upgrading a node pool.
- osDiskSizeGB Number - OS Disk Size in GB to be used to specify the disk size for every machine in the master/agent pool. If you specify 0, it will apply the default osDisk size according to the vmSize specified.
- osDiskType String | "Managed" | "Ephemeral" - The default is 'Ephemeral' if the VM supports it and has a cache disk larger than the requested OSDiskSizeGB. Otherwise, defaults to 'Managed'. May not be changed after creation. For more information see Ephemeral OS.
- osSKU String | "Ubuntu" | "AzureLinux" | "CBLMariner" | "Windows2019" | "Windows2022" - Specifies the OS SKU used by the agent pool. The default is Ubuntu if OSType is Linux. The default is Windows2019 when Kubernetes <= 1.24 or Windows2022 when Kubernetes >= 1.25 if OSType is Windows.
- osType String | "Linux" | "Windows" - The operating system type. The default is Linux.
- podSubnetID String - If omitted, pod IPs are statically assigned on the node subnet (see vnetSubnetID for more details). This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}
- powerState Property Map - When an Agent Pool is first created it is initially Running. The Agent Pool can be stopped by setting this field to Stopped. A stopped Agent Pool stops all of its VMs and does not accrue billing charges. An Agent Pool can only be stopped if it is Running and provisioning state is Succeeded
- proximityPlacementGroupID String - The ID for Proximity Placement Group.
- scaleDownMode String | "Delete" | "Deallocate" - This also affects the cluster autoscaler behavior. If not specified, it defaults to Delete.
- scaleSetEvictionPolicy String | "Delete" | "Deallocate" - This cannot be specified unless the scaleSetPriority is 'Spot'. If not specified, the default is 'Delete'.
- scaleSetPriority String | "Spot" | "Regular" - The Virtual Machine Scale Set priority. If not specified, the default is 'Regular'.
- spotMaxPrice Number - Possible values are any decimal value greater than zero or -1 which indicates the willingness to pay any on-demand price. For more details on spot pricing, see spot VMs pricing
- tags Map<String> - The tags to be persisted on the agent pool virtual machine scale set.
- type String | "VirtualMachineScaleSets" | "AvailabilitySet" - The type of Agent Pool.
- upgradeSettings Property Map - Settings for upgrading the agentpool
- vmSize String - VM size availability varies by region. If a node contains insufficient compute resources (memory, cpu, etc) pods might fail to run correctly. For more details on restricted VM sizes, see: https://docs.microsoft.com/azure/aks/quotas-skus-regions
- vnetSubnetID String - If this is not specified, a VNET and subnet will be generated and used. If no podSubnetID is specified, this applies to nodes and pods, otherwise it applies to just nodes. This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}
- workloadRuntime String | "OCIContainer" | "WasmWasi" - Determines the type of workload a node can run.
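As a worked example of several of the inputs above, the following Python sketch creates an autoscaling, Spot-priced user node pool using the snake_case property names listed above. The resource group, cluster name, VM size, and taint value are placeholders chosen for illustration, not values taken from this reference.
import pulumi_azure_native as azure_native

# Hypothetical resource group / managed cluster names; adjust to your environment.
spot_pool = azure_native.containerservice.AgentPool("spotPool",
    resource_group_name="rg1",
    resource_name="clustername1",
    agent_pool_name="spotpool1",
    mode="User",                          # user pools host application pods
    vm_size="Standard_DS2_v2",
    os_type="Linux",
    enable_auto_scaling=True,             # requires min_count/max_count
    min_count=1,
    max_count=5,
    count=1,
    scale_set_priority="Spot",            # Spot-priced Virtual Machine Scale Set
    scale_set_eviction_policy="Delete",   # only valid when scale_set_priority is 'Spot'
    spot_max_price=-1,                    # -1 = willing to pay up to the on-demand price
    node_taints=["kubernetes.azure.com/scalesetpriority=spot:NoSchedule"])  # example taint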
Outputs
All input properties are implicitly available as output properties. Additionally, the AgentPool resource produces the following output properties:
- CurrentOrchestratorVersion string - If orchestratorVersion is a fully specified version <major.minor.patch>, this field will be exactly equal to it. If orchestratorVersion is <major.minor>, this field will contain the full <major.minor.patch> version being used.
- Id string - The provider-assigned unique ID for this managed resource.
- Name string - The name of the resource that is unique within a resource group. This name can be used to access the resource.
- NodeImageVersion string - The version of node image
- ProvisioningState string - The current deployment or provisioning state.
- CurrentOrchestratorVersion string - If orchestratorVersion is a fully specified version <major.minor.patch>, this field will be exactly equal to it. If orchestratorVersion is <major.minor>, this field will contain the full <major.minor.patch> version being used.
- Id string - The provider-assigned unique ID for this managed resource.
- Name string - The name of the resource that is unique within a resource group. This name can be used to access the resource.
- NodeImageVersion string - The version of node image
- ProvisioningState string - The current deployment or provisioning state.
- currentOrchestratorVersion String - If orchestratorVersion is a fully specified version <major.minor.patch>, this field will be exactly equal to it. If orchestratorVersion is <major.minor>, this field will contain the full <major.minor.patch> version being used.
- id String - The provider-assigned unique ID for this managed resource.
- name String - The name of the resource that is unique within a resource group. This name can be used to access the resource.
- nodeImageVersion String - The version of node image
- provisioningState String - The current deployment or provisioning state.
- currentOrchestratorVersion string - If orchestratorVersion is a fully specified version <major.minor.patch>, this field will be exactly equal to it. If orchestratorVersion is <major.minor>, this field will contain the full <major.minor.patch> version being used.
- id string - The provider-assigned unique ID for this managed resource.
- name string - The name of the resource that is unique within a resource group. This name can be used to access the resource.
- nodeImageVersion string - The version of node image
- provisioningState string - The current deployment or provisioning state.
- current_orchestrator_version str - If orchestratorVersion is a fully specified version <major.minor.patch>, this field will be exactly equal to it. If orchestratorVersion is <major.minor>, this field will contain the full <major.minor.patch> version being used.
- id str - The provider-assigned unique ID for this managed resource.
- name str - The name of the resource that is unique within a resource group. This name can be used to access the resource.
- node_image_version str - The version of node image
- provisioning_state str - The current deployment or provisioning state.
- currentOrchestratorVersion String - If orchestratorVersion is a fully specified version <major.minor.patch>, this field will be exactly equal to it. If orchestratorVersion is <major.minor>, this field will contain the full <major.minor.patch> version being used.
- id String - The provider-assigned unique ID for this managed resource.
- name String - The name of the resource that is unique within a resource group. This name can be used to access the resource.
- nodeImageVersion String - The version of node image
- provisioningState String - The current deployment or provisioning state.
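The output properties can be read from the resource like any other Pulumi outputs once the pool is provisioned. A minimal Python sketch (the stack export names are arbitrary; the pool arguments mirror the placeholders used elsewhere on this page):
import pulumi
import pulumi_azure_native as azure_native

agent_pool = azure_native.containerservice.AgentPool("agentPool",
    resource_group_name="rg1",
    resource_name="clustername1",
    agent_pool_name="agentpool1",
    count=3,
    vm_size="Standard_DS2_v2")

# The output properties listed above are available on the resource object.
pulumi.export("nodeImageVersion", agent_pool.node_image_version)
pulumi.export("currentOrchestratorVersion", agent_pool.current_orchestrator_version)
pulumi.export("provisioningState", agent_pool.provisioning_state)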
Supporting Types
AgentPoolMode, AgentPoolModeArgs
- System - System agent pools are primarily for hosting critical system pods such as CoreDNS and metrics-server. System agent pools osType must be Linux. System agent pools VM SKU must have at least 2vCPUs and 4GB of memory.
- User - User agent pools are primarily for hosting your application pods.
- AgentPoolModeSystem - System agent pools are primarily for hosting critical system pods such as CoreDNS and metrics-server. System agent pools osType must be Linux. System agent pools VM SKU must have at least 2vCPUs and 4GB of memory.
- AgentPoolModeUser - User agent pools are primarily for hosting your application pods.
- System - System agent pools are primarily for hosting critical system pods such as CoreDNS and metrics-server. System agent pools osType must be Linux. System agent pools VM SKU must have at least 2vCPUs and 4GB of memory.
- User - User agent pools are primarily for hosting your application pods.
- System - System agent pools are primarily for hosting critical system pods such as CoreDNS and metrics-server. System agent pools osType must be Linux. System agent pools VM SKU must have at least 2vCPUs and 4GB of memory.
- User - User agent pools are primarily for hosting your application pods.
- SYSTEM - System agent pools are primarily for hosting critical system pods such as CoreDNS and metrics-server. System agent pools osType must be Linux. System agent pools VM SKU must have at least 2vCPUs and 4GB of memory.
- USER - User agent pools are primarily for hosting your application pods.
- "System" - System agent pools are primarily for hosting critical system pods such as CoreDNS and metrics-server. System agent pools osType must be Linux. System agent pools VM SKU must have at least 2vCPUs and 4GB of memory.
- "User" - User agent pools are primarily for hosting your application pods.
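In the Python SDK the mode can be passed either as the AgentPoolMode constant listed above or as its plain string value. A brief sketch, assuming an existing managed cluster named clustername1 in resource group rg1 (placeholder names):
import pulumi_azure_native as azure_native

system_pool = azure_native.containerservice.AgentPool("systemPool",
    resource_group_name="rg1",
    resource_name="clustername1",
    agent_pool_name="systempool1",
    mode=azure_native.containerservice.AgentPoolMode.SYSTEM,  # or simply "System"
    os_type="Linux",            # system pools must use Linux
    count=3,
    vm_size="Standard_DS2_v2")  # SKU needs at least 2 vCPUs and 4GB of memory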
AgentPoolType, AgentPoolTypeArgs
- VirtualMachineScaleSets - Create an Agent Pool backed by a Virtual Machine Scale Set.
- AvailabilitySet - Use of this is strongly discouraged.
- AgentPoolTypeVirtualMachineScaleSets - Create an Agent Pool backed by a Virtual Machine Scale Set.
- AgentPoolTypeAvailabilitySet - Use of this is strongly discouraged.
- VirtualMachineScaleSets - Create an Agent Pool backed by a Virtual Machine Scale Set.
- AvailabilitySet - Use of this is strongly discouraged.
- VirtualMachineScaleSets - Create an Agent Pool backed by a Virtual Machine Scale Set.
- AvailabilitySet - Use of this is strongly discouraged.
- VIRTUAL_MACHINE_SCALE_SETS - Create an Agent Pool backed by a Virtual Machine Scale Set.
- AVAILABILITY_SET - Use of this is strongly discouraged.
- "VirtualMachineScaleSets" - Create an Agent Pool backed by a Virtual Machine Scale Set.
- "AvailabilitySet" - Use of this is strongly discouraged.
AgentPoolUpgradeSettings, AgentPoolUpgradeSettingsArgs
- MaxSurge string - This can either be set to an integer (e.g. '5') or a percentage (e.g. '50%'). If a percentage is specified, it is the percentage of the total agent pool size at the time of the upgrade. For percentages, fractional nodes are rounded up. If not specified, the default is 1. For more information, including best practices, see: https://docs.microsoft.com/azure/aks/upgrade-cluster#customize-node-surge-upgrade
- MaxSurge string - This can either be set to an integer (e.g. '5') or a percentage (e.g. '50%'). If a percentage is specified, it is the percentage of the total agent pool size at the time of the upgrade. For percentages, fractional nodes are rounded up. If not specified, the default is 1. For more information, including best practices, see: https://docs.microsoft.com/azure/aks/upgrade-cluster#customize-node-surge-upgrade
- maxSurge String - This can either be set to an integer (e.g. '5') or a percentage (e.g. '50%'). If a percentage is specified, it is the percentage of the total agent pool size at the time of the upgrade. For percentages, fractional nodes are rounded up. If not specified, the default is 1. For more information, including best practices, see: https://docs.microsoft.com/azure/aks/upgrade-cluster#customize-node-surge-upgrade
- maxSurge string - This can either be set to an integer (e.g. '5') or a percentage (e.g. '50%'). If a percentage is specified, it is the percentage of the total agent pool size at the time of the upgrade. For percentages, fractional nodes are rounded up. If not specified, the default is 1. For more information, including best practices, see: https://docs.microsoft.com/azure/aks/upgrade-cluster#customize-node-surge-upgrade
- max_surge str - This can either be set to an integer (e.g. '5') or a percentage (e.g. '50%'). If a percentage is specified, it is the percentage of the total agent pool size at the time of the upgrade. For percentages, fractional nodes are rounded up. If not specified, the default is 1. For more information, including best practices, see: https://docs.microsoft.com/azure/aks/upgrade-cluster#customize-node-surge-upgrade
- maxSurge String - This can either be set to an integer (e.g. '5') or a percentage (e.g. '50%'). If a percentage is specified, it is the percentage of the total agent pool size at the time of the upgrade. For percentages, fractional nodes are rounded up. If not specified, the default is 1. For more information, including best practices, see: https://docs.microsoft.com/azure/aks/upgrade-cluster#customize-node-surge-upgrade
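For example, maxSurge can be supplied through the upgradeSettings input when the pool is declared. A Python sketch (the 33% figure and resource names are illustrative assumptions):
import pulumi_azure_native as azure_native

pool = azure_native.containerservice.AgentPool("pool",
    resource_group_name="rg1",
    resource_name="clustername1",
    agent_pool_name="agentpool1",
    count=3,
    vm_size="Standard_DS2_v2",
    upgrade_settings={
        "max_surge": "33%",  # surge up to a third of the pool's nodes during an upgrade
    })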
AgentPoolUpgradeSettingsResponse, AgentPoolUpgradeSettingsResponseArgs
- MaxSurge string - This can either be set to an integer (e.g. '5') or a percentage (e.g. '50%'). If a percentage is specified, it is the percentage of the total agent pool size at the time of the upgrade. For percentages, fractional nodes are rounded up. If not specified, the default is 1. For more information, including best practices, see: https://docs.microsoft.com/azure/aks/upgrade-cluster#customize-node-surge-upgrade
- MaxSurge string - This can either be set to an integer (e.g. '5') or a percentage (e.g. '50%'). If a percentage is specified, it is the percentage of the total agent pool size at the time of the upgrade. For percentages, fractional nodes are rounded up. If not specified, the default is 1. For more information, including best practices, see: https://docs.microsoft.com/azure/aks/upgrade-cluster#customize-node-surge-upgrade
- maxSurge String - This can either be set to an integer (e.g. '5') or a percentage (e.g. '50%'). If a percentage is specified, it is the percentage of the total agent pool size at the time of the upgrade. For percentages, fractional nodes are rounded up. If not specified, the default is 1. For more information, including best practices, see: https://docs.microsoft.com/azure/aks/upgrade-cluster#customize-node-surge-upgrade
- maxSurge string - This can either be set to an integer (e.g. '5') or a percentage (e.g. '50%'). If a percentage is specified, it is the percentage of the total agent pool size at the time of the upgrade. For percentages, fractional nodes are rounded up. If not specified, the default is 1. For more information, including best practices, see: https://docs.microsoft.com/azure/aks/upgrade-cluster#customize-node-surge-upgrade
- max_surge str - This can either be set to an integer (e.g. '5') or a percentage (e.g. '50%'). If a percentage is specified, it is the percentage of the total agent pool size at the time of the upgrade. For percentages, fractional nodes are rounded up. If not specified, the default is 1. For more information, including best practices, see: https://docs.microsoft.com/azure/aks/upgrade-cluster#customize-node-surge-upgrade
- maxSurge String - This can either be set to an integer (e.g. '5') or a percentage (e.g. '50%'). If a percentage is specified, it is the percentage of the total agent pool size at the time of the upgrade. For percentages, fractional nodes are rounded up. If not specified, the default is 1. For more information, including best practices, see: https://docs.microsoft.com/azure/aks/upgrade-cluster#customize-node-surge-upgrade
Code, CodeArgs
- Running - The cluster is running.
- Stopped - The cluster is stopped.
- CodeRunning - The cluster is running.
- CodeStopped - The cluster is stopped.
- Running - The cluster is running.
- Stopped - The cluster is stopped.
- Running - The cluster is running.
- Stopped - The cluster is stopped.
- RUNNING - The cluster is running.
- STOPPED - The cluster is stopped.
- "Running" - The cluster is running.
- "Stopped" - The cluster is stopped.
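This enum supplies the code value inside the powerState input described above. A Python sketch of declaring a pool as Stopped (it assumes the pool is currently Running with provisioning state Succeeded, as the powerState description requires; names are placeholders):
import pulumi_azure_native as azure_native

stopped_pool = azure_native.containerservice.AgentPool("stoppedPool",
    resource_group_name="rg1",
    resource_name="clustername1",
    agent_pool_name="agentpool1",
    count=3,
    vm_size="Standard_DS2_v2",
    power_state={
        "code": "Stopped",  # stops all VMs in the pool; no billing accrues while stopped
    })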
CreationData, CreationDataArgs
- SourceResourceId string - This is the ARM ID of the source object to be used to create the target object.
- SourceResourceId string - This is the ARM ID of the source object to be used to create the target object.
- sourceResourceId String - This is the ARM ID of the source object to be used to create the target object.
- sourceResourceId string - This is the ARM ID of the source object to be used to create the target object.
- source_resource_id str - This is the ARM ID of the source object to be used to create the target object.
- sourceResourceId String - This is the ARM ID of the source object to be used to create the target object.
CreationDataResponse, CreationDataResponseArgs
- SourceResourceId string - This is the ARM ID of the source object to be used to create the target object.
- SourceResourceId string - This is the ARM ID of the source object to be used to create the target object.
- sourceResourceId String - This is the ARM ID of the source object to be used to create the target object.
- sourceResourceId string - This is the ARM ID of the source object to be used to create the target object.
- source_resource_id str - This is the ARM ID of the source object to be used to create the target object.
- sourceResourceId String - This is the ARM ID of the source object to be used to create the target object.
GPUInstanceProfile, GPUInstanceProfileArgs
- MIG1g - MIG1g
- MIG2g - MIG2g
- MIG3g - MIG3g
- MIG4g - MIG4g
- MIG7g - MIG7g
- GPUInstanceProfileMIG1g - MIG1g
- GPUInstanceProfileMIG2g - MIG2g
- GPUInstanceProfileMIG3g - MIG3g
- GPUInstanceProfileMIG4g - MIG4g
- GPUInstanceProfileMIG7g - MIG7g
- MIG1g - MIG1g
- MIG2g - MIG2g
- MIG3g - MIG3g
- MIG4g - MIG4g
- MIG7g - MIG7g
- MIG1g - MIG1g
- MIG2g - MIG2g
- MIG3g - MIG3g
- MIG4g - MIG4g
- MIG7g - MIG7g
- MIG1G - MIG1g
- MIG2G - MIG2g
- MIG3G - MIG3g
- MIG4G - MIG4g
- MIG7G - MIG7g
- "MIG1g" - MIG1g
- "MIG2g" - MIG2g
- "MIG3g" - MIG3g
- "MIG4g" - MIG4g
- "MIG7g" - MIG7g
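A Python sketch of requesting a MIG profile on a GPU node pool. The VM size shown is only an example of a MIG-capable A100 SKU and, like the other names, is an assumption rather than a value from this reference:
import pulumi_azure_native as azure_native

gpu_pool = azure_native.containerservice.AgentPool("gpuPool",
    resource_group_name="rg1",
    resource_name="clustername1",
    agent_pool_name="gpupool1",
    count=1,
    vm_size="Standard_ND96asr_v4",  # example MIG-capable GPU SKU (assumption)
    gpu_instance_profile="MIG2g")   # or azure_native.containerservice.GPUInstanceProfile.MIG2G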
KubeletConfig, KubeletConfigArgs
- AllowedUnsafeSysctls List<string> - Allowed list of unsafe sysctls or unsafe sysctl patterns (ending in *).
- ContainerLogMaxFiles int - The maximum number of container log files that can be present for a container. The number must be ≥ 2.
- ContainerLogMaxSizeMB int - The maximum size (e.g. 10Mi) of container log file before it is rotated.
- CpuCfsQuota bool - The default is true.
- CpuCfsQuotaPeriod string - The default is '100ms'. Valid values are a sequence of decimal numbers with an optional fraction and a unit suffix. For example: '300ms', '2h45m'. Supported units are 'ns', 'us', 'ms', 's', 'm', and 'h'.
- CpuManagerPolicy string - The default is 'none'. See Kubernetes CPU management policies for more information. Allowed values are 'none' and 'static'.
- FailSwapOn bool - If set to true it will make the Kubelet fail to start if swap is enabled on the node.
- ImageGcHighThreshold int - To disable image garbage collection, set to 100. The default is 85%.
- ImageGcLowThreshold int - This cannot be set higher than imageGcHighThreshold. The default is 80%.
- PodMaxPids int - The maximum number of processes per pod.
- TopologyManagerPolicy string - For more information see Kubernetes Topology Manager. The default is 'none'. Allowed values are 'none', 'best-effort', 'restricted', and 'single-numa-node'.
- AllowedUnsafeSysctls []string - Allowed list of unsafe sysctls or unsafe sysctl patterns (ending in *).
- ContainerLogMaxFiles int - The maximum number of container log files that can be present for a container. The number must be ≥ 2.
- ContainerLogMaxSizeMB int - The maximum size (e.g. 10Mi) of container log file before it is rotated.
- CpuCfsQuota bool - The default is true.
- CpuCfsQuotaPeriod string - The default is '100ms'. Valid values are a sequence of decimal numbers with an optional fraction and a unit suffix. For example: '300ms', '2h45m'. Supported units are 'ns', 'us', 'ms', 's', 'm', and 'h'.
- CpuManagerPolicy string - The default is 'none'. See Kubernetes CPU management policies for more information. Allowed values are 'none' and 'static'.
- FailSwapOn bool - If set to true it will make the Kubelet fail to start if swap is enabled on the node.
- ImageGcHighThreshold int - To disable image garbage collection, set to 100. The default is 85%.
- ImageGcLowThreshold int - This cannot be set higher than imageGcHighThreshold. The default is 80%.
- PodMaxPids int - The maximum number of processes per pod.
- TopologyManagerPolicy string - For more information see Kubernetes Topology Manager. The default is 'none'. Allowed values are 'none', 'best-effort', 'restricted', and 'single-numa-node'.
- allowedUnsafeSysctls List<String> - Allowed list of unsafe sysctls or unsafe sysctl patterns (ending in *).
- containerLogMaxFiles Integer - The maximum number of container log files that can be present for a container. The number must be ≥ 2.
- containerLogMaxSizeMB Integer - The maximum size (e.g. 10Mi) of container log file before it is rotated.
- cpuCfsQuota Boolean - The default is true.
- cpuCfsQuotaPeriod String - The default is '100ms'. Valid values are a sequence of decimal numbers with an optional fraction and a unit suffix. For example: '300ms', '2h45m'. Supported units are 'ns', 'us', 'ms', 's', 'm', and 'h'.
- cpuManagerPolicy String - The default is 'none'. See Kubernetes CPU management policies for more information. Allowed values are 'none' and 'static'.
- failSwapOn Boolean - If set to true it will make the Kubelet fail to start if swap is enabled on the node.
- imageGcHighThreshold Integer - To disable image garbage collection, set to 100. The default is 85%.
- imageGcLowThreshold Integer - This cannot be set higher than imageGcHighThreshold. The default is 80%.
- podMaxPids Integer - The maximum number of processes per pod.
- topologyManagerPolicy String - For more information see Kubernetes Topology Manager. The default is 'none'. Allowed values are 'none', 'best-effort', 'restricted', and 'single-numa-node'.
- allowedUnsafeSysctls string[] - Allowed list of unsafe sysctls or unsafe sysctl patterns (ending in *).
- containerLogMaxFiles number - The maximum number of container log files that can be present for a container. The number must be ≥ 2.
- containerLogMaxSizeMB number - The maximum size (e.g. 10Mi) of container log file before it is rotated.
- cpuCfsQuota boolean - The default is true.
- cpuCfsQuotaPeriod string - The default is '100ms'. Valid values are a sequence of decimal numbers with an optional fraction and a unit suffix. For example: '300ms', '2h45m'. Supported units are 'ns', 'us', 'ms', 's', 'm', and 'h'.
- cpuManagerPolicy string - The default is 'none'. See Kubernetes CPU management policies for more information. Allowed values are 'none' and 'static'.
- failSwapOn boolean - If set to true it will make the Kubelet fail to start if swap is enabled on the node.
- imageGcHighThreshold number - To disable image garbage collection, set to 100. The default is 85%.
- imageGcLowThreshold number - This cannot be set higher than imageGcHighThreshold. The default is 80%.
- podMaxPids number - The maximum number of processes per pod.
- topologyManagerPolicy string - For more information see Kubernetes Topology Manager. The default is 'none'. Allowed values are 'none', 'best-effort', 'restricted', and 'single-numa-node'.
- allowed_unsafe_sysctls Sequence[str] - Allowed list of unsafe sysctls or unsafe sysctl patterns (ending in *).
- container_log_max_files int - The maximum number of container log files that can be present for a container. The number must be ≥ 2.
- container_log_max_size_mb int - The maximum size (e.g. 10Mi) of container log file before it is rotated.
- cpu_cfs_quota bool - The default is true.
- cpu_cfs_quota_period str - The default is '100ms'. Valid values are a sequence of decimal numbers with an optional fraction and a unit suffix. For example: '300ms', '2h45m'. Supported units are 'ns', 'us', 'ms', 's', 'm', and 'h'.
- cpu_manager_policy str - The default is 'none'. See Kubernetes CPU management policies for more information. Allowed values are 'none' and 'static'.
- fail_swap_on bool - If set to true it will make the Kubelet fail to start if swap is enabled on the node.
- image_gc_high_threshold int - To disable image garbage collection, set to 100. The default is 85%.
- image_gc_low_threshold int - This cannot be set higher than imageGcHighThreshold. The default is 80%.
- pod_max_pids int - The maximum number of processes per pod.
- topology_manager_policy str - For more information see Kubernetes Topology Manager. The default is 'none'. Allowed values are 'none', 'best-effort', 'restricted', and 'single-numa-node'.
- allowedUnsafeSysctls List<String> - Allowed list of unsafe sysctls or unsafe sysctl patterns (ending in *).
- containerLogMaxFiles Number - The maximum number of container log files that can be present for a container. The number must be ≥ 2.
- containerLogMaxSizeMB Number - The maximum size (e.g. 10Mi) of container log file before it is rotated.
- cpuCfsQuota Boolean - The default is true.
- cpuCfsQuotaPeriod String - The default is '100ms'. Valid values are a sequence of decimal numbers with an optional fraction and a unit suffix. For example: '300ms', '2h45m'. Supported units are 'ns', 'us', 'ms', 's', 'm', and 'h'.
- cpuManagerPolicy String - The default is 'none'. See Kubernetes CPU management policies for more information. Allowed values are 'none' and 'static'.
- failSwapOn Boolean - If set to true it will make the Kubelet fail to start if swap is enabled on the node.
- imageGcHighThreshold Number - To disable image garbage collection, set to 100. The default is 85%.
- imageGcLowThreshold Number - This cannot be set higher than imageGcHighThreshold. The default is 80%.
- podMaxPids Number - The maximum number of processes per pod.
- topologyManagerPolicy String - For more information see Kubernetes Topology Manager. The default is 'none'. Allowed values are 'none', 'best-effort', 'restricted', and 'single-numa-node'.
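A Python sketch showing how a subset of these kubelet settings can be supplied through the kubeletConfig input; the specific values are illustrative, not defaults:
import pulumi_azure_native as azure_native

tuned_pool = azure_native.containerservice.AgentPool("tunedPool",
    resource_group_name="rg1",
    resource_name="clustername1",
    agent_pool_name="tunedpool1",
    count=3,
    vm_size="Standard_DS2_v2",
    kubelet_config={
        "cpu_manager_policy": "static",        # allowed values are 'none' (default) and 'static'
        "cpu_cfs_quota": True,
        "fail_swap_on": True,
        "container_log_max_files": 5,          # must be >= 2
        "container_log_max_size_mb": 50,
        "image_gc_high_threshold": 85,
        "image_gc_low_threshold": 80,
        "allowed_unsafe_sysctls": ["kernel.msg*"],  # example unsafe sysctl pattern
    })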
KubeletConfigResponse, KubeletConfigResponseArgs
- AllowedUnsafeSysctls List<string> - Allowed list of unsafe sysctls or unsafe sysctl patterns (ending in *).
- ContainerLogMaxFiles int - The maximum number of container log files that can be present for a container. The number must be ≥ 2.
- ContainerLogMaxSizeMB int - The maximum size (e.g. 10Mi) of container log file before it is rotated.
- CpuCfsQuota bool - The default is true.
- CpuCfsQuotaPeriod string - The default is '100ms'. Valid values are a sequence of decimal numbers with an optional fraction and a unit suffix. For example: '300ms', '2h45m'. Supported units are 'ns', 'us', 'ms', 's', 'm', and 'h'.
- CpuManagerPolicy string - The default is 'none'. See Kubernetes CPU management policies for more information. Allowed values are 'none' and 'static'.
- FailSwapOn bool - If set to true it will make the Kubelet fail to start if swap is enabled on the node.
- ImageGcHighThreshold int - To disable image garbage collection, set to 100. The default is 85%.
- ImageGcLowThreshold int - This cannot be set higher than imageGcHighThreshold. The default is 80%.
- PodMaxPids int - The maximum number of processes per pod.
- TopologyManagerPolicy string - For more information see Kubernetes Topology Manager. The default is 'none'. Allowed values are 'none', 'best-effort', 'restricted', and 'single-numa-node'.
- AllowedUnsafeSysctls []string - Allowed list of unsafe sysctls or unsafe sysctl patterns (ending in *).
- ContainerLogMaxFiles int - The maximum number of container log files that can be present for a container. The number must be ≥ 2.
- ContainerLogMaxSizeMB int - The maximum size (e.g. 10Mi) of container log file before it is rotated.
- CpuCfsQuota bool - The default is true.
- CpuCfsQuotaPeriod string - The default is '100ms'. Valid values are a sequence of decimal numbers with an optional fraction and a unit suffix. For example: '300ms', '2h45m'. Supported units are 'ns', 'us', 'ms', 's', 'm', and 'h'.
- CpuManagerPolicy string - The default is 'none'. See Kubernetes CPU management policies for more information. Allowed values are 'none' and 'static'.
- FailSwapOn bool - If set to true it will make the Kubelet fail to start if swap is enabled on the node.
- ImageGcHighThreshold int - To disable image garbage collection, set to 100. The default is 85%.
- ImageGcLowThreshold int - This cannot be set higher than imageGcHighThreshold. The default is 80%.
- PodMaxPids int - The maximum number of processes per pod.
- TopologyManagerPolicy string - For more information see Kubernetes Topology Manager. The default is 'none'. Allowed values are 'none', 'best-effort', 'restricted', and 'single-numa-node'.
- allowedUnsafeSysctls List<String> - Allowed list of unsafe sysctls or unsafe sysctl patterns (ending in *).
- containerLogMaxFiles Integer - The maximum number of container log files that can be present for a container. The number must be ≥ 2.
- containerLogMaxSizeMB Integer - The maximum size (e.g. 10Mi) of container log file before it is rotated.
- cpuCfsQuota Boolean - The default is true.
- cpuCfsQuotaPeriod String - The default is '100ms'. Valid values are a sequence of decimal numbers with an optional fraction and a unit suffix. For example: '300ms', '2h45m'. Supported units are 'ns', 'us', 'ms', 's', 'm', and 'h'.
- cpuManagerPolicy String - The default is 'none'. See Kubernetes CPU management policies for more information. Allowed values are 'none' and 'static'.
- failSwapOn Boolean - If set to true it will make the Kubelet fail to start if swap is enabled on the node.
- imageGcHighThreshold Integer - To disable image garbage collection, set to 100. The default is 85%.
- imageGcLowThreshold Integer - This cannot be set higher than imageGcHighThreshold. The default is 80%.
- podMaxPids Integer - The maximum number of processes per pod.
- topologyManagerPolicy String - For more information see Kubernetes Topology Manager. The default is 'none'. Allowed values are 'none', 'best-effort', 'restricted', and 'single-numa-node'.
- allowedUnsafeSysctls string[] - Allowed list of unsafe sysctls or unsafe sysctl patterns (ending in *).
- containerLogMaxFiles number - The maximum number of container log files that can be present for a container. The number must be ≥ 2.
- containerLogMaxSizeMB number - The maximum size (e.g. 10Mi) of container log file before it is rotated.
- cpuCfsQuota boolean - The default is true.
- cpuCfsQuotaPeriod string - The default is '100ms'. Valid values are a sequence of decimal numbers with an optional fraction and a unit suffix. For example: '300ms', '2h45m'. Supported units are 'ns', 'us', 'ms', 's', 'm', and 'h'.
- cpuManagerPolicy string - The default is 'none'. See Kubernetes CPU management policies for more information. Allowed values are 'none' and 'static'.
- failSwapOn boolean - If set to true it will make the Kubelet fail to start if swap is enabled on the node.
- imageGcHighThreshold number - To disable image garbage collection, set to 100. The default is 85%.
- imageGcLowThreshold number - This cannot be set higher than imageGcHighThreshold. The default is 80%.
- podMaxPids number - The maximum number of processes per pod.
- topologyManagerPolicy string - For more information see Kubernetes Topology Manager. The default is 'none'. Allowed values are 'none', 'best-effort', 'restricted', and 'single-numa-node'.
- allowed_unsafe_sysctls Sequence[str] - Allowed list of unsafe sysctls or unsafe sysctl patterns (ending in *).
- container_log_max_files int - The maximum number of container log files that can be present for a container. The number must be ≥ 2.
- container_log_max_size_mb int - The maximum size (e.g. 10Mi) of container log file before it is rotated.
- cpu_cfs_quota bool - The default is true.
- cpu_cfs_quota_period str - The default is '100ms'. Valid values are a sequence of decimal numbers with an optional fraction and a unit suffix. For example: '300ms', '2h45m'. Supported units are 'ns', 'us', 'ms', 's', 'm', and 'h'.
- cpu_manager_policy str - The default is 'none'. See Kubernetes CPU management policies for more information. Allowed values are 'none' and 'static'.
- fail_swap_on bool - If set to true it will make the Kubelet fail to start if swap is enabled on the node.
- image_gc_high_threshold int - To disable image garbage collection, set to 100. The default is 85%.
- image_gc_low_threshold int - This cannot be set higher than imageGcHighThreshold. The default is 80%.
- pod_max_pids int - The maximum number of processes per pod.
- topology_manager_policy str - For more information see Kubernetes Topology Manager. The default is 'none'. Allowed values are 'none', 'best-effort', 'restricted', and 'single-numa-node'.
- allowedUnsafeSysctls List<String> - Allowed list of unsafe sysctls or unsafe sysctl patterns (ending in *).
- containerLogMaxFiles Number - The maximum number of container log files that can be present for a container. The number must be ≥ 2.
- containerLogMaxSizeMB Number - The maximum size (e.g. 10Mi) of container log file before it is rotated.
- cpuCfsQuota Boolean - The default is true.
- cpuCfsQuotaPeriod String - The default is '100ms'. Valid values are a sequence of decimal numbers with an optional fraction and a unit suffix. For example: '300ms', '2h45m'. Supported units are 'ns', 'us', 'ms', 's', 'm', and 'h'.
- cpuManagerPolicy String - The default is 'none'. See Kubernetes CPU management policies for more information. Allowed values are 'none' and 'static'.
- failSwapOn Boolean - If set to true it will make the Kubelet fail to start if swap is enabled on the node.
- imageGcHighThreshold Number - To disable image garbage collection, set to 100. The default is 85%.
- imageGcLowThreshold Number - This cannot be set higher than imageGcHighThreshold. The default is 80%.
- podMaxPids Number - The maximum number of processes per pod.
- topologyManagerPolicy String - For more information see Kubernetes Topology Manager. The default is 'none'. Allowed values are 'none', 'best-effort', 'restricted', and 'single-numa-node'.
KubeletDiskType, KubeletDiskTypeArgs
- OS - Kubelet will use the OS disk for its data.
- Temporary - Kubelet will use the temporary disk for its data.
- KubeletDiskTypeOS - Kubelet will use the OS disk for its data.
- KubeletDiskTypeTemporary - Kubelet will use the temporary disk for its data.
- OS - Kubelet will use the OS disk for its data.
- Temporary - Kubelet will use the temporary disk for its data.
- OS - Kubelet will use the OS disk for its data.
- Temporary - Kubelet will use the temporary disk for its data.
- OS - Kubelet will use the OS disk for its data.
- TEMPORARY - Kubelet will use the temporary disk for its data.
- "OS" - Kubelet will use the OS disk for its data.
- "Temporary" - Kubelet will use the temporary disk for its data.
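For example, pointing kubelet data at the temporary disk; per the input typing above, either the plain string or the KubeletDiskType constant can be passed (names below are placeholders):
import pulumi_azure_native as azure_native

temp_disk_pool = azure_native.containerservice.AgentPool("tempDiskPool",
    resource_group_name="rg1",
    resource_name="clustername1",
    agent_pool_name="temppool1",
    count=3,
    vm_size="Standard_DS2_v2",
    kubelet_disk_type="Temporary")  # kubelet uses the temporary disk for its data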
LinuxOSConfig, LinuxOSConfigArgs
- SwapFileSizeMB int - The size in MB of a swap file that will be created on each node.
- Sysctls Pulumi.AzureNative.ContainerService.Inputs.SysctlConfig - Sysctl settings for Linux agent nodes.
- TransparentHugePageDefrag string - Valid values are 'always', 'defer', 'defer+madvise', 'madvise' and 'never'. The default is 'madvise'. For more information see Transparent Hugepages.
- TransparentHugePageEnabled string - Valid values are 'always', 'madvise', and 'never'. The default is 'always'. For more information see Transparent Hugepages.
- SwapFileSizeMB int - The size in MB of a swap file that will be created on each node.
- Sysctls SysctlConfig - Sysctl settings for Linux agent nodes.
- TransparentHugePageDefrag string - Valid values are 'always', 'defer', 'defer+madvise', 'madvise' and 'never'. The default is 'madvise'. For more information see Transparent Hugepages.
- TransparentHugePageEnabled string - Valid values are 'always', 'madvise', and 'never'. The default is 'always'. For more information see Transparent Hugepages.
- swapFileSizeMB Integer - The size in MB of a swap file that will be created on each node.
- sysctls SysctlConfig - Sysctl settings for Linux agent nodes.
- transparentHugePageDefrag String - Valid values are 'always', 'defer', 'defer+madvise', 'madvise' and 'never'. The default is 'madvise'. For more information see Transparent Hugepages.
- transparentHugePageEnabled String - Valid values are 'always', 'madvise', and 'never'. The default is 'always'. For more information see Transparent Hugepages.
- swapFileSizeMB number - The size in MB of a swap file that will be created on each node.
- sysctls SysctlConfig - Sysctl settings for Linux agent nodes.
- transparentHugePageDefrag string - Valid values are 'always', 'defer', 'defer+madvise', 'madvise' and 'never'. The default is 'madvise'. For more information see Transparent Hugepages.
- transparentHugePageEnabled string - Valid values are 'always', 'madvise', and 'never'. The default is 'always'. For more information see Transparent Hugepages.
- swap_file_size_mb int - The size in MB of a swap file that will be created on each node.
- sysctls SysctlConfig - Sysctl settings for Linux agent nodes.
- transparent_huge_page_defrag str - Valid values are 'always', 'defer', 'defer+madvise', 'madvise' and 'never'. The default is 'madvise'. For more information see Transparent Hugepages.
- transparent_huge_page_enabled str - Valid values are 'always', 'madvise', and 'never'. The default is 'always'. For more information see Transparent Hugepages.
- swapFileSizeMB Number - The size in MB of a swap file that will be created on each node.
- sysctls Property Map - Sysctl settings for Linux agent nodes.
- transparentHugePageDefrag String - Valid values are 'always', 'defer', 'defer+madvise', 'madvise' and 'never'. The default is 'madvise'. For more information see Transparent Hugepages.
- transparentHugePageEnabled String - Valid values are 'always', 'madvise', and 'never'. The default is 'always'. For more information see Transparent Hugepages.
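A Python sketch of the Linux OS settings above. Note that enabling a swap file generally also requires failSwapOn to be false in kubeletConfig; that interaction is an assumption about how the two settings combine, not something stated in this reference:
import pulumi_azure_native as azure_native

linux_pool = azure_native.containerservice.AgentPool("linuxPool",
    resource_group_name="rg1",
    resource_name="clustername1",
    agent_pool_name="linuxpool1",
    count=3,
    vm_size="Standard_DS2_v2",
    kubelet_config={"fail_swap_on": False},  # allow the swap file below (assumption)
    linux_os_config={
        "swap_file_size_mb": 1500,
        "transparent_huge_page_enabled": "madvise",
        "transparent_huge_page_defrag": "defer+madvise",
    })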
LinuxOSConfigResponse, LinuxOSConfigResponseArgs
- SwapFileSizeMB int - The size in MB of a swap file that will be created on each node.
- Sysctls Pulumi.AzureNative.ContainerService.Inputs.SysctlConfigResponse - Sysctl settings for Linux agent nodes.
- TransparentHugePageDefrag string - Valid values are 'always', 'defer', 'defer+madvise', 'madvise' and 'never'. The default is 'madvise'. For more information see Transparent Hugepages.
- TransparentHugePageEnabled string - Valid values are 'always', 'madvise', and 'never'. The default is 'always'. For more information see Transparent Hugepages.
- SwapFileSizeMB int - The size in MB of a swap file that will be created on each node.
- Sysctls SysctlConfigResponse - Sysctl settings for Linux agent nodes.
- TransparentHugePageDefrag string - Valid values are 'always', 'defer', 'defer+madvise', 'madvise' and 'never'. The default is 'madvise'. For more information see Transparent Hugepages.
- TransparentHugePageEnabled string - Valid values are 'always', 'madvise', and 'never'. The default is 'always'. For more information see Transparent Hugepages.
- swapFileSizeMB Integer - The size in MB of a swap file that will be created on each node.
- sysctls SysctlConfigResponse - Sysctl settings for Linux agent nodes.
- transparentHugePageDefrag String - Valid values are 'always', 'defer', 'defer+madvise', 'madvise' and 'never'. The default is 'madvise'. For more information see Transparent Hugepages.
- transparentHugePageEnabled String - Valid values are 'always', 'madvise', and 'never'. The default is 'always'. For more information see Transparent Hugepages.
- swapFileSizeMB number - The size in MB of a swap file that will be created on each node.
- sysctls SysctlConfigResponse - Sysctl settings for Linux agent nodes.
- transparentHugePageDefrag string - Valid values are 'always', 'defer', 'defer+madvise', 'madvise' and 'never'. The default is 'madvise'. For more information see Transparent Hugepages.
- transparentHugePageEnabled string - Valid values are 'always', 'madvise', and 'never'. The default is 'always'. For more information see Transparent Hugepages.
- swap_file_size_mb int - The size in MB of a swap file that will be created on each node.
- sysctls SysctlConfigResponse - Sysctl settings for Linux agent nodes.
- transparent_huge_page_defrag str - Valid values are 'always', 'defer', 'defer+madvise', 'madvise' and 'never'. The default is 'madvise'. For more information see Transparent Hugepages.
- transparent_huge_page_enabled str - Valid values are 'always', 'madvise', and 'never'. The default is 'always'. For more information see Transparent Hugepages.
- swapFileSizeMB Number - The size in MB of a swap file that will be created on each node.
- sysctls Property Map - Sysctl settings for Linux agent nodes.
- transparentHugePageDefrag String - Valid values are 'always', 'defer', 'defer+madvise', 'madvise' and 'never'. The default is 'madvise'. For more information see Transparent Hugepages.
- transparentHugePageEnabled String - Valid values are 'always', 'madvise', and 'never'. The default is 'always'. For more information see Transparent Hugepages.
OSDiskType, OSDiskTypeArgs
- Managed
- Azure replicates the operating system disk for a virtual machine to Azure storage to avoid data loss should the VM need to be relocated to another host. Since containers aren't designed to have local state persisted, this behavior offers limited value while providing some drawbacks, including slower node provisioning and higher read/write latency.
- Ephemeral
- Ephemeral OS disks are stored only on the host machine, just like a temporary disk. This provides lower read/write latency, along with faster node scaling and cluster upgrades.
- OSDisk Type Managed
- Azure replicates the operating system disk for a virtual machine to Azure storage to avoid data loss should the VM need to be relocated to another host. Since containers aren't designed to have local state persisted, this behavior offers limited value while providing some drawbacks, including slower node provisioning and higher read/write latency.
- OSDisk Type Ephemeral
- Ephemeral OS disks are stored only on the host machine, just like a temporary disk. This provides lower read/write latency, along with faster node scaling and cluster upgrades.
- Managed
- Azure replicates the operating system disk for a virtual machine to Azure storage to avoid data loss should the VM need to be relocated to another host. Since containers aren't designed to have local state persisted, this behavior offers limited value while providing some drawbacks, including slower node provisioning and higher read/write latency.
- Ephemeral
- Ephemeral OS disks are stored only on the host machine, just like a temporary disk. This provides lower read/write latency, along with faster node scaling and cluster upgrades.
- Managed
- Azure replicates the operating system disk for a virtual machine to Azure storage to avoid data loss should the VM need to be relocated to another host. Since containers aren't designed to have local state persisted, this behavior offers limited value while providing some drawbacks, including slower node provisioning and higher read/write latency.
- Ephemeral
- Ephemeral OS disks are stored only on the host machine, just like a temporary disk. This provides lower read/write latency, along with faster node scaling and cluster upgrades.
- MANAGED
- Azure replicates the operating system disk for a virtual machine to Azure storage to avoid data loss should the VM need to be relocated to another host. Since containers aren't designed to have local state persisted, this behavior offers limited value while providing some drawbacks, including slower node provisioning and higher read/write latency.
- EPHEMERAL
- Ephemeral OS disks are stored only on the host machine, just like a temporary disk. This provides lower read/write latency, along with faster node scaling and cluster upgrades.
- "Managed"
- Azure replicates the operating system disk for a virtual machine to Azure storage to avoid data loss should the VM need to be relocated to another host. Since containers aren't designed to have local state persisted, this behavior offers limited value while providing some drawbacks, including slower node provisioning and higher read/write latency.
- "Ephemeral"
- Ephemeral OS disks are stored only on the host machine, just like a temporary disk. This provides lower read/write latency, along with faster node scaling and cluster upgrades.
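Choosing between the two disk types is a single input on the agent pool. A minimal Python sketch follows, assuming a VM size whose cache or temporary disk is large enough to hold the requested OS disk; all names and sizes are placeholders.
import pulumi_azure_native as azure_native

# Sketch (assumed names and sizes): an agent pool backed by an Ephemeral OS disk.
ephemeral_pool = azure_native.containerservice.AgentPool("ephemeralOsDiskPool",
    agent_pool_name="nodepool3",
    resource_group_name="rg1",
    resource_name_="clustername1",
    count=3,
    vm_size="Standard_DS3_v2",   # must provide enough cache/temp space for the OS disk
    os_disk_size_gb=30,
    os_disk_type=azure_native.containerservice.OSDiskType.EPHEMERAL)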
OSSKU, OSSKUArgs
- Ubuntu
- Use Ubuntu as the OS for node images.
- Azure Linux
- Use AzureLinux as the OS for node images. Azure Linux is a container-optimized Linux distro built by Microsoft, visit https://aka.ms/azurelinux for more information.
- CBLMariner
- Deprecated OSSKU. Microsoft recommends that new deployments choose 'AzureLinux' instead.
- Windows2019
- Use Windows2019 as the OS for node images. Unsupported for system node pools. Windows2019 only supports Windows2019 containers; it cannot run Windows2022 containers and vice versa.
- Windows2022
- Use Windows2022 as the OS for node images. Unsupported for system node pools. Windows2022 only supports Windows2022 containers; it cannot run Windows2019 containers and vice versa.
- OSSKUUbuntu
- Use Ubuntu as the OS for node images.
- OSSKUAzureLinux
- Use AzureLinux as the OS for node images. Azure Linux is a container-optimized Linux distro built by Microsoft, visit https://aka.ms/azurelinux for more information.
- OSSKUCBLMariner
- Deprecated OSSKU. Microsoft recommends that new deployments choose 'AzureLinux' instead.
- OSSKUWindows2019
- Use Windows2019 as the OS for node images. Unsupported for system node pools. Windows2019 only supports Windows2019 containers; it cannot run Windows2022 containers and vice versa.
- OSSKUWindows2022
- Use Windows2022 as the OS for node images. Unsupported for system node pools. Windows2022 only supports Windows2022 containers; it cannot run Windows2019 containers and vice versa.
- Ubuntu
- Use Ubuntu as the OS for node images.
- Azure Linux
- Use AzureLinux as the OS for node images. Azure Linux is a container-optimized Linux distro built by Microsoft, visit https://aka.ms/azurelinux for more information.
- CBLMariner
- Deprecated OSSKU. Microsoft recommends that new deployments choose 'AzureLinux' instead.
- Windows2019
- Use Windows2019 as the OS for node images. Unsupported for system node pools. Windows2019 only supports Windows2019 containers; it cannot run Windows2022 containers and vice versa.
- Windows2022
- Use Windows2022 as the OS for node images. Unsupported for system node pools. Windows2022 only supports Windows2022 containers; it cannot run Windows2019 containers and vice versa.
- Ubuntu
- Use Ubuntu as the OS for node images.
- Azure Linux
- Use AzureLinux as the OS for node images. Azure Linux is a container-optimized Linux distro built by Microsoft, visit https://aka.ms/azurelinux for more information.
- CBLMariner
- Deprecated OSSKU. Microsoft recommends that new deployments choose 'AzureLinux' instead.
- Windows2019
- Use Windows2019 as the OS for node images. Unsupported for system node pools. Windows2019 only supports Windows2019 containers; it cannot run Windows2022 containers and vice versa.
- Windows2022
- Use Windows2022 as the OS for node images. Unsupported for system node pools. Windows2022 only supports Windows2022 containers; it cannot run Windows2019 containers and vice versa.
- UBUNTU
- Use Ubuntu as the OS for node images.
- AZURE_LINUX
- Use AzureLinux as the OS for node images. Azure Linux is a container-optimized Linux distro built by Microsoft, visit https://aka.ms/azurelinux for more information.
- CBL_MARINER
- Deprecated OSSKU. Microsoft recommends that new deployments choose 'AzureLinux' instead.
- WINDOWS2019
- Use Windows2019 as the OS for node images. Unsupported for system node pools. Windows2019 only supports Windows2019 containers; it cannot run Windows2022 containers and vice versa.
- WINDOWS2022
- Use Windows2022 as the OS for node images. Unsupported for system node pools. Windows2022 only supports Windows2022 containers; it cannot run Windows2019 containers and vice versa.
- "Ubuntu"
- Use Ubuntu as the OS for node images.
- "AzureLinux"
- Use AzureLinux as the OS for node images. Azure Linux is a container-optimized Linux distro built by Microsoft, visit https://aka.ms/azurelinux for more information.
- "CBLMariner"
- Deprecated OSSKU. Microsoft recommends that new deployments choose 'AzureLinux' instead.
- "Windows2019"
- Use Windows2019 as the OS for node images. Unsupported for system node pools. Windows2019 only supports Windows2019 containers; it cannot run Windows2022 containers and vice versa.
- "Windows2022"
- Use Windows2022 as the OS for node images. Unsupported for system node pools. Windows2022 only supports Windows2022 containers; it cannot run Windows2019 containers and vice versa.
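Because CBLMariner is deprecated, a new Linux pool would typically pick AzureLinux or Ubuntu. The sketch below sets the OS SKU alongside the OS type; all resource names are assumptions.
import pulumi_azure_native as azure_native

# Sketch (assumed names): a Linux node pool that uses Azure Linux node images.
azure_linux_pool = azure_native.containerservice.AgentPool("azureLinuxPool",
    agent_pool_name="azlinuxpool",
    resource_group_name="rg1",
    resource_name_="clustername1",
    count=3,
    vm_size="Standard_DS2_v2",
    os_type=azure_native.containerservice.OSType.LINUX,
    os_sku=azure_native.containerservice.OSSKU.AZURE_LINUX)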
OSType, OSTypeArgs
- Linux
- Linux
- Windows
- Windows
- OSType Linux
- Linux
- OSType Windows
- Windows
- Linux
- Linux
- Windows
- Windows
- Linux
- Linux
- Windows
- Windows
- LINUX
- Linux
- WINDOWS
- Windows
- "Linux"
- Linux
- "Windows"
- Windows
PowerState, PowerStateArgs
- Code string | Pulumi.AzureNative.ContainerService.Code
- Tells whether the cluster is Running or Stopped
- code String | "Running" | "Stopped"
- Tells whether the cluster is Running or Stopped
PowerStateResponse, PowerStateResponseArgs
- Code string
- Tells whether the cluster is Running or Stopped
- Code string
- Tells whether the cluster is Running or Stopped
- code String
- Tells whether the cluster is Running or Stopped
- code string
- Tells whether the cluster is Running or Stopped
- code str
- Tells whether the cluster is Running or Stopped
- code String
- Tells whether the cluster is Running or Stopped
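The power state carries only the single code field shown above. As a rough sketch, a user pool's desired state can be declared by passing that code as a string; the names here are placeholders, and whether a pool may be created already stopped depends on the service.
import pulumi_azure_native as azure_native

# Sketch (assumed names): a user pool whose desired power state is Stopped.
stopped_pool = azure_native.containerservice.AgentPool("stoppedPool",
    agent_pool_name="batchpool",
    resource_group_name="rg1",
    resource_name_="clustername1",
    count=3,
    vm_size="Standard_DS2_v2",
    mode="User",
    power_state={
        "code": "Stopped",  # valid values are "Running" and "Stopped"
    })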
ScaleDownMode, ScaleDownModeArgs
- Delete
- Create new instances during scale up and remove instances during scale down.
- Deallocate
- Attempt to start deallocated instances (if they exist) during scale up and deallocate instances during scale down.
- Scale Down Mode Delete
- Create new instances during scale up and remove instances during scale down.
- Scale Down Mode Deallocate
- Attempt to start deallocated instances (if they exist) during scale up and deallocate instances during scale down.
- Delete
- Create new instances during scale up and remove instances during scale down.
- Deallocate
- Attempt to start deallocated instances (if they exist) during scale up and deallocate instances during scale down.
- Delete
- Create new instances during scale up and remove instances during scale down.
- Deallocate
- Attempt to start deallocated instances (if they exist) during scale up and deallocate instances during scale down.
- DELETE
- Create new instances during scale up and remove instances during scale down.
- DEALLOCATE
- Attempt to start deallocated instances (if they exist) during scale up and deallocate instances during scale down.
- "Delete"
- Create new instances during scale up and remove instances during scale down.
- "Deallocate"
- Attempt to start deallocated instances (if they exist) during scale up and deallocate instances during scale down.
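Deallocate pairs naturally with the cluster autoscaler: scaled-down nodes are stopped rather than deleted, so a later scale-up can restart them. A hedged Python sketch, with placeholder names and counts:
import pulumi_azure_native as azure_native

# Sketch (assumed names): an autoscaled pool that deallocates nodes on scale-down.
deallocating_pool = azure_native.containerservice.AgentPool("deallocatingPool",
    agent_pool_name="burstpool",
    resource_group_name="rg1",
    resource_name_="clustername1",
    vm_size="Standard_DS2_v2",
    enable_auto_scaling=True,
    min_count=1,
    max_count=5,
    count=1,
    scale_down_mode=azure_native.containerservice.ScaleDownMode.DEALLOCATE)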
ScaleSetEvictionPolicy, ScaleSetEvictionPolicyArgs
- Delete
- Nodes in the underlying Scale Set of the node pool are deleted when they're evicted.
- Deallocate
- Nodes in the underlying Scale Set of the node pool are set to the stopped-deallocated state upon eviction. Nodes in the stopped-deallocated state count against your compute quota and can cause issues with cluster scaling or upgrading.
- Scale Set Eviction Policy Delete
- Nodes in the underlying Scale Set of the node pool are deleted when they're evicted.
- Scale Set Eviction Policy Deallocate
- Nodes in the underlying Scale Set of the node pool are set to the stopped-deallocated state upon eviction. Nodes in the stopped-deallocated state count against your compute quota and can cause issues with cluster scaling or upgrading.
- Delete
- Nodes in the underlying Scale Set of the node pool are deleted when they're evicted.
- Deallocate
- Nodes in the underlying Scale Set of the node pool are set to the stopped-deallocated state upon eviction. Nodes in the stopped-deallocated state count against your compute quota and can cause issues with cluster scaling or upgrading.
- Delete
- Nodes in the underlying Scale Set of the node pool are deleted when they're evicted.
- Deallocate
- Nodes in the underlying Scale Set of the node pool are set to the stopped-deallocated state upon eviction. Nodes in the stopped-deallocated state count against your compute quota and can cause issues with cluster scaling or upgrading.
- DELETE
- Nodes in the underlying Scale Set of the node pool are deleted when they're evicted.
- DEALLOCATE
- Nodes in the underlying Scale Set of the node pool are set to the stopped-deallocated state upon eviction. Nodes in the stopped-deallocated state count against your compute quota and can cause issues with cluster scaling or upgrading.
- "Delete"
- Nodes in the underlying Scale Set of the node pool are deleted when they're evicted.
- "Deallocate"
- Nodes in the underlying Scale Set of the node pool are set to the stopped-deallocated state upon eviction. Nodes in the stopped-deallocated state count against your compute quota and can cause issues with cluster scaling or upgrading.
ScaleSetPriority, ScaleSetPriorityArgs
- Spot
- Spot priority VMs will be used. There is no SLA for spot nodes. See spot on AKS for more information.
- Regular
- Regular VMs will be used.
- Scale Set Priority Spot
- Spot priority VMs will be used. There is no SLA for spot nodes. See spot on AKS for more information.
- Scale Set Priority Regular
- Regular VMs will be used.
- Spot
- Spot priority VMs will be used. There is no SLA for spot nodes. See spot on AKS for more information.
- Regular
- Regular VMs will be used.
- Spot
- Spot priority VMs will be used. There is no SLA for spot nodes. See spot on AKS for more information.
- Regular
- Regular VMs will be used.
- SPOT
- Spot priority VMs will be used. There is no SLA for spot nodes. See spot on AKS for more information.
- REGULAR
- Regular VMs will be used.
- "Spot"
- Spot priority VMs will be used. There is no SLA for spot nodes. See spot on AKS for more information.
- "Regular"
- Regular VMs will be used.
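The eviction policy in the previous section only applies when the priority here is Spot. The sketch below combines the two with a spot price cap; -1 means the pool pays up to the current on-demand price, and every name is a placeholder.
import pulumi_azure_native as azure_native

# Sketch (assumed names): a Spot user pool whose evicted nodes are deleted.
spot_pool = azure_native.containerservice.AgentPool("spotPool",
    agent_pool_name="spotpool",
    resource_group_name="rg1",
    resource_name_="clustername1",
    count=3,
    vm_size="Standard_DS2_v2",
    mode="User",
    scale_set_priority=azure_native.containerservice.ScaleSetPriority.SPOT,
    scale_set_eviction_policy=azure_native.containerservice.ScaleSetEvictionPolicy.DELETE,
    spot_max_price=-1)  # -1: cap at the on-demand price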
SysctlConfig, SysctlConfigArgs
- Fs Aio Max Nr int - Sysctl setting fs.aio-max-nr.
- Fs File Max int - Sysctl setting fs.file-max.
- Fs Inotify Max User Watches int - Sysctl setting fs.inotify.max_user_watches.
- Fs Nr Open int - Sysctl setting fs.nr_open.
- Kernel Threads Max int - Sysctl setting kernel.threads-max.
- Net Core Netdev Max Backlog int - Sysctl setting net.core.netdev_max_backlog.
- Net Core Optmem Max int - Sysctl setting net.core.optmem_max.
- Net Core Rmem Default int - Sysctl setting net.core.rmem_default.
- Net Core Rmem Max int - Sysctl setting net.core.rmem_max.
- Net Core Somaxconn int - Sysctl setting net.core.somaxconn.
- Net Core Wmem Default int - Sysctl setting net.core.wmem_default.
- Net Core Wmem Max int - Sysctl setting net.core.wmem_max.
- Net Ipv4Ip Local Port Range string - Sysctl setting net.ipv4.ip_local_port_range.
- Net Ipv4Neigh Default Gc Thresh1 int - Sysctl setting net.ipv4.neigh.default.gc_thresh1.
- Net Ipv4Neigh Default Gc Thresh2 int - Sysctl setting net.ipv4.neigh.default.gc_thresh2.
- Net Ipv4Neigh Default Gc Thresh3 int - Sysctl setting net.ipv4.neigh.default.gc_thresh3.
- Net Ipv4Tcp Fin Timeout int - Sysctl setting net.ipv4.tcp_fin_timeout.
- Net Ipv4Tcp Keepalive Probes int - Sysctl setting net.ipv4.tcp_keepalive_probes.
- Net Ipv4Tcp Keepalive Time int - Sysctl setting net.ipv4.tcp_keepalive_time.
- Net Ipv4Tcp Max Syn Backlog int - Sysctl setting net.ipv4.tcp_max_syn_backlog.
- Net Ipv4Tcp Max Tw Buckets int - Sysctl setting net.ipv4.tcp_max_tw_buckets.
- Net Ipv4Tcp Tw Reuse bool - Sysctl setting net.ipv4.tcp_tw_reuse.
- Net Ipv4Tcpkeepalive Intvl int - Sysctl setting net.ipv4.tcp_keepalive_intvl.
- Net Netfilter Nf Conntrack Buckets int - Sysctl setting net.netfilter.nf_conntrack_buckets.
- Net Netfilter Nf Conntrack Max int - Sysctl setting net.netfilter.nf_conntrack_max.
- Vm Max Map Count int - Sysctl setting vm.max_map_count.
- Vm Swappiness int - Sysctl setting vm.swappiness.
- Vm Vfs Cache Pressure int - Sysctl setting vm.vfs_cache_pressure.
- Fs Aio Max Nr int - Sysctl setting fs.aio-max-nr.
- Fs File Max int - Sysctl setting fs.file-max.
- Fs Inotify Max User Watches int - Sysctl setting fs.inotify.max_user_watches.
- Fs Nr Open int - Sysctl setting fs.nr_open.
- Kernel Threads Max int - Sysctl setting kernel.threads-max.
- Net Core Netdev Max Backlog int - Sysctl setting net.core.netdev_max_backlog.
- Net Core Optmem Max int - Sysctl setting net.core.optmem_max.
- Net Core Rmem Default int - Sysctl setting net.core.rmem_default.
- Net Core Rmem Max int - Sysctl setting net.core.rmem_max.
- Net Core Somaxconn int - Sysctl setting net.core.somaxconn.
- Net Core Wmem Default int - Sysctl setting net.core.wmem_default.
- Net Core Wmem Max int - Sysctl setting net.core.wmem_max.
- Net Ipv4Ip Local Port Range string - Sysctl setting net.ipv4.ip_local_port_range.
- Net Ipv4Neigh Default Gc Thresh1 int - Sysctl setting net.ipv4.neigh.default.gc_thresh1.
- Net Ipv4Neigh Default Gc Thresh2 int - Sysctl setting net.ipv4.neigh.default.gc_thresh2.
- Net Ipv4Neigh Default Gc Thresh3 int - Sysctl setting net.ipv4.neigh.default.gc_thresh3.
- Net Ipv4Tcp Fin Timeout int - Sysctl setting net.ipv4.tcp_fin_timeout.
- Net Ipv4Tcp Keepalive Probes int - Sysctl setting net.ipv4.tcp_keepalive_probes.
- Net Ipv4Tcp Keepalive Time int - Sysctl setting net.ipv4.tcp_keepalive_time.
- Net Ipv4Tcp Max Syn Backlog int - Sysctl setting net.ipv4.tcp_max_syn_backlog.
- Net Ipv4Tcp Max Tw Buckets int - Sysctl setting net.ipv4.tcp_max_tw_buckets.
- Net Ipv4Tcp Tw Reuse bool - Sysctl setting net.ipv4.tcp_tw_reuse.
- Net Ipv4Tcpkeepalive Intvl int - Sysctl setting net.ipv4.tcp_keepalive_intvl.
- Net Netfilter Nf Conntrack Buckets int - Sysctl setting net.netfilter.nf_conntrack_buckets.
- Net Netfilter Nf Conntrack Max int - Sysctl setting net.netfilter.nf_conntrack_max.
- Vm Max Map Count int - Sysctl setting vm.max_map_count.
- Vm Swappiness int - Sysctl setting vm.swappiness.
- Vm Vfs Cache Pressure int - Sysctl setting vm.vfs_cache_pressure.
- fs Aio Max Nr Integer - Sysctl setting fs.aio-max-nr.
- fs File Max Integer - Sysctl setting fs.file-max.
- fs Inotify Max User Watches Integer - Sysctl setting fs.inotify.max_user_watches.
- fs Nr Open Integer - Sysctl setting fs.nr_open.
- kernel Threads Max Integer - Sysctl setting kernel.threads-max.
- net Core Netdev Max Backlog Integer - Sysctl setting net.core.netdev_max_backlog.
- net Core Optmem Max Integer - Sysctl setting net.core.optmem_max.
- net Core Rmem Default Integer - Sysctl setting net.core.rmem_default.
- net Core Rmem Max Integer - Sysctl setting net.core.rmem_max.
- net Core Somaxconn Integer - Sysctl setting net.core.somaxconn.
- net Core Wmem Default Integer - Sysctl setting net.core.wmem_default.
- net Core Wmem Max Integer - Sysctl setting net.core.wmem_max.
- net Ipv4Ip Local Port Range String - Sysctl setting net.ipv4.ip_local_port_range.
- net Ipv4Neigh Default Gc Thresh1 Integer - Sysctl setting net.ipv4.neigh.default.gc_thresh1.
- net Ipv4Neigh Default Gc Thresh2 Integer - Sysctl setting net.ipv4.neigh.default.gc_thresh2.
- net Ipv4Neigh Default Gc Thresh3 Integer - Sysctl setting net.ipv4.neigh.default.gc_thresh3.
- net Ipv4Tcp Fin Timeout Integer - Sysctl setting net.ipv4.tcp_fin_timeout.
- net Ipv4Tcp Keepalive Probes Integer - Sysctl setting net.ipv4.tcp_keepalive_probes.
- net Ipv4Tcp Keepalive Time Integer - Sysctl setting net.ipv4.tcp_keepalive_time.
- net Ipv4Tcp Max Syn Backlog Integer - Sysctl setting net.ipv4.tcp_max_syn_backlog.
- net Ipv4Tcp Max Tw Buckets Integer - Sysctl setting net.ipv4.tcp_max_tw_buckets.
- net Ipv4Tcp Tw Reuse Boolean - Sysctl setting net.ipv4.tcp_tw_reuse.
- net Ipv4Tcpkeepalive Intvl Integer - Sysctl setting net.ipv4.tcp_keepalive_intvl.
- net Netfilter Nf Conntrack Buckets Integer - Sysctl setting net.netfilter.nf_conntrack_buckets.
- net Netfilter Nf Conntrack Max Integer - Sysctl setting net.netfilter.nf_conntrack_max.
- vm Max Map Count Integer - Sysctl setting vm.max_map_count.
- vm Swappiness Integer - Sysctl setting vm.swappiness.
- vm Vfs Cache Pressure Integer - Sysctl setting vm.vfs_cache_pressure.
- fs Aio Max Nr number - Sysctl setting fs.aio-max-nr.
- fs File Max number - Sysctl setting fs.file-max.
- fs Inotify Max User Watches number - Sysctl setting fs.inotify.max_user_watches.
- fs Nr Open number - Sysctl setting fs.nr_open.
- kernel Threads Max number - Sysctl setting kernel.threads-max.
- net Core Netdev Max Backlog number - Sysctl setting net.core.netdev_max_backlog.
- net Core Optmem Max number - Sysctl setting net.core.optmem_max.
- net Core Rmem Default number - Sysctl setting net.core.rmem_default.
- net Core Rmem Max number - Sysctl setting net.core.rmem_max.
- net Core Somaxconn number - Sysctl setting net.core.somaxconn.
- net Core Wmem Default number - Sysctl setting net.core.wmem_default.
- net Core Wmem Max number - Sysctl setting net.core.wmem_max.
- net Ipv4Ip Local Port Range string - Sysctl setting net.ipv4.ip_local_port_range.
- net Ipv4Neigh Default Gc Thresh1 number - Sysctl setting net.ipv4.neigh.default.gc_thresh1.
- net Ipv4Neigh Default Gc Thresh2 number - Sysctl setting net.ipv4.neigh.default.gc_thresh2.
- net Ipv4Neigh Default Gc Thresh3 number - Sysctl setting net.ipv4.neigh.default.gc_thresh3.
- net Ipv4Tcp Fin Timeout number - Sysctl setting net.ipv4.tcp_fin_timeout.
- net Ipv4Tcp Keepalive Probes number - Sysctl setting net.ipv4.tcp_keepalive_probes.
- net Ipv4Tcp Keepalive Time number - Sysctl setting net.ipv4.tcp_keepalive_time.
- net Ipv4Tcp Max Syn Backlog number - Sysctl setting net.ipv4.tcp_max_syn_backlog.
- net Ipv4Tcp Max Tw Buckets number - Sysctl setting net.ipv4.tcp_max_tw_buckets.
- net Ipv4Tcp Tw Reuse boolean - Sysctl setting net.ipv4.tcp_tw_reuse.
- net Ipv4Tcpkeepalive Intvl number - Sysctl setting net.ipv4.tcp_keepalive_intvl.
- net Netfilter Nf Conntrack Buckets number - Sysctl setting net.netfilter.nf_conntrack_buckets.
- net Netfilter Nf Conntrack Max number - Sysctl setting net.netfilter.nf_conntrack_max.
- vm Max Map Count number - Sysctl setting vm.max_map_count.
- vm Swappiness number - Sysctl setting vm.swappiness.
- vm Vfs Cache Pressure number - Sysctl setting vm.vfs_cache_pressure.
- fs_aio_max_nr int - Sysctl setting fs.aio-max-nr.
- fs_file_max int - Sysctl setting fs.file-max.
- fs_inotify_max_user_watches int - Sysctl setting fs.inotify.max_user_watches.
- fs_nr_open int - Sysctl setting fs.nr_open.
- kernel_threads_max int - Sysctl setting kernel.threads-max.
- net_core_netdev_max_backlog int - Sysctl setting net.core.netdev_max_backlog.
- net_core_optmem_max int - Sysctl setting net.core.optmem_max.
- net_core_rmem_default int - Sysctl setting net.core.rmem_default.
- net_core_rmem_max int - Sysctl setting net.core.rmem_max.
- net_core_somaxconn int - Sysctl setting net.core.somaxconn.
- net_core_wmem_default int - Sysctl setting net.core.wmem_default.
- net_core_wmem_max int - Sysctl setting net.core.wmem_max.
- net_ipv4_ip_local_port_range str - Sysctl setting net.ipv4.ip_local_port_range.
- net_ipv4_neigh_default_gc_thresh1 int - Sysctl setting net.ipv4.neigh.default.gc_thresh1.
- net_ipv4_neigh_default_gc_thresh2 int - Sysctl setting net.ipv4.neigh.default.gc_thresh2.
- net_ipv4_neigh_default_gc_thresh3 int - Sysctl setting net.ipv4.neigh.default.gc_thresh3.
- net_ipv4_tcp_fin_timeout int - Sysctl setting net.ipv4.tcp_fin_timeout.
- net_ipv4_tcp_keepalive_probes int - Sysctl setting net.ipv4.tcp_keepalive_probes.
- net_ipv4_tcp_keepalive_time int - Sysctl setting net.ipv4.tcp_keepalive_time.
- net_ipv4_tcp_max_syn_backlog int - Sysctl setting net.ipv4.tcp_max_syn_backlog.
- net_ipv4_tcp_max_tw_buckets int - Sysctl setting net.ipv4.tcp_max_tw_buckets.
- net_ipv4_tcp_tw_reuse bool - Sysctl setting net.ipv4.tcp_tw_reuse.
- net_ipv4_tcpkeepalive_intvl int - Sysctl setting net.ipv4.tcp_keepalive_intvl.
- net_netfilter_nf_conntrack_buckets int - Sysctl setting net.netfilter.nf_conntrack_buckets.
- net_netfilter_nf_conntrack_max int - Sysctl setting net.netfilter.nf_conntrack_max.
- vm_max_map_count int - Sysctl setting vm.max_map_count.
- vm_swappiness int - Sysctl setting vm.swappiness.
- vm_vfs_cache_pressure int - Sysctl setting vm.vfs_cache_pressure.
- fs Aio Max Nr Number - Sysctl setting fs.aio-max-nr.
- fs File Max Number - Sysctl setting fs.file-max.
- fs Inotify Max User Watches Number - Sysctl setting fs.inotify.max_user_watches.
- fs Nr Open Number - Sysctl setting fs.nr_open.
- kernel Threads Max Number - Sysctl setting kernel.threads-max.
- net Core Netdev Max Backlog Number - Sysctl setting net.core.netdev_max_backlog.
- net Core Optmem Max Number - Sysctl setting net.core.optmem_max.
- net Core Rmem Default Number - Sysctl setting net.core.rmem_default.
- net Core Rmem Max Number - Sysctl setting net.core.rmem_max.
- net Core Somaxconn Number - Sysctl setting net.core.somaxconn.
- net Core Wmem Default Number - Sysctl setting net.core.wmem_default.
- net Core Wmem Max Number - Sysctl setting net.core.wmem_max.
- net Ipv4Ip Local Port Range String - Sysctl setting net.ipv4.ip_local_port_range.
- net Ipv4Neigh Default Gc Thresh1 Number - Sysctl setting net.ipv4.neigh.default.gc_thresh1.
- net Ipv4Neigh Default Gc Thresh2 Number - Sysctl setting net.ipv4.neigh.default.gc_thresh2.
- net Ipv4Neigh Default Gc Thresh3 Number - Sysctl setting net.ipv4.neigh.default.gc_thresh3.
- net Ipv4Tcp Fin Timeout Number - Sysctl setting net.ipv4.tcp_fin_timeout.
- net Ipv4Tcp Keepalive Probes Number - Sysctl setting net.ipv4.tcp_keepalive_probes.
- net Ipv4Tcp Keepalive Time Number - Sysctl setting net.ipv4.tcp_keepalive_time.
- net Ipv4Tcp Max Syn Backlog Number - Sysctl setting net.ipv4.tcp_max_syn_backlog.
- net Ipv4Tcp Max Tw Buckets Number - Sysctl setting net.ipv4.tcp_max_tw_buckets.
- net Ipv4Tcp Tw Reuse Boolean - Sysctl setting net.ipv4.tcp_tw_reuse.
- net Ipv4Tcpkeepalive Intvl Number - Sysctl setting net.ipv4.tcp_keepalive_intvl.
- net Netfilter Nf Conntrack Buckets Number - Sysctl setting net.netfilter.nf_conntrack_buckets.
- net Netfilter Nf Conntrack Max Number - Sysctl setting net.netfilter.nf_conntrack_max.
- vm Max Map Count Number - Sysctl setting vm.max_map_count.
- vm Swappiness Number - Sysctl setting vm.swappiness.
- vm Vfs Cache Pressure Number - Sysctl setting vm.vfs_cache_pressure.
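The sysctls above are nested under the agent pool's Linux OS config. A small Python sketch tuning a handful of them follows; the specific values are illustrative assumptions, and AKS enforces its own allowed ranges for each setting.
import pulumi_azure_native as azure_native

# Sketch (assumed names and values): tuning a few node sysctls via linux_os_config.
tuned_pool = azure_native.containerservice.AgentPool("sysctlTunedPool",
    agent_pool_name="tunedpool",
    resource_group_name="rg1",
    resource_name_="clustername1",
    count=3,
    vm_size="Standard_DS2_v2",
    linux_os_config={
        "sysctls": {
            "net_core_somaxconn": 16384,                    # net.core.somaxconn
            "net_ipv4_ip_local_port_range": "32000 60000",  # net.ipv4.ip_local_port_range
            "net_ipv4_tcp_tw_reuse": True,                  # net.ipv4.tcp_tw_reuse
            "fs_inotify_max_user_watches": 1048576,         # fs.inotify.max_user_watches
            "vm_max_map_count": 262144,                     # vm.max_map_count
        },
    })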
SysctlConfigResponse, SysctlConfigResponseArgs
- Fs Aio Max Nr int - Sysctl setting fs.aio-max-nr.
- Fs File Max int - Sysctl setting fs.file-max.
- Fs Inotify Max User Watches int - Sysctl setting fs.inotify.max_user_watches.
- Fs Nr Open int - Sysctl setting fs.nr_open.
- Kernel Threads Max int - Sysctl setting kernel.threads-max.
- Net Core Netdev Max Backlog int - Sysctl setting net.core.netdev_max_backlog.
- Net Core Optmem Max int - Sysctl setting net.core.optmem_max.
- Net Core Rmem Default int - Sysctl setting net.core.rmem_default.
- Net Core Rmem Max int - Sysctl setting net.core.rmem_max.
- Net Core Somaxconn int - Sysctl setting net.core.somaxconn.
- Net Core Wmem Default int - Sysctl setting net.core.wmem_default.
- Net Core Wmem Max int - Sysctl setting net.core.wmem_max.
- Net Ipv4Ip Local Port Range string - Sysctl setting net.ipv4.ip_local_port_range.
- Net Ipv4Neigh Default Gc Thresh1 int - Sysctl setting net.ipv4.neigh.default.gc_thresh1.
- Net Ipv4Neigh Default Gc Thresh2 int - Sysctl setting net.ipv4.neigh.default.gc_thresh2.
- Net Ipv4Neigh Default Gc Thresh3 int - Sysctl setting net.ipv4.neigh.default.gc_thresh3.
- Net Ipv4Tcp Fin Timeout int - Sysctl setting net.ipv4.tcp_fin_timeout.
- Net Ipv4Tcp Keepalive Probes int - Sysctl setting net.ipv4.tcp_keepalive_probes.
- Net Ipv4Tcp Keepalive Time int - Sysctl setting net.ipv4.tcp_keepalive_time.
- Net Ipv4Tcp Max Syn Backlog int - Sysctl setting net.ipv4.tcp_max_syn_backlog.
- Net Ipv4Tcp Max Tw Buckets int - Sysctl setting net.ipv4.tcp_max_tw_buckets.
- Net Ipv4Tcp Tw Reuse bool - Sysctl setting net.ipv4.tcp_tw_reuse.
- Net Ipv4Tcpkeepalive Intvl int - Sysctl setting net.ipv4.tcp_keepalive_intvl.
- Net Netfilter Nf Conntrack Buckets int - Sysctl setting net.netfilter.nf_conntrack_buckets.
- Net Netfilter Nf Conntrack Max int - Sysctl setting net.netfilter.nf_conntrack_max.
- Vm Max Map Count int - Sysctl setting vm.max_map_count.
- Vm Swappiness int - Sysctl setting vm.swappiness.
- Vm Vfs Cache Pressure int - Sysctl setting vm.vfs_cache_pressure.
- Fs Aio Max Nr int - Sysctl setting fs.aio-max-nr.
- Fs File Max int - Sysctl setting fs.file-max.
- Fs Inotify Max User Watches int - Sysctl setting fs.inotify.max_user_watches.
- Fs Nr Open int - Sysctl setting fs.nr_open.
- Kernel Threads Max int - Sysctl setting kernel.threads-max.
- Net Core Netdev Max Backlog int - Sysctl setting net.core.netdev_max_backlog.
- Net Core Optmem Max int - Sysctl setting net.core.optmem_max.
- Net Core Rmem Default int - Sysctl setting net.core.rmem_default.
- Net Core Rmem Max int - Sysctl setting net.core.rmem_max.
- Net Core Somaxconn int - Sysctl setting net.core.somaxconn.
- Net Core Wmem Default int - Sysctl setting net.core.wmem_default.
- Net Core Wmem Max int - Sysctl setting net.core.wmem_max.
- Net Ipv4Ip Local Port Range string - Sysctl setting net.ipv4.ip_local_port_range.
- Net Ipv4Neigh Default Gc Thresh1 int - Sysctl setting net.ipv4.neigh.default.gc_thresh1.
- Net Ipv4Neigh Default Gc Thresh2 int - Sysctl setting net.ipv4.neigh.default.gc_thresh2.
- Net Ipv4Neigh Default Gc Thresh3 int - Sysctl setting net.ipv4.neigh.default.gc_thresh3.
- Net Ipv4Tcp Fin Timeout int - Sysctl setting net.ipv4.tcp_fin_timeout.
- Net Ipv4Tcp Keepalive Probes int - Sysctl setting net.ipv4.tcp_keepalive_probes.
- Net Ipv4Tcp Keepalive Time int - Sysctl setting net.ipv4.tcp_keepalive_time.
- Net Ipv4Tcp Max Syn Backlog int - Sysctl setting net.ipv4.tcp_max_syn_backlog.
- Net Ipv4Tcp Max Tw Buckets int - Sysctl setting net.ipv4.tcp_max_tw_buckets.
- Net Ipv4Tcp Tw Reuse bool - Sysctl setting net.ipv4.tcp_tw_reuse.
- Net Ipv4Tcpkeepalive Intvl int - Sysctl setting net.ipv4.tcp_keepalive_intvl.
- Net Netfilter Nf Conntrack Buckets int - Sysctl setting net.netfilter.nf_conntrack_buckets.
- Net Netfilter Nf Conntrack Max int - Sysctl setting net.netfilter.nf_conntrack_max.
- Vm Max Map Count int - Sysctl setting vm.max_map_count.
- Vm Swappiness int - Sysctl setting vm.swappiness.
- Vm Vfs Cache Pressure int - Sysctl setting vm.vfs_cache_pressure.
- fs Aio Max Nr Integer - Sysctl setting fs.aio-max-nr.
- fs File Max Integer - Sysctl setting fs.file-max.
- fs Inotify Max User Watches Integer - Sysctl setting fs.inotify.max_user_watches.
- fs Nr Open Integer - Sysctl setting fs.nr_open.
- kernel Threads Max Integer - Sysctl setting kernel.threads-max.
- net Core Netdev Max Backlog Integer - Sysctl setting net.core.netdev_max_backlog.
- net Core Optmem Max Integer - Sysctl setting net.core.optmem_max.
- net Core Rmem Default Integer - Sysctl setting net.core.rmem_default.
- net Core Rmem Max Integer - Sysctl setting net.core.rmem_max.
- net Core Somaxconn Integer - Sysctl setting net.core.somaxconn.
- net Core Wmem Default Integer - Sysctl setting net.core.wmem_default.
- net Core Wmem Max Integer - Sysctl setting net.core.wmem_max.
- net Ipv4Ip Local Port Range String - Sysctl setting net.ipv4.ip_local_port_range.
- net Ipv4Neigh Default Gc Thresh1 Integer - Sysctl setting net.ipv4.neigh.default.gc_thresh1.
- net Ipv4Neigh Default Gc Thresh2 Integer - Sysctl setting net.ipv4.neigh.default.gc_thresh2.
- net Ipv4Neigh Default Gc Thresh3 Integer - Sysctl setting net.ipv4.neigh.default.gc_thresh3.
- net Ipv4Tcp Fin Timeout Integer - Sysctl setting net.ipv4.tcp_fin_timeout.
- net Ipv4Tcp Keepalive Probes Integer - Sysctl setting net.ipv4.tcp_keepalive_probes.
- net Ipv4Tcp Keepalive Time Integer - Sysctl setting net.ipv4.tcp_keepalive_time.
- net Ipv4Tcp Max Syn Backlog Integer - Sysctl setting net.ipv4.tcp_max_syn_backlog.
- net Ipv4Tcp Max Tw Buckets Integer - Sysctl setting net.ipv4.tcp_max_tw_buckets.
- net Ipv4Tcp Tw Reuse Boolean - Sysctl setting net.ipv4.tcp_tw_reuse.
- net Ipv4Tcpkeepalive Intvl Integer - Sysctl setting net.ipv4.tcp_keepalive_intvl.
- net Netfilter Nf Conntrack Buckets Integer - Sysctl setting net.netfilter.nf_conntrack_buckets.
- net Netfilter Nf Conntrack Max Integer - Sysctl setting net.netfilter.nf_conntrack_max.
- vm Max Map Count Integer - Sysctl setting vm.max_map_count.
- vm Swappiness Integer - Sysctl setting vm.swappiness.
- vm Vfs Cache Pressure Integer - Sysctl setting vm.vfs_cache_pressure.
- fs Aio Max Nr number - Sysctl setting fs.aio-max-nr.
- fs File Max number - Sysctl setting fs.file-max.
- fs Inotify Max User Watches number - Sysctl setting fs.inotify.max_user_watches.
- fs Nr Open number - Sysctl setting fs.nr_open.
- kernel Threads Max number - Sysctl setting kernel.threads-max.
- net Core Netdev Max Backlog number - Sysctl setting net.core.netdev_max_backlog.
- net Core Optmem Max number - Sysctl setting net.core.optmem_max.
- net Core Rmem Default number - Sysctl setting net.core.rmem_default.
- net Core Rmem Max number - Sysctl setting net.core.rmem_max.
- net Core Somaxconn number - Sysctl setting net.core.somaxconn.
- net Core Wmem Default number - Sysctl setting net.core.wmem_default.
- net Core Wmem Max number - Sysctl setting net.core.wmem_max.
- net Ipv4Ip Local Port Range string - Sysctl setting net.ipv4.ip_local_port_range.
- net Ipv4Neigh Default Gc Thresh1 number - Sysctl setting net.ipv4.neigh.default.gc_thresh1.
- net Ipv4Neigh Default Gc Thresh2 number - Sysctl setting net.ipv4.neigh.default.gc_thresh2.
- net Ipv4Neigh Default Gc Thresh3 number - Sysctl setting net.ipv4.neigh.default.gc_thresh3.
- net Ipv4Tcp Fin Timeout number - Sysctl setting net.ipv4.tcp_fin_timeout.
- net Ipv4Tcp Keepalive Probes number - Sysctl setting net.ipv4.tcp_keepalive_probes.
- net Ipv4Tcp Keepalive Time number - Sysctl setting net.ipv4.tcp_keepalive_time.
- net Ipv4Tcp Max Syn Backlog number - Sysctl setting net.ipv4.tcp_max_syn_backlog.
- net Ipv4Tcp Max Tw Buckets number - Sysctl setting net.ipv4.tcp_max_tw_buckets.
- net Ipv4Tcp Tw Reuse boolean - Sysctl setting net.ipv4.tcp_tw_reuse.
- net Ipv4Tcpkeepalive Intvl number - Sysctl setting net.ipv4.tcp_keepalive_intvl.
- net Netfilter Nf Conntrack Buckets number - Sysctl setting net.netfilter.nf_conntrack_buckets.
- net Netfilter Nf Conntrack Max number - Sysctl setting net.netfilter.nf_conntrack_max.
- vm Max Map Count number - Sysctl setting vm.max_map_count.
- vm Swappiness number - Sysctl setting vm.swappiness.
- vm Vfs Cache Pressure number - Sysctl setting vm.vfs_cache_pressure.
- fs_aio_max_nr int - Sysctl setting fs.aio-max-nr.
- fs_file_max int - Sysctl setting fs.file-max.
- fs_inotify_max_user_watches int - Sysctl setting fs.inotify.max_user_watches.
- fs_nr_open int - Sysctl setting fs.nr_open.
- kernel_threads_max int - Sysctl setting kernel.threads-max.
- net_core_netdev_max_backlog int - Sysctl setting net.core.netdev_max_backlog.
- net_core_optmem_max int - Sysctl setting net.core.optmem_max.
- net_core_rmem_default int - Sysctl setting net.core.rmem_default.
- net_core_rmem_max int - Sysctl setting net.core.rmem_max.
- net_core_somaxconn int - Sysctl setting net.core.somaxconn.
- net_core_wmem_default int - Sysctl setting net.core.wmem_default.
- net_core_wmem_max int - Sysctl setting net.core.wmem_max.
- net_ipv4_ip_local_port_range str - Sysctl setting net.ipv4.ip_local_port_range.
- net_ipv4_neigh_default_gc_thresh1 int - Sysctl setting net.ipv4.neigh.default.gc_thresh1.
- net_ipv4_neigh_default_gc_thresh2 int - Sysctl setting net.ipv4.neigh.default.gc_thresh2.
- net_ipv4_neigh_default_gc_thresh3 int - Sysctl setting net.ipv4.neigh.default.gc_thresh3.
- net_ipv4_tcp_fin_timeout int - Sysctl setting net.ipv4.tcp_fin_timeout.
- net_ipv4_tcp_keepalive_probes int - Sysctl setting net.ipv4.tcp_keepalive_probes.
- net_ipv4_tcp_keepalive_time int - Sysctl setting net.ipv4.tcp_keepalive_time.
- net_ipv4_tcp_max_syn_backlog int - Sysctl setting net.ipv4.tcp_max_syn_backlog.
- net_ipv4_tcp_max_tw_buckets int - Sysctl setting net.ipv4.tcp_max_tw_buckets.
- net_ipv4_tcp_tw_reuse bool - Sysctl setting net.ipv4.tcp_tw_reuse.
- net_ipv4_tcpkeepalive_intvl int - Sysctl setting net.ipv4.tcp_keepalive_intvl.
- net_netfilter_nf_conntrack_buckets int - Sysctl setting net.netfilter.nf_conntrack_buckets.
- net_netfilter_nf_conntrack_max int - Sysctl setting net.netfilter.nf_conntrack_max.
- vm_max_map_count int - Sysctl setting vm.max_map_count.
- vm_swappiness int - Sysctl setting vm.swappiness.
- vm_vfs_cache_pressure int - Sysctl setting vm.vfs_cache_pressure.
- fs Aio Max Nr Number - Sysctl setting fs.aio-max-nr.
- fs File Max Number - Sysctl setting fs.file-max.
- fs Inotify Max User Watches Number - Sysctl setting fs.inotify.max_user_watches.
- fs Nr Open Number - Sysctl setting fs.nr_open.
- kernel Threads Max Number - Sysctl setting kernel.threads-max.
- net Core Netdev Max Backlog Number - Sysctl setting net.core.netdev_max_backlog.
- net Core Optmem Max Number - Sysctl setting net.core.optmem_max.
- net Core Rmem Default Number - Sysctl setting net.core.rmem_default.
- net Core Rmem Max Number - Sysctl setting net.core.rmem_max.
- net Core Somaxconn Number - Sysctl setting net.core.somaxconn.
- net Core Wmem Default Number - Sysctl setting net.core.wmem_default.
- net Core Wmem Max Number - Sysctl setting net.core.wmem_max.
- net Ipv4Ip Local Port Range String - Sysctl setting net.ipv4.ip_local_port_range.
- net Ipv4Neigh Default Gc Thresh1 Number - Sysctl setting net.ipv4.neigh.default.gc_thresh1.
- net Ipv4Neigh Default Gc Thresh2 Number - Sysctl setting net.ipv4.neigh.default.gc_thresh2.
- net Ipv4Neigh Default Gc Thresh3 Number - Sysctl setting net.ipv4.neigh.default.gc_thresh3.
- net Ipv4Tcp Fin Timeout Number - Sysctl setting net.ipv4.tcp_fin_timeout.
- net Ipv4Tcp Keepalive Probes Number - Sysctl setting net.ipv4.tcp_keepalive_probes.
- net Ipv4Tcp Keepalive Time Number - Sysctl setting net.ipv4.tcp_keepalive_time.
- net Ipv4Tcp Max Syn Backlog Number - Sysctl setting net.ipv4.tcp_max_syn_backlog.
- net Ipv4Tcp Max Tw Buckets Number - Sysctl setting net.ipv4.tcp_max_tw_buckets.
- net Ipv4Tcp Tw Reuse Boolean - Sysctl setting net.ipv4.tcp_tw_reuse.
- net Ipv4Tcpkeepalive Intvl Number - Sysctl setting net.ipv4.tcp_keepalive_intvl.
- net Netfilter Nf Conntrack Buckets Number - Sysctl setting net.netfilter.nf_conntrack_buckets.
- net Netfilter Nf Conntrack Max Number - Sysctl setting net.netfilter.nf_conntrack_max.
- vm Max Map Count Number - Sysctl setting vm.max_map_count.
- vm Swappiness Number - Sysctl setting vm.swappiness.
- vm Vfs Cache Pressure Number - Sysctl setting vm.vfs_cache_pressure.
WorkloadRuntime, WorkloadRuntimeArgs
- OCIContainer
- Nodes will use Kubelet to run standard OCI container workloads.
- Wasm Wasi
- Nodes will use Krustlet to run WASM workloads using the WASI provider (Preview).
- Workload Runtime OCIContainer
- Nodes will use Kubelet to run standard OCI container workloads.
- Workload Runtime Wasm Wasi
- Nodes will use Krustlet to run WASM workloads using the WASI provider (Preview).
- OCIContainer
- Nodes will use Kubelet to run standard OCI container workloads.
- Wasm Wasi
- Nodes will use Krustlet to run WASM workloads using the WASI provider (Preview).
- OCIContainer
- Nodes will use Kubelet to run standard OCI container workloads.
- Wasm Wasi
- Nodes will use Krustlet to run WASM workloads using the WASI provider (Preview).
- OCI_CONTAINER
- Nodes will use Kubelet to run standard OCI container workloads.
- WASM_WASI
- Nodes will use Krustlet to run WASM workloads using the WASI provider (Preview).
- "OCIContainer"
- Nodes will use Kubelet to run standard OCI container workloads.
- "WasmWasi"
- Nodes will use Krustlet to run WASM workloads using the WASI provider (Preview).
Import
An existing resource can be imported using its type token, name, and identifier, e.g.
$ pulumi import azure-native:containerservice:AgentPool agentpool1 /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- Azure Native pulumi/pulumi-azure-native
- License
- Apache-2.0