gcp.compute.NodeGroup
Explore with Pulumi AI
Represents a NodeGroup resource to manage a group of sole-tenant nodes.
To get more information about NodeGroup, see:
- API documentation
- How-to Guides
Warning: Due to limitations of the API, this provider cannot update the number of nodes in a node group and changes to node group size either through provider config or through external changes will cause the provider to delete and recreate the node group.
Example Usage
Node Group Basic
// Basic example: create a sole-tenant node template, then a node group of one node that uses it.
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const soletenant_tmpl = new gcp.compute.NodeTemplate("soletenant-tmpl", {
name: "soletenant-tmpl",
region: "us-central1",
nodeType: "n1-node-96-624",
});
// The node group links to the template via its resolved resource ID.
const nodes = new gcp.compute.NodeGroup("nodes", {
name: "soletenant-group",
zone: "us-central1-a",
description: "example google_compute_node_group for the Google Provider",
initialSize: 1,
nodeTemplate: soletenant_tmpl.id,
});
# Basic example: create a sole-tenant node template, then a node group of one node that uses it.
import pulumi
import pulumi_gcp as gcp
soletenant_tmpl = gcp.compute.NodeTemplate("soletenant-tmpl",
name="soletenant-tmpl",
region="us-central1",
node_type="n1-node-96-624")
# The node group links to the template via its resolved resource ID.
nodes = gcp.compute.NodeGroup("nodes",
name="soletenant-group",
zone="us-central1-a",
description="example google_compute_node_group for the Google Provider",
initial_size=1,
node_template=soletenant_tmpl.id)
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := compute.NewNodeTemplate(ctx, "soletenant-tmpl", &compute.NodeTemplateArgs{
Name: pulumi.String("soletenant-tmpl"),
Region: pulumi.String("us-central1"),
NodeType: pulumi.String("n1-node-96-624"),
})
if err != nil {
return err
}
_, err = compute.NewNodeGroup(ctx, "nodes", &compute.NodeGroupArgs{
Name: pulumi.String("soletenant-group"),
Zone: pulumi.String("us-central1-a"),
Description: pulumi.String("example google_compute_node_group for the Google Provider"),
InitialSize: pulumi.Int(1),
NodeTemplate: soletenant_tmpl.ID(),
})
if err != nil {
return err
}
return nil
})
}
// Basic example: create a sole-tenant node template, then a node group of one node that uses it.
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
var soletenant_tmpl = new Gcp.Compute.NodeTemplate("soletenant-tmpl", new()
{
Name = "soletenant-tmpl",
Region = "us-central1",
NodeType = "n1-node-96-624",
});
// The node group links to the template via its resolved resource ID.
var nodes = new Gcp.Compute.NodeGroup("nodes", new()
{
Name = "soletenant-group",
Zone = "us-central1-a",
Description = "example google_compute_node_group for the Google Provider",
InitialSize = 1,
NodeTemplate = soletenant_tmpl.Id,
});
});
// Basic example: create a sole-tenant node template, then a node group of one node that uses it.
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.compute.NodeTemplate;
import com.pulumi.gcp.compute.NodeTemplateArgs;
import com.pulumi.gcp.compute.NodeGroup;
import com.pulumi.gcp.compute.NodeGroupArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var soletenant_tmpl = new NodeTemplate("soletenant-tmpl", NodeTemplateArgs.builder()
.name("soletenant-tmpl")
.region("us-central1")
.nodeType("n1-node-96-624")
.build());
// The node group links to the template via its resolved resource ID.
var nodes = new NodeGroup("nodes", NodeGroupArgs.builder()
.name("soletenant-group")
.zone("us-central1-a")
.description("example google_compute_node_group for the Google Provider")
.initialSize(1)
.nodeTemplate(soletenant_tmpl.id())
.build());
}
}
# Basic example: a sole-tenant node template plus a node group of one node that references it.
# Note: ${["soletenant-tmpl"].id} uses Pulumi YAML bracket syntax, required for hyphenated resource names.
resources:
soletenant-tmpl:
type: gcp:compute:NodeTemplate
properties:
name: soletenant-tmpl
region: us-central1
nodeType: n1-node-96-624
nodes:
type: gcp:compute:NodeGroup
properties:
name: soletenant-group
zone: us-central1-a
description: example google_compute_node_group for the Google Provider
initialSize: 1
nodeTemplate: ${["soletenant-tmpl"].id}
Node Group Maintenance Interval
// Maintenance-interval example: node group with RECURRENT planned-maintenance scheduling.
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const soletenant_tmpl = new gcp.compute.NodeTemplate("soletenant-tmpl", {
name: "soletenant-tmpl",
region: "us-central1",
nodeType: "c2-node-60-240",
});
const nodes = new gcp.compute.NodeGroup("nodes", {
name: "soletenant-group",
zone: "us-central1-a",
description: "example google_compute_node_group for Terraform Google Provider",
initialSize: 1,
nodeTemplate: soletenant_tmpl.id,
maintenanceInterval: "RECURRENT",
});
# Maintenance-interval example: node group with RECURRENT planned-maintenance scheduling.
import pulumi
import pulumi_gcp as gcp
soletenant_tmpl = gcp.compute.NodeTemplate("soletenant-tmpl",
name="soletenant-tmpl",
region="us-central1",
node_type="c2-node-60-240")
nodes = gcp.compute.NodeGroup("nodes",
name="soletenant-group",
zone="us-central1-a",
description="example google_compute_node_group for Terraform Google Provider",
initial_size=1,
node_template=soletenant_tmpl.id,
maintenance_interval="RECURRENT")
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := compute.NewNodeTemplate(ctx, "soletenant-tmpl", &compute.NodeTemplateArgs{
Name: pulumi.String("soletenant-tmpl"),
Region: pulumi.String("us-central1"),
NodeType: pulumi.String("c2-node-60-240"),
})
if err != nil {
return err
}
_, err = compute.NewNodeGroup(ctx, "nodes", &compute.NodeGroupArgs{
Name: pulumi.String("soletenant-group"),
Zone: pulumi.String("us-central1-a"),
Description: pulumi.String("example google_compute_node_group for Terraform Google Provider"),
InitialSize: pulumi.Int(1),
NodeTemplate: soletenant_tmpl.ID(),
MaintenanceInterval: pulumi.String("RECURRENT"),
})
if err != nil {
return err
}
return nil
})
}
// Maintenance-interval example: node group with RECURRENT planned-maintenance scheduling.
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
var soletenant_tmpl = new Gcp.Compute.NodeTemplate("soletenant-tmpl", new()
{
Name = "soletenant-tmpl",
Region = "us-central1",
NodeType = "c2-node-60-240",
});
var nodes = new Gcp.Compute.NodeGroup("nodes", new()
{
Name = "soletenant-group",
Zone = "us-central1-a",
Description = "example google_compute_node_group for Terraform Google Provider",
InitialSize = 1,
NodeTemplate = soletenant_tmpl.Id,
MaintenanceInterval = "RECURRENT",
});
});
// Maintenance-interval example: node group with RECURRENT planned-maintenance scheduling.
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.compute.NodeTemplate;
import com.pulumi.gcp.compute.NodeTemplateArgs;
import com.pulumi.gcp.compute.NodeGroup;
import com.pulumi.gcp.compute.NodeGroupArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var soletenant_tmpl = new NodeTemplate("soletenant-tmpl", NodeTemplateArgs.builder()
.name("soletenant-tmpl")
.region("us-central1")
.nodeType("c2-node-60-240")
.build());
var nodes = new NodeGroup("nodes", NodeGroupArgs.builder()
.name("soletenant-group")
.zone("us-central1-a")
.description("example google_compute_node_group for Terraform Google Provider")
.initialSize(1)
.nodeTemplate(soletenant_tmpl.id())
.maintenanceInterval("RECURRENT")
.build());
}
}
# Maintenance-interval example: node group with RECURRENT planned-maintenance scheduling.
# Note: ${["soletenant-tmpl"].id} uses Pulumi YAML bracket syntax, required for hyphenated resource names.
resources:
soletenant-tmpl:
type: gcp:compute:NodeTemplate
properties:
name: soletenant-tmpl
region: us-central1
nodeType: c2-node-60-240
nodes:
type: gcp:compute:NodeGroup
properties:
name: soletenant-group
zone: us-central1-a
description: example google_compute_node_group for Terraform Google Provider
initialSize: 1
nodeTemplate: ${["soletenant-tmpl"].id}
maintenanceInterval: RECURRENT
Node Group Autoscaling Policy
// Autoscaling example: node group with a maintenance window and an ONLY_SCALE_OUT autoscaler (1-10 nodes).
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const soletenant_tmpl = new gcp.compute.NodeTemplate("soletenant-tmpl", {
name: "soletenant-tmpl",
region: "us-central1",
nodeType: "n1-node-96-624",
});
const nodes = new gcp.compute.NodeGroup("nodes", {
name: "soletenant-group",
zone: "us-central1-a",
description: "example google_compute_node_group for Google Provider",
maintenancePolicy: "RESTART_IN_PLACE",
maintenanceWindow: {
startTime: "08:00",
},
initialSize: 1,
nodeTemplate: soletenant_tmpl.id,
autoscalingPolicy: {
mode: "ONLY_SCALE_OUT",
minNodes: 1,
maxNodes: 10,
},
});
# Autoscaling example: node group with a maintenance window and an ONLY_SCALE_OUT autoscaler (1-10 nodes).
import pulumi
import pulumi_gcp as gcp
soletenant_tmpl = gcp.compute.NodeTemplate("soletenant-tmpl",
name="soletenant-tmpl",
region="us-central1",
node_type="n1-node-96-624")
nodes = gcp.compute.NodeGroup("nodes",
name="soletenant-group",
zone="us-central1-a",
description="example google_compute_node_group for Google Provider",
maintenance_policy="RESTART_IN_PLACE",
maintenance_window={
"start_time": "08:00",
},
initial_size=1,
node_template=soletenant_tmpl.id,
autoscaling_policy={
"mode": "ONLY_SCALE_OUT",
"min_nodes": 1,
"max_nodes": 10,
})
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := compute.NewNodeTemplate(ctx, "soletenant-tmpl", &compute.NodeTemplateArgs{
Name: pulumi.String("soletenant-tmpl"),
Region: pulumi.String("us-central1"),
NodeType: pulumi.String("n1-node-96-624"),
})
if err != nil {
return err
}
_, err = compute.NewNodeGroup(ctx, "nodes", &compute.NodeGroupArgs{
Name: pulumi.String("soletenant-group"),
Zone: pulumi.String("us-central1-a"),
Description: pulumi.String("example google_compute_node_group for Google Provider"),
MaintenancePolicy: pulumi.String("RESTART_IN_PLACE"),
MaintenanceWindow: &compute.NodeGroupMaintenanceWindowArgs{
StartTime: pulumi.String("08:00"),
},
InitialSize: pulumi.Int(1),
NodeTemplate: soletenant_tmpl.ID(),
AutoscalingPolicy: &compute.NodeGroupAutoscalingPolicyArgs{
Mode: pulumi.String("ONLY_SCALE_OUT"),
MinNodes: pulumi.Int(1),
MaxNodes: pulumi.Int(10),
},
})
if err != nil {
return err
}
return nil
})
}
// Autoscaling example: node group with a maintenance window and an ONLY_SCALE_OUT autoscaler (1-10 nodes).
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
var soletenant_tmpl = new Gcp.Compute.NodeTemplate("soletenant-tmpl", new()
{
Name = "soletenant-tmpl",
Region = "us-central1",
NodeType = "n1-node-96-624",
});
var nodes = new Gcp.Compute.NodeGroup("nodes", new()
{
Name = "soletenant-group",
Zone = "us-central1-a",
Description = "example google_compute_node_group for Google Provider",
MaintenancePolicy = "RESTART_IN_PLACE",
MaintenanceWindow = new Gcp.Compute.Inputs.NodeGroupMaintenanceWindowArgs
{
StartTime = "08:00",
},
InitialSize = 1,
NodeTemplate = soletenant_tmpl.Id,
AutoscalingPolicy = new Gcp.Compute.Inputs.NodeGroupAutoscalingPolicyArgs
{
Mode = "ONLY_SCALE_OUT",
MinNodes = 1,
MaxNodes = 10,
},
});
});
// Autoscaling example: node group with a maintenance window and an ONLY_SCALE_OUT autoscaler (1-10 nodes).
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.compute.NodeTemplate;
import com.pulumi.gcp.compute.NodeTemplateArgs;
import com.pulumi.gcp.compute.NodeGroup;
import com.pulumi.gcp.compute.NodeGroupArgs;
import com.pulumi.gcp.compute.inputs.NodeGroupMaintenanceWindowArgs;
import com.pulumi.gcp.compute.inputs.NodeGroupAutoscalingPolicyArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var soletenant_tmpl = new NodeTemplate("soletenant-tmpl", NodeTemplateArgs.builder()
.name("soletenant-tmpl")
.region("us-central1")
.nodeType("n1-node-96-624")
.build());
var nodes = new NodeGroup("nodes", NodeGroupArgs.builder()
.name("soletenant-group")
.zone("us-central1-a")
.description("example google_compute_node_group for Google Provider")
.maintenancePolicy("RESTART_IN_PLACE")
.maintenanceWindow(NodeGroupMaintenanceWindowArgs.builder()
.startTime("08:00")
.build())
.initialSize(1)
.nodeTemplate(soletenant_tmpl.id())
.autoscalingPolicy(NodeGroupAutoscalingPolicyArgs.builder()
.mode("ONLY_SCALE_OUT")
.minNodes(1)
.maxNodes(10)
.build())
.build());
}
}
# Autoscaling example: node group with a maintenance window and an ONLY_SCALE_OUT autoscaler (1-10 nodes).
# Note: ${["soletenant-tmpl"].id} uses Pulumi YAML bracket syntax, required for hyphenated resource names.
resources:
soletenant-tmpl:
type: gcp:compute:NodeTemplate
properties:
name: soletenant-tmpl
region: us-central1
nodeType: n1-node-96-624
nodes:
type: gcp:compute:NodeGroup
properties:
name: soletenant-group
zone: us-central1-a
description: example google_compute_node_group for Google Provider
maintenancePolicy: RESTART_IN_PLACE
maintenanceWindow:
# NOTE(review): consider quoting "08:00" — YAML 1.1 parsers may read unquoted colon-separated digits as a sexagesimal integer. Verify against the Pulumi YAML parser.
startTime: 08:00
initialSize: 1
nodeTemplate: ${["soletenant-tmpl"].id}
autoscalingPolicy:
mode: ONLY_SCALE_OUT
minNodes: 1
maxNodes: 10
Node Group Share Settings
// Share-settings example: share the node group with a specific guest project.
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const guestProject = new gcp.organizations.Project("guest_project", {
projectId: "project-id",
name: "project-name",
orgId: "123456789",
deletionPolicy: "DELETE",
});
const soletenant_tmpl = new gcp.compute.NodeTemplate("soletenant-tmpl", {
name: "soletenant-tmpl",
region: "us-central1",
nodeType: "n1-node-96-624",
});
const nodes = new gcp.compute.NodeGroup("nodes", {
name: "soletenant-group",
zone: "us-central1-f",
description: "example google_compute_node_group for Terraform Google Provider",
initialSize: 1,
nodeTemplate: soletenant_tmpl.id,
shareSettings: {
shareType: "SPECIFIC_PROJECTS",
projectMaps: [{
id: guestProject.projectId,
projectId: guestProject.projectId,
}],
},
});
# Share-settings example: share the node group with a specific guest project.
import pulumi
import pulumi_gcp as gcp
guest_project = gcp.organizations.Project("guest_project",
project_id="project-id",
name="project-name",
org_id="123456789",
deletion_policy="DELETE")
soletenant_tmpl = gcp.compute.NodeTemplate("soletenant-tmpl",
name="soletenant-tmpl",
region="us-central1",
node_type="n1-node-96-624")
nodes = gcp.compute.NodeGroup("nodes",
name="soletenant-group",
zone="us-central1-f",
description="example google_compute_node_group for Terraform Google Provider",
initial_size=1,
node_template=soletenant_tmpl.id,
share_settings={
"share_type": "SPECIFIC_PROJECTS",
"project_maps": [{
"id": guest_project.project_id,
"project_id": guest_project.project_id,
}],
})
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute"
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
guestProject, err := organizations.NewProject(ctx, "guest_project", &organizations.ProjectArgs{
ProjectId: pulumi.String("project-id"),
Name: pulumi.String("project-name"),
OrgId: pulumi.String("123456789"),
DeletionPolicy: pulumi.String("DELETE"),
})
if err != nil {
return err
}
_, err = compute.NewNodeTemplate(ctx, "soletenant-tmpl", &compute.NodeTemplateArgs{
Name: pulumi.String("soletenant-tmpl"),
Region: pulumi.String("us-central1"),
NodeType: pulumi.String("n1-node-96-624"),
})
if err != nil {
return err
}
_, err = compute.NewNodeGroup(ctx, "nodes", &compute.NodeGroupArgs{
Name: pulumi.String("soletenant-group"),
Zone: pulumi.String("us-central1-f"),
Description: pulumi.String("example google_compute_node_group for Terraform Google Provider"),
InitialSize: pulumi.Int(1),
NodeTemplate: soletenant_tmpl.ID(),
ShareSettings: &compute.NodeGroupShareSettingsArgs{
ShareType: pulumi.String("SPECIFIC_PROJECTS"),
ProjectMaps: compute.NodeGroupShareSettingsProjectMapArray{
&compute.NodeGroupShareSettingsProjectMapArgs{
Id: guestProject.ProjectId,
ProjectId: guestProject.ProjectId,
},
},
},
})
if err != nil {
return err
}
return nil
})
}
// Share-settings example: share the node group with a specific guest project.
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
var guestProject = new Gcp.Organizations.Project("guest_project", new()
{
ProjectId = "project-id",
Name = "project-name",
OrgId = "123456789",
DeletionPolicy = "DELETE",
});
var soletenant_tmpl = new Gcp.Compute.NodeTemplate("soletenant-tmpl", new()
{
Name = "soletenant-tmpl",
Region = "us-central1",
NodeType = "n1-node-96-624",
});
var nodes = new Gcp.Compute.NodeGroup("nodes", new()
{
Name = "soletenant-group",
Zone = "us-central1-f",
Description = "example google_compute_node_group for Terraform Google Provider",
InitialSize = 1,
NodeTemplate = soletenant_tmpl.Id,
ShareSettings = new Gcp.Compute.Inputs.NodeGroupShareSettingsArgs
{
ShareType = "SPECIFIC_PROJECTS",
ProjectMaps = new[]
{
new Gcp.Compute.Inputs.NodeGroupShareSettingsProjectMapArgs
{
Id = guestProject.ProjectId,
ProjectId = guestProject.ProjectId,
},
},
},
});
});
// Share-settings example: share the node group with a specific guest project.
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.organizations.Project;
import com.pulumi.gcp.organizations.ProjectArgs;
import com.pulumi.gcp.compute.NodeTemplate;
import com.pulumi.gcp.compute.NodeTemplateArgs;
import com.pulumi.gcp.compute.NodeGroup;
import com.pulumi.gcp.compute.NodeGroupArgs;
import com.pulumi.gcp.compute.inputs.NodeGroupShareSettingsArgs;
// Missing from the generated example: required by the projectMaps builder below.
import com.pulumi.gcp.compute.inputs.NodeGroupShareSettingsProjectMapArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var guestProject = new Project("guestProject", ProjectArgs.builder()
            .projectId("project-id")
            .name("project-name")
            .orgId("123456789")
            .deletionPolicy("DELETE")
            .build());
        var soletenant_tmpl = new NodeTemplate("soletenant-tmpl", NodeTemplateArgs.builder()
            .name("soletenant-tmpl")
            .region("us-central1")
            .nodeType("n1-node-96-624")
            .build());
        var nodes = new NodeGroup("nodes", NodeGroupArgs.builder()
            .name("soletenant-group")
            .zone("us-central1-f")
            .description("example google_compute_node_group for Terraform Google Provider")
            .initialSize(1)
            .nodeTemplate(soletenant_tmpl.id())
            .shareSettings(NodeGroupShareSettingsArgs.builder()
                .shareType("SPECIFIC_PROJECTS")
                .projectMaps(NodeGroupShareSettingsProjectMapArgs.builder()
                    .id(guestProject.projectId())
                    .projectId(guestProject.projectId())
                    .build())
                .build())
            .build());
    }
}
# Share-settings example: share the node group with a specific guest project.
# Note: ${["soletenant-tmpl"].id} uses Pulumi YAML bracket syntax, required for hyphenated resource names.
resources:
guestProject:
type: gcp:organizations:Project
name: guest_project
properties:
projectId: project-id
name: project-name
orgId: '123456789'
deletionPolicy: DELETE
soletenant-tmpl:
type: gcp:compute:NodeTemplate
properties:
name: soletenant-tmpl
region: us-central1
nodeType: n1-node-96-624
nodes:
type: gcp:compute:NodeGroup
properties:
name: soletenant-group
zone: us-central1-f
description: example google_compute_node_group for Terraform Google Provider
initialSize: 1
nodeTemplate: ${["soletenant-tmpl"].id}
shareSettings:
shareType: SPECIFIC_PROJECTS
projectMaps:
- id: ${guestProject.projectId}
projectId: ${guestProject.projectId}
Create NodeGroup Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new NodeGroup(name: string, args: NodeGroupArgs, opts?: CustomResourceOptions);
@overload
def NodeGroup(resource_name: str,
args: NodeGroupArgs,
opts: Optional[ResourceOptions] = None)
@overload
def NodeGroup(resource_name: str,
opts: Optional[ResourceOptions] = None,
node_template: Optional[str] = None,
autoscaling_policy: Optional[NodeGroupAutoscalingPolicyArgs] = None,
description: Optional[str] = None,
initial_size: Optional[int] = None,
maintenance_interval: Optional[str] = None,
maintenance_policy: Optional[str] = None,
maintenance_window: Optional[NodeGroupMaintenanceWindowArgs] = None,
name: Optional[str] = None,
project: Optional[str] = None,
share_settings: Optional[NodeGroupShareSettingsArgs] = None,
zone: Optional[str] = None)
func NewNodeGroup(ctx *Context, name string, args NodeGroupArgs, opts ...ResourceOption) (*NodeGroup, error)
public NodeGroup(string name, NodeGroupArgs args, CustomResourceOptions? opts = null)
public NodeGroup(String name, NodeGroupArgs args)
public NodeGroup(String name, NodeGroupArgs args, CustomResourceOptions options)
type: gcp:compute:NodeGroup
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args NodeGroupArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args NodeGroupArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args NodeGroupArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args NodeGroupArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args NodeGroupArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var nodeGroupResource = new Gcp.Compute.NodeGroup("nodeGroupResource", new()
{
NodeTemplate = "string",
AutoscalingPolicy = new Gcp.Compute.Inputs.NodeGroupAutoscalingPolicyArgs
{
MaxNodes = 0,
MinNodes = 0,
Mode = "string",
},
Description = "string",
InitialSize = 0,
MaintenanceInterval = "string",
MaintenancePolicy = "string",
MaintenanceWindow = new Gcp.Compute.Inputs.NodeGroupMaintenanceWindowArgs
{
StartTime = "string",
},
Name = "string",
Project = "string",
ShareSettings = new Gcp.Compute.Inputs.NodeGroupShareSettingsArgs
{
ShareType = "string",
ProjectMaps = new[]
{
new Gcp.Compute.Inputs.NodeGroupShareSettingsProjectMapArgs
{
Id = "string",
ProjectId = "string",
},
},
},
Zone = "string",
});
example, err := compute.NewNodeGroup(ctx, "nodeGroupResource", &compute.NodeGroupArgs{
NodeTemplate: pulumi.String("string"),
AutoscalingPolicy: &compute.NodeGroupAutoscalingPolicyArgs{
MaxNodes: pulumi.Int(0),
MinNodes: pulumi.Int(0),
Mode: pulumi.String("string"),
},
Description: pulumi.String("string"),
InitialSize: pulumi.Int(0),
MaintenanceInterval: pulumi.String("string"),
MaintenancePolicy: pulumi.String("string"),
MaintenanceWindow: &compute.NodeGroupMaintenanceWindowArgs{
StartTime: pulumi.String("string"),
},
Name: pulumi.String("string"),
Project: pulumi.String("string"),
ShareSettings: &compute.NodeGroupShareSettingsArgs{
ShareType: pulumi.String("string"),
ProjectMaps: compute.NodeGroupShareSettingsProjectMapArray{
&compute.NodeGroupShareSettingsProjectMapArgs{
Id: pulumi.String("string"),
ProjectId: pulumi.String("string"),
},
},
},
Zone: pulumi.String("string"),
})
var nodeGroupResource = new NodeGroup("nodeGroupResource", NodeGroupArgs.builder()
.nodeTemplate("string")
.autoscalingPolicy(NodeGroupAutoscalingPolicyArgs.builder()
.maxNodes(0)
.minNodes(0)
.mode("string")
.build())
.description("string")
.initialSize(0)
.maintenanceInterval("string")
.maintenancePolicy("string")
.maintenanceWindow(NodeGroupMaintenanceWindowArgs.builder()
.startTime("string")
.build())
.name("string")
.project("string")
.shareSettings(NodeGroupShareSettingsArgs.builder()
.shareType("string")
.projectMaps(NodeGroupShareSettingsProjectMapArgs.builder()
.id("string")
.projectId("string")
.build())
.build())
.zone("string")
.build());
node_group_resource = gcp.compute.NodeGroup("nodeGroupResource",
node_template="string",
autoscaling_policy={
"maxNodes": 0,
"minNodes": 0,
"mode": "string",
},
description="string",
initial_size=0,
maintenance_interval="string",
maintenance_policy="string",
maintenance_window={
"startTime": "string",
},
name="string",
project="string",
share_settings={
"shareType": "string",
"projectMaps": [{
"id": "string",
"projectId": "string",
}],
},
zone="string")
const nodeGroupResource = new gcp.compute.NodeGroup("nodeGroupResource", {
nodeTemplate: "string",
autoscalingPolicy: {
maxNodes: 0,
minNodes: 0,
mode: "string",
},
description: "string",
initialSize: 0,
maintenanceInterval: "string",
maintenancePolicy: "string",
maintenanceWindow: {
startTime: "string",
},
name: "string",
project: "string",
shareSettings: {
shareType: "string",
projectMaps: [{
id: "string",
projectId: "string",
}],
},
zone: "string",
});
type: gcp:compute:NodeGroup
properties:
autoscalingPolicy:
maxNodes: 0
minNodes: 0
mode: string
description: string
initialSize: 0
maintenanceInterval: string
maintenancePolicy: string
maintenanceWindow:
startTime: string
name: string
nodeTemplate: string
project: string
shareSettings:
projectMaps:
- id: string
projectId: string
shareType: string
zone: string
NodeGroup Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
The NodeGroup resource accepts the following input properties:
- Node
Template string - The URL of the node template to which this node group belongs.
- Autoscaling
Policy NodeGroup Autoscaling Policy - If you use sole-tenant nodes for your workloads, you can use the node
group autoscaler to automatically manage the sizes of your node groups.
One of
initial_size
or autoscaling_policy
must be configured on resource creation. Structure is documented below. - Description string
- An optional textual description of the resource.
- Initial
Size int - The initial number of nodes in the node group. One of
initial_size
or autoscaling_policy
must be configured on resource creation. - Maintenance
Interval string - Specifies the frequency of planned maintenance events. Set to one of the following:
- AS_NEEDED: Hosts are eligible to receive infrastructure and hypervisor updates as they become available.
- RECURRENT: Hosts receive planned infrastructure and hypervisor updates on a periodic basis, but not more frequently than every 28 days. This minimizes the number of planned maintenance operations on individual hosts and reduces the frequency of disruptions, both live migrations and terminations, on individual VMs.
Possible values are:
AS_NEEDED
, RECURRENT
.
- Maintenance
Policy string - Specifies how to handle instances when a node in the group undergoes maintenance. Set to one of: DEFAULT, RESTART_IN_PLACE, or MIGRATE_WITHIN_NODE_GROUP. The default value is DEFAULT.
- Maintenance
Window NodeGroup Maintenance Window - contains properties for the timeframe of maintenance Structure is documented below.
- Name string
- Name of the resource.
- Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- Share
Settings NodeGroup Share Settings - Share settings for the node group. Structure is documented below.
- Zone string
- Zone where this node group is located
- Node
Template string - The URL of the node template to which this node group belongs.
- Autoscaling
Policy NodeGroup Autoscaling Policy Args - If you use sole-tenant nodes for your workloads, you can use the node
group autoscaler to automatically manage the sizes of your node groups.
One of
initial_size
or autoscaling_policy
must be configured on resource creation. Structure is documented below. - Description string
- An optional textual description of the resource.
- Initial
Size int - The initial number of nodes in the node group. One of
initial_size
or autoscaling_policy
must be configured on resource creation. - Maintenance
Interval string - Specifies the frequency of planned maintenance events. Set to one of the following:
- AS_NEEDED: Hosts are eligible to receive infrastructure and hypervisor updates as they become available.
- RECURRENT: Hosts receive planned infrastructure and hypervisor updates on a periodic basis, but not more frequently than every 28 days. This minimizes the number of planned maintenance operations on individual hosts and reduces the frequency of disruptions, both live migrations and terminations, on individual VMs.
Possible values are:
AS_NEEDED
, RECURRENT
.
- Maintenance
Policy string - Specifies how to handle instances when a node in the group undergoes maintenance. Set to one of: DEFAULT, RESTART_IN_PLACE, or MIGRATE_WITHIN_NODE_GROUP. The default value is DEFAULT.
- Maintenance
Window NodeGroup Maintenance Window Args - contains properties for the timeframe of maintenance Structure is documented below.
- Name string
- Name of the resource.
- Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- Share
Settings NodeGroup Share Settings Args - Share settings for the node group. Structure is documented below.
- Zone string
- Zone where this node group is located
- node
Template String - The URL of the node template to which this node group belongs.
- autoscaling
Policy NodeGroup Autoscaling Policy - If you use sole-tenant nodes for your workloads, you can use the node
group autoscaler to automatically manage the sizes of your node groups.
One of
initial_size
or autoscaling_policy
must be configured on resource creation. Structure is documented below. - description String
- An optional textual description of the resource.
- initial
Size Integer - The initial number of nodes in the node group. One of
initial_size
or autoscaling_policy
must be configured on resource creation. - maintenance
Interval String - Specifies the frequency of planned maintenance events. Set to one of the following:
- AS_NEEDED: Hosts are eligible to receive infrastructure and hypervisor updates as they become available.
- RECURRENT: Hosts receive planned infrastructure and hypervisor updates on a periodic basis, but not more frequently than every 28 days. This minimizes the number of planned maintenance operations on individual hosts and reduces the frequency of disruptions, both live migrations and terminations, on individual VMs.
Possible values are:
AS_NEEDED
, RECURRENT
.
- maintenance
Policy String - Specifies how to handle instances when a node in the group undergoes maintenance. Set to one of: DEFAULT, RESTART_IN_PLACE, or MIGRATE_WITHIN_NODE_GROUP. The default value is DEFAULT.
- maintenance
Window NodeGroup Maintenance Window - contains properties for the timeframe of maintenance Structure is documented below.
- name String
- Name of the resource.
- project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- share
Settings NodeGroup Share Settings - Share settings for the node group. Structure is documented below.
- zone String
- Zone where this node group is located
- node
Template string - The URL of the node template to which this node group belongs.
- autoscaling
Policy NodeGroup Autoscaling Policy - If you use sole-tenant nodes for your workloads, you can use the node
group autoscaler to automatically manage the sizes of your node groups.
One of
initial_size
or autoscaling_policy
must be configured on resource creation. Structure is documented below. - description string
- An optional textual description of the resource.
- initial
Size number - The initial number of nodes in the node group. One of
initial_size
or autoscaling_policy
must be configured on resource creation. - maintenance
Interval string - Specifies the frequency of planned maintenance events. Set to one of the following:
- AS_NEEDED: Hosts are eligible to receive infrastructure and hypervisor updates as they become available.
- RECURRENT: Hosts receive planned infrastructure and hypervisor updates on a periodic basis, but not more frequently than every 28 days. This minimizes the number of planned maintenance operations on individual hosts and reduces the frequency of disruptions, both live migrations and terminations, on individual VMs.
Possible values are:
AS_NEEDED
, RECURRENT
.
- maintenance
Policy string - Specifies how to handle instances when a node in the group undergoes maintenance. Set to one of: DEFAULT, RESTART_IN_PLACE, or MIGRATE_WITHIN_NODE_GROUP. The default value is DEFAULT.
- maintenance
Window NodeGroup Maintenance Window - contains properties for the timeframe of maintenance Structure is documented below.
- name string
- Name of the resource.
- project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- share
Settings NodeGroup Share Settings - Share settings for the node group. Structure is documented below.
- zone string
- Zone where this node group is located
- node_
template str - The URL of the node template to which this node group belongs.
- autoscaling_
policy NodeGroup Autoscaling Policy Args - If you use sole-tenant nodes for your workloads, you can use the node
group autoscaler to automatically manage the sizes of your node groups.
One of
initial_size
or autoscaling_policy
must be configured on resource creation. Structure is documented below. - description str
- An optional textual description of the resource.
- initial_
size int - The initial number of nodes in the node group. One of
initial_size
or autoscaling_policy
must be configured on resource creation. - maintenance_
interval str - Specifies the frequency of planned maintenance events. Set to one of the following:
- AS_NEEDED: Hosts are eligible to receive infrastructure and hypervisor updates as they become available.
- RECURRENT: Hosts receive planned infrastructure and hypervisor updates on a periodic basis, but not more frequently than every 28 days. This minimizes the number of planned maintenance operations on individual hosts and reduces the frequency of disruptions, both live migrations and terminations, on individual VMs.
Possible values are:
AS_NEEDED, RECURRENT.
- maintenance_
policy str - Specifies how to handle instances when a node in the group undergoes maintenance. Set to one of: DEFAULT, RESTART_IN_PLACE, or MIGRATE_WITHIN_NODE_GROUP. The default value is DEFAULT.
- maintenance_
window NodeGroup Maintenance Window Args - Contains properties for the timeframe of maintenance. Structure is documented below.
- name str
- Name of the resource.
- project str
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- Node
Group Share Settings Args - Share settings for the node group. Structure is documented below.
- zone str
- Zone where this node group is located
- node
Template String - The URL of the node template to which this node group belongs.
- autoscaling
Policy Property Map - If you use sole-tenant nodes for your workloads, you can use the node
group autoscaler to automatically manage the sizes of your node groups.
One of
initial_size
or autoscaling_policy
must be configured on resource creation. Structure is documented below. - description String
- An optional textual description of the resource.
- initial
Size Number - The initial number of nodes in the node group. One of
initial_size
or autoscaling_policy
must be configured on resource creation. - maintenance
Interval String - Specifies the frequency of planned maintenance events. Set to one of the following:
- AS_NEEDED: Hosts are eligible to receive infrastructure and hypervisor updates as they become available.
- RECURRENT: Hosts receive planned infrastructure and hypervisor updates on a periodic basis, but not more frequently than every 28 days. This minimizes the number of planned maintenance operations on individual hosts and reduces the frequency of disruptions, both live migrations and terminations, on individual VMs.
Possible values are:
AS_NEEDED, RECURRENT.
- maintenance
Policy String - Specifies how to handle instances when a node in the group undergoes maintenance. Set to one of: DEFAULT, RESTART_IN_PLACE, or MIGRATE_WITHIN_NODE_GROUP. The default value is DEFAULT.
- maintenance
Window Property Map - Contains properties for the timeframe of maintenance. Structure is documented below.
- name String
- Name of the resource.
- project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- Property Map
- Share settings for the node group. Structure is documented below.
- zone String
- Zone where this node group is located
Outputs
All input properties are implicitly available as output properties. Additionally, the NodeGroup resource produces the following output properties:
- Creation
Timestamp string - Creation timestamp in RFC3339 text format.
- Id string
- The provider-assigned unique ID for this managed resource.
- Self
Link string - The URI of the created resource.
- Size int
- The total number of nodes in the node group.
- Creation
Timestamp string - Creation timestamp in RFC3339 text format.
- Id string
- The provider-assigned unique ID for this managed resource.
- Self
Link string - The URI of the created resource.
- Size int
- The total number of nodes in the node group.
- creation
Timestamp String - Creation timestamp in RFC3339 text format.
- id String
- The provider-assigned unique ID for this managed resource.
- self
Link String - The URI of the created resource.
- size Integer
- The total number of nodes in the node group.
- creation
Timestamp string - Creation timestamp in RFC3339 text format.
- id string
- The provider-assigned unique ID for this managed resource.
- self
Link string - The URI of the created resource.
- size number
- The total number of nodes in the node group.
- creation_
timestamp str - Creation timestamp in RFC3339 text format.
- id str
- The provider-assigned unique ID for this managed resource.
- self_
link str - The URI of the created resource.
- size int
- The total number of nodes in the node group.
- creation
Timestamp String - Creation timestamp in RFC3339 text format.
- id String
- The provider-assigned unique ID for this managed resource.
- self
Link String - The URI of the created resource.
- size Number
- The total number of nodes in the node group.
Look up Existing NodeGroup Resource
Get an existing NodeGroup resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: NodeGroupState, opts?: CustomResourceOptions): NodeGroup
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
autoscaling_policy: Optional[NodeGroupAutoscalingPolicyArgs] = None,
creation_timestamp: Optional[str] = None,
description: Optional[str] = None,
initial_size: Optional[int] = None,
maintenance_interval: Optional[str] = None,
maintenance_policy: Optional[str] = None,
maintenance_window: Optional[NodeGroupMaintenanceWindowArgs] = None,
name: Optional[str] = None,
node_template: Optional[str] = None,
project: Optional[str] = None,
self_link: Optional[str] = None,
share_settings: Optional[NodeGroupShareSettingsArgs] = None,
size: Optional[int] = None,
zone: Optional[str] = None) -> NodeGroup
func GetNodeGroup(ctx *Context, name string, id IDInput, state *NodeGroupState, opts ...ResourceOption) (*NodeGroup, error)
public static NodeGroup Get(string name, Input<string> id, NodeGroupState? state, CustomResourceOptions? opts = null)
public static NodeGroup get(String name, Output<String> id, NodeGroupState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- Autoscaling
Policy NodeGroup Autoscaling Policy - If you use sole-tenant nodes for your workloads, you can use the node
group autoscaler to automatically manage the sizes of your node groups.
One of
initial_size
or autoscaling_policy
must be configured on resource creation. Structure is documented below. - Creation
Timestamp string - Creation timestamp in RFC3339 text format.
- Description string
- An optional textual description of the resource.
- Initial
Size int - The initial number of nodes in the node group. One of
initial_size
or autoscaling_policy
must be configured on resource creation. - Maintenance
Interval string - Specifies the frequency of planned maintenance events. Set to one of the following:
- AS_NEEDED: Hosts are eligible to receive infrastructure and hypervisor updates as they become available.
- RECURRENT: Hosts receive planned infrastructure and hypervisor updates on a periodic basis, but not more frequently than every 28 days. This minimizes the number of planned maintenance operations on individual hosts and reduces the frequency of disruptions, both live migrations and terminations, on individual VMs.
Possible values are:
AS_NEEDED, RECURRENT.
- Maintenance
Policy string - Specifies how to handle instances when a node in the group undergoes maintenance. Set to one of: DEFAULT, RESTART_IN_PLACE, or MIGRATE_WITHIN_NODE_GROUP. The default value is DEFAULT.
- Maintenance
Window NodeGroup Maintenance Window - Contains properties for the timeframe of maintenance. Structure is documented below.
- Name string
- Name of the resource.
- Node
Template string - The URL of the node template to which this node group belongs.
- Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- Self
Link string - The URI of the created resource.
- Node
Group Share Settings - Share settings for the node group. Structure is documented below.
- Size int
- The total number of nodes in the node group.
- Zone string
- Zone where this node group is located
- Autoscaling
Policy NodeGroup Autoscaling Policy Args - If you use sole-tenant nodes for your workloads, you can use the node
group autoscaler to automatically manage the sizes of your node groups.
One of
initial_size
or autoscaling_policy
must be configured on resource creation. Structure is documented below. - Creation
Timestamp string - Creation timestamp in RFC3339 text format.
- Description string
- An optional textual description of the resource.
- Initial
Size int - The initial number of nodes in the node group. One of
initial_size
or autoscaling_policy
must be configured on resource creation. - Maintenance
Interval string - Specifies the frequency of planned maintenance events. Set to one of the following:
- AS_NEEDED: Hosts are eligible to receive infrastructure and hypervisor updates as they become available.
- RECURRENT: Hosts receive planned infrastructure and hypervisor updates on a periodic basis, but not more frequently than every 28 days. This minimizes the number of planned maintenance operations on individual hosts and reduces the frequency of disruptions, both live migrations and terminations, on individual VMs.
Possible values are:
AS_NEEDED, RECURRENT.
- Maintenance
Policy string - Specifies how to handle instances when a node in the group undergoes maintenance. Set to one of: DEFAULT, RESTART_IN_PLACE, or MIGRATE_WITHIN_NODE_GROUP. The default value is DEFAULT.
- Maintenance
Window NodeGroup Maintenance Window Args - Contains properties for the timeframe of maintenance. Structure is documented below.
- Name string
- Name of the resource.
- Node
Template string - The URL of the node template to which this node group belongs.
- Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- Self
Link string - The URI of the created resource.
- Node
Group Share Settings Args - Share settings for the node group. Structure is documented below.
- Size int
- The total number of nodes in the node group.
- Zone string
- Zone where this node group is located
- autoscaling
Policy NodeGroup Autoscaling Policy - If you use sole-tenant nodes for your workloads, you can use the node
group autoscaler to automatically manage the sizes of your node groups.
One of
initial_size
or autoscaling_policy
must be configured on resource creation. Structure is documented below. - creation
Timestamp String - Creation timestamp in RFC3339 text format.
- description String
- An optional textual description of the resource.
- initial
Size Integer - The initial number of nodes in the node group. One of
initial_size
or autoscaling_policy
must be configured on resource creation. - maintenance
Interval String - Specifies the frequency of planned maintenance events. Set to one of the following:
- AS_NEEDED: Hosts are eligible to receive infrastructure and hypervisor updates as they become available.
- RECURRENT: Hosts receive planned infrastructure and hypervisor updates on a periodic basis, but not more frequently than every 28 days. This minimizes the number of planned maintenance operations on individual hosts and reduces the frequency of disruptions, both live migrations and terminations, on individual VMs.
Possible values are:
AS_NEEDED, RECURRENT.
- maintenance
Policy String - Specifies how to handle instances when a node in the group undergoes maintenance. Set to one of: DEFAULT, RESTART_IN_PLACE, or MIGRATE_WITHIN_NODE_GROUP. The default value is DEFAULT.
- maintenance
Window NodeGroup Maintenance Window - Contains properties for the timeframe of maintenance. Structure is documented below.
- name String
- Name of the resource.
- node
Template String - The URL of the node template to which this node group belongs.
- project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- self
Link String - The URI of the created resource.
- Node
Group Share Settings - Share settings for the node group. Structure is documented below.
- size Integer
- The total number of nodes in the node group.
- zone String
- Zone where this node group is located
- autoscaling
Policy NodeGroup Autoscaling Policy - If you use sole-tenant nodes for your workloads, you can use the node
group autoscaler to automatically manage the sizes of your node groups.
One of
initial_size
or autoscaling_policy
must be configured on resource creation. Structure is documented below. - creation
Timestamp string - Creation timestamp in RFC3339 text format.
- description string
- An optional textual description of the resource.
- initial
Size number - The initial number of nodes in the node group. One of
initial_size
or autoscaling_policy
must be configured on resource creation. - maintenance
Interval string - Specifies the frequency of planned maintenance events. Set to one of the following:
- AS_NEEDED: Hosts are eligible to receive infrastructure and hypervisor updates as they become available.
- RECURRENT: Hosts receive planned infrastructure and hypervisor updates on a periodic basis, but not more frequently than every 28 days. This minimizes the number of planned maintenance operations on individual hosts and reduces the frequency of disruptions, both live migrations and terminations, on individual VMs.
Possible values are:
AS_NEEDED, RECURRENT.
- maintenance
Policy string - Specifies how to handle instances when a node in the group undergoes maintenance. Set to one of: DEFAULT, RESTART_IN_PLACE, or MIGRATE_WITHIN_NODE_GROUP. The default value is DEFAULT.
- maintenance
Window NodeGroup Maintenance Window - Contains properties for the timeframe of maintenance. Structure is documented below.
- name string
- Name of the resource.
- node
Template string - The URL of the node template to which this node group belongs.
- project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- self
Link string - The URI of the created resource.
- Node
Group Share Settings - Share settings for the node group. Structure is documented below.
- size number
- The total number of nodes in the node group.
- zone string
- Zone where this node group is located
- autoscaling_
policy NodeGroup Autoscaling Policy Args - If you use sole-tenant nodes for your workloads, you can use the node
group autoscaler to automatically manage the sizes of your node groups.
One of
initial_size
or autoscaling_policy
must be configured on resource creation. Structure is documented below. - creation_
timestamp str - Creation timestamp in RFC3339 text format.
- description str
- An optional textual description of the resource.
- initial_
size int - The initial number of nodes in the node group. One of
initial_size
or autoscaling_policy
must be configured on resource creation. - maintenance_
interval str - Specifies the frequency of planned maintenance events. Set to one of the following:
- AS_NEEDED: Hosts are eligible to receive infrastructure and hypervisor updates as they become available.
- RECURRENT: Hosts receive planned infrastructure and hypervisor updates on a periodic basis, but not more frequently than every 28 days. This minimizes the number of planned maintenance operations on individual hosts and reduces the frequency of disruptions, both live migrations and terminations, on individual VMs.
Possible values are:
AS_NEEDED, RECURRENT.
- maintenance_
policy str - Specifies how to handle instances when a node in the group undergoes maintenance. Set to one of: DEFAULT, RESTART_IN_PLACE, or MIGRATE_WITHIN_NODE_GROUP. The default value is DEFAULT.
- maintenance_
window NodeGroup Maintenance Window Args - Contains properties for the timeframe of maintenance. Structure is documented below.
- name str
- Name of the resource.
- node_
template str - The URL of the node template to which this node group belongs.
- project str
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- self_
link str - The URI of the created resource.
- Node
Group Share Settings Args - Share settings for the node group. Structure is documented below.
- size int
- The total number of nodes in the node group.
- zone str
- Zone where this node group is located
- autoscaling
Policy Property Map - If you use sole-tenant nodes for your workloads, you can use the node
group autoscaler to automatically manage the sizes of your node groups.
One of
initial_size
or autoscaling_policy
must be configured on resource creation. Structure is documented below. - creation
Timestamp String - Creation timestamp in RFC3339 text format.
- description String
- An optional textual description of the resource.
- initial
Size Number - The initial number of nodes in the node group. One of
initial_size
or autoscaling_policy
must be configured on resource creation. - maintenance
Interval String - Specifies the frequency of planned maintenance events. Set to one of the following:
- AS_NEEDED: Hosts are eligible to receive infrastructure and hypervisor updates as they become available.
- RECURRENT: Hosts receive planned infrastructure and hypervisor updates on a periodic basis, but not more frequently than every 28 days. This minimizes the number of planned maintenance operations on individual hosts and reduces the frequency of disruptions, both live migrations and terminations, on individual VMs.
Possible values are:
AS_NEEDED, RECURRENT.
- maintenance
Policy String - Specifies how to handle instances when a node in the group undergoes maintenance. Set to one of: DEFAULT, RESTART_IN_PLACE, or MIGRATE_WITHIN_NODE_GROUP. The default value is DEFAULT.
- maintenance
Window Property Map - Contains properties for the timeframe of maintenance. Structure is documented below.
- name String
- Name of the resource.
- node
Template String - The URL of the node template to which this node group belongs.
- project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- self
Link String - The URI of the created resource.
- Property Map
- Share settings for the node group. Structure is documented below.
- size Number
- The total number of nodes in the node group.
- zone String
- Zone where this node group is located
Supporting Types
NodeGroupAutoscalingPolicy, NodeGroupAutoscalingPolicyArgs
- Max
Nodes int - Maximum size of the node group. Set to a value less than or equal to 100 and greater than or equal to min-nodes.
- Min
Nodes int - Minimum size of the node group. Must be less than or equal to max-nodes. The default value is 0.
- Mode string
- The autoscaling mode. Set to one of the following:
- OFF: Disables the autoscaler.
- ON: Enables scaling in and scaling out.
- ONLY_SCALE_OUT: Enables only scaling out.
You must use this mode if your node groups are configured to
restart their hosted VMs on minimal servers.
Possible values are:
OFF, ON, ONLY_SCALE_OUT.
- Max
Nodes int - Maximum size of the node group. Set to a value less than or equal to 100 and greater than or equal to min-nodes.
- Min
Nodes int - Minimum size of the node group. Must be less than or equal to max-nodes. The default value is 0.
- Mode string
- The autoscaling mode. Set to one of the following:
- OFF: Disables the autoscaler.
- ON: Enables scaling in and scaling out.
- ONLY_SCALE_OUT: Enables only scaling out.
You must use this mode if your node groups are configured to
restart their hosted VMs on minimal servers.
Possible values are:
OFF, ON, ONLY_SCALE_OUT.
- max
Nodes Integer - Maximum size of the node group. Set to a value less than or equal to 100 and greater than or equal to min-nodes.
- min
Nodes Integer - Minimum size of the node group. Must be less than or equal to max-nodes. The default value is 0.
- mode String
- The autoscaling mode. Set to one of the following:
- OFF: Disables the autoscaler.
- ON: Enables scaling in and scaling out.
- ONLY_SCALE_OUT: Enables only scaling out.
You must use this mode if your node groups are configured to
restart their hosted VMs on minimal servers.
Possible values are:
OFF, ON, ONLY_SCALE_OUT.
- max
Nodes number - Maximum size of the node group. Set to a value less than or equal to 100 and greater than or equal to min-nodes.
- min
Nodes number - Minimum size of the node group. Must be less than or equal to max-nodes. The default value is 0.
- mode string
- The autoscaling mode. Set to one of the following:
- OFF: Disables the autoscaler.
- ON: Enables scaling in and scaling out.
- ONLY_SCALE_OUT: Enables only scaling out.
You must use this mode if your node groups are configured to
restart their hosted VMs on minimal servers.
Possible values are:
OFF, ON, ONLY_SCALE_OUT.
- max_
nodes int - Maximum size of the node group. Set to a value less than or equal to 100 and greater than or equal to min-nodes.
- min_
nodes int - Minimum size of the node group. Must be less than or equal to max-nodes. The default value is 0.
- mode str
- The autoscaling mode. Set to one of the following:
- OFF: Disables the autoscaler.
- ON: Enables scaling in and scaling out.
- ONLY_SCALE_OUT: Enables only scaling out.
You must use this mode if your node groups are configured to
restart their hosted VMs on minimal servers.
Possible values are:
OFF, ON, ONLY_SCALE_OUT.
- max
Nodes Number - Maximum size of the node group. Set to a value less than or equal to 100 and greater than or equal to min-nodes.
- min
Nodes Number - Minimum size of the node group. Must be less than or equal to max-nodes. The default value is 0.
- mode String
- The autoscaling mode. Set to one of the following:
- OFF: Disables the autoscaler.
- ON: Enables scaling in and scaling out.
- ONLY_SCALE_OUT: Enables only scaling out.
You must use this mode if your node groups are configured to
restart their hosted VMs on minimal servers.
Possible values are:
OFF, ON, ONLY_SCALE_OUT.
NodeGroupMaintenanceWindow, NodeGroupMaintenanceWindowArgs
- Start
Time string - Start time of the window. This must be in UTC format that resolves to one of 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. For example, both 13:00-5 and 08:00 are valid.
- Start
Time string - Start time of the window. This must be in UTC format that resolves to one of 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. For example, both 13:00-5 and 08:00 are valid.
- start
Time String - Start time of the window. This must be in UTC format that resolves to one of 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. For example, both 13:00-5 and 08:00 are valid.
- start
Time string - Start time of the window. This must be in UTC format that resolves to one of 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. For example, both 13:00-5 and 08:00 are valid.
- start_
time str - Start time of the window. This must be in UTC format that resolves to one of 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. For example, both 13:00-5 and 08:00 are valid.
- start
Time String - Start time of the window. This must be in UTC format that resolves to one of 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. For example, both 13:00-5 and 08:00 are valid.
NodeGroupShareSettings, NodeGroupShareSettingsArgs
- string
- Node group sharing type.
Possible values are:
ORGANIZATION, SPECIFIC_PROJECTS, LOCAL. - Project
Maps List<NodeGroup Share Settings Project Map> - A map of project id and project config. This is only valid when shareType's value is SPECIFIC_PROJECTS. Structure is documented below.
- string
- Node group sharing type.
Possible values are:
ORGANIZATION, SPECIFIC_PROJECTS, LOCAL. - Project
Maps []NodeGroup Share Settings Project Map - A map of project id and project config. This is only valid when shareType's value is SPECIFIC_PROJECTS. Structure is documented below.
- String
- Node group sharing type.
Possible values are:
ORGANIZATION, SPECIFIC_PROJECTS, LOCAL. - project
Maps List<NodeGroup Share Settings Project Map> - A map of project id and project config. This is only valid when shareType's value is SPECIFIC_PROJECTS. Structure is documented below.
- string
- Node group sharing type.
Possible values are:
ORGANIZATION, SPECIFIC_PROJECTS, LOCAL. - project
Maps NodeGroup Share Settings Project Map[] - A map of project id and project config. This is only valid when shareType's value is SPECIFIC_PROJECTS. Structure is documented below.
- str
- Node group sharing type.
Possible values are:
ORGANIZATION, SPECIFIC_PROJECTS, LOCAL. - project_
maps Sequence[NodeGroup Share Settings Project Map] - A map of project id and project config. This is only valid when shareType's value is SPECIFIC_PROJECTS. Structure is documented below.
- String
- Node group sharing type.
Possible values are:
ORGANIZATION, SPECIFIC_PROJECTS, LOCAL. - project
Maps List<Property Map> - A map of project id and project config. This is only valid when shareType's value is SPECIFIC_PROJECTS. Structure is documented below.
NodeGroupShareSettingsProjectMap, NodeGroupShareSettingsProjectMapArgs
- id str
- The identifier for this object. Format specified above.
- project_
id str - The project id/number should be the same as the key of this project config in the project map.
Import
NodeGroup can be imported using any of these accepted formats:
projects/{{project}}/zones/{{zone}}/nodeGroups/{{name}}
{{project}}/{{zone}}/{{name}}
{{zone}}/{{name}}
{{name}}
When using the pulumi import
command, NodeGroup can be imported using one of the formats above. For example:
$ pulumi import gcp:compute/nodeGroup:NodeGroup default projects/{{project}}/zones/{{zone}}/nodeGroups/{{name}}
$ pulumi import gcp:compute/nodeGroup:NodeGroup default {{project}}/{{zone}}/{{name}}
$ pulumi import gcp:compute/nodeGroup:NodeGroup default {{zone}}/{{name}}
$ pulumi import gcp:compute/nodeGroup:NodeGroup default {{name}}
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- Google Cloud (GCP) Classic pulumi/pulumi-gcp
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the
google-beta
Terraform Provider.