gcp.dataproc.AutoscalingPolicy
Explore with Pulumi AI
Describes an autoscaling policy for Dataproc cluster autoscaler.
Example Usage
Dataproc Autoscaling Policy
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const asp = new gcp.dataproc.AutoscalingPolicy("asp", {
policyId: "dataproc-policy",
location: "us-central1",
workerConfig: {
maxInstances: 3,
},
basicAlgorithm: {
yarnConfig: {
gracefulDecommissionTimeout: "30s",
scaleUpFactor: 0.5,
scaleDownFactor: 0.5,
},
},
});
const basic = new gcp.dataproc.Cluster("basic", {
name: "dataproc-policy",
region: "us-central1",
clusterConfig: {
autoscalingConfig: {
policyUri: asp.name,
},
},
});
import pulumi
import pulumi_gcp as gcp
asp = gcp.dataproc.AutoscalingPolicy("asp",
policy_id="dataproc-policy",
location="us-central1",
worker_config={
"max_instances": 3,
},
basic_algorithm={
"yarn_config": {
"graceful_decommission_timeout": "30s",
"scale_up_factor": 0.5,
"scale_down_factor": 0.5,
},
})
basic = gcp.dataproc.Cluster("basic",
name="dataproc-policy",
region="us-central1",
cluster_config={
"autoscaling_config": {
"policy_uri": asp.name,
},
})
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataproc"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
asp, err := dataproc.NewAutoscalingPolicy(ctx, "asp", &dataproc.AutoscalingPolicyArgs{
PolicyId: pulumi.String("dataproc-policy"),
Location: pulumi.String("us-central1"),
WorkerConfig: &dataproc.AutoscalingPolicyWorkerConfigArgs{
MaxInstances: pulumi.Int(3),
},
BasicAlgorithm: &dataproc.AutoscalingPolicyBasicAlgorithmArgs{
YarnConfig: &dataproc.AutoscalingPolicyBasicAlgorithmYarnConfigArgs{
GracefulDecommissionTimeout: pulumi.String("30s"),
ScaleUpFactor: pulumi.Float64(0.5),
ScaleDownFactor: pulumi.Float64(0.5),
},
},
})
if err != nil {
return err
}
_, err = dataproc.NewCluster(ctx, "basic", &dataproc.ClusterArgs{
Name: pulumi.String("dataproc-policy"),
Region: pulumi.String("us-central1"),
ClusterConfig: &dataproc.ClusterClusterConfigArgs{
AutoscalingConfig: &dataproc.ClusterClusterConfigAutoscalingConfigArgs{
PolicyUri: asp.Name,
},
},
})
if err != nil {
return err
}
return nil
})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
var asp = new Gcp.Dataproc.AutoscalingPolicy("asp", new()
{
PolicyId = "dataproc-policy",
Location = "us-central1",
WorkerConfig = new Gcp.Dataproc.Inputs.AutoscalingPolicyWorkerConfigArgs
{
MaxInstances = 3,
},
BasicAlgorithm = new Gcp.Dataproc.Inputs.AutoscalingPolicyBasicAlgorithmArgs
{
YarnConfig = new Gcp.Dataproc.Inputs.AutoscalingPolicyBasicAlgorithmYarnConfigArgs
{
GracefulDecommissionTimeout = "30s",
ScaleUpFactor = 0.5,
ScaleDownFactor = 0.5,
},
},
});
var basic = new Gcp.Dataproc.Cluster("basic", new()
{
Name = "dataproc-policy",
Region = "us-central1",
ClusterConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigArgs
{
AutoscalingConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigAutoscalingConfigArgs
{
PolicyUri = asp.Name,
},
},
});
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.dataproc.AutoscalingPolicy;
import com.pulumi.gcp.dataproc.AutoscalingPolicyArgs;
import com.pulumi.gcp.dataproc.inputs.AutoscalingPolicyWorkerConfigArgs;
import com.pulumi.gcp.dataproc.inputs.AutoscalingPolicyBasicAlgorithmArgs;
import com.pulumi.gcp.dataproc.inputs.AutoscalingPolicyBasicAlgorithmYarnConfigArgs;
import com.pulumi.gcp.dataproc.Cluster;
import com.pulumi.gcp.dataproc.ClusterArgs;
import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigArgs;
import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigAutoscalingConfigArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var asp = new AutoscalingPolicy("asp", AutoscalingPolicyArgs.builder()
.policyId("dataproc-policy")
.location("us-central1")
.workerConfig(AutoscalingPolicyWorkerConfigArgs.builder()
.maxInstances(3)
.build())
.basicAlgorithm(AutoscalingPolicyBasicAlgorithmArgs.builder()
.yarnConfig(AutoscalingPolicyBasicAlgorithmYarnConfigArgs.builder()
.gracefulDecommissionTimeout("30s")
.scaleUpFactor(0.5)
.scaleDownFactor(0.5)
.build())
.build())
.build());
var basic = new Cluster("basic", ClusterArgs.builder()
.name("dataproc-policy")
.region("us-central1")
.clusterConfig(ClusterClusterConfigArgs.builder()
.autoscalingConfig(ClusterClusterConfigAutoscalingConfigArgs.builder()
.policyUri(asp.name())
.build())
.build())
.build());
}
}
resources:
basic:
type: gcp:dataproc:Cluster
properties:
name: dataproc-policy
region: us-central1
clusterConfig:
autoscalingConfig:
policyUri: ${asp.name}
asp:
type: gcp:dataproc:AutoscalingPolicy
properties:
policyId: dataproc-policy
location: us-central1
workerConfig:
maxInstances: 3
basicAlgorithm:
yarnConfig:
gracefulDecommissionTimeout: 30s
scaleUpFactor: 0.5
scaleDownFactor: 0.5
Create AutoscalingPolicy Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new AutoscalingPolicy(name: string, args: AutoscalingPolicyArgs, opts?: CustomResourceOptions);
@overload
def AutoscalingPolicy(resource_name: str,
args: AutoscalingPolicyArgs,
opts: Optional[ResourceOptions] = None)
@overload
def AutoscalingPolicy(resource_name: str,
opts: Optional[ResourceOptions] = None,
policy_id: Optional[str] = None,
basic_algorithm: Optional[AutoscalingPolicyBasicAlgorithmArgs] = None,
location: Optional[str] = None,
project: Optional[str] = None,
secondary_worker_config: Optional[AutoscalingPolicySecondaryWorkerConfigArgs] = None,
worker_config: Optional[AutoscalingPolicyWorkerConfigArgs] = None)
func NewAutoscalingPolicy(ctx *Context, name string, args AutoscalingPolicyArgs, opts ...ResourceOption) (*AutoscalingPolicy, error)
public AutoscalingPolicy(string name, AutoscalingPolicyArgs args, CustomResourceOptions? opts = null)
public AutoscalingPolicy(String name, AutoscalingPolicyArgs args)
public AutoscalingPolicy(String name, AutoscalingPolicyArgs args, CustomResourceOptions options)
type: gcp:dataproc:AutoscalingPolicy
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args AutoscalingPolicyArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args AutoscalingPolicyArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args AutoscalingPolicyArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args AutoscalingPolicyArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args AutoscalingPolicyArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var autoscalingPolicyResource = new Gcp.Dataproc.AutoscalingPolicy("autoscalingPolicyResource", new()
{
PolicyId = "string",
BasicAlgorithm = new Gcp.Dataproc.Inputs.AutoscalingPolicyBasicAlgorithmArgs
{
YarnConfig = new Gcp.Dataproc.Inputs.AutoscalingPolicyBasicAlgorithmYarnConfigArgs
{
GracefulDecommissionTimeout = "string",
ScaleDownFactor = 0,
ScaleUpFactor = 0,
ScaleDownMinWorkerFraction = 0,
ScaleUpMinWorkerFraction = 0,
},
CooldownPeriod = "string",
},
Location = "string",
Project = "string",
SecondaryWorkerConfig = new Gcp.Dataproc.Inputs.AutoscalingPolicySecondaryWorkerConfigArgs
{
MaxInstances = 0,
MinInstances = 0,
Weight = 0,
},
WorkerConfig = new Gcp.Dataproc.Inputs.AutoscalingPolicyWorkerConfigArgs
{
MaxInstances = 0,
MinInstances = 0,
Weight = 0,
},
});
example, err := dataproc.NewAutoscalingPolicy(ctx, "autoscalingPolicyResource", &dataproc.AutoscalingPolicyArgs{
PolicyId: pulumi.String("string"),
BasicAlgorithm: &dataproc.AutoscalingPolicyBasicAlgorithmArgs{
YarnConfig: &dataproc.AutoscalingPolicyBasicAlgorithmYarnConfigArgs{
GracefulDecommissionTimeout: pulumi.String("string"),
ScaleDownFactor: pulumi.Float64(0),
ScaleUpFactor: pulumi.Float64(0),
ScaleDownMinWorkerFraction: pulumi.Float64(0),
ScaleUpMinWorkerFraction: pulumi.Float64(0),
},
CooldownPeriod: pulumi.String("string"),
},
Location: pulumi.String("string"),
Project: pulumi.String("string"),
SecondaryWorkerConfig: &dataproc.AutoscalingPolicySecondaryWorkerConfigArgs{
MaxInstances: pulumi.Int(0),
MinInstances: pulumi.Int(0),
Weight: pulumi.Int(0),
},
WorkerConfig: &dataproc.AutoscalingPolicyWorkerConfigArgs{
MaxInstances: pulumi.Int(0),
MinInstances: pulumi.Int(0),
Weight: pulumi.Int(0),
},
})
var autoscalingPolicyResource = new AutoscalingPolicy("autoscalingPolicyResource", AutoscalingPolicyArgs.builder()
.policyId("string")
.basicAlgorithm(AutoscalingPolicyBasicAlgorithmArgs.builder()
.yarnConfig(AutoscalingPolicyBasicAlgorithmYarnConfigArgs.builder()
.gracefulDecommissionTimeout("string")
.scaleDownFactor(0)
.scaleUpFactor(0)
.scaleDownMinWorkerFraction(0)
.scaleUpMinWorkerFraction(0)
.build())
.cooldownPeriod("string")
.build())
.location("string")
.project("string")
.secondaryWorkerConfig(AutoscalingPolicySecondaryWorkerConfigArgs.builder()
.maxInstances(0)
.minInstances(0)
.weight(0)
.build())
.workerConfig(AutoscalingPolicyWorkerConfigArgs.builder()
.maxInstances(0)
.minInstances(0)
.weight(0)
.build())
.build());
autoscaling_policy_resource = gcp.dataproc.AutoscalingPolicy("autoscalingPolicyResource",
policy_id="string",
basic_algorithm={
"yarnConfig": {
"gracefulDecommissionTimeout": "string",
"scaleDownFactor": 0,
"scaleUpFactor": 0,
"scaleDownMinWorkerFraction": 0,
"scaleUpMinWorkerFraction": 0,
},
"cooldownPeriod": "string",
},
location="string",
project="string",
secondary_worker_config={
"maxInstances": 0,
"minInstances": 0,
"weight": 0,
},
worker_config={
"maxInstances": 0,
"minInstances": 0,
"weight": 0,
})
const autoscalingPolicyResource = new gcp.dataproc.AutoscalingPolicy("autoscalingPolicyResource", {
policyId: "string",
basicAlgorithm: {
yarnConfig: {
gracefulDecommissionTimeout: "string",
scaleDownFactor: 0,
scaleUpFactor: 0,
scaleDownMinWorkerFraction: 0,
scaleUpMinWorkerFraction: 0,
},
cooldownPeriod: "string",
},
location: "string",
project: "string",
secondaryWorkerConfig: {
maxInstances: 0,
minInstances: 0,
weight: 0,
},
workerConfig: {
maxInstances: 0,
minInstances: 0,
weight: 0,
},
});
type: gcp:dataproc:AutoscalingPolicy
properties:
basicAlgorithm:
cooldownPeriod: string
yarnConfig:
gracefulDecommissionTimeout: string
scaleDownFactor: 0
scaleDownMinWorkerFraction: 0
scaleUpFactor: 0
scaleUpMinWorkerFraction: 0
location: string
policyId: string
project: string
secondaryWorkerConfig:
maxInstances: 0
minInstances: 0
weight: 0
workerConfig:
maxInstances: 0
minInstances: 0
weight: 0
AutoscalingPolicy Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
The AutoscalingPolicy resource accepts the following input properties:
- PolicyId string - The policy id. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters.
- BasicAlgorithm AutoscalingPolicyBasicAlgorithm - Basic algorithm for autoscaling. Structure is documented below.
- Location string - The location where the autoscaling policy should reside. The default value is global.
- Project string - The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- SecondaryWorkerConfig AutoscalingPolicySecondaryWorkerConfig - Describes how the autoscaler will operate for secondary workers. Structure is documented below.
- WorkerConfig AutoscalingPolicyWorkerConfig - Describes how the autoscaler will operate for primary workers. Structure is documented below.
- PolicyId string - The policy id. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters.
- BasicAlgorithm AutoscalingPolicyBasicAlgorithmArgs - Basic algorithm for autoscaling. Structure is documented below.
- Location string - The location where the autoscaling policy should reside. The default value is global.
- Project string - The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- SecondaryWorkerConfig AutoscalingPolicySecondaryWorkerConfigArgs - Describes how the autoscaler will operate for secondary workers. Structure is documented below.
- WorkerConfig AutoscalingPolicyWorkerConfigArgs - Describes how the autoscaler will operate for primary workers. Structure is documented below.
- policyId String - The policy id. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters.
- basicAlgorithm AutoscalingPolicyBasicAlgorithm - Basic algorithm for autoscaling. Structure is documented below.
- location String - The location where the autoscaling policy should reside. The default value is global.
- project String - The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- secondaryWorkerConfig AutoscalingPolicySecondaryWorkerConfig - Describes how the autoscaler will operate for secondary workers. Structure is documented below.
- workerConfig AutoscalingPolicyWorkerConfig - Describes how the autoscaler will operate for primary workers. Structure is documented below.
- policyId string - The policy id. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters.
- basicAlgorithm AutoscalingPolicyBasicAlgorithm - Basic algorithm for autoscaling. Structure is documented below.
- location string - The location where the autoscaling policy should reside. The default value is global.
- project string - The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- secondaryWorkerConfig AutoscalingPolicySecondaryWorkerConfig - Describes how the autoscaler will operate for secondary workers. Structure is documented below.
- workerConfig AutoscalingPolicyWorkerConfig - Describes how the autoscaler will operate for primary workers. Structure is documented below.
- policy_id str - The policy id. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters.
- basic_algorithm AutoscalingPolicyBasicAlgorithmArgs - Basic algorithm for autoscaling. Structure is documented below.
- location str - The location where the autoscaling policy should reside. The default value is global.
- project str - The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- secondary_worker_config AutoscalingPolicySecondaryWorkerConfigArgs - Describes how the autoscaler will operate for secondary workers. Structure is documented below.
- worker_config AutoscalingPolicyWorkerConfigArgs - Describes how the autoscaler will operate for primary workers. Structure is documented below.
- policyId String - The policy id. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters.
- basicAlgorithm Property Map - Basic algorithm for autoscaling. Structure is documented below.
- location String - The location where the autoscaling policy should reside. The default value is global.
- project String - The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- secondaryWorkerConfig Property Map - Describes how the autoscaler will operate for secondary workers. Structure is documented below.
- workerConfig Property Map - Describes how the autoscaler will operate for primary workers. Structure is documented below.
Outputs
All input properties are implicitly available as output properties. Additionally, the AutoscalingPolicy resource produces the following output properties:
Look up Existing AutoscalingPolicy Resource
Get an existing AutoscalingPolicy resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: AutoscalingPolicyState, opts?: CustomResourceOptions): AutoscalingPolicy
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
basic_algorithm: Optional[AutoscalingPolicyBasicAlgorithmArgs] = None,
location: Optional[str] = None,
name: Optional[str] = None,
policy_id: Optional[str] = None,
project: Optional[str] = None,
secondary_worker_config: Optional[AutoscalingPolicySecondaryWorkerConfigArgs] = None,
worker_config: Optional[AutoscalingPolicyWorkerConfigArgs] = None) -> AutoscalingPolicy
func GetAutoscalingPolicy(ctx *Context, name string, id IDInput, state *AutoscalingPolicyState, opts ...ResourceOption) (*AutoscalingPolicy, error)
public static AutoscalingPolicy Get(string name, Input<string> id, AutoscalingPolicyState? state, CustomResourceOptions? opts = null)
public static AutoscalingPolicy get(String name, Output<String> id, AutoscalingPolicyState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- BasicAlgorithm AutoscalingPolicyBasicAlgorithm - Basic algorithm for autoscaling. Structure is documented below.
- Location string - The location where the autoscaling policy should reside. The default value is global.
- Name string - The "resource name" of the autoscaling policy.
- PolicyId string - The policy id. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters.
- Project string - The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- SecondaryWorkerConfig AutoscalingPolicySecondaryWorkerConfig - Describes how the autoscaler will operate for secondary workers. Structure is documented below.
- WorkerConfig AutoscalingPolicyWorkerConfig - Describes how the autoscaler will operate for primary workers. Structure is documented below.
- BasicAlgorithm AutoscalingPolicyBasicAlgorithmArgs - Basic algorithm for autoscaling. Structure is documented below.
- Location string - The location where the autoscaling policy should reside. The default value is global.
- Name string - The "resource name" of the autoscaling policy.
- PolicyId string - The policy id. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters.
- Project string - The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- SecondaryWorkerConfig AutoscalingPolicySecondaryWorkerConfigArgs - Describes how the autoscaler will operate for secondary workers. Structure is documented below.
- WorkerConfig AutoscalingPolicyWorkerConfigArgs - Describes how the autoscaler will operate for primary workers. Structure is documented below.
- basicAlgorithm AutoscalingPolicyBasicAlgorithm - Basic algorithm for autoscaling. Structure is documented below.
- location String - The location where the autoscaling policy should reside. The default value is global.
- name String - The "resource name" of the autoscaling policy.
- policyId String - The policy id. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters.
- project String - The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- secondaryWorkerConfig AutoscalingPolicySecondaryWorkerConfig - Describes how the autoscaler will operate for secondary workers. Structure is documented below.
- workerConfig AutoscalingPolicyWorkerConfig - Describes how the autoscaler will operate for primary workers. Structure is documented below.
- basicAlgorithm AutoscalingPolicyBasicAlgorithm - Basic algorithm for autoscaling. Structure is documented below.
- location string - The location where the autoscaling policy should reside. The default value is global.
- name string - The "resource name" of the autoscaling policy.
- policyId string - The policy id. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters.
- project string - The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- secondaryWorkerConfig AutoscalingPolicySecondaryWorkerConfig - Describes how the autoscaler will operate for secondary workers. Structure is documented below.
- workerConfig AutoscalingPolicyWorkerConfig - Describes how the autoscaler will operate for primary workers. Structure is documented below.
- basic_algorithm AutoscalingPolicyBasicAlgorithmArgs - Basic algorithm for autoscaling. Structure is documented below.
- location str - The location where the autoscaling policy should reside. The default value is global.
- name str - The "resource name" of the autoscaling policy.
- policy_id str - The policy id. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters.
- project str - The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- secondary_worker_config AutoscalingPolicySecondaryWorkerConfigArgs - Describes how the autoscaler will operate for secondary workers. Structure is documented below.
- worker_config AutoscalingPolicyWorkerConfigArgs - Describes how the autoscaler will operate for primary workers. Structure is documented below.
- basicAlgorithm Property Map - Basic algorithm for autoscaling. Structure is documented below.
- location String - The location where the autoscaling policy should reside. The default value is global.
- name String - The "resource name" of the autoscaling policy.
- policyId String - The policy id. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters.
- project String - The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- secondaryWorkerConfig Property Map - Describes how the autoscaler will operate for secondary workers. Structure is documented below.
- workerConfig Property Map - Describes how the autoscaler will operate for primary workers. Structure is documented below.
Supporting Types
AutoscalingPolicyBasicAlgorithm, AutoscalingPolicyBasicAlgorithmArgs
- YarnConfig AutoscalingPolicyBasicAlgorithmYarnConfig - YARN autoscaling configuration. Structure is documented below.
- CooldownPeriod string - Duration between scaling events. A scaling period starts after the update operation from the previous event has completed. Bounds: [2m, 1d]. Default: 2m.
- YarnConfig AutoscalingPolicyBasicAlgorithmYarnConfig - YARN autoscaling configuration. Structure is documented below.
- CooldownPeriod string - Duration between scaling events. A scaling period starts after the update operation from the previous event has completed. Bounds: [2m, 1d]. Default: 2m.
- yarnConfig AutoscalingPolicyBasicAlgorithmYarnConfig - YARN autoscaling configuration. Structure is documented below.
- cooldownPeriod String - Duration between scaling events. A scaling period starts after the update operation from the previous event has completed. Bounds: [2m, 1d]. Default: 2m.
- yarnConfig AutoscalingPolicyBasicAlgorithmYarnConfig - YARN autoscaling configuration. Structure is documented below.
- cooldownPeriod string - Duration between scaling events. A scaling period starts after the update operation from the previous event has completed. Bounds: [2m, 1d]. Default: 2m.
- yarn_config AutoscalingPolicyBasicAlgorithmYarnConfig - YARN autoscaling configuration. Structure is documented below.
- cooldown_period str - Duration between scaling events. A scaling period starts after the update operation from the previous event has completed. Bounds: [2m, 1d]. Default: 2m.
- yarnConfig Property Map - YARN autoscaling configuration. Structure is documented below.
- cooldownPeriod String - Duration between scaling events. A scaling period starts after the update operation from the previous event has completed. Bounds: [2m, 1d]. Default: 2m.
AutoscalingPolicyBasicAlgorithmYarnConfig, AutoscalingPolicyBasicAlgorithmYarnConfigArgs
- GracefulDecommissionTimeout string - Timeout for YARN graceful decommissioning of Node Managers. Specifies the duration to wait for jobs to complete before forcefully removing workers (and potentially interrupting jobs). Only applicable to downscaling operations. Bounds: [0s, 1d].
- ScaleDownFactor double - Fraction of average pending memory in the last cooldown period for which to remove workers. A scale-down factor of 1 will result in scaling down so that there is no available memory remaining after the update (more aggressive scaling). A scale-down factor of 0 disables removing workers, which can be beneficial for autoscaling a single job. Bounds: [0.0, 1.0].
- ScaleUpFactor double - Fraction of average pending memory in the last cooldown period for which to add workers. A scale-up factor of 1.0 will result in scaling up so that there is no pending memory remaining after the update (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling). Bounds: [0.0, 1.0].
- ScaleDownMinWorkerFraction double - Minimum scale-down threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2 worker scale-down for the cluster to scale. A threshold of 0 means the autoscaler will scale down on any recommended change. Bounds: [0.0, 1.0]. Default: 0.0.
- ScaleUpMinWorkerFraction double - Minimum scale-up threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will scale up on any recommended change. Bounds: [0.0, 1.0]. Default: 0.0.
- GracefulDecommissionTimeout string - Timeout for YARN graceful decommissioning of Node Managers. Specifies the duration to wait for jobs to complete before forcefully removing workers (and potentially interrupting jobs). Only applicable to downscaling operations. Bounds: [0s, 1d].
- ScaleDownFactor float64 - Fraction of average pending memory in the last cooldown period for which to remove workers. A scale-down factor of 1 will result in scaling down so that there is no available memory remaining after the update (more aggressive scaling). A scale-down factor of 0 disables removing workers, which can be beneficial for autoscaling a single job. Bounds: [0.0, 1.0].
- ScaleUpFactor float64 - Fraction of average pending memory in the last cooldown period for which to add workers. A scale-up factor of 1.0 will result in scaling up so that there is no pending memory remaining after the update (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling). Bounds: [0.0, 1.0].
- ScaleDownMinWorkerFraction float64 - Minimum scale-down threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2 worker scale-down for the cluster to scale. A threshold of 0 means the autoscaler will scale down on any recommended change. Bounds: [0.0, 1.0]. Default: 0.0.
- ScaleUpMinWorkerFraction float64 - Minimum scale-up threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will scale up on any recommended change. Bounds: [0.0, 1.0]. Default: 0.0.
- gracefulDecommissionTimeout String - Timeout for YARN graceful decommissioning of Node Managers. Specifies the duration to wait for jobs to complete before forcefully removing workers (and potentially interrupting jobs). Only applicable to downscaling operations. Bounds: [0s, 1d].
- scaleDownFactor Double - Fraction of average pending memory in the last cooldown period for which to remove workers. A scale-down factor of 1 will result in scaling down so that there is no available memory remaining after the update (more aggressive scaling). A scale-down factor of 0 disables removing workers, which can be beneficial for autoscaling a single job. Bounds: [0.0, 1.0].
- scaleUpFactor Double - Fraction of average pending memory in the last cooldown period for which to add workers. A scale-up factor of 1.0 will result in scaling up so that there is no pending memory remaining after the update (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling). Bounds: [0.0, 1.0].
- scaleDownMinWorkerFraction Double - Minimum scale-down threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2 worker scale-down for the cluster to scale. A threshold of 0 means the autoscaler will scale down on any recommended change. Bounds: [0.0, 1.0]. Default: 0.0.
- scaleUpMinWorkerFraction Double - Minimum scale-up threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will scale up on any recommended change. Bounds: [0.0, 1.0]. Default: 0.0.
- graceful
Decommission stringTimeout - Timeout for YARN graceful decommissioning of Node Managers. Specifies the duration to wait for jobs to complete before forcefully removing workers (and potentially interrupting jobs). Only applicable to downscaling operations. Bounds: [0s, 1d].
- scale
Down numberFactor - Fraction of average pending memory in the last cooldown period for which to remove workers. A scale-down factor of 1 will result in scaling down so that there is no available memory remaining after the update (more aggressive scaling). A scale-down factor of 0 disables removing workers, which can be beneficial for autoscaling a single job. Bounds: [0.0, 1.0].
- scale
Up numberFactor - Fraction of average pending memory in the last cooldown period for which to add workers. A scale-up factor of 1.0 will result in scaling up so that there is no pending memory remaining after the update (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling). Bounds: [0.0, 1.0].
- scale
Down numberMin Worker Fraction - Minimum scale-down threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2 worker scale-down for the cluster to scale. A threshold of 0 means the autoscaler will scale down on any recommended change. Bounds: [0.0, 1.0]. Default: 0.0.
- scale
Up numberMin Worker Fraction - Minimum scale-up threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will scale up on any recommended change. Bounds: [0.0, 1.0]. Default: 0.0.
- graceful_
decommission_ strtimeout - Timeout for YARN graceful decommissioning of Node Managers. Specifies the duration to wait for jobs to complete before forcefully removing workers (and potentially interrupting jobs). Only applicable to downscaling operations. Bounds: [0s, 1d].
- scale_
down_ floatfactor - Fraction of average pending memory in the last cooldown period for which to remove workers. A scale-down factor of 1 will result in scaling down so that there is no available memory remaining after the update (more aggressive scaling). A scale-down factor of 0 disables removing workers, which can be beneficial for autoscaling a single job. Bounds: [0.0, 1.0].
- scale_
up_ floatfactor - Fraction of average pending memory in the last cooldown period for which to add workers. A scale-up factor of 1.0 will result in scaling up so that there is no pending memory remaining after the update (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling). Bounds: [0.0, 1.0].
- scale_
down_ floatmin_ worker_ fraction - Minimum scale-down threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2 worker scale-down for the cluster to scale. A threshold of 0 means the autoscaler will scale down on any recommended change. Bounds: [0.0, 1.0]. Default: 0.0.
- scale_
up_ floatmin_ worker_ fraction - Minimum scale-up threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will scale up on any recommended change. Bounds: [0.0, 1.0]. Default: 0.0.
- graceful
Decommission StringTimeout - Timeout for YARN graceful decommissioning of Node Managers. Specifies the duration to wait for jobs to complete before forcefully removing workers (and potentially interrupting jobs). Only applicable to downscaling operations. Bounds: [0s, 1d].
- scale
Down NumberFactor - Fraction of average pending memory in the last cooldown period for which to remove workers. A scale-down factor of 1 will result in scaling down so that there is no available memory remaining after the update (more aggressive scaling). A scale-down factor of 0 disables removing workers, which can be beneficial for autoscaling a single job. Bounds: [0.0, 1.0].
- scale
Up NumberFactor - Fraction of average pending memory in the last cooldown period for which to add workers. A scale-up factor of 1.0 will result in scaling up so that there is no pending memory remaining after the update (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling). Bounds: [0.0, 1.0].
- scale
Down NumberMin Worker Fraction - Minimum scale-down threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2 worker scale-down for the cluster to scale. A threshold of 0 means the autoscaler will scale down on any recommended change. Bounds: [0.0, 1.0]. Default: 0.0.
- scale
Up NumberMin Worker Fraction - Minimum scale-up threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will scale up on any recommended change. Bounds: [0.0, 1.0]. Default: 0.0.
AutoscalingPolicySecondaryWorkerConfig, AutoscalingPolicySecondaryWorkerConfigArgs
- MaxInstances int - Maximum number of instances for this group. Note that by default, clusters will not use secondary workers. Required for secondary workers if the minimum secondary instances is set. Bounds: [minInstances, ). Defaults to 0.
- MinInstances int - Minimum number of instances for this group. Bounds: [0, maxInstances]. Defaults to 0.
- Weight int
- Weight for the instance group, which is used to determine the fraction of total workers in the cluster from this instance group. For example, if primary workers have weight 2, and secondary workers have weight 1, the cluster will have approximately 2 primary workers for each secondary worker. The cluster may not reach the specified balance if constrained by min/max bounds or other autoscaling settings. For example, if maxInstances for secondary workers is 0, then only primary workers will be added. The cluster can also be out of balance when created. If weight is not set on any instance group, the cluster will default to equal weight for all groups: the cluster will attempt to maintain an equal number of workers in each group within the configured size bounds for each group. If weight is set for one group only, the cluster will default to zero weight on the unset group. For example if weight is set only on primary workers, the cluster will use primary workers only and no secondary workers.
- MaxInstances int - Maximum number of instances for this group. Note that by default, clusters will not use secondary workers. Required for secondary workers if the minimum secondary instances is set. Bounds: [minInstances, ). Defaults to 0.
- MinInstances int - Minimum number of instances for this group. Bounds: [0, maxInstances]. Defaults to 0.
- Weight int
- Weight for the instance group, which is used to determine the fraction of total workers in the cluster from this instance group. For example, if primary workers have weight 2, and secondary workers have weight 1, the cluster will have approximately 2 primary workers for each secondary worker. The cluster may not reach the specified balance if constrained by min/max bounds or other autoscaling settings. For example, if maxInstances for secondary workers is 0, then only primary workers will be added. The cluster can also be out of balance when created. If weight is not set on any instance group, the cluster will default to equal weight for all groups: the cluster will attempt to maintain an equal number of workers in each group within the configured size bounds for each group. If weight is set for one group only, the cluster will default to zero weight on the unset group. For example if weight is set only on primary workers, the cluster will use primary workers only and no secondary workers.
- maxInstances Integer - Maximum number of instances for this group. Note that by default, clusters will not use secondary workers. Required for secondary workers if the minimum secondary instances is set. Bounds: [minInstances, ). Defaults to 0.
- minInstances Integer - Minimum number of instances for this group. Bounds: [0, maxInstances]. Defaults to 0.
- weight Integer
- Weight for the instance group, which is used to determine the fraction of total workers in the cluster from this instance group. For example, if primary workers have weight 2, and secondary workers have weight 1, the cluster will have approximately 2 primary workers for each secondary worker. The cluster may not reach the specified balance if constrained by min/max bounds or other autoscaling settings. For example, if maxInstances for secondary workers is 0, then only primary workers will be added. The cluster can also be out of balance when created. If weight is not set on any instance group, the cluster will default to equal weight for all groups: the cluster will attempt to maintain an equal number of workers in each group within the configured size bounds for each group. If weight is set for one group only, the cluster will default to zero weight on the unset group. For example if weight is set only on primary workers, the cluster will use primary workers only and no secondary workers.
- maxInstances number - Maximum number of instances for this group. Note that by default, clusters will not use secondary workers. Required for secondary workers if the minimum secondary instances is set. Bounds: [minInstances, ). Defaults to 0.
- minInstances number - Minimum number of instances for this group. Bounds: [0, maxInstances]. Defaults to 0.
- weight number
- Weight for the instance group, which is used to determine the fraction of total workers in the cluster from this instance group. For example, if primary workers have weight 2, and secondary workers have weight 1, the cluster will have approximately 2 primary workers for each secondary worker. The cluster may not reach the specified balance if constrained by min/max bounds or other autoscaling settings. For example, if maxInstances for secondary workers is 0, then only primary workers will be added. The cluster can also be out of balance when created. If weight is not set on any instance group, the cluster will default to equal weight for all groups: the cluster will attempt to maintain an equal number of workers in each group within the configured size bounds for each group. If weight is set for one group only, the cluster will default to zero weight on the unset group. For example if weight is set only on primary workers, the cluster will use primary workers only and no secondary workers.
- max_instances int - Maximum number of instances for this group. Note that by default, clusters will not use secondary workers. Required for secondary workers if the minimum secondary instances is set. Bounds: [minInstances, ). Defaults to 0.
- min_instances int - Minimum number of instances for this group. Bounds: [0, maxInstances]. Defaults to 0.
- weight int
- Weight for the instance group, which is used to determine the fraction of total workers in the cluster from this instance group. For example, if primary workers have weight 2, and secondary workers have weight 1, the cluster will have approximately 2 primary workers for each secondary worker. The cluster may not reach the specified balance if constrained by min/max bounds or other autoscaling settings. For example, if maxInstances for secondary workers is 0, then only primary workers will be added. The cluster can also be out of balance when created. If weight is not set on any instance group, the cluster will default to equal weight for all groups: the cluster will attempt to maintain an equal number of workers in each group within the configured size bounds for each group. If weight is set for one group only, the cluster will default to zero weight on the unset group. For example if weight is set only on primary workers, the cluster will use primary workers only and no secondary workers.
- maxInstances Number - Maximum number of instances for this group. Note that by default, clusters will not use secondary workers. Required for secondary workers if the minimum secondary instances is set. Bounds: [minInstances, ). Defaults to 0.
- minInstances Number - Minimum number of instances for this group. Bounds: [0, maxInstances]. Defaults to 0.
- weight Number
- Weight for the instance group, which is used to determine the fraction of total workers in the cluster from this instance group. For example, if primary workers have weight 2, and secondary workers have weight 1, the cluster will have approximately 2 primary workers for each secondary worker. The cluster may not reach the specified balance if constrained by min/max bounds or other autoscaling settings. For example, if maxInstances for secondary workers is 0, then only primary workers will be added. The cluster can also be out of balance when created. If weight is not set on any instance group, the cluster will default to equal weight for all groups: the cluster will attempt to maintain an equal number of workers in each group within the configured size bounds for each group. If weight is set for one group only, the cluster will default to zero weight on the unset group. For example if weight is set only on primary workers, the cluster will use primary workers only and no secondary workers.
AutoscalingPolicyWorkerConfig, AutoscalingPolicyWorkerConfigArgs
- MaxInstances int - Maximum number of instances for this group.
- MinInstances int - Minimum number of instances for this group. Bounds: [2, maxInstances]. Defaults to 2.
- Weight int
- Weight for the instance group, which is used to determine the fraction of total workers in the cluster from this instance group. For example, if primary workers have weight 2, and secondary workers have weight 1, the cluster will have approximately 2 primary workers for each secondary worker. The cluster may not reach the specified balance if constrained by min/max bounds or other autoscaling settings. For example, if maxInstances for secondary workers is 0, then only primary workers will be added. The cluster can also be out of balance when created. If weight is not set on any instance group, the cluster will default to equal weight for all groups: the cluster will attempt to maintain an equal number of workers in each group within the configured size bounds for each group. If weight is set for one group only, the cluster will default to zero weight on the unset group. For example if weight is set only on primary workers, the cluster will use primary workers only and no secondary workers.
- MaxInstances int - Maximum number of instances for this group.
- MinInstances int - Minimum number of instances for this group. Bounds: [2, maxInstances]. Defaults to 2.
- Weight int
- Weight for the instance group, which is used to determine the fraction of total workers in the cluster from this instance group. For example, if primary workers have weight 2, and secondary workers have weight 1, the cluster will have approximately 2 primary workers for each secondary worker. The cluster may not reach the specified balance if constrained by min/max bounds or other autoscaling settings. For example, if maxInstances for secondary workers is 0, then only primary workers will be added. The cluster can also be out of balance when created. If weight is not set on any instance group, the cluster will default to equal weight for all groups: the cluster will attempt to maintain an equal number of workers in each group within the configured size bounds for each group. If weight is set for one group only, the cluster will default to zero weight on the unset group. For example if weight is set only on primary workers, the cluster will use primary workers only and no secondary workers.
- maxInstances Integer - Maximum number of instances for this group.
- minInstances Integer - Minimum number of instances for this group. Bounds: [2, maxInstances]. Defaults to 2.
- weight Integer
- Weight for the instance group, which is used to determine the fraction of total workers in the cluster from this instance group. For example, if primary workers have weight 2, and secondary workers have weight 1, the cluster will have approximately 2 primary workers for each secondary worker. The cluster may not reach the specified balance if constrained by min/max bounds or other autoscaling settings. For example, if maxInstances for secondary workers is 0, then only primary workers will be added. The cluster can also be out of balance when created. If weight is not set on any instance group, the cluster will default to equal weight for all groups: the cluster will attempt to maintain an equal number of workers in each group within the configured size bounds for each group. If weight is set for one group only, the cluster will default to zero weight on the unset group. For example if weight is set only on primary workers, the cluster will use primary workers only and no secondary workers.
- maxInstances number - Maximum number of instances for this group.
- minInstances number - Minimum number of instances for this group. Bounds: [2, maxInstances]. Defaults to 2.
- weight number
- Weight for the instance group, which is used to determine the fraction of total workers in the cluster from this instance group. For example, if primary workers have weight 2, and secondary workers have weight 1, the cluster will have approximately 2 primary workers for each secondary worker. The cluster may not reach the specified balance if constrained by min/max bounds or other autoscaling settings. For example, if maxInstances for secondary workers is 0, then only primary workers will be added. The cluster can also be out of balance when created. If weight is not set on any instance group, the cluster will default to equal weight for all groups: the cluster will attempt to maintain an equal number of workers in each group within the configured size bounds for each group. If weight is set for one group only, the cluster will default to zero weight on the unset group. For example if weight is set only on primary workers, the cluster will use primary workers only and no secondary workers.
- max_instances int - Maximum number of instances for this group.
- min_instances int - Minimum number of instances for this group. Bounds: [2, maxInstances]. Defaults to 2.
- weight int
- Weight for the instance group, which is used to determine the fraction of total workers in the cluster from this instance group. For example, if primary workers have weight 2, and secondary workers have weight 1, the cluster will have approximately 2 primary workers for each secondary worker. The cluster may not reach the specified balance if constrained by min/max bounds or other autoscaling settings. For example, if maxInstances for secondary workers is 0, then only primary workers will be added. The cluster can also be out of balance when created. If weight is not set on any instance group, the cluster will default to equal weight for all groups: the cluster will attempt to maintain an equal number of workers in each group within the configured size bounds for each group. If weight is set for one group only, the cluster will default to zero weight on the unset group. For example if weight is set only on primary workers, the cluster will use primary workers only and no secondary workers.
- maxInstances Number - Maximum number of instances for this group.
- minInstances Number - Minimum number of instances for this group. Bounds: [2, maxInstances]. Defaults to 2.
- weight Number
- Weight for the instance group, which is used to determine the fraction of total workers in the cluster from this instance group. For example, if primary workers have weight 2, and secondary workers have weight 1, the cluster will have approximately 2 primary workers for each secondary worker. The cluster may not reach the specified balance if constrained by min/max bounds or other autoscaling settings. For example, if maxInstances for secondary workers is 0, then only primary workers will be added. The cluster can also be out of balance when created. If weight is not set on any instance group, the cluster will default to equal weight for all groups: the cluster will attempt to maintain an equal number of workers in each group within the configured size bounds for each group. If weight is set for one group only, the cluster will default to zero weight on the unset group. For example if weight is set only on primary workers, the cluster will use primary workers only and no secondary workers.
Import
AutoscalingPolicy can be imported using any of these accepted formats:
projects/{{project}}/locations/{{location}}/autoscalingPolicies/{{policy_id}}
{{project}}/{{location}}/{{policy_id}}
{{location}}/{{policy_id}}
When using the pulumi import
command, AutoscalingPolicy can be imported using one of the formats above. For example:
$ pulumi import gcp:dataproc/autoscalingPolicy:AutoscalingPolicy default projects/{{project}}/locations/{{location}}/autoscalingPolicies/{{policy_id}}
$ pulumi import gcp:dataproc/autoscalingPolicy:AutoscalingPolicy default {{project}}/{{location}}/{{policy_id}}
$ pulumi import gcp:dataproc/autoscalingPolicy:AutoscalingPolicy default {{location}}/{{policy_id}}
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- Google Cloud (GCP) Classic pulumi/pulumi-gcp
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the
google-beta
Terraform Provider.