gcp.dataplex.Datascan
Explore with Pulumi AI
Represents a user-visible job which provides the insights for the related data source.
To get more information about Datascan, see:
- API documentation
- How-to Guides
Example Usage
Dataplex Datascan Basic Profile
// Minimal Dataplex data-profile scan over a public BigQuery table, run on demand.
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const basicProfile = new gcp.dataplex.Datascan("basic_profile", {
location: "us-central1",
dataScanId: "dataprofile-basic",
data: {
resource: "//bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare",
},
executionSpec: {
trigger: {
// An empty object selects the on-demand trigger (no schedule).
onDemand: {},
},
},
// An empty spec enables profiling with all defaults.
dataProfileSpec: {},
project: "my-project-name",
});
# Minimal Dataplex data-profile scan over a public BigQuery table, run on demand.
import pulumi
import pulumi_gcp as gcp
basic_profile = gcp.dataplex.Datascan("basic_profile",
location="us-central1",
data_scan_id="dataprofile-basic",
data={
"resource": "//bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare",
},
execution_spec={
"trigger": {
# An empty dict selects the on-demand trigger (no schedule).
"on_demand": {},
},
},
# An empty dict enables profiling with all defaults.
data_profile_spec={},
project="my-project-name")
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataplex"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := dataplex.NewDatascan(ctx, "basic_profile", &dataplex.DatascanArgs{
Location: pulumi.String("us-central1"),
DataScanId: pulumi.String("dataprofile-basic"),
Data: &dataplex.DatascanDataArgs{
Resource: pulumi.String("//bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare"),
},
ExecutionSpec: &dataplex.DatascanExecutionSpecArgs{
Trigger: &dataplex.DatascanExecutionSpecTriggerArgs{
OnDemand: nil,
},
},
DataProfileSpec: nil,
Project: pulumi.String("my-project-name"),
})
if err != nil {
return err
}
return nil
})
}
// Minimal Dataplex data-profile scan over a public BigQuery table, run on demand.
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() =>
{
    var basicProfile = new Gcp.DataPlex.Datascan("basic_profile", new()
    {
        Location = "us-central1",
        DataScanId = "dataprofile-basic",
        Data = new Gcp.DataPlex.Inputs.DatascanDataArgs
        {
            Resource = "//bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare",
        },
        ExecutionSpec = new Gcp.DataPlex.Inputs.DatascanExecutionSpecArgs
        {
            Trigger = new Gcp.DataPlex.Inputs.DatascanExecutionSpecTriggerArgs
            {
                // An empty args object (not null) selects the on-demand trigger;
                // null leaves the trigger type unset, which the API rejects.
                OnDemand = new Gcp.DataPlex.Inputs.DatascanExecutionSpecTriggerOnDemandArgs(),
            },
        },
        // An empty args object (mirroring `dataProfileSpec: {}` in the other
        // languages) enables profiling with all defaults; null leaves it unset.
        DataProfileSpec = new Gcp.DataPlex.Inputs.DatascanDataProfileSpecArgs(),
        Project = "my-project-name",
    });
});
// Minimal Dataplex data-profile scan over a public BigQuery table, run on demand.
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.dataplex.Datascan;
import com.pulumi.gcp.dataplex.DatascanArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanDataArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanExecutionSpecArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanExecutionSpecTriggerArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanExecutionSpecTriggerOnDemandArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanDataProfileSpecArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var basicProfile = new Datascan("basicProfile", DatascanArgs.builder()
            .location("us-central1")
            .dataScanId("dataprofile-basic")
            .data(DatascanDataArgs.builder()
                .resource("//bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare")
                .build())
            .executionSpec(DatascanExecutionSpecArgs.builder()
                .trigger(DatascanExecutionSpecTriggerArgs.builder()
                    // An explicit empty args object selects the on-demand trigger;
                    // the zero-argument .onDemand() overload does not exist and fails to compile.
                    .onDemand(DatascanExecutionSpecTriggerOnDemandArgs.builder().build())
                    .build())
                .build())
            // Likewise, an empty args object enables profiling with all defaults;
            // the zero-argument .dataProfileSpec() overload does not exist.
            .dataProfileSpec(DatascanDataProfileSpecArgs.builder().build())
            .project("my-project-name")
            .build());
    }
}
resources:
  # Minimal Dataplex data-profile scan over a public BigQuery table, run on demand.
  basicProfile:
    type: gcp:dataplex:Datascan
    name: basic_profile
    properties:
      location: us-central1
      dataScanId: dataprofile-basic
      data:
        resource: //bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare
      executionSpec:
        trigger:
          # An empty mapping selects the on-demand trigger (no schedule).
          onDemand: {}
      # An empty mapping enables profiling with all defaults.
      dataProfileSpec: {}
      project: my-project-name
Dataplex Datascan Full Profile
// Scheduled data-profile scan with sampling, field filters and a BigQuery export,
// plus the dataset that receives the exported results.
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const source = new gcp.bigquery.Dataset("source", {
datasetId: "dataplex_dataset",
friendlyName: "test",
description: "This is a test description",
location: "US",
deleteContentsOnDestroy: true,
});
const fullProfile = new gcp.dataplex.Datascan("full_profile", {
location: "us-central1",
displayName: "Full Datascan Profile",
dataScanId: "dataprofile-full",
description: "Example resource - Full Datascan Profile",
labels: {
author: "billing",
},
data: {
resource: "//bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare",
},
executionSpec: {
trigger: {
schedule: {
cron: "TZ=America/New_York 1 1 * * *",
},
},
},
dataProfileSpec: {
samplingPercent: 80,
rowFilter: "word_count > 10",
includeFields: {
fieldNames: ["word_count"],
},
excludeFields: {
fieldNames: ["property_type"],
},
postScanActions: {
bigqueryExport: {
// The export table lives in the `source` dataset created above.
resultsTable: "//bigquery.googleapis.com/projects/my-project-name/datasets/dataplex_dataset/tables/profile_export",
},
},
},
project: "my-project-name",
}, {
dependsOn: [source],
});
# Scheduled data-profile scan with sampling, field filters and a BigQuery export,
# plus the dataset that receives the exported results.
import pulumi
import pulumi_gcp as gcp
source = gcp.bigquery.Dataset("source",
dataset_id="dataplex_dataset",
friendly_name="test",
description="This is a test description",
location="US",
delete_contents_on_destroy=True)
full_profile = gcp.dataplex.Datascan("full_profile",
location="us-central1",
display_name="Full Datascan Profile",
data_scan_id="dataprofile-full",
description="Example resource - Full Datascan Profile",
labels={
"author": "billing",
},
data={
"resource": "//bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare",
},
execution_spec={
"trigger": {
"schedule": {
"cron": "TZ=America/New_York 1 1 * * *",
},
},
},
data_profile_spec={
"sampling_percent": 80,
"row_filter": "word_count > 10",
"include_fields": {
"field_names": ["word_count"],
},
"exclude_fields": {
"field_names": ["property_type"],
},
"post_scan_actions": {
"bigquery_export": {
# The export table lives in the `source` dataset created above.
"results_table": "//bigquery.googleapis.com/projects/my-project-name/datasets/dataplex_dataset/tables/profile_export",
},
},
},
project="my-project-name",
opts = pulumi.ResourceOptions(depends_on=[source]))
// Scheduled data-profile scan with sampling, field filters and a BigQuery export,
// plus the dataset that receives the exported results.
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigquery"
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataplex"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
// Destination dataset for the exported profile results.
source, err := bigquery.NewDataset(ctx, "source", &bigquery.DatasetArgs{
DatasetId: pulumi.String("dataplex_dataset"),
FriendlyName: pulumi.String("test"),
Description: pulumi.String("This is a test description"),
Location: pulumi.String("US"),
DeleteContentsOnDestroy: pulumi.Bool(true),
})
if err != nil {
return err
}
_, err = dataplex.NewDatascan(ctx, "full_profile", &dataplex.DatascanArgs{
Location: pulumi.String("us-central1"),
DisplayName: pulumi.String("Full Datascan Profile"),
DataScanId: pulumi.String("dataprofile-full"),
Description: pulumi.String("Example resource - Full Datascan Profile"),
Labels: pulumi.StringMap{
"author": pulumi.String("billing"),
},
Data: &dataplex.DatascanDataArgs{
Resource: pulumi.String("//bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare"),
},
ExecutionSpec: &dataplex.DatascanExecutionSpecArgs{
Trigger: &dataplex.DatascanExecutionSpecTriggerArgs{
Schedule: &dataplex.DatascanExecutionSpecTriggerScheduleArgs{
Cron: pulumi.String("TZ=America/New_York 1 1 * * *"),
},
},
},
DataProfileSpec: &dataplex.DatascanDataProfileSpecArgs{
SamplingPercent: pulumi.Float64(80),
RowFilter: pulumi.String("word_count > 10"),
IncludeFields: &dataplex.DatascanDataProfileSpecIncludeFieldsArgs{
FieldNames: pulumi.StringArray{
pulumi.String("word_count"),
},
},
ExcludeFields: &dataplex.DatascanDataProfileSpecExcludeFieldsArgs{
FieldNames: pulumi.StringArray{
pulumi.String("property_type"),
},
},
PostScanActions: &dataplex.DatascanDataProfileSpecPostScanActionsArgs{
BigqueryExport: &dataplex.DatascanDataProfileSpecPostScanActionsBigqueryExportArgs{
// The export table lives in the `source` dataset created above.
ResultsTable: pulumi.String("//bigquery.googleapis.com/projects/my-project-name/datasets/dataplex_dataset/tables/profile_export"),
},
},
},
Project: pulumi.String("my-project-name"),
}, pulumi.DependsOn([]pulumi.Resource{
source,
}))
if err != nil {
return err
}
return nil
})
}
// Scheduled data-profile scan with sampling, field filters and a BigQuery export,
// plus the dataset that receives the exported results.
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() =>
{
// Destination dataset for the exported profile results.
var source = new Gcp.BigQuery.Dataset("source", new()
{
DatasetId = "dataplex_dataset",
FriendlyName = "test",
Description = "This is a test description",
Location = "US",
DeleteContentsOnDestroy = true,
});
var fullProfile = new Gcp.DataPlex.Datascan("full_profile", new()
{
Location = "us-central1",
DisplayName = "Full Datascan Profile",
DataScanId = "dataprofile-full",
Description = "Example resource - Full Datascan Profile",
Labels =
{
{ "author", "billing" },
},
Data = new Gcp.DataPlex.Inputs.DatascanDataArgs
{
Resource = "//bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare",
},
ExecutionSpec = new Gcp.DataPlex.Inputs.DatascanExecutionSpecArgs
{
Trigger = new Gcp.DataPlex.Inputs.DatascanExecutionSpecTriggerArgs
{
Schedule = new Gcp.DataPlex.Inputs.DatascanExecutionSpecTriggerScheduleArgs
{
Cron = "TZ=America/New_York 1 1 * * *",
},
},
},
DataProfileSpec = new Gcp.DataPlex.Inputs.DatascanDataProfileSpecArgs
{
SamplingPercent = 80,
RowFilter = "word_count > 10",
IncludeFields = new Gcp.DataPlex.Inputs.DatascanDataProfileSpecIncludeFieldsArgs
{
FieldNames = new[]
{
"word_count",
},
},
ExcludeFields = new Gcp.DataPlex.Inputs.DatascanDataProfileSpecExcludeFieldsArgs
{
FieldNames = new[]
{
"property_type",
},
},
PostScanActions = new Gcp.DataPlex.Inputs.DatascanDataProfileSpecPostScanActionsArgs
{
BigqueryExport = new Gcp.DataPlex.Inputs.DatascanDataProfileSpecPostScanActionsBigqueryExportArgs
{
// The export table lives in the `source` dataset created above.
ResultsTable = "//bigquery.googleapis.com/projects/my-project-name/datasets/dataplex_dataset/tables/profile_export",
},
},
},
Project = "my-project-name",
}, new CustomResourceOptions
{
DependsOn =
{
source,
},
});
});
// Scheduled data-profile scan with sampling, field filters and a BigQuery export,
// plus the dataset that receives the exported results.
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.bigquery.Dataset;
import com.pulumi.gcp.bigquery.DatasetArgs;
import com.pulumi.gcp.dataplex.Datascan;
import com.pulumi.gcp.dataplex.DatascanArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanDataArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanExecutionSpecArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanExecutionSpecTriggerArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanExecutionSpecTriggerScheduleArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanDataProfileSpecArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanDataProfileSpecIncludeFieldsArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanDataProfileSpecExcludeFieldsArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanDataProfileSpecPostScanActionsArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanDataProfileSpecPostScanActionsBigqueryExportArgs;
import com.pulumi.resources.CustomResourceOptions;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
// Destination dataset for the exported profile results.
var source = new Dataset("source", DatasetArgs.builder()
.datasetId("dataplex_dataset")
.friendlyName("test")
.description("This is a test description")
.location("US")
.deleteContentsOnDestroy(true)
.build());
var fullProfile = new Datascan("fullProfile", DatascanArgs.builder()
.location("us-central1")
.displayName("Full Datascan Profile")
.dataScanId("dataprofile-full")
.description("Example resource - Full Datascan Profile")
.labels(Map.of("author", "billing"))
.data(DatascanDataArgs.builder()
.resource("//bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare")
.build())
.executionSpec(DatascanExecutionSpecArgs.builder()
.trigger(DatascanExecutionSpecTriggerArgs.builder()
.schedule(DatascanExecutionSpecTriggerScheduleArgs.builder()
.cron("TZ=America/New_York 1 1 * * *")
.build())
.build())
.build())
.dataProfileSpec(DatascanDataProfileSpecArgs.builder()
.samplingPercent(80)
.rowFilter("word_count > 10")
.includeFields(DatascanDataProfileSpecIncludeFieldsArgs.builder()
.fieldNames("word_count")
.build())
.excludeFields(DatascanDataProfileSpecExcludeFieldsArgs.builder()
.fieldNames("property_type")
.build())
.postScanActions(DatascanDataProfileSpecPostScanActionsArgs.builder()
.bigqueryExport(DatascanDataProfileSpecPostScanActionsBigqueryExportArgs.builder()
// The export table lives in the `source` dataset created above.
.resultsTable("//bigquery.googleapis.com/projects/my-project-name/datasets/dataplex_dataset/tables/profile_export")
.build())
.build())
.build())
.project("my-project-name")
.build(), CustomResourceOptions.builder()
.dependsOn(source)
.build());
}
}
resources:
  # Scheduled data-profile scan with sampling, field filters and a BigQuery export.
  fullProfile:
    type: gcp:dataplex:Datascan
    name: full_profile
    properties:
      location: us-central1
      displayName: Full Datascan Profile
      dataScanId: dataprofile-full
      description: Example resource - Full Datascan Profile
      labels:
        author: billing
      data:
        resource: //bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare
      executionSpec:
        trigger:
          schedule:
            cron: TZ=America/New_York 1 1 * * *
      dataProfileSpec:
        samplingPercent: 80
        rowFilter: word_count > 10
        includeFields:
          fieldNames:
            - word_count
        excludeFields:
          fieldNames:
            - property_type
        postScanActions:
          bigqueryExport:
            # The export table lives in the `source` dataset declared below.
            resultsTable: //bigquery.googleapis.com/projects/my-project-name/datasets/dataplex_dataset/tables/profile_export
      project: my-project-name
    options:
      # Pulumi YAML's resource-option key is camelCase `dependsOn`
      # (was mis-rendered as `dependson`).
      dependsOn:
        - ${source}
  # Destination dataset for the exported profile results.
  source:
    type: gcp:bigquery:Dataset
    properties:
      datasetId: dataplex_dataset
      friendlyName: test
      description: This is a test description
      location: US
      deleteContentsOnDestroy: true
Dataplex Datascan Basic Quality
// Minimal on-demand data-quality scan with a single table-level validity rule.
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const basicQuality = new gcp.dataplex.Datascan("basic_quality", {
location: "us-central1",
dataScanId: "dataquality-basic",
data: {
resource: "//bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare",
},
executionSpec: {
trigger: {
// An empty object selects the on-demand trigger (no schedule).
onDemand: {},
},
},
dataQualitySpec: {
rules: [{
dimension: "VALIDITY",
name: "rule1",
description: "rule 1 for validity dimension",
// Passes when the table contains at least one row.
tableConditionExpectation: {
sqlExpression: "COUNT(*) > 0",
},
}],
},
project: "my-project-name",
});
# Minimal on-demand data-quality scan with a single table-level validity rule.
import pulumi
import pulumi_gcp as gcp
basic_quality = gcp.dataplex.Datascan("basic_quality",
location="us-central1",
data_scan_id="dataquality-basic",
data={
"resource": "//bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare",
},
execution_spec={
"trigger": {
# An empty dict selects the on-demand trigger (no schedule).
"on_demand": {},
},
},
data_quality_spec={
"rules": [{
"dimension": "VALIDITY",
"name": "rule1",
"description": "rule 1 for validity dimension",
# Passes when the table contains at least one row.
"table_condition_expectation": {
"sql_expression": "COUNT(*) > 0",
},
}],
},
project="my-project-name")
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataplex"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := dataplex.NewDatascan(ctx, "basic_quality", &dataplex.DatascanArgs{
Location: pulumi.String("us-central1"),
DataScanId: pulumi.String("dataquality-basic"),
Data: &dataplex.DatascanDataArgs{
Resource: pulumi.String("//bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare"),
},
ExecutionSpec: &dataplex.DatascanExecutionSpecArgs{
Trigger: &dataplex.DatascanExecutionSpecTriggerArgs{
OnDemand: nil,
},
},
DataQualitySpec: &dataplex.DatascanDataQualitySpecArgs{
Rules: dataplex.DatascanDataQualitySpecRuleArray{
&dataplex.DatascanDataQualitySpecRuleArgs{
Dimension: pulumi.String("VALIDITY"),
Name: pulumi.String("rule1"),
Description: pulumi.String("rule 1 for validity dimension"),
TableConditionExpectation: &dataplex.DatascanDataQualitySpecRuleTableConditionExpectationArgs{
SqlExpression: pulumi.String("COUNT(*) > 0"),
},
},
},
},
Project: pulumi.String("my-project-name"),
})
if err != nil {
return err
}
return nil
})
}
// Minimal on-demand data-quality scan with a single table-level validity rule.
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() =>
{
    var basicQuality = new Gcp.DataPlex.Datascan("basic_quality", new()
    {
        Location = "us-central1",
        DataScanId = "dataquality-basic",
        Data = new Gcp.DataPlex.Inputs.DatascanDataArgs
        {
            Resource = "//bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare",
        },
        ExecutionSpec = new Gcp.DataPlex.Inputs.DatascanExecutionSpecArgs
        {
            Trigger = new Gcp.DataPlex.Inputs.DatascanExecutionSpecTriggerArgs
            {
                // An empty args object (not null) selects the on-demand trigger;
                // null leaves the trigger type unset, which the API rejects.
                OnDemand = new Gcp.DataPlex.Inputs.DatascanExecutionSpecTriggerOnDemandArgs(),
            },
        },
        DataQualitySpec = new Gcp.DataPlex.Inputs.DatascanDataQualitySpecArgs
        {
            Rules = new[]
            {
                new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleArgs
                {
                    Dimension = "VALIDITY",
                    Name = "rule1",
                    Description = "rule 1 for validity dimension",
                    // Passes when the table contains at least one row.
                    TableConditionExpectation = new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleTableConditionExpectationArgs
                    {
                        SqlExpression = "COUNT(*) > 0",
                    },
                },
            },
        },
        Project = "my-project-name",
    });
});
// Minimal on-demand data-quality scan with a single table-level validity rule.
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.dataplex.Datascan;
import com.pulumi.gcp.dataplex.DatascanArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanDataArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanExecutionSpecArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanExecutionSpecTriggerArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanExecutionSpecTriggerOnDemandArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanDataQualitySpecArgs;
// These two imports were missing; the rule builders below do not compile without them.
import com.pulumi.gcp.dataplex.inputs.DatascanDataQualitySpecRuleArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanDataQualitySpecRuleTableConditionExpectationArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var basicQuality = new Datascan("basicQuality", DatascanArgs.builder()
            .location("us-central1")
            .dataScanId("dataquality-basic")
            .data(DatascanDataArgs.builder()
                .resource("//bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare")
                .build())
            .executionSpec(DatascanExecutionSpecArgs.builder()
                .trigger(DatascanExecutionSpecTriggerArgs.builder()
                    // An explicit empty args object selects the on-demand trigger;
                    // the zero-argument .onDemand() overload does not exist and fails to compile.
                    .onDemand(DatascanExecutionSpecTriggerOnDemandArgs.builder().build())
                    .build())
                .build())
            .dataQualitySpec(DatascanDataQualitySpecArgs.builder()
                .rules(DatascanDataQualitySpecRuleArgs.builder()
                    .dimension("VALIDITY")
                    .name("rule1")
                    .description("rule 1 for validity dimension")
                    // Passes when the table contains at least one row.
                    .tableConditionExpectation(DatascanDataQualitySpecRuleTableConditionExpectationArgs.builder()
                        .sqlExpression("COUNT(*) > 0")
                        .build())
                    .build())
                .build())
            .project("my-project-name")
            .build());
    }
}
resources:
  # Minimal on-demand data-quality scan with a single table-level validity rule.
  basicQuality:
    type: gcp:dataplex:Datascan
    name: basic_quality
    properties:
      location: us-central1
      dataScanId: dataquality-basic
      data:
        resource: //bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare
      executionSpec:
        trigger:
          # An empty mapping selects the on-demand trigger (no schedule).
          onDemand: {}
      dataQualitySpec:
        rules:
          - dimension: VALIDITY
            name: rule1
            description: rule 1 for validity dimension
            # Passes when the table contains at least one row.
            tableConditionExpectation:
              sqlExpression: COUNT(*) > 0
      project: my-project-name
Dataplex Datascan Full Quality
// Scheduled data-quality scan demonstrating every rule/expectation type:
// non-null, range, regex, set, uniqueness, statistic range, row condition,
// table condition and SQL assertion.
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const fullQuality = new gcp.dataplex.Datascan("full_quality", {
location: "us-central1",
displayName: "Full Datascan Quality",
dataScanId: "dataquality-full",
description: "Example resource - Full Datascan Quality",
labels: {
author: "billing",
},
data: {
resource: "//bigquery.googleapis.com/projects/bigquery-public-data/datasets/austin_bikeshare/tables/bikeshare_stations",
},
executionSpec: {
trigger: {
schedule: {
cron: "TZ=America/New_York 1 1 * * *",
},
},
// Incremental scans key off this timestamp column.
field: "modified_date",
},
dataQualitySpec: {
samplingPercent: 5,
rowFilter: "station_id > 1000",
rules: [
{
column: "address",
dimension: "VALIDITY",
threshold: 0.99,
nonNullExpectation: {},
},
{
column: "council_district",
dimension: "VALIDITY",
ignoreNull: true,
threshold: 0.9,
rangeExpectation: {
minValue: "1",
maxValue: "10",
strictMinEnabled: true,
strictMaxEnabled: false,
},
},
{
column: "power_type",
dimension: "VALIDITY",
ignoreNull: false,
regexExpectation: {
regex: ".*solar.*",
},
},
{
column: "property_type",
dimension: "VALIDITY",
ignoreNull: false,
setExpectation: {
values: [
"sidewalk",
"parkland",
],
},
},
{
column: "address",
dimension: "UNIQUENESS",
uniquenessExpectation: {},
},
{
column: "number_of_docks",
dimension: "VALIDITY",
statisticRangeExpectation: {
statistic: "MEAN",
minValue: "5",
maxValue: "15",
strictMinEnabled: true,
strictMaxEnabled: true,
},
},
{
column: "footprint_length",
dimension: "VALIDITY",
rowConditionExpectation: {
sqlExpression: "footprint_length > 0 AND footprint_length <= 10",
},
},
{
dimension: "VALIDITY",
tableConditionExpectation: {
sqlExpression: "COUNT(*) > 0",
},
},
{
dimension: "VALIDITY",
// The assertion passes when the statement returns zero rows.
sqlAssertion: {
sqlStatement: "select * from bigquery-public-data.austin_bikeshare.bikeshare_stations where station_id is null",
},
},
],
},
project: "my-project-name",
});
# Scheduled data-quality scan demonstrating every rule/expectation type:
# non-null, range, regex, set, uniqueness, statistic range, row condition,
# table condition and SQL assertion.
import pulumi
import pulumi_gcp as gcp
full_quality = gcp.dataplex.Datascan("full_quality",
location="us-central1",
display_name="Full Datascan Quality",
data_scan_id="dataquality-full",
description="Example resource - Full Datascan Quality",
labels={
"author": "billing",
},
data={
"resource": "//bigquery.googleapis.com/projects/bigquery-public-data/datasets/austin_bikeshare/tables/bikeshare_stations",
},
execution_spec={
"trigger": {
"schedule": {
"cron": "TZ=America/New_York 1 1 * * *",
},
},
# Incremental scans key off this timestamp column.
"field": "modified_date",
},
data_quality_spec={
"sampling_percent": 5,
"row_filter": "station_id > 1000",
"rules": [
{
"column": "address",
"dimension": "VALIDITY",
"threshold": 0.99,
"non_null_expectation": {},
},
{
"column": "council_district",
"dimension": "VALIDITY",
"ignore_null": True,
"threshold": 0.9,
"range_expectation": {
"min_value": "1",
"max_value": "10",
"strict_min_enabled": True,
"strict_max_enabled": False,
},
},
{
"column": "power_type",
"dimension": "VALIDITY",
"ignore_null": False,
"regex_expectation": {
"regex": ".*solar.*",
},
},
{
"column": "property_type",
"dimension": "VALIDITY",
"ignore_null": False,
"set_expectation": {
"values": [
"sidewalk",
"parkland",
],
},
},
{
"column": "address",
"dimension": "UNIQUENESS",
"uniqueness_expectation": {},
},
{
"column": "number_of_docks",
"dimension": "VALIDITY",
"statistic_range_expectation": {
"statistic": "MEAN",
"min_value": "5",
"max_value": "15",
"strict_min_enabled": True,
"strict_max_enabled": True,
},
},
{
"column": "footprint_length",
"dimension": "VALIDITY",
"row_condition_expectation": {
"sql_expression": "footprint_length > 0 AND footprint_length <= 10",
},
},
{
"dimension": "VALIDITY",
"table_condition_expectation": {
"sql_expression": "COUNT(*) > 0",
},
},
{
"dimension": "VALIDITY",
# The assertion passes when the statement returns zero rows.
"sql_assertion": {
"sql_statement": "select * from bigquery-public-data.austin_bikeshare.bikeshare_stations where station_id is null",
},
},
],
},
project="my-project-name")
package main
import (
"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataplex"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := dataplex.NewDatascan(ctx, "full_quality", &dataplex.DatascanArgs{
Location: pulumi.String("us-central1"),
DisplayName: pulumi.String("Full Datascan Quality"),
DataScanId: pulumi.String("dataquality-full"),
Description: pulumi.String("Example resource - Full Datascan Quality"),
Labels: pulumi.StringMap{
"author": pulumi.String("billing"),
},
Data: &dataplex.DatascanDataArgs{
Resource: pulumi.String("//bigquery.googleapis.com/projects/bigquery-public-data/datasets/austin_bikeshare/tables/bikeshare_stations"),
},
ExecutionSpec: &dataplex.DatascanExecutionSpecArgs{
Trigger: &dataplex.DatascanExecutionSpecTriggerArgs{
Schedule: &dataplex.DatascanExecutionSpecTriggerScheduleArgs{
Cron: pulumi.String("TZ=America/New_York 1 1 * * *"),
},
},
Field: pulumi.String("modified_date"),
},
DataQualitySpec: &dataplex.DatascanDataQualitySpecArgs{
SamplingPercent: pulumi.Float64(5),
RowFilter: pulumi.String("station_id > 1000"),
Rules: dataplex.DatascanDataQualitySpecRuleArray{
&dataplex.DatascanDataQualitySpecRuleArgs{
Column: pulumi.String("address"),
Dimension: pulumi.String("VALIDITY"),
Threshold: pulumi.Float64(0.99),
NonNullExpectation: nil,
},
&dataplex.DatascanDataQualitySpecRuleArgs{
Column: pulumi.String("council_district"),
Dimension: pulumi.String("VALIDITY"),
IgnoreNull: pulumi.Bool(true),
Threshold: pulumi.Float64(0.9),
RangeExpectation: &dataplex.DatascanDataQualitySpecRuleRangeExpectationArgs{
MinValue: pulumi.String("1"),
MaxValue: pulumi.String("10"),
StrictMinEnabled: pulumi.Bool(true),
StrictMaxEnabled: pulumi.Bool(false),
},
},
&dataplex.DatascanDataQualitySpecRuleArgs{
Column: pulumi.String("power_type"),
Dimension: pulumi.String("VALIDITY"),
IgnoreNull: pulumi.Bool(false),
RegexExpectation: &dataplex.DatascanDataQualitySpecRuleRegexExpectationArgs{
Regex: pulumi.String(".*solar.*"),
},
},
&dataplex.DatascanDataQualitySpecRuleArgs{
Column: pulumi.String("property_type"),
Dimension: pulumi.String("VALIDITY"),
IgnoreNull: pulumi.Bool(false),
SetExpectation: &dataplex.DatascanDataQualitySpecRuleSetExpectationArgs{
Values: pulumi.StringArray{
pulumi.String("sidewalk"),
pulumi.String("parkland"),
},
},
},
&dataplex.DatascanDataQualitySpecRuleArgs{
Column: pulumi.String("address"),
Dimension: pulumi.String("UNIQUENESS"),
UniquenessExpectation: nil,
},
&dataplex.DatascanDataQualitySpecRuleArgs{
Column: pulumi.String("number_of_docks"),
Dimension: pulumi.String("VALIDITY"),
StatisticRangeExpectation: &dataplex.DatascanDataQualitySpecRuleStatisticRangeExpectationArgs{
Statistic: pulumi.String("MEAN"),
MinValue: pulumi.String("5"),
MaxValue: pulumi.String("15"),
StrictMinEnabled: pulumi.Bool(true),
StrictMaxEnabled: pulumi.Bool(true),
},
},
&dataplex.DatascanDataQualitySpecRuleArgs{
Column: pulumi.String("footprint_length"),
Dimension: pulumi.String("VALIDITY"),
RowConditionExpectation: &dataplex.DatascanDataQualitySpecRuleRowConditionExpectationArgs{
SqlExpression: pulumi.String("footprint_length > 0 AND footprint_length <= 10"),
},
},
&dataplex.DatascanDataQualitySpecRuleArgs{
Dimension: pulumi.String("VALIDITY"),
TableConditionExpectation: &dataplex.DatascanDataQualitySpecRuleTableConditionExpectationArgs{
SqlExpression: pulumi.String("COUNT(*) > 0"),
},
},
&dataplex.DatascanDataQualitySpecRuleArgs{
Dimension: pulumi.String("VALIDITY"),
SqlAssertion: &dataplex.DatascanDataQualitySpecRuleSqlAssertionArgs{
SqlStatement: pulumi.String("select * from bigquery-public-data.austin_bikeshare.bikeshare_stations where station_id is null"),
},
},
},
},
Project: pulumi.String("my-project-name"),
})
if err != nil {
return err
}
return nil
})
}
// Scheduled data-quality scan demonstrating every rule/expectation type:
// non-null, range, regex, set, uniqueness, statistic range, row condition,
// table condition and SQL assertion.
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() =>
{
    var fullQuality = new Gcp.DataPlex.Datascan("full_quality", new()
    {
        Location = "us-central1",
        DisplayName = "Full Datascan Quality",
        DataScanId = "dataquality-full",
        Description = "Example resource - Full Datascan Quality",
        Labels =
        {
            { "author", "billing" },
        },
        Data = new Gcp.DataPlex.Inputs.DatascanDataArgs
        {
            Resource = "//bigquery.googleapis.com/projects/bigquery-public-data/datasets/austin_bikeshare/tables/bikeshare_stations",
        },
        ExecutionSpec = new Gcp.DataPlex.Inputs.DatascanExecutionSpecArgs
        {
            Trigger = new Gcp.DataPlex.Inputs.DatascanExecutionSpecTriggerArgs
            {
                Schedule = new Gcp.DataPlex.Inputs.DatascanExecutionSpecTriggerScheduleArgs
                {
                    Cron = "TZ=America/New_York 1 1 * * *",
                },
            },
            // Incremental scans key off this timestamp column.
            Field = "modified_date",
        },
        DataQualitySpec = new Gcp.DataPlex.Inputs.DatascanDataQualitySpecArgs
        {
            SamplingPercent = 5,
            RowFilter = "station_id > 1000",
            Rules = new[]
            {
                new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleArgs
                {
                    Column = "address",
                    Dimension = "VALIDITY",
                    Threshold = 0.99,
                    // An empty args object (not null) selects this expectation type,
                    // matching `nonNullExpectation: {}` in the other languages;
                    // null would leave the rule without an expectation.
                    NonNullExpectation = new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleNonNullExpectationArgs(),
                },
                new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleArgs
                {
                    Column = "council_district",
                    Dimension = "VALIDITY",
                    IgnoreNull = true,
                    Threshold = 0.9,
                    RangeExpectation = new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleRangeExpectationArgs
                    {
                        MinValue = "1",
                        MaxValue = "10",
                        StrictMinEnabled = true,
                        StrictMaxEnabled = false,
                    },
                },
                new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleArgs
                {
                    Column = "power_type",
                    Dimension = "VALIDITY",
                    IgnoreNull = false,
                    RegexExpectation = new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleRegexExpectationArgs
                    {
                        Regex = ".*solar.*",
                    },
                },
                new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleArgs
                {
                    Column = "property_type",
                    Dimension = "VALIDITY",
                    IgnoreNull = false,
                    SetExpectation = new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleSetExpectationArgs
                    {
                        Values = new[]
                        {
                            "sidewalk",
                            "parkland",
                        },
                    },
                },
                new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleArgs
                {
                    Column = "address",
                    Dimension = "UNIQUENESS",
                    // Empty args object, not null — same reasoning as NonNullExpectation above.
                    UniquenessExpectation = new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleUniquenessExpectationArgs(),
                },
                new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleArgs
                {
                    Column = "number_of_docks",
                    Dimension = "VALIDITY",
                    StatisticRangeExpectation = new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleStatisticRangeExpectationArgs
                    {
                        Statistic = "MEAN",
                        MinValue = "5",
                        MaxValue = "15",
                        StrictMinEnabled = true,
                        StrictMaxEnabled = true,
                    },
                },
                new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleArgs
                {
                    Column = "footprint_length",
                    Dimension = "VALIDITY",
                    RowConditionExpectation = new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleRowConditionExpectationArgs
                    {
                        SqlExpression = "footprint_length > 0 AND footprint_length <= 10",
                    },
                },
                new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleArgs
                {
                    Dimension = "VALIDITY",
                    TableConditionExpectation = new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleTableConditionExpectationArgs
                    {
                        SqlExpression = "COUNT(*) > 0",
                    },
                },
                new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleArgs
                {
                    Dimension = "VALIDITY",
                    // The assertion passes when the statement returns zero rows.
                    SqlAssertion = new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleSqlAssertionArgs
                    {
                        SqlStatement = "select * from bigquery-public-data.austin_bikeshare.bikeshare_stations where station_id is null",
                    },
                },
            },
        },
        Project = "my-project-name",
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.dataplex.Datascan;
import com.pulumi.gcp.dataplex.DatascanArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanDataArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanExecutionSpecArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanExecutionSpecTriggerArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanExecutionSpecTriggerScheduleArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanDataQualitySpecArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var fullQuality = new Datascan("fullQuality", DatascanArgs.builder()
.location("us-central1")
.displayName("Full Datascan Quality")
.dataScanId("dataquality-full")
.description("Example resource - Full Datascan Quality")
.labels(Map.of("author", "billing"))
.data(DatascanDataArgs.builder()
.resource("//bigquery.googleapis.com/projects/bigquery-public-data/datasets/austin_bikeshare/tables/bikeshare_stations")
.build())
.executionSpec(DatascanExecutionSpecArgs.builder()
.trigger(DatascanExecutionSpecTriggerArgs.builder()
.schedule(DatascanExecutionSpecTriggerScheduleArgs.builder()
.cron("TZ=America/New_York 1 1 * * *")
.build())
.build())
.field("modified_date")
.build())
.dataQualitySpec(DatascanDataQualitySpecArgs.builder()
.samplingPercent(5)
.rowFilter("station_id > 1000")
.rules(
DatascanDataQualitySpecRuleArgs.builder()
.column("address")
.dimension("VALIDITY")
.threshold(0.99)
.nonNullExpectation()
.build(),
DatascanDataQualitySpecRuleArgs.builder()
.column("council_district")
.dimension("VALIDITY")
.ignoreNull(true)
.threshold(0.9)
.rangeExpectation(DatascanDataQualitySpecRuleRangeExpectationArgs.builder()
.minValue(1)
.maxValue(10)
.strictMinEnabled(true)
.strictMaxEnabled(false)
.build())
.build(),
DatascanDataQualitySpecRuleArgs.builder()
.column("power_type")
.dimension("VALIDITY")
.ignoreNull(false)
.regexExpectation(DatascanDataQualitySpecRuleRegexExpectationArgs.builder()
.regex(".*solar.*")
.build())
.build(),
DatascanDataQualitySpecRuleArgs.builder()
.column("property_type")
.dimension("VALIDITY")
.ignoreNull(false)
.setExpectation(DatascanDataQualitySpecRuleSetExpectationArgs.builder()
.values(
"sidewalk",
"parkland")
.build())
.build(),
DatascanDataQualitySpecRuleArgs.builder()
.column("address")
.dimension("UNIQUENESS")
.uniquenessExpectation()
.build(),
DatascanDataQualitySpecRuleArgs.builder()
.column("number_of_docks")
.dimension("VALIDITY")
.statisticRangeExpectation(DatascanDataQualitySpecRuleStatisticRangeExpectationArgs.builder()
.statistic("MEAN")
.minValue(5)
.maxValue(15)
.strictMinEnabled(true)
.strictMaxEnabled(true)
.build())
.build(),
DatascanDataQualitySpecRuleArgs.builder()
.column("footprint_length")
.dimension("VALIDITY")
.rowConditionExpectation(DatascanDataQualitySpecRuleRowConditionExpectationArgs.builder()
.sqlExpression("footprint_length > 0 AND footprint_length <= 10")
.build())
.build(),
DatascanDataQualitySpecRuleArgs.builder()
.dimension("VALIDITY")
.tableConditionExpectation(DatascanDataQualitySpecRuleTableConditionExpectationArgs.builder()
.sqlExpression("COUNT(*) > 0")
.build())
.build(),
DatascanDataQualitySpecRuleArgs.builder()
.dimension("VALIDITY")
.sqlAssertion(DatascanDataQualitySpecRuleSqlAssertionArgs.builder()
.sqlStatement("select * from bigquery-public-data.austin_bikeshare.bikeshare_stations where station_id is null")
.build())
.build())
.build())
.project("my-project-name")
.build());
}
}
resources:
fullQuality:
type: gcp:dataplex:Datascan
name: full_quality
properties:
location: us-central1
displayName: Full Datascan Quality
dataScanId: dataquality-full
description: Example resource - Full Datascan Quality
labels:
author: billing
data:
resource: //bigquery.googleapis.com/projects/bigquery-public-data/datasets/austin_bikeshare/tables/bikeshare_stations
executionSpec:
trigger:
schedule:
cron: TZ=America/New_York 1 1 * * *
field: modified_date
dataQualitySpec:
samplingPercent: 5
rowFilter: station_id > 1000
rules:
- column: address
dimension: VALIDITY
threshold: 0.99
nonNullExpectation: {}
- column: council_district
dimension: VALIDITY
ignoreNull: true
threshold: 0.9
rangeExpectation:
minValue: 1
maxValue: 10
strictMinEnabled: true
strictMaxEnabled: false
- column: power_type
dimension: VALIDITY
ignoreNull: false
regexExpectation:
regex: .*solar.*
- column: property_type
dimension: VALIDITY
ignoreNull: false
setExpectation:
values:
- sidewalk
- parkland
- column: address
dimension: UNIQUENESS
uniquenessExpectation: {}
- column: number_of_docks
dimension: VALIDITY
statisticRangeExpectation:
statistic: MEAN
minValue: 5
maxValue: 15
strictMinEnabled: true
strictMaxEnabled: true
- column: footprint_length
dimension: VALIDITY
rowConditionExpectation:
sqlExpression: footprint_length > 0 AND footprint_length <= 10
- dimension: VALIDITY
tableConditionExpectation:
sqlExpression: COUNT(*) > 0
- dimension: VALIDITY
sqlAssertion:
sqlStatement: select * from bigquery-public-data.austin_bikeshare.bikeshare_stations where station_id is null
project: my-project-name
Create Datascan Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new Datascan(name: string, args: DatascanArgs, opts?: CustomResourceOptions);
@overload
def Datascan(resource_name: str,
args: DatascanArgs,
opts: Optional[ResourceOptions] = None)
@overload
def Datascan(resource_name: str,
opts: Optional[ResourceOptions] = None,
data: Optional[DatascanDataArgs] = None,
data_scan_id: Optional[str] = None,
execution_spec: Optional[DatascanExecutionSpecArgs] = None,
location: Optional[str] = None,
data_profile_spec: Optional[DatascanDataProfileSpecArgs] = None,
data_quality_spec: Optional[DatascanDataQualitySpecArgs] = None,
description: Optional[str] = None,
display_name: Optional[str] = None,
labels: Optional[Mapping[str, str]] = None,
project: Optional[str] = None)
func NewDatascan(ctx *Context, name string, args DatascanArgs, opts ...ResourceOption) (*Datascan, error)
public Datascan(string name, DatascanArgs args, CustomResourceOptions? opts = null)
public Datascan(String name, DatascanArgs args)
public Datascan(String name, DatascanArgs args, CustomResourceOptions options)
type: gcp:dataplex:Datascan
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args DatascanArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args DatascanArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args DatascanArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args DatascanArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args DatascanArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var datascanResource = new Gcp.DataPlex.Datascan("datascanResource", new()
{
Data = new Gcp.DataPlex.Inputs.DatascanDataArgs
{
Entity = "string",
Resource = "string",
},
DataScanId = "string",
ExecutionSpec = new Gcp.DataPlex.Inputs.DatascanExecutionSpecArgs
{
Trigger = new Gcp.DataPlex.Inputs.DatascanExecutionSpecTriggerArgs
{
OnDemand = null,
Schedule = new Gcp.DataPlex.Inputs.DatascanExecutionSpecTriggerScheduleArgs
{
Cron = "string",
},
},
Field = "string",
},
Location = "string",
DataProfileSpec = new Gcp.DataPlex.Inputs.DatascanDataProfileSpecArgs
{
ExcludeFields = new Gcp.DataPlex.Inputs.DatascanDataProfileSpecExcludeFieldsArgs
{
FieldNames = new[]
{
"string",
},
},
IncludeFields = new Gcp.DataPlex.Inputs.DatascanDataProfileSpecIncludeFieldsArgs
{
FieldNames = new[]
{
"string",
},
},
PostScanActions = new Gcp.DataPlex.Inputs.DatascanDataProfileSpecPostScanActionsArgs
{
BigqueryExport = new Gcp.DataPlex.Inputs.DatascanDataProfileSpecPostScanActionsBigqueryExportArgs
{
ResultsTable = "string",
},
},
RowFilter = "string",
SamplingPercent = 0,
},
DataQualitySpec = new Gcp.DataPlex.Inputs.DatascanDataQualitySpecArgs
{
PostScanActions = new Gcp.DataPlex.Inputs.DatascanDataQualitySpecPostScanActionsArgs
{
BigqueryExport = new Gcp.DataPlex.Inputs.DatascanDataQualitySpecPostScanActionsBigqueryExportArgs
{
ResultsTable = "string",
},
},
RowFilter = "string",
Rules = new[]
{
new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleArgs
{
Dimension = "string",
RangeExpectation = new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleRangeExpectationArgs
{
MaxValue = "string",
MinValue = "string",
StrictMaxEnabled = false,
StrictMinEnabled = false,
},
RowConditionExpectation = new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleRowConditionExpectationArgs
{
SqlExpression = "string",
},
IgnoreNull = false,
Name = "string",
NonNullExpectation = null,
Column = "string",
RegexExpectation = new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleRegexExpectationArgs
{
Regex = "string",
},
Description = "string",
SetExpectation = new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleSetExpectationArgs
{
Values = new[]
{
"string",
},
},
SqlAssertion = new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleSqlAssertionArgs
{
SqlStatement = "string",
},
StatisticRangeExpectation = new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleStatisticRangeExpectationArgs
{
Statistic = "string",
MaxValue = "string",
MinValue = "string",
StrictMaxEnabled = false,
StrictMinEnabled = false,
},
TableConditionExpectation = new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleTableConditionExpectationArgs
{
SqlExpression = "string",
},
Threshold = 0,
UniquenessExpectation = null,
},
},
SamplingPercent = 0,
},
Description = "string",
DisplayName = "string",
Labels =
{
{ "string", "string" },
},
Project = "string",
});
example, err := dataplex.NewDatascan(ctx, "datascanResource", &dataplex.DatascanArgs{
Data: &dataplex.DatascanDataArgs{
Entity: pulumi.String("string"),
Resource: pulumi.String("string"),
},
DataScanId: pulumi.String("string"),
ExecutionSpec: &dataplex.DatascanExecutionSpecArgs{
Trigger: &dataplex.DatascanExecutionSpecTriggerArgs{
OnDemand: nil,
Schedule: &dataplex.DatascanExecutionSpecTriggerScheduleArgs{
Cron: pulumi.String("string"),
},
},
Field: pulumi.String("string"),
},
Location: pulumi.String("string"),
DataProfileSpec: &dataplex.DatascanDataProfileSpecArgs{
ExcludeFields: &dataplex.DatascanDataProfileSpecExcludeFieldsArgs{
FieldNames: pulumi.StringArray{
pulumi.String("string"),
},
},
IncludeFields: &dataplex.DatascanDataProfileSpecIncludeFieldsArgs{
FieldNames: pulumi.StringArray{
pulumi.String("string"),
},
},
PostScanActions: &dataplex.DatascanDataProfileSpecPostScanActionsArgs{
BigqueryExport: &dataplex.DatascanDataProfileSpecPostScanActionsBigqueryExportArgs{
ResultsTable: pulumi.String("string"),
},
},
RowFilter: pulumi.String("string"),
SamplingPercent: pulumi.Float64(0),
},
DataQualitySpec: &dataplex.DatascanDataQualitySpecArgs{
PostScanActions: &dataplex.DatascanDataQualitySpecPostScanActionsArgs{
BigqueryExport: &dataplex.DatascanDataQualitySpecPostScanActionsBigqueryExportArgs{
ResultsTable: pulumi.String("string"),
},
},
RowFilter: pulumi.String("string"),
Rules: dataplex.DatascanDataQualitySpecRuleArray{
&dataplex.DatascanDataQualitySpecRuleArgs{
Dimension: pulumi.String("string"),
RangeExpectation: &dataplex.DatascanDataQualitySpecRuleRangeExpectationArgs{
MaxValue: pulumi.String("string"),
MinValue: pulumi.String("string"),
StrictMaxEnabled: pulumi.Bool(false),
StrictMinEnabled: pulumi.Bool(false),
},
RowConditionExpectation: &dataplex.DatascanDataQualitySpecRuleRowConditionExpectationArgs{
SqlExpression: pulumi.String("string"),
},
IgnoreNull: pulumi.Bool(false),
Name: pulumi.String("string"),
NonNullExpectation: nil,
Column: pulumi.String("string"),
RegexExpectation: &dataplex.DatascanDataQualitySpecRuleRegexExpectationArgs{
Regex: pulumi.String("string"),
},
Description: pulumi.String("string"),
SetExpectation: &dataplex.DatascanDataQualitySpecRuleSetExpectationArgs{
Values: pulumi.StringArray{
pulumi.String("string"),
},
},
SqlAssertion: &dataplex.DatascanDataQualitySpecRuleSqlAssertionArgs{
SqlStatement: pulumi.String("string"),
},
StatisticRangeExpectation: &dataplex.DatascanDataQualitySpecRuleStatisticRangeExpectationArgs{
Statistic: pulumi.String("string"),
MaxValue: pulumi.String("string"),
MinValue: pulumi.String("string"),
StrictMaxEnabled: pulumi.Bool(false),
StrictMinEnabled: pulumi.Bool(false),
},
TableConditionExpectation: &dataplex.DatascanDataQualitySpecRuleTableConditionExpectationArgs{
SqlExpression: pulumi.String("string"),
},
Threshold: pulumi.Float64(0),
UniquenessExpectation: nil,
},
},
SamplingPercent: pulumi.Float64(0),
},
Description: pulumi.String("string"),
DisplayName: pulumi.String("string"),
Labels: pulumi.StringMap{
"string": pulumi.String("string"),
},
Project: pulumi.String("string"),
})
var datascanResource = new Datascan("datascanResource", DatascanArgs.builder()
.data(DatascanDataArgs.builder()
.entity("string")
.resource("string")
.build())
.dataScanId("string")
.executionSpec(DatascanExecutionSpecArgs.builder()
.trigger(DatascanExecutionSpecTriggerArgs.builder()
.onDemand()
.schedule(DatascanExecutionSpecTriggerScheduleArgs.builder()
.cron("string")
.build())
.build())
.field("string")
.build())
.location("string")
.dataProfileSpec(DatascanDataProfileSpecArgs.builder()
.excludeFields(DatascanDataProfileSpecExcludeFieldsArgs.builder()
.fieldNames("string")
.build())
.includeFields(DatascanDataProfileSpecIncludeFieldsArgs.builder()
.fieldNames("string")
.build())
.postScanActions(DatascanDataProfileSpecPostScanActionsArgs.builder()
.bigqueryExport(DatascanDataProfileSpecPostScanActionsBigqueryExportArgs.builder()
.resultsTable("string")
.build())
.build())
.rowFilter("string")
.samplingPercent(0)
.build())
.dataQualitySpec(DatascanDataQualitySpecArgs.builder()
.postScanActions(DatascanDataQualitySpecPostScanActionsArgs.builder()
.bigqueryExport(DatascanDataQualitySpecPostScanActionsBigqueryExportArgs.builder()
.resultsTable("string")
.build())
.build())
.rowFilter("string")
.rules(DatascanDataQualitySpecRuleArgs.builder()
.dimension("string")
.rangeExpectation(DatascanDataQualitySpecRuleRangeExpectationArgs.builder()
.maxValue("string")
.minValue("string")
.strictMaxEnabled(false)
.strictMinEnabled(false)
.build())
.rowConditionExpectation(DatascanDataQualitySpecRuleRowConditionExpectationArgs.builder()
.sqlExpression("string")
.build())
.ignoreNull(false)
.name("string")
.nonNullExpectation()
.column("string")
.regexExpectation(DatascanDataQualitySpecRuleRegexExpectationArgs.builder()
.regex("string")
.build())
.description("string")
.setExpectation(DatascanDataQualitySpecRuleSetExpectationArgs.builder()
.values("string")
.build())
.sqlAssertion(DatascanDataQualitySpecRuleSqlAssertionArgs.builder()
.sqlStatement("string")
.build())
.statisticRangeExpectation(DatascanDataQualitySpecRuleStatisticRangeExpectationArgs.builder()
.statistic("string")
.maxValue("string")
.minValue("string")
.strictMaxEnabled(false)
.strictMinEnabled(false)
.build())
.tableConditionExpectation(DatascanDataQualitySpecRuleTableConditionExpectationArgs.builder()
.sqlExpression("string")
.build())
.threshold(0)
.uniquenessExpectation()
.build())
.samplingPercent(0)
.build())
.description("string")
.displayName("string")
.labels(Map.of("string", "string"))
.project("string")
.build());
datascan_resource = gcp.dataplex.Datascan("datascanResource",
data={
"entity": "string",
"resource": "string",
},
data_scan_id="string",
execution_spec={
"trigger": {
"onDemand": {},
"schedule": {
"cron": "string",
},
},
"field": "string",
},
location="string",
data_profile_spec={
"excludeFields": {
"fieldNames": ["string"],
},
"includeFields": {
"fieldNames": ["string"],
},
"postScanActions": {
"bigqueryExport": {
"resultsTable": "string",
},
},
"rowFilter": "string",
"samplingPercent": 0,
},
data_quality_spec={
"postScanActions": {
"bigqueryExport": {
"resultsTable": "string",
},
},
"rowFilter": "string",
"rules": [{
"dimension": "string",
"rangeExpectation": {
"maxValue": "string",
"minValue": "string",
"strictMaxEnabled": False,
"strictMinEnabled": False,
},
"rowConditionExpectation": {
"sqlExpression": "string",
},
"ignoreNull": False,
"name": "string",
"nonNullExpectation": {},
"column": "string",
"regexExpectation": {
"regex": "string",
},
"description": "string",
"setExpectation": {
"values": ["string"],
},
"sqlAssertion": {
"sqlStatement": "string",
},
"statisticRangeExpectation": {
"statistic": "string",
"maxValue": "string",
"minValue": "string",
"strictMaxEnabled": False,
"strictMinEnabled": False,
},
"tableConditionExpectation": {
"sqlExpression": "string",
},
"threshold": 0,
"uniquenessExpectation": {},
}],
"samplingPercent": 0,
},
description="string",
display_name="string",
labels={
"string": "string",
},
project="string")
const datascanResource = new gcp.dataplex.Datascan("datascanResource", {
data: {
entity: "string",
resource: "string",
},
dataScanId: "string",
executionSpec: {
trigger: {
onDemand: {},
schedule: {
cron: "string",
},
},
field: "string",
},
location: "string",
dataProfileSpec: {
excludeFields: {
fieldNames: ["string"],
},
includeFields: {
fieldNames: ["string"],
},
postScanActions: {
bigqueryExport: {
resultsTable: "string",
},
},
rowFilter: "string",
samplingPercent: 0,
},
dataQualitySpec: {
postScanActions: {
bigqueryExport: {
resultsTable: "string",
},
},
rowFilter: "string",
rules: [{
dimension: "string",
rangeExpectation: {
maxValue: "string",
minValue: "string",
strictMaxEnabled: false,
strictMinEnabled: false,
},
rowConditionExpectation: {
sqlExpression: "string",
},
ignoreNull: false,
name: "string",
nonNullExpectation: {},
column: "string",
regexExpectation: {
regex: "string",
},
description: "string",
setExpectation: {
values: ["string"],
},
sqlAssertion: {
sqlStatement: "string",
},
statisticRangeExpectation: {
statistic: "string",
maxValue: "string",
minValue: "string",
strictMaxEnabled: false,
strictMinEnabled: false,
},
tableConditionExpectation: {
sqlExpression: "string",
},
threshold: 0,
uniquenessExpectation: {},
}],
samplingPercent: 0,
},
description: "string",
displayName: "string",
labels: {
string: "string",
},
project: "string",
});
type: gcp:dataplex:Datascan
properties:
data:
entity: string
resource: string
dataProfileSpec:
excludeFields:
fieldNames:
- string
includeFields:
fieldNames:
- string
postScanActions:
bigqueryExport:
resultsTable: string
rowFilter: string
samplingPercent: 0
dataQualitySpec:
postScanActions:
bigqueryExport:
resultsTable: string
rowFilter: string
rules:
- column: string
description: string
dimension: string
ignoreNull: false
name: string
nonNullExpectation: {}
rangeExpectation:
maxValue: string
minValue: string
strictMaxEnabled: false
strictMinEnabled: false
regexExpectation:
regex: string
rowConditionExpectation:
sqlExpression: string
setExpectation:
values:
- string
sqlAssertion:
sqlStatement: string
statisticRangeExpectation:
maxValue: string
minValue: string
statistic: string
strictMaxEnabled: false
strictMinEnabled: false
tableConditionExpectation:
sqlExpression: string
threshold: 0
uniquenessExpectation: {}
samplingPercent: 0
dataScanId: string
description: string
displayName: string
executionSpec:
field: string
trigger:
onDemand: {}
schedule:
cron: string
labels:
string: string
location: string
project: string
Datascan Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
The Datascan resource accepts the following input properties:
- Data DatascanData
- The data source for DataScan. Structure is documented below.
- DataScanId string
- DataScan identifier. Must contain only lowercase letters, numbers and hyphens. Must start with a letter. Must end with a number or a letter.
- ExecutionSpec DatascanExecutionSpec
- DataScan execution settings. Structure is documented below.
- Location string
- The location where the data scan should reside.
- DataProfileSpec DatascanDataProfileSpec
- DataProfileScan related setting.
- DataQualitySpec DatascanDataQualitySpec
- DataQualityScan related setting.
- Description string
- Description of the scan.
- Display
Name string - User friendly display name.
- Labels Dictionary<string, string>
- User-defined labels for the scan. A list of key->value pairs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- Project string
- Data
Datascan
Data Args - The data source for DataScan. Structure is documented below.
- Data
Scan stringId - DataScan identifier. Must contain only lowercase letters, numbers and hyphens. Must start with a letter. Must end with a number or a letter.
- Execution
Spec DatascanExecution Spec Args - DataScan execution settings. Structure is documented below.
- Location string
- The location where the data scan should reside.
- Data
Profile DatascanSpec Data Profile Spec Args - DataProfileScan related setting.
- Data
Quality DatascanSpec Data Quality Spec Args - DataQualityScan related setting.
- Description string
- Description of the scan.
- Display
Name string - User friendly display name.
- Labels map[string]string
- User-defined labels for the scan. A list of key->value pairs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- Project string
- data
Datascan
Data - The data source for DataScan. Structure is documented below.
- data
Scan StringId - DataScan identifier. Must contain only lowercase letters, numbers and hyphens. Must start with a letter. Must end with a number or a letter.
- execution
Spec DatascanExecution Spec - DataScan execution settings. Structure is documented below.
- location String
- The location where the data scan should reside.
- data
Profile DatascanSpec Data Profile Spec - DataProfileScan related setting.
- data
Quality DatascanSpec Data Quality Spec - DataQualityScan related setting.
- description String
- Description of the scan.
- display
Name String - User friendly display name.
- labels Map<String,String>
- User-defined labels for the scan. A list of key->value pairs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- project String
- data
Datascan
Data - The data source for DataScan. Structure is documented below.
- data
Scan stringId - DataScan identifier. Must contain only lowercase letters, numbers and hyphens. Must start with a letter. Must end with a number or a letter.
- execution
Spec DatascanExecution Spec - DataScan execution settings. Structure is documented below.
- location string
- The location where the data scan should reside.
- data
Profile DatascanSpec Data Profile Spec - DataProfileScan related setting.
- data
Quality DatascanSpec Data Quality Spec - DataQualityScan related setting.
- description string
- Description of the scan.
- display
Name string - User friendly display name.
- labels {[key: string]: string}
- User-defined labels for the scan. A list of key->value pairs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- project string
- data DatascanDataArgs
- The data source for DataScan. Structure is documented below.
- data_scan_id str
- DataScan identifier. Must contain only lowercase letters, numbers and hyphens. Must start with a letter. Must end with a number or a letter.
- execution_spec DatascanExecutionSpecArgs
- DataScan execution settings. Structure is documented below.
- location str
- The location where the data scan should reside.
- data_profile_spec DatascanDataProfileSpecArgs
- DataProfileScan related setting.
- data_quality_spec DatascanDataQualitySpecArgs
- DataQualityScan related setting.
- description str
- Description of the scan.
- display_
name str - User friendly display name.
- labels Mapping[str, str]
- User-defined labels for the scan. A list of key->value pairs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- project str
- data Property Map
- The data source for DataScan. Structure is documented below.
- data
Scan StringId - DataScan identifier. Must contain only lowercase letters, numbers and hyphens. Must start with a letter. Must end with a number or a letter.
- execution
Spec Property Map - DataScan execution settings. Structure is documented below.
- location String
- The location where the data scan should reside.
- data
Profile Property MapSpec - DataProfileScan related setting.
- data
Quality Property MapSpec - DataQualityScan related setting.
- description String
- Description of the scan.
- display
Name String - User friendly display name.
- labels Map<String>
- User-defined labels for the scan. A list of key->value pairs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- project String
Outputs
All input properties are implicitly available as output properties. Additionally, the Datascan resource produces the following output properties:
- CreateTime string
- The time when the scan was created.
- EffectiveLabels Dictionary<string, string>
- All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- ExecutionStatuses List<DatascanExecutionStatus>
- Status of the data scan execution. Structure is documented below.
- Id string
- The provider-assigned unique ID for this managed resource.
- Name string
- The relative resource name of the scan, of the form: projects/{project}/locations/{locationId}/dataScans/{datascan_id}, where project refers to a project_id or project_number and locationId refers to a GCP region.
- PulumiLabels Dictionary<string, string>
- The combination of labels configured directly on the resource and default labels configured on the provider.
- State string
- Current state of the DataScan.
- Type string
- The type of DataScan.
- Uid string
- System generated globally unique ID for the scan. This ID will be different if the scan is deleted and re-created with the same name.
- UpdateTime string
- The time when the scan was last updated.
- Create
Time string - The time when the scan was created.
- Effective
Labels map[string]string - All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- Execution
Statuses []DatascanExecution Status - Status of the data scan execution. Structure is documented below.
- Id string
- The provider-assigned unique ID for this managed resource.
- Name string
- The relative resource name of the scan, of the form: projects/{project}/locations/{locationId}/dataScans/{datascan_id}, where project refers to a project_id or project_number and locationId refers to a GCP region.
- Pulumi
Labels map[string]string - The combination of labels configured directly on the resource and default labels configured on the provider.
- State string
- Current state of the DataScan.
- Type string
- The type of DataScan.
- Uid string
- System generated globally unique ID for the scan. This ID will be different if the scan is deleted and re-created with the same name.
- Update
Time string - The time when the scan was last updated.
- create
Time String - The time when the scan was created.
- effective
Labels Map<String,String> - All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- execution
Statuses List<DatascanExecution Status> - Status of the data scan execution. Structure is documented below.
- id String
- The provider-assigned unique ID for this managed resource.
- name String
- The relative resource name of the scan, of the form: projects/{project}/locations/{locationId}/dataScans/{datascan_id}, where project refers to a project_id or project_number and locationId refers to a GCP region.
- pulumi
Labels Map<String,String> - The combination of labels configured directly on the resource and default labels configured on the provider.
- state String
- Current state of the DataScan.
- type String
- The type of DataScan.
- uid String
- System generated globally unique ID for the scan. This ID will be different if the scan is deleted and re-created with the same name.
- update
Time String - The time when the scan was last updated.
- create
Time string - The time when the scan was created.
- effective
Labels {[key: string]: string} - All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- execution
Statuses DatascanExecution Status[] - Status of the data scan execution. Structure is documented below.
- id string
- The provider-assigned unique ID for this managed resource.
- name string
- The relative resource name of the scan, of the form: projects/{project}/locations/{locationId}/dataScans/{datascan_id}, where project refers to a project_id or project_number and locationId refers to a GCP region.
- pulumi
Labels {[key: string]: string} - The combination of labels configured directly on the resource and default labels configured on the provider.
- state string
- Current state of the DataScan.
- type string
- The type of DataScan.
- uid string
- System generated globally unique ID for the scan. This ID will be different if the scan is deleted and re-created with the same name.
- update
Time string - The time when the scan was last updated.
- create_
time str - The time when the scan was created.
- effective_
labels Mapping[str, str] - All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- execution_
statuses Sequence[DatascanExecution Status] - Status of the data scan execution. Structure is documented below.
- id str
- The provider-assigned unique ID for this managed resource.
- name str
- The relative resource name of the scan, of the form: projects/{project}/locations/{locationId}/dataScans/{datascan_id}, where project refers to a project_id or project_number and locationId refers to a GCP region.
- pulumi_
labels Mapping[str, str] - The combination of labels configured directly on the resource and default labels configured on the provider.
- state str
- Current state of the DataScan.
- type str
- The type of DataScan.
- uid str
- System generated globally unique ID for the scan. This ID will be different if the scan is deleted and re-created with the same name.
- update_
time str - The time when the scan was last updated.
- create
Time String - The time when the scan was created.
- effective
Labels Map<String> - All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- execution
Statuses List<Property Map> - Status of the data scan execution. Structure is documented below.
- id String
- The provider-assigned unique ID for this managed resource.
- name String
- The relative resource name of the scan, of the form: projects/{project}/locations/{locationId}/dataScans/{datascan_id}, where project refers to a project_id or project_number and locationId refers to a GCP region.
- pulumi
Labels Map<String> - The combination of labels configured directly on the resource and default labels configured on the provider.
- state String
- Current state of the DataScan.
- type String
- The type of DataScan.
- uid String
- System generated globally unique ID for the scan. This ID will be different if the scan is deleted and re-created with the same name.
- update
Time String - The time when the scan was last updated.
Look up Existing Datascan Resource
Get an existing Datascan resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: DatascanState, opts?: CustomResourceOptions): Datascan
@staticmethod
def get(resource_name: str,
id: str,
opts: Optional[ResourceOptions] = None,
create_time: Optional[str] = None,
data: Optional[DatascanDataArgs] = None,
data_profile_spec: Optional[DatascanDataProfileSpecArgs] = None,
data_quality_spec: Optional[DatascanDataQualitySpecArgs] = None,
data_scan_id: Optional[str] = None,
description: Optional[str] = None,
display_name: Optional[str] = None,
effective_labels: Optional[Mapping[str, str]] = None,
execution_spec: Optional[DatascanExecutionSpecArgs] = None,
execution_statuses: Optional[Sequence[DatascanExecutionStatusArgs]] = None,
labels: Optional[Mapping[str, str]] = None,
location: Optional[str] = None,
name: Optional[str] = None,
project: Optional[str] = None,
pulumi_labels: Optional[Mapping[str, str]] = None,
state: Optional[str] = None,
type: Optional[str] = None,
uid: Optional[str] = None,
update_time: Optional[str] = None) -> Datascan
func GetDatascan(ctx *Context, name string, id IDInput, state *DatascanState, opts ...ResourceOption) (*Datascan, error)
public static Datascan Get(string name, Input<string> id, DatascanState? state, CustomResourceOptions? opts = null)
public static Datascan get(String name, Output<String> id, DatascanState state, CustomResourceOptions options)
Resource lookup is not supported in YAML
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- Create
Time string - The time when the scan was created.
- Data
Datascan
Data - The data source for DataScan. Structure is documented below.
- DataProfileSpec DatascanDataProfileSpec
- DataProfileScan related setting.
- DataQualitySpec DatascanDataQualitySpec
- DataQualityScan related setting.
- DataScanId string
- DataScan identifier. Must contain only lowercase letters, numbers and hyphens. Must start with a letter. Must end with a number or a letter.
- Description string
- Description of the scan.
- Display
Name string - User friendly display name.
- Effective
Labels Dictionary<string, string> - All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- ExecutionSpec DatascanExecutionSpec
- DataScan execution settings. Structure is documented below.
- ExecutionStatuses List&lt;DatascanExecutionStatus&gt;
- Status of the data scan execution. Structure is documented below.
- Labels Dictionary<string, string>
- User-defined labels for the scan. A list of key->value pairs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- Location string
- The location where the data scan should reside.
- Name string
- The relative resource name of the scan, of the form: projects/{project}/locations/{locationId}/dataScans/{datascan_id}, where project refers to a project_id or project_number and locationId refers to a GCP region.
- Project string
- Pulumi
Labels Dictionary<string, string> - The combination of labels configured directly on the resource and default labels configured on the provider.
- State string
- Current state of the DataScan.
- Type string
- The type of DataScan.
- Uid string
- System generated globally unique ID for the scan. This ID will be different if the scan is deleted and re-created with the same name.
- Update
Time string - The time when the scan was last updated.
- Create
Time string - The time when the scan was created.
- Data
Datascan
Data Args - The data source for DataScan. Structure is documented below.
- DataProfileSpec DatascanDataProfileSpecArgs
- DataProfileScan related setting.
- DataQualitySpec DatascanDataQualitySpecArgs
- DataQualityScan related setting.
- DataScanId string
- DataScan identifier. Must contain only lowercase letters, numbers and hyphens. Must start with a letter. Must end with a number or a letter.
- Description string
- Description of the scan.
- Display
Name string - User friendly display name.
- Effective
Labels map[string]string - All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- Execution
Spec DatascanExecution Spec Args - DataScan execution settings. Structure is documented below.
- Execution
Statuses []DatascanExecution Status Args - Status of the data scan execution. Structure is documented below.
- Labels map[string]string
- User-defined labels for the scan. A list of key->value pairs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- Location string
- The location where the data scan should reside.
- Name string
- The relative resource name of the scan, of the form: projects/{project}/locations/{locationId}/dataScans/{datascan_id}, where project refers to a project_id or project_number and locationId refers to a GCP region.
- Project string
- Pulumi
Labels map[string]string - The combination of labels configured directly on the resource and default labels configured on the provider.
- State string
- Current state of the DataScan.
- Type string
- The type of DataScan.
- Uid string
- System generated globally unique ID for the scan. This ID will be different if the scan is deleted and re-created with the same name.
- Update
Time string - The time when the scan was last updated.
- create
Time String - The time when the scan was created.
- data
Datascan
Data - The data source for DataScan. Structure is documented below.
- dataProfileSpec DatascanDataProfileSpec
- DataProfileScan related setting.
- dataQualitySpec DatascanDataQualitySpec
- DataQualityScan related setting.
- dataScanId String
- DataScan identifier. Must contain only lowercase letters, numbers and hyphens. Must start with a letter. Must end with a number or a letter.
- description String
- Description of the scan.
- display
Name String - User friendly display name.
- effective
Labels Map<String,String> - All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- execution
Spec DatascanExecution Spec - DataScan execution settings. Structure is documented below.
- execution
Statuses List<DatascanExecution Status> - Status of the data scan execution. Structure is documented below.
- labels Map<String,String>
- User-defined labels for the scan. A list of key->value pairs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- location String
- The location where the data scan should reside.
- name String
- The relative resource name of the scan, of the form: projects/{project}/locations/{locationId}/dataScans/{datascan_id}, where project refers to a project_id or project_number and locationId refers to a GCP region.
- project String
- pulumi
Labels Map<String,String> - The combination of labels configured directly on the resource and default labels configured on the provider.
- state String
- Current state of the DataScan.
- type String
- The type of DataScan.
- uid String
- System generated globally unique ID for the scan. This ID will be different if the scan is deleted and re-created with the same name.
- update
Time String - The time when the scan was last updated.
- create
Time string - The time when the scan was created.
- data
Datascan
Data - The data source for DataScan. Structure is documented below.
- dataProfileSpec DatascanDataProfileSpec
- DataProfileScan related setting.
- dataQualitySpec DatascanDataQualitySpec
- DataQualityScan related setting.
- dataScanId string
- DataScan identifier. Must contain only lowercase letters, numbers and hyphens. Must start with a letter. Must end with a number or a letter.
- description string
- Description of the scan.
- display
Name string - User friendly display name.
- effective
Labels {[key: string]: string} - All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- execution
Spec DatascanExecution Spec - DataScan execution settings. Structure is documented below.
- execution
Statuses DatascanExecution Status[] - Status of the data scan execution. Structure is documented below.
- labels {[key: string]: string}
- User-defined labels for the scan. A list of key->value pairs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- location string
- The location where the data scan should reside.
- name string
- The relative resource name of the scan, of the form: projects/{project}/locations/{locationId}/dataScans/{datascan_id}, where project refers to a project_id or project_number and locationId refers to a GCP region.
- project string
- pulumi
Labels {[key: string]: string} - The combination of labels configured directly on the resource and default labels configured on the provider.
- state string
- Current state of the DataScan.
- type string
- The type of DataScan.
- uid string
- System generated globally unique ID for the scan. This ID will be different if the scan is deleted and re-created with the same name.
- update
Time string - The time when the scan was last updated.
- create_
time str - The time when the scan was created.
- data
Datascan
Data Args - The data source for DataScan. Structure is documented below.
- data_profile_spec DatascanDataProfileSpecArgs
- DataProfileScan related setting.
- data_quality_spec DatascanDataQualitySpecArgs
- DataQualityScan related setting.
- data_scan_id str
- DataScan identifier. Must contain only lowercase letters, numbers and hyphens. Must start with a letter. Must end with a number or a letter.
- description str
- Description of the scan.
- display_
name str - User friendly display name.
- effective_
labels Mapping[str, str] - All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- execution_
spec DatascanExecution Spec Args - DataScan execution settings. Structure is documented below.
- execution_
statuses Sequence[DatascanExecution Status Args] - Status of the data scan execution. Structure is documented below.
- labels Mapping[str, str]
- User-defined labels for the scan. A list of key->value pairs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- location str
- The location where the data scan should reside.
- name str
- The relative resource name of the scan, of the form: projects/{project}/locations/{locationId}/dataScans/{datascan_id}, where project refers to a project_id or project_number and locationId refers to a GCP region.
- project str
- pulumi_
labels Mapping[str, str] - The combination of labels configured directly on the resource and default labels configured on the provider.
- state str
- Current state of the DataScan.
- type str
- The type of DataScan.
- uid str
- System generated globally unique ID for the scan. This ID will be different if the scan is deleted and re-created with the same name.
- update_
time str - The time when the scan was last updated.
- create
Time String - The time when the scan was created.
- data Property Map
- The data source for DataScan. Structure is documented below.
- dataProfileSpec Property Map
- DataProfileScan related setting.
- dataQualitySpec Property Map
- DataQualityScan related setting.
- dataScanId String
- DataScan identifier. Must contain only lowercase letters, numbers and hyphens. Must start with a letter. Must end with a number or a letter.
- description String
- Description of the scan.
- display
Name String - User friendly display name.
- effective
Labels Map<String> - All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- execution
Spec Property Map - DataScan execution settings. Structure is documented below.
- execution
Statuses List<Property Map> - Status of the data scan execution. Structure is documented below.
- labels Map<String>
- User-defined labels for the scan. A list of key->value pairs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- location String
- The location where the data scan should reside.
- name String
- The relative resource name of the scan, of the form: projects/{project}/locations/{locationId}/dataScans/{datascan_id}, where project refers to a project_id or project_number and locationId refers to a GCP region.
- project String
- pulumi
Labels Map<String> - The combination of labels configured directly on the resource and default labels configured on the provider.
- state String
- Current state of the DataScan.
- type String
- The type of DataScan.
- uid String
- System generated globally unique ID for the scan. This ID will be different if the scan is deleted and re-created with the same name.
- update
Time String - The time when the scan was last updated.
Supporting Types
DatascanData, DatascanDataArgs
- Entity string
- The Dataplex entity that represents the data source(e.g. BigQuery table) for Datascan.
- Resource string
- The service-qualified full resource name of the cloud resource for a DataScan job to scan against. The field could be: (Cloud Storage bucket for DataDiscoveryScan)BigQuery table of type "TABLE" for DataProfileScan/DataQualityScan.
- Entity string
- The Dataplex entity that represents the data source(e.g. BigQuery table) for Datascan.
- Resource string
- The service-qualified full resource name of the cloud resource for a DataScan job to scan against. The field could be: (Cloud Storage bucket for DataDiscoveryScan)BigQuery table of type "TABLE" for DataProfileScan/DataQualityScan.
- entity String
- The Dataplex entity that represents the data source(e.g. BigQuery table) for Datascan.
- resource String
- The service-qualified full resource name of the cloud resource for a DataScan job to scan against. The field could be: (Cloud Storage bucket for DataDiscoveryScan)BigQuery table of type "TABLE" for DataProfileScan/DataQualityScan.
- entity string
- The Dataplex entity that represents the data source(e.g. BigQuery table) for Datascan.
- resource string
- The service-qualified full resource name of the cloud resource for a DataScan job to scan against. The field could be: (Cloud Storage bucket for DataDiscoveryScan)BigQuery table of type "TABLE" for DataProfileScan/DataQualityScan.
- entity str
- The Dataplex entity that represents the data source(e.g. BigQuery table) for Datascan.
- resource str
- The service-qualified full resource name of the cloud resource for a DataScan job to scan against. The field could be: (Cloud Storage bucket for DataDiscoveryScan)BigQuery table of type "TABLE" for DataProfileScan/DataQualityScan.
- entity String
- The Dataplex entity that represents the data source(e.g. BigQuery table) for Datascan.
- resource String
- The service-qualified full resource name of the cloud resource for a DataScan job to scan against. The field could be: (Cloud Storage bucket for DataDiscoveryScan)BigQuery table of type "TABLE" for DataProfileScan/DataQualityScan.
DatascanDataProfileSpec, DatascanDataProfileSpecArgs
- Exclude
Fields DatascanData Profile Spec Exclude Fields - The fields to exclude from data profile.
If specified, the fields will be excluded from data profile, regardless of
include_fields
value. Structure is documented below. - Include
Fields DatascanData Profile Spec Include Fields - The fields to include in data profile.
If not specified, all fields at the time of profile scan job execution are included, except for ones listed in
exclude_fields
. Structure is documented below. - PostScanActions DatascanDataProfileSpecPostScanActions
- Actions to take upon job completion. Structure is documented below.
- Row
Filter string - A filter applied to all rows in a single DataScan job. The filter needs to be a valid SQL expression for a WHERE clause in BigQuery standard SQL syntax. Example: col1 >= 0 AND col2 < 10
- Sampling
Percent double - The percentage of the records to be selected from the dataset for DataScan.
Value can range between 0.0 and 100.0 with up to 3 significant decimal digits.
Sampling is not applied if
sampling_percent
is not specified, 0 or 100.
- Exclude
Fields DatascanData Profile Spec Exclude Fields - The fields to exclude from data profile.
If specified, the fields will be excluded from data profile, regardless of
include_fields
value. Structure is documented below. - Include
Fields DatascanData Profile Spec Include Fields - The fields to include in data profile.
If not specified, all fields at the time of profile scan job execution are included, except for ones listed in
exclude_fields
. Structure is documented below. - PostScanActions DatascanDataProfileSpecPostScanActions
- Actions to take upon job completion. Structure is documented below.
- Row
Filter string - A filter applied to all rows in a single DataScan job. The filter needs to be a valid SQL expression for a WHERE clause in BigQuery standard SQL syntax. Example: col1 >= 0 AND col2 < 10
- Sampling
Percent float64 - The percentage of the records to be selected from the dataset for DataScan.
Value can range between 0.0 and 100.0 with up to 3 significant decimal digits.
Sampling is not applied if
sampling_percent
is not specified, 0 or 100.
- exclude
Fields DatascanData Profile Spec Exclude Fields - The fields to exclude from data profile.
If specified, the fields will be excluded from data profile, regardless of
include_fields
value. Structure is documented below. - include
Fields DatascanData Profile Spec Include Fields - The fields to include in data profile.
If not specified, all fields at the time of profile scan job execution are included, except for ones listed in
exclude_fields
. Structure is documented below. - postScanActions DatascanDataProfileSpecPostScanActions
- Actions to take upon job completion. Structure is documented below.
- row
Filter String - A filter applied to all rows in a single DataScan job. The filter needs to be a valid SQL expression for a WHERE clause in BigQuery standard SQL syntax. Example: col1 >= 0 AND col2 < 10
- sampling
Percent Double - The percentage of the records to be selected from the dataset for DataScan.
Value can range between 0.0 and 100.0 with up to 3 significant decimal digits.
Sampling is not applied if
sampling_percent
is not specified, 0 or 100.
- exclude
Fields DatascanData Profile Spec Exclude Fields - The fields to exclude from data profile.
If specified, the fields will be excluded from data profile, regardless of
include_fields
value. Structure is documented below. - include
Fields DatascanData Profile Spec Include Fields - The fields to include in data profile.
If not specified, all fields at the time of profile scan job execution are included, except for ones listed in
exclude_fields
. Structure is documented below. - postScanActions DatascanDataProfileSpecPostScanActions
- Actions to take upon job completion. Structure is documented below.
- row
Filter string - A filter applied to all rows in a single DataScan job. The filter needs to be a valid SQL expression for a WHERE clause in BigQuery standard SQL syntax. Example: col1 >= 0 AND col2 < 10
- sampling
Percent number - The percentage of the records to be selected from the dataset for DataScan.
Value can range between 0.0 and 100.0 with up to 3 significant decimal digits.
Sampling is not applied if
sampling_percent
is not specified, 0 or 100.
- exclude_
fields DatascanData Profile Spec Exclude Fields - The fields to exclude from data profile.
If specified, the fields will be excluded from data profile, regardless of
include_fields
value. Structure is documented below. - include_
fields DatascanData Profile Spec Include Fields - The fields to include in data profile.
If not specified, all fields at the time of profile scan job execution are included, except for ones listed in
exclude_fields
. Structure is documented below. - post_scan_actions DatascanDataProfileSpecPostScanActions
- Actions to take upon job completion. Structure is documented below.
- row_
filter str - A filter applied to all rows in a single DataScan job. The filter needs to be a valid SQL expression for a WHERE clause in BigQuery standard SQL syntax. Example: col1 >= 0 AND col2 < 10
- sampling_
percent float - The percentage of the records to be selected from the dataset for DataScan.
Value can range between 0.0 and 100.0 with up to 3 significant decimal digits.
Sampling is not applied if
sampling_percent
is not specified, 0 or 100.
- exclude
Fields Property Map - The fields to exclude from data profile.
If specified, the fields will be excluded from data profile, regardless of
include_fields
value. Structure is documented below. - include
Fields Property Map - The fields to include in data profile.
If not specified, all fields at the time of profile scan job execution are included, except for ones listed in
exclude_fields
. Structure is documented below. - postScanActions Property Map
- Actions to take upon job completion. Structure is documented below.
- row
Filter String - A filter applied to all rows in a single DataScan job. The filter needs to be a valid SQL expression for a WHERE clause in BigQuery standard SQL syntax. Example: col1 >= 0 AND col2 < 10
- sampling
Percent Number - The percentage of the records to be selected from the dataset for DataScan.
Value can range between 0.0 and 100.0 with up to 3 significant decimal digits.
Sampling is not applied if
sampling_percent
is not specified, 0 or 100.
DatascanDataProfileSpecExcludeFields, DatascanDataProfileSpecExcludeFieldsArgs
- Field
Names List<string> - Expected input is a list of fully qualified names of fields as in the schema. Only top-level field names for nested fields are supported. For instance, if 'x' is of nested field type, listing 'x' is supported but 'x.y.z' is not supported. Here 'y' and 'y.z' are nested fields of 'x'.
- Field
Names []string - Expected input is a list of fully qualified names of fields as in the schema. Only top-level field names for nested fields are supported. For instance, if 'x' is of nested field type, listing 'x' is supported but 'x.y.z' is not supported. Here 'y' and 'y.z' are nested fields of 'x'.
- field
Names List<String> - Expected input is a list of fully qualified names of fields as in the schema. Only top-level field names for nested fields are supported. For instance, if 'x' is of nested field type, listing 'x' is supported but 'x.y.z' is not supported. Here 'y' and 'y.z' are nested fields of 'x'.
- field
Names string[] - Expected input is a list of fully qualified names of fields as in the schema. Only top-level field names for nested fields are supported. For instance, if 'x' is of nested field type, listing 'x' is supported but 'x.y.z' is not supported. Here 'y' and 'y.z' are nested fields of 'x'.
- field_
names Sequence[str] - Expected input is a list of fully qualified names of fields as in the schema. Only top-level field names for nested fields are supported. For instance, if 'x' is of nested field type, listing 'x' is supported but 'x.y.z' is not supported. Here 'y' and 'y.z' are nested fields of 'x'.
- field
Names List<String> - Expected input is a list of fully qualified names of fields as in the schema. Only top-level field names for nested fields are supported. For instance, if 'x' is of nested field type, listing 'x' is supported but 'x.y.z' is not supported. Here 'y' and 'y.z' are nested fields of 'x'.
DatascanDataProfileSpecIncludeFields, DatascanDataProfileSpecIncludeFieldsArgs
- Field
Names List<string> - Expected input is a list of fully qualified names of fields as in the schema. Only top-level field names for nested fields are supported. For instance, if 'x' is of nested field type, listing 'x' is supported but 'x.y.z' is not supported. Here 'y' and 'y.z' are nested fields of 'x'.
- Field
Names []string - Expected input is a list of fully qualified names of fields as in the schema. Only top-level field names for nested fields are supported. For instance, if 'x' is of nested field type, listing 'x' is supported but 'x.y.z' is not supported. Here 'y' and 'y.z' are nested fields of 'x'.
- field
Names List<String> - Expected input is a list of fully qualified names of fields as in the schema. Only top-level field names for nested fields are supported. For instance, if 'x' is of nested field type, listing 'x' is supported but 'x.y.z' is not supported. Here 'y' and 'y.z' are nested fields of 'x'.
- field
Names string[] - Expected input is a list of fully qualified names of fields as in the schema. Only top-level field names for nested fields are supported. For instance, if 'x' is of nested field type, listing 'x' is supported but 'x.y.z' is not supported. Here 'y' and 'y.z' are nested fields of 'x'.
- field_
names Sequence[str] - Expected input is a list of fully qualified names of fields as in the schema. Only top-level field names for nested fields are supported. For instance, if 'x' is of nested field type, listing 'x' is supported but 'x.y.z' is not supported. Here 'y' and 'y.z' are nested fields of 'x'.
- field
Names List<String> - Expected input is a list of fully qualified names of fields as in the schema. Only top-level field names for nested fields are supported. For instance, if 'x' is of nested field type, listing 'x' is supported but 'x.y.z' is not supported. Here 'y' and 'y.z' are nested fields of 'x'.
DatascanDataProfileSpecPostScanActions, DatascanDataProfileSpecPostScanActionsArgs
- Bigquery
Export DatascanData Profile Spec Post Scan Actions Bigquery Export - If set, results will be exported to the provided BigQuery table. Structure is documented below.
- Bigquery
Export DatascanData Profile Spec Post Scan Actions Bigquery Export - If set, results will be exported to the provided BigQuery table. Structure is documented below.
- bigquery
Export DatascanData Profile Spec Post Scan Actions Bigquery Export - If set, results will be exported to the provided BigQuery table. Structure is documented below.
- bigquery
Export DatascanData Profile Spec Post Scan Actions Bigquery Export - If set, results will be exported to the provided BigQuery table. Structure is documented below.
- bigquery_
export DatascanData Profile Spec Post Scan Actions Bigquery Export - If set, results will be exported to the provided BigQuery table. Structure is documented below.
- bigquery
Export Property Map - If set, results will be exported to the provided BigQuery table. Structure is documented below.
DatascanDataProfileSpecPostScanActionsBigqueryExport, DatascanDataProfileSpecPostScanActionsBigqueryExportArgs
- Results
Table string - The BigQuery table to export DataProfileScan results to. Format://bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID/tables/TABLE_ID
- Results
Table string - The BigQuery table to export DataProfileScan results to. Format://bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID/tables/TABLE_ID
- results
Table String - The BigQuery table to export DataProfileScan results to. Format://bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID/tables/TABLE_ID
- results
Table string - The BigQuery table to export DataProfileScan results to. Format://bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID/tables/TABLE_ID
- results_
table str - The BigQuery table to export DataProfileScan results to. Format://bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID/tables/TABLE_ID
- results
Table String - The BigQuery table to export DataProfileScan results to. Format://bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID/tables/TABLE_ID
DatascanDataQualitySpec, DatascanDataQualitySpecArgs
- PostScanActions DatascanDataQualitySpecPostScanActions
- Actions to take upon job completion. Structure is documented below.
- Row
Filter string - A filter applied to all rows in a single DataScan job. The filter needs to be a valid SQL expression for a WHERE clause in BigQuery standard SQL syntax. Example: col1 >= 0 AND col2 < 10
- Rules
List<Datascan
Data Quality Spec Rule> - The list of rules to evaluate against a data source. At least one rule is required. Structure is documented below.
- Sampling
Percent double - The percentage of the records to be selected from the dataset for DataScan.
Value can range between 0.0 and 100.0 with up to 3 significant decimal digits.
Sampling is not applied if
sampling_percent
is not specified, 0 or 100.
- Post
Scan DatascanActions Data Quality Spec Post Scan Actions - Actions to take upon job completion. Structure is documented below.
- Row
Filter string - A filter applied to all rows in a single DataScan job. The filter needs to be a valid SQL expression for a WHERE clause in BigQuery standard SQL syntax. Example: col1 >= 0 AND col2 < 10
- Rules
[]Datascan
Data Quality Spec Rule - The list of rules to evaluate against a data source. At least one rule is required. Structure is documented below.
- Sampling
Percent float64 - The percentage of the records to be selected from the dataset for DataScan.
Value can range between 0.0 and 100.0 with up to 3 significant decimal digits.
Sampling is not applied if
sampling_percent
is not specified, 0 or 100.
- post
Scan DatascanActions Data Quality Spec Post Scan Actions - Actions to take upon job completion. Structure is documented below.
- row
Filter String - A filter applied to all rows in a single DataScan job. The filter needs to be a valid SQL expression for a WHERE clause in BigQuery standard SQL syntax. Example: col1 >= 0 AND col2 < 10
- rules
List<Datascan
Data Quality Spec Rule> - The list of rules to evaluate against a data source. At least one rule is required. Structure is documented below.
- sampling
Percent Double - The percentage of the records to be selected from the dataset for DataScan.
Value can range between 0.0 and 100.0 with up to 3 significant decimal digits.
Sampling is not applied if
sampling_percent
is not specified, 0 or 100.
- post
Scan DatascanActions Data Quality Spec Post Scan Actions - Actions to take upon job completion. Structure is documented below.
- row
Filter string - A filter applied to all rows in a single DataScan job. The filter needs to be a valid SQL expression for a WHERE clause in BigQuery standard SQL syntax. Example: col1 >= 0 AND col2 < 10
- rules
Datascan
Data Quality Spec Rule[] - The list of rules to evaluate against a data source. At least one rule is required. Structure is documented below.
- sampling
Percent number - The percentage of the records to be selected from the dataset for DataScan.
Value can range between 0.0 and 100.0 with up to 3 significant decimal digits.
Sampling is not applied if
sampling_percent
is not specified, 0 or 100.
- post_scan_actions DatascanDataQualitySpecPostScanActions
- Actions to take upon job completion. Structure is documented below.
- row_
filter str - A filter applied to all rows in a single DataScan job. The filter needs to be a valid SQL expression for a WHERE clause in BigQuery standard SQL syntax. Example: col1 >= 0 AND col2 < 10
- rules
Sequence[Datascan
Data Quality Spec Rule] - The list of rules to evaluate against a data source. At least one rule is required. Structure is documented below.
- sampling_
percent float - The percentage of the records to be selected from the dataset for DataScan.
Value can range between 0.0 and 100.0 with up to 3 significant decimal digits.
Sampling is not applied if
sampling_percent
is not specified, 0 or 100.
- post
Scan Property MapActions - Actions to take upon job completion. Structure is documented below.
- row
Filter String - A filter applied to all rows in a single DataScan job. The filter needs to be a valid SQL expression for a WHERE clause in BigQuery standard SQL syntax. Example: col1 >= 0 AND col2 < 10
- rules List<Property Map>
- The list of rules to evaluate against a data source. At least one rule is required. Structure is documented below.
- sampling
Percent Number - The percentage of the records to be selected from the dataset for DataScan.
Value can range between 0.0 and 100.0 with up to 3 significant decimal digits.
Sampling is not applied if
sampling_percent
is not specified, 0 or 100.
DatascanDataQualitySpecPostScanActions, DatascanDataQualitySpecPostScanActionsArgs
- Bigquery
Export DatascanData Quality Spec Post Scan Actions Bigquery Export - If set, results will be exported to the provided BigQuery table. Structure is documented below.
- Bigquery
Export DatascanData Quality Spec Post Scan Actions Bigquery Export - If set, results will be exported to the provided BigQuery table. Structure is documented below.
- bigquery
Export DatascanData Quality Spec Post Scan Actions Bigquery Export - If set, results will be exported to the provided BigQuery table. Structure is documented below.
- bigquery
Export DatascanData Quality Spec Post Scan Actions Bigquery Export - If set, results will be exported to the provided BigQuery table. Structure is documented below.
- bigquery_
export DatascanData Quality Spec Post Scan Actions Bigquery Export - If set, results will be exported to the provided BigQuery table. Structure is documented below.
- bigquery
Export Property Map - If set, results will be exported to the provided BigQuery table. Structure is documented below.
DatascanDataQualitySpecPostScanActionsBigqueryExport, DatascanDataQualitySpecPostScanActionsBigqueryExportArgs
- Results
Table string - The BigQuery table to export DataProfileScan results to. Format://bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID/tables/TABLE_ID
- Results
Table string - The BigQuery table to export DataProfileScan results to. Format://bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID/tables/TABLE_ID
- results
Table String - The BigQuery table to export DataProfileScan results to. Format://bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID/tables/TABLE_ID
- results
Table string - The BigQuery table to export DataProfileScan results to. Format://bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID/tables/TABLE_ID
- results_
table str - The BigQuery table to export DataProfileScan results to. Format://bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID/tables/TABLE_ID
- results
Table String - The BigQuery table to export DataProfileScan results to. Format://bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID/tables/TABLE_ID
DatascanDataQualitySpecRule, DatascanDataQualitySpecRuleArgs
- Dimension string
- The dimension a rule belongs to. Results are also aggregated at the dimension level. Supported dimensions are ["COMPLETENESS", "ACCURACY", "CONSISTENCY", "VALIDITY", "UNIQUENESS", "INTEGRITY"]
- Column string
- The unnested column which this rule is evaluated against.
- Description string
- Description of the rule. The maximum length is 1,024 characters.
- Ignore
Null bool - Rows with null values will automatically fail a rule, unless ignoreNull is true. In that case, such null rows are trivially considered passing. Only applicable to ColumnMap rules.
- Name string
- A mutable name for the rule. The name must contain only letters (a-z, A-Z), numbers (0-9), or hyphens (-). The maximum length is 63 characters. Must start with a letter. Must end with a number or a letter.
- Non
Null Expectation DatascanData Quality Spec Rule Non Null Expectation - ColumnMap rule which evaluates whether each column value is null.
- Range
Expectation DatascanData Quality Spec Rule Range Expectation - ColumnMap rule which evaluates whether each column value lies between a specified range. Structure is documented below.
- Regex
Expectation DatascanData Quality Spec Rule Regex Expectation - ColumnMap rule which evaluates whether each column value matches a specified regex. Structure is documented below.
- Row
Condition Expectation DatascanData Quality Spec Rule Row Condition Expectation - Table rule which evaluates whether each row passes the specified condition. Structure is documented below.
- Set
Expectation DatascanData Quality Spec Rule Set Expectation - ColumnMap rule which evaluates whether each column value is contained by a specified set. Structure is documented below.
- Sql
Assertion DatascanData Quality Spec Rule Sql Assertion - Table rule which evaluates whether any row matches invalid state. Structure is documented below.
- Statistic
Range Expectation DatascanData Quality Spec Rule Statistic Range Expectation - ColumnAggregate rule which evaluates whether the column aggregate statistic lies between a specified range. Structure is documented below.
- Table
Condition Expectation DatascanData Quality Spec Rule Table Condition Expectation - Table rule which evaluates whether the provided expression is true. Structure is documented below.
- Threshold double
- The minimum ratio of passing_rows / total_rows required to pass this rule, with a range of [0.0, 1.0]. 0 indicates default value (i.e. 1.0).
- Uniqueness
Expectation DatascanData Quality Spec Rule Uniqueness Expectation - Row-level rule which evaluates whether each column value is unique.
- Dimension string
- The dimension a rule belongs to. Results are also aggregated at the dimension level. Supported dimensions are ["COMPLETENESS", "ACCURACY", "CONSISTENCY", "VALIDITY", "UNIQUENESS", "INTEGRITY"]
- Column string
- The unnested column which this rule is evaluated against.
- Description string
- Description of the rule. The maximum length is 1,024 characters.
- Ignore
Null bool - Rows with null values will automatically fail a rule, unless ignoreNull is true. In that case, such null rows are trivially considered passing. Only applicable to ColumnMap rules.
- Name string
- A mutable name for the rule. The name must contain only letters (a-z, A-Z), numbers (0-9), or hyphens (-). The maximum length is 63 characters. Must start with a letter. Must end with a number or a letter.
- Non
Null Expectation DatascanData Quality Spec Rule Non Null Expectation - ColumnMap rule which evaluates whether each column value is null.
- Range
Expectation DatascanData Quality Spec Rule Range Expectation - ColumnMap rule which evaluates whether each column value lies between a specified range. Structure is documented below.
- Regex
Expectation DatascanData Quality Spec Rule Regex Expectation - ColumnMap rule which evaluates whether each column value matches a specified regex. Structure is documented below.
- Row
Condition Expectation DatascanData Quality Spec Rule Row Condition Expectation - Table rule which evaluates whether each row passes the specified condition. Structure is documented below.
- Set
Expectation DatascanData Quality Spec Rule Set Expectation - ColumnMap rule which evaluates whether each column value is contained by a specified set. Structure is documented below.
- Sql
Assertion DatascanData Quality Spec Rule Sql Assertion - Table rule which evaluates whether any row matches invalid state. Structure is documented below.
- Statistic
Range Expectation DatascanData Quality Spec Rule Statistic Range Expectation - ColumnAggregate rule which evaluates whether the column aggregate statistic lies between a specified range. Structure is documented below.
- Table
Condition Expectation DatascanData Quality Spec Rule Table Condition Expectation - Table rule which evaluates whether the provided expression is true. Structure is documented below.
- Threshold float64
- The minimum ratio of passing_rows / total_rows required to pass this rule, with a range of [0.0, 1.0]. 0 indicates default value (i.e. 1.0).
- Uniqueness
Expectation DatascanData Quality Spec Rule Uniqueness Expectation - Row-level rule which evaluates whether each column value is unique.
- dimension String
- The dimension a rule belongs to. Results are also aggregated at the dimension level. Supported dimensions are ["COMPLETENESS", "ACCURACY", "CONSISTENCY", "VALIDITY", "UNIQUENESS", "INTEGRITY"]
- column String
- The unnested column which this rule is evaluated against.
- description String
- Description of the rule. The maximum length is 1,024 characters.
- ignore
Null Boolean - Rows with null values will automatically fail a rule, unless ignoreNull is true. In that case, such null rows are trivially considered passing. Only applicable to ColumnMap rules.
- name String
- A mutable name for the rule. The name must contain only letters (a-z, A-Z), numbers (0-9), or hyphens (-). The maximum length is 63 characters. Must start with a letter. Must end with a number or a letter.
- non
Null Expectation DatascanData Quality Spec Rule Non Null Expectation - ColumnMap rule which evaluates whether each column value is null.
- range
Expectation DatascanData Quality Spec Rule Range Expectation - ColumnMap rule which evaluates whether each column value lies between a specified range. Structure is documented below.
- regex
Expectation DatascanData Quality Spec Rule Regex Expectation - ColumnMap rule which evaluates whether each column value matches a specified regex. Structure is documented below.
- row
Condition Expectation DatascanData Quality Spec Rule Row Condition Expectation - Table rule which evaluates whether each row passes the specified condition. Structure is documented below.
- set
Expectation DatascanData Quality Spec Rule Set Expectation - ColumnMap rule which evaluates whether each column value is contained by a specified set. Structure is documented below.
- sql
Assertion DatascanData Quality Spec Rule Sql Assertion - Table rule which evaluates whether any row matches invalid state. Structure is documented below.
- statistic
Range Expectation DatascanData Quality Spec Rule Statistic Range Expectation - ColumnAggregate rule which evaluates whether the column aggregate statistic lies between a specified range. Structure is documented below.
- table
Condition Expectation DatascanData Quality Spec Rule Table Condition Expectation - Table rule which evaluates whether the provided expression is true. Structure is documented below.
- threshold Double
- The minimum ratio of passing_rows / total_rows required to pass this rule, with a range of [0.0, 1.0]. 0 indicates default value (i.e. 1.0).
- uniqueness
Expectation DatascanData Quality Spec Rule Uniqueness Expectation - Row-level rule which evaluates whether each column value is unique.
- dimension string
- The dimension a rule belongs to. Results are also aggregated at the dimension level. Supported dimensions are ["COMPLETENESS", "ACCURACY", "CONSISTENCY", "VALIDITY", "UNIQUENESS", "INTEGRITY"]
- column string
- The unnested column which this rule is evaluated against.
- description string
- Description of the rule. The maximum length is 1,024 characters.
- ignore
Null boolean - Rows with null values will automatically fail a rule, unless ignoreNull is true. In that case, such null rows are trivially considered passing. Only applicable to ColumnMap rules.
- name string
- A mutable name for the rule. The name must contain only letters (a-z, A-Z), numbers (0-9), or hyphens (-). The maximum length is 63 characters. Must start with a letter. Must end with a number or a letter.
- non
Null Expectation DatascanData Quality Spec Rule Non Null Expectation - ColumnMap rule which evaluates whether each column value is null.
- range
Expectation DatascanData Quality Spec Rule Range Expectation - ColumnMap rule which evaluates whether each column value lies between a specified range. Structure is documented below.
- regex
Expectation DatascanData Quality Spec Rule Regex Expectation - ColumnMap rule which evaluates whether each column value matches a specified regex. Structure is documented below.
- row
Condition Expectation DatascanData Quality Spec Rule Row Condition Expectation - Table rule which evaluates whether each row passes the specified condition. Structure is documented below.
- set
Expectation DatascanData Quality Spec Rule Set Expectation - ColumnMap rule which evaluates whether each column value is contained by a specified set. Structure is documented below.
- sql
Assertion DatascanData Quality Spec Rule Sql Assertion - Table rule which evaluates whether any row matches invalid state. Structure is documented below.
- statistic
Range Expectation DatascanData Quality Spec Rule Statistic Range Expectation - ColumnAggregate rule which evaluates whether the column aggregate statistic lies between a specified range. Structure is documented below.
- table
Condition Expectation DatascanData Quality Spec Rule Table Condition Expectation - Table rule which evaluates whether the provided expression is true. Structure is documented below.
- threshold number
- The minimum ratio of passing_rows / total_rows required to pass this rule, with a range of [0.0, 1.0]. 0 indicates default value (i.e. 1.0).
- uniqueness
Expectation DatascanData Quality Spec Rule Uniqueness Expectation - Row-level rule which evaluates whether each column value is unique.
- dimension str
- The dimension a rule belongs to. Results are also aggregated at the dimension level. Supported dimensions are ["COMPLETENESS", "ACCURACY", "CONSISTENCY", "VALIDITY", "UNIQUENESS", "INTEGRITY"]
- column str
- The unnested column which this rule is evaluated against.
- description str
- Description of the rule. The maximum length is 1,024 characters.
- ignore_
null bool - Rows with null values will automatically fail a rule, unless ignoreNull is true. In that case, such null rows are trivially considered passing. Only applicable to ColumnMap rules.
- name str
- A mutable name for the rule. The name must contain only letters (a-z, A-Z), numbers (0-9), or hyphens (-). The maximum length is 63 characters. Must start with a letter. Must end with a number or a letter.
- non_
null_ expectation DatascanData Quality Spec Rule Non Null Expectation - ColumnMap rule which evaluates whether each column value is null.
- range_
expectation DatascanData Quality Spec Rule Range Expectation - ColumnMap rule which evaluates whether each column value lies between a specified range. Structure is documented below.
- regex_
expectation DatascanData Quality Spec Rule Regex Expectation - ColumnMap rule which evaluates whether each column value matches a specified regex. Structure is documented below.
- row_
condition_ expectation DatascanData Quality Spec Rule Row Condition Expectation - Table rule which evaluates whether each row passes the specified condition. Structure is documented below.
- set_
expectation DatascanData Quality Spec Rule Set Expectation - ColumnMap rule which evaluates whether each column value is contained by a specified set. Structure is documented below.
- sql_
assertion DatascanData Quality Spec Rule Sql Assertion - Table rule which evaluates whether any row matches invalid state. Structure is documented below.
- statistic_
range_ expectation DatascanData Quality Spec Rule Statistic Range Expectation - ColumnAggregate rule which evaluates whether the column aggregate statistic lies between a specified range. Structure is documented below.
- table_
condition_ expectation DatascanData Quality Spec Rule Table Condition Expectation - Table rule which evaluates whether the provided expression is true. Structure is documented below.
- threshold float
- The minimum ratio of passing_rows / total_rows required to pass this rule, with a range of [0.0, 1.0]. 0 indicates default value (i.e. 1.0).
- uniqueness_
expectation DatascanData Quality Spec Rule Uniqueness Expectation - Row-level rule which evaluates whether each column value is unique.
- dimension String
- The dimension a rule belongs to. Results are also aggregated at the dimension level. Supported dimensions are ["COMPLETENESS", "ACCURACY", "CONSISTENCY", "VALIDITY", "UNIQUENESS", "INTEGRITY"]
- column String
- The unnested column which this rule is evaluated against.
- description String
- Description of the rule. The maximum length is 1,024 characters.
- ignore
Null Boolean - Rows with null values will automatically fail a rule, unless ignoreNull is true. In that case, such null rows are trivially considered passing. Only applicable to ColumnMap rules.
- name String
- A mutable name for the rule. The name must contain only letters (a-z, A-Z), numbers (0-9), or hyphens (-). The maximum length is 63 characters. Must start with a letter. Must end with a number or a letter.
- non
Null Expectation Property Map - ColumnMap rule which evaluates whether each column value is null.
- range
Expectation Property Map - ColumnMap rule which evaluates whether each column value lies between a specified range. Structure is documented below.
- regex
Expectation Property Map - ColumnMap rule which evaluates whether each column value matches a specified regex. Structure is documented below.
- row
Condition Expectation Property Map - Table rule which evaluates whether each row passes the specified condition. Structure is documented below.
- set
Expectation Property Map - ColumnMap rule which evaluates whether each column value is contained by a specified set. Structure is documented below.
- sql
Assertion Property Map - Table rule which evaluates whether any row matches invalid state. Structure is documented below.
- statistic
Range Expectation Property Map - ColumnAggregate rule which evaluates whether the column aggregate statistic lies between a specified range. Structure is documented below.
- table
Condition Expectation Property Map - Table rule which evaluates whether the provided expression is true. Structure is documented below.
- threshold Number
- The minimum ratio of passing_rows / total_rows required to pass this rule, with a range of [0.0, 1.0]. 0 indicates default value (i.e. 1.0).
- uniqueness
Expectation Property Map - Row-level rule which evaluates whether each column value is unique.
DatascanDataQualitySpecRuleRangeExpectation, DatascanDataQualitySpecRuleRangeExpectationArgs
- Max
Value string - The maximum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.
- Min
Value string - The minimum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.
- Strict
Max Enabled bool - Whether each value needs to be strictly lesser than ('<') the maximum, or if equality is allowed. Only relevant if a maxValue has been defined. Default = false.
- Strict
Min Enabled bool - Whether each value needs to be strictly greater than ('>') the minimum, or if equality is allowed. Only relevant if a minValue has been defined. Default = false.
- Max
Value string - The maximum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.
- Min
Value string - The minimum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.
- Strict
Max Enabled bool - Whether each value needs to be strictly lesser than ('<') the maximum, or if equality is allowed. Only relevant if a maxValue has been defined. Default = false.
- Strict
Min Enabled bool - Whether each value needs to be strictly greater than ('>') the minimum, or if equality is allowed. Only relevant if a minValue has been defined. Default = false.
- max
Value String - The maximum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.
- min
Value String - The minimum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.
- strict
Max Enabled Boolean - Whether each value needs to be strictly lesser than ('<') the maximum, or if equality is allowed. Only relevant if a maxValue has been defined. Default = false.
- strict
Min Enabled Boolean - Whether each value needs to be strictly greater than ('>') the minimum, or if equality is allowed. Only relevant if a minValue has been defined. Default = false.
- max
Value string - The maximum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.
- min
Value string - The minimum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.
- strict
Max Enabled boolean - Whether each value needs to be strictly lesser than ('<') the maximum, or if equality is allowed. Only relevant if a maxValue has been defined. Default = false.
- strict
Min Enabled boolean - Whether each value needs to be strictly greater than ('>') the minimum, or if equality is allowed. Only relevant if a minValue has been defined. Default = false.
- max_
value str - The maximum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.
- min_
value str - The minimum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.
- strict_
max_ enabled bool - Whether each value needs to be strictly lesser than ('<') the maximum, or if equality is allowed. Only relevant if a maxValue has been defined. Default = false.
- strict_
min_ enabled bool - Whether each value needs to be strictly greater than ('>') the minimum, or if equality is allowed. Only relevant if a minValue has been defined. Default = false.
- max
Value String - The maximum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.
- min
Value String - The minimum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.
- strict
Max Enabled Boolean - Whether each value needs to be strictly lesser than ('<') the maximum, or if equality is allowed. Only relevant if a maxValue has been defined. Default = false.
- strict
Min Enabled Boolean - Whether each value needs to be strictly greater than ('>') the minimum, or if equality is allowed. Only relevant if a minValue has been defined. Default = false.
DatascanDataQualitySpecRuleRegexExpectation, DatascanDataQualitySpecRuleRegexExpectationArgs
- Regex string
- A regular expression the column value is expected to match.
- Regex string
- A regular expression the column value is expected to match.
- regex String
- A regular expression the column value is expected to match.
- regex string
- A regular expression the column value is expected to match.
- regex str
- A regular expression the column value is expected to match.
- regex String
- A regular expression the column value is expected to match.
DatascanDataQualitySpecRuleRowConditionExpectation, DatascanDataQualitySpecRuleRowConditionExpectationArgs
- Sql
Expression string - The SQL expression.
- Sql
Expression string - The SQL expression.
- sql
Expression String - The SQL expression.
- sql
Expression string - The SQL expression.
- sql_
expression str - The SQL expression.
- sql
Expression String - The SQL expression.
DatascanDataQualitySpecRuleSetExpectation, DatascanDataQualitySpecRuleSetExpectationArgs
- Values List<string>
- Expected values for the column value.
- Values []string
- Expected values for the column value.
- values List<String>
- Expected values for the column value.
- values string[]
- Expected values for the column value.
- values Sequence[str]
- Expected values for the column value.
- values List<String>
- Expected values for the column value.
DatascanDataQualitySpecRuleSqlAssertion, DatascanDataQualitySpecRuleSqlAssertionArgs
- Sql
Statement string - The SQL statement.
- Sql
Statement string - The SQL statement.
- sql
Statement String - The SQL statement.
- sql
Statement string - The SQL statement.
- sql_
statement str - The SQL statement.
- sql
Statement String - The SQL statement.
DatascanDataQualitySpecRuleStatisticRangeExpectation, DatascanDataQualitySpecRuleStatisticRangeExpectationArgs
- Statistic string
- column statistics.
Possible values are:
STATISTIC_UNDEFINED
,MEAN
,MIN
,MAX
. - Max
Value string - The maximum column statistic value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.
- Min
Value string - The minimum column statistic value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.
- Strict
Max Enabled bool - Whether column statistic needs to be strictly lesser than ('<') the maximum, or if equality is allowed. Only relevant if a maxValue has been defined. Default = false.
- Strict
Min Enabled bool - Whether column statistic needs to be strictly greater than ('>') the minimum, or if equality is allowed. Only relevant if a minValue has been defined. Default = false.
- Statistic string
- column statistics.
Possible values are:
STATISTIC_UNDEFINED
,MEAN
,MIN
,MAX
. - Max
Value string - The maximum column statistic value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.
- Min
Value string - The minimum column statistic value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.
- Strict
Max Enabled bool - Whether column statistic needs to be strictly lesser than ('<') the maximum, or if equality is allowed. Only relevant if a maxValue has been defined. Default = false.
- Strict
Min Enabled bool - Whether column statistic needs to be strictly greater than ('>') the minimum, or if equality is allowed. Only relevant if a minValue has been defined. Default = false.
- statistic String
- column statistics.
Possible values are:
STATISTIC_UNDEFINED
,MEAN
,MIN
,MAX
. - max
Value String - The maximum column statistic value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.
- min
Value String - The minimum column statistic value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.
- strict
Max Enabled Boolean - Whether column statistic needs to be strictly lesser than ('<') the maximum, or if equality is allowed. Only relevant if a maxValue has been defined. Default = false.
- strict
Min Enabled Boolean - Whether column statistic needs to be strictly greater than ('>') the minimum, or if equality is allowed. Only relevant if a minValue has been defined. Default = false.
- statistic string
- column statistics.
Possible values are:
STATISTIC_UNDEFINED
,MEAN
,MIN
,MAX
. - max
Value string - The maximum column statistic value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.
- min
Value string - The minimum column statistic value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.
- strict
Max Enabled boolean - Whether column statistic needs to be strictly lesser than ('<') the maximum, or if equality is allowed. Only relevant if a maxValue has been defined. Default = false.
- strict
Min Enabled boolean - Whether column statistic needs to be strictly greater than ('>') the minimum, or if equality is allowed. Only relevant if a minValue has been defined. Default = false.
- statistic str
- column statistics.
Possible values are:
STATISTIC_UNDEFINED
,MEAN
,MIN
,MAX
. - max_
value str - The maximum column statistic value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.
- min_
value str - The minimum column statistic value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.
- strict_
max_ enabled bool - Whether column statistic needs to be strictly lesser than ('<') the maximum, or if equality is allowed. Only relevant if a maxValue has been defined. Default = false.
- strict_
min_ enabled bool - Whether column statistic needs to be strictly greater than ('>') the minimum, or if equality is allowed. Only relevant if a minValue has been defined. Default = false.
- statistic String
- column statistics.
Possible values are:
STATISTIC_UNDEFINED
,MEAN
,MIN
,MAX
. - max
Value String - The maximum column statistic value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.
- min
Value String - The minimum column statistic value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.
- strict
Max Enabled Boolean - Whether column statistic needs to be strictly lesser than ('<') the maximum, or if equality is allowed. Only relevant if a maxValue has been defined. Default = false.
- strict
Min Enabled Boolean - Whether column statistic needs to be strictly greater than ('>') the minimum, or if equality is allowed. Only relevant if a minValue has been defined. Default = false.
DatascanDataQualitySpecRuleTableConditionExpectation, DatascanDataQualitySpecRuleTableConditionExpectationArgs
- Sql
Expression string - The SQL expression.
- Sql
Expression string - The SQL expression.
- sql
Expression String - The SQL expression.
- sql
Expression string - The SQL expression.
- sql_
expression str - The SQL expression.
- sql
Expression String - The SQL expression.
DatascanExecutionSpec, DatascanExecutionSpecArgs
- Trigger
Datascan
Execution Spec Trigger - Spec related to how often and when a scan should be triggered. Structure is documented below.
- Field string
- The unnested field (of type Date or Timestamp) that contains values which monotonically increase over time. If not specified, a data scan will run for all data in the table.
- Trigger
Datascan
Execution Spec Trigger - Spec related to how often and when a scan should be triggered. Structure is documented below.
- Field string
- The unnested field (of type Date or Timestamp) that contains values which monotonically increase over time. If not specified, a data scan will run for all data in the table.
- trigger
Datascan
Execution Spec Trigger - Spec related to how often and when a scan should be triggered. Structure is documented below.
- field String
- The unnested field (of type Date or Timestamp) that contains values which monotonically increase over time. If not specified, a data scan will run for all data in the table.
- trigger
Datascan
Execution Spec Trigger - Spec related to how often and when a scan should be triggered. Structure is documented below.
- field string
- The unnested field (of type Date or Timestamp) that contains values which monotonically increase over time. If not specified, a data scan will run for all data in the table.
- trigger
Datascan
Execution Spec Trigger - Spec related to how often and when a scan should be triggered. Structure is documented below.
- field str
- The unnested field (of type Date or Timestamp) that contains values which monotonically increase over time. If not specified, a data scan will run for all data in the table.
- trigger Property Map
- Spec related to how often and when a scan should be triggered. Structure is documented below.
- field String
- The unnested field (of type Date or Timestamp) that contains values which monotonically increase over time. If not specified, a data scan will run for all data in the table.
DatascanExecutionSpecTrigger, DatascanExecutionSpecTriggerArgs
- On
Demand DatascanExecution Spec Trigger On Demand - The scan runs once via dataScans.run API.
- Schedule
Datascan
Execution Spec Trigger Schedule - The scan is scheduled to run periodically. Structure is documented below.
- On
Demand DatascanExecution Spec Trigger On Demand - The scan runs once via dataScans.run API.
- Schedule
Datascan
Execution Spec Trigger Schedule - The scan is scheduled to run periodically. Structure is documented below.
- on
Demand DatascanExecution Spec Trigger On Demand - The scan runs once via dataScans.run API.
- schedule
Datascan
Execution Spec Trigger Schedule - The scan is scheduled to run periodically. Structure is documented below.
- on
Demand DatascanExecution Spec Trigger On Demand - The scan runs once via dataScans.run API.
- schedule
Datascan
Execution Spec Trigger Schedule - The scan is scheduled to run periodically. Structure is documented below.
- on_
demand DatascanExecution Spec Trigger On Demand - The scan runs once via dataScans.run API.
- schedule
Datascan
Execution Spec Trigger Schedule - The scan is scheduled to run periodically. Structure is documented below.
- on
Demand Property Map - The scan runs once via dataScans.run API.
- schedule Property Map
- The scan is scheduled to run periodically. Structure is documented below.
DatascanExecutionSpecTriggerSchedule, DatascanExecutionSpecTriggerScheduleArgs
- Cron string
- Cron schedule for running scans periodically. This field is required for Schedule scans.
- Cron string
- Cron schedule for running scans periodically. This field is required for Schedule scans.
- cron String
- Cron schedule for running scans periodically. This field is required for Schedule scans.
- cron string
- Cron schedule for running scans periodically. This field is required for Schedule scans.
- cron str
- Cron schedule for running scans periodically. This field is required for Schedule scans.
- cron String
- Cron schedule for running scans periodically. This field is required for Schedule scans.
DatascanExecutionStatus, DatascanExecutionStatusArgs
- Latest
Job End Time string - (Output) The time when the latest DataScanJob ended.
- Latest
Job Start Time string - (Output) The time when the latest DataScanJob started.
- Latest
Job End Time string - (Output) The time when the latest DataScanJob ended.
- Latest
Job Start Time string - (Output) The time when the latest DataScanJob started.
- latest
Job End Time String - (Output) The time when the latest DataScanJob ended.
- latest
Job Start Time String - (Output) The time when the latest DataScanJob started.
- latest
Job End Time string - (Output) The time when the latest DataScanJob ended.
- latest
Job Start Time string - (Output) The time when the latest DataScanJob started.
- latest_
job_ end_ time str - (Output) The time when the latest DataScanJob ended.
- latest_
job_ start_ time str - (Output) The time when the latest DataScanJob started.
- latest
Job End Time String - (Output) The time when the latest DataScanJob ended.
- latest
Job Start Time String - (Output) The time when the latest DataScanJob started.
Import
Datascan can be imported using any of these accepted formats:
projects/{{project}}/locations/{{location}}/dataScans/{{data_scan_id}}
{{project}}/{{location}}/{{data_scan_id}}
{{location}}/{{data_scan_id}}
{{data_scan_id}}
When using the pulumi import
command, Datascan can be imported using one of the formats above. For example:
$ pulumi import gcp:dataplex/datascan:Datascan default projects/{{project}}/locations/{{location}}/dataScans/{{data_scan_id}}
$ pulumi import gcp:dataplex/datascan:Datascan default {{project}}/{{location}}/{{data_scan_id}}
$ pulumi import gcp:dataplex/datascan:Datascan default {{location}}/{{data_scan_id}}
$ pulumi import gcp:dataplex/datascan:Datascan default {{data_scan_id}}
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- Google Cloud (GCP) Classic pulumi/pulumi-gcp
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the
google-beta
Terraform Provider.