mirror of https://github.com/dbt-labs/dbt-core
synced 2025-12-19 06:21:28 +00:00

Compare commits (47 commits): update-ind… to tidy/remov…
| Author | SHA1 | Date |
|---|---|---|
|  | 09cefe591e |  |
|  | a309283a7c |  |
|  | b10fa79ae8 |  |
|  | 37e2725038 |  |
|  | 37fd299ad0 |  |
|  | a94027acea |  |
|  | b59c9075e2 |  |
|  | c215697a02 |  |
|  | d936a630c1 |  |
|  | 11ee2b9c42 |  |
|  | 64c59476f4 |  |
|  | 2bae05b8ed |  |
|  | ca163c3d6e |  |
|  | 9a796aa202 |  |
|  | 51ff85bb2d |  |
|  | d389ff1450 |  |
|  | 4415731da4 |  |
|  | 0fdc83af9d |  |
|  | 71a8a41104 |  |
|  | da19d7ba9f |  |
|  | 1475abb1cb |  |
|  | 27b2f965dd |  |
|  | 100352d6b4 |  |
|  | 8ee8b2560a |  |
|  | d4a6482091 |  |
|  | 8639290108 |  |
|  | e699f5d042 |  |
|  | e977b3eee5 |  |
|  | c5be8e2a93 |  |
|  | bff116dbed |  |
|  | 4df120e40e |  |
|  | e53420c1d0 |  |
|  | 88ccc8a447 |  |
|  | a98059967d |  |
|  | b680c7ae95 |  |
|  | a677abd5e8 |  |
|  | 8c850b58cb |  |
|  | a34267f54b |  |
|  | 155482851a |  |
|  | 81386a7a43 |  |
|  | d8e38c1a1d |  |
|  | 3e37d77780 |  |
|  | e0783c2922 |  |
|  | c2d4643f9d |  |
|  | 84456f50f6 |  |
|  | fb10bb4aea |  |
|  | 366d4ad04a |  |
6  .changes/unreleased/Docs-20240311-140344.yaml  (new file)
@@ -0,0 +1,6 @@
+kind: Docs
+body: Enable display of unit tests
+time: 2024-03-11T14:03:44.490834-04:00
+custom:
+  Author: gshank
+  Issue: "501"

6  .changes/unreleased/Docs-20240501-021050.yaml  (new file)
@@ -0,0 +1,6 @@
+kind: Docs
+body: Unit tests not rendering
+time: 2024-05-01T02:10:50.987412+02:00
+custom:
+  Author: aranke
+  Issue: "506"

6  .changes/unreleased/Docs-20240516-223036.yaml  (new file)
@@ -0,0 +1,6 @@
+kind: Docs
+body: Add support for Saved Query node
+time: 2024-05-16T22:30:36.206492-07:00
+custom:
+  Author: ChenyuLInx
+  Issue: "486"

6  .changes/unreleased/Docs-20240613-151048.yaml  (new file)
@@ -0,0 +1,6 @@
+kind: Docs
+body: Fix npm security vulnerabilities as of June 2024
+time: 2024-06-13T15:10:48.301989+01:00
+custom:
+  Author: aranke
+  Issue: "513"
7  .changes/unreleased/Features-20240522-000309.yaml  (new file)
@@ -0,0 +1,7 @@
+kind: Features
+body: 'Maximally parallelize dbt clone
+  in clone command'
+time: 2024-05-22T00:03:09.765977-04:00
+custom:
+  Author: michelleark
+  Issue: "7914"
6  .changes/unreleased/Features-20240527-124405.yaml  (new file)
@@ -0,0 +1,6 @@
+kind: Features
+body: Add --host flag to dbt docs serve, defaulting to '127.0.0.1'
+time: 2024-05-27T12:44:05.040843-04:00
+custom:
+  Author: michelleark
+  Issue: "10229"

6  .changes/unreleased/Features-20240531-150816.yaml  (new file)
@@ -0,0 +1,6 @@
+kind: Features
+body: Update data_test to accept arbitrary config options
+time: 2024-05-31T15:08:16.431966-05:00
+custom:
+  Author: McKnight-42
+  Issue: "10197"

6  .changes/unreleased/Features-20240606-112334.yaml  (new file)
@@ -0,0 +1,6 @@
+kind: Features
+body: add pre_model and post_model hook calls to data and unit tests to be able to provide extra config options
+time: 2024-06-06T11:23:34.758675-05:00
+custom:
+  Author: McKnight-42
+  Issue: "10198"

6  .changes/unreleased/Features-20240617-103948.yaml  (new file)
@@ -0,0 +1,6 @@
+kind: Features
+body: add --empty value to jinja context as flags.EMPTY
+time: 2024-06-17T10:39:48.275801-04:00
+custom:
+  Author: michelleark
+  Issue: "10317"

6  .changes/unreleased/Features-20240625-095107.yaml  (new file)
@@ -0,0 +1,6 @@
+kind: Features
+body: Support cumulative_type_params & sub-daily granularities in semantic manifest.
+time: 2024-06-25T09:51:07.983248-07:00
+custom:
+  Author: courtneyholcomb
+  Issue: "10360"

6  .changes/unreleased/Fixes-20240113-073615.yaml  (new file)
@@ -0,0 +1,6 @@
+kind: Fixes
+body: Convert "Skipping model due to fail_fast" message to DEBUG level
+time: 2024-01-13T07:36:15.836294-00:00
+custom:
+  Author: scottgigante,nevdelap
+  Issue: "8774"

7  .changes/unreleased/Fixes-20240522-182855.yaml  (new file)
@@ -0,0 +1,7 @@
+kind: Fixes
+body: 'Fix: Order-insensitive unit test equality assertion for expected/actual with
+  multiple nulls'
+time: 2024-05-22T18:28:55.91733-04:00
+custom:
+  Author: michelleark
+  Issue: "10167"

6  .changes/unreleased/Fixes-20240523-204251.yaml  (new file)
@@ -0,0 +1,6 @@
+kind: Fixes
+body: Renaming or removing a contracted model should raise a BreakingChange warning/error
+time: 2024-05-23T20:42:51.033946-04:00
+custom:
+  Author: michelleark
+  Issue: "10116"

6  .changes/unreleased/Fixes-20240524-131135.yaml  (new file)
@@ -0,0 +1,6 @@
+kind: Fixes
+body: prefer disabled project nodes to external node
+time: 2024-05-24T13:11:35.440443-04:00
+custom:
+  Author: michelleark
+  Issue: "10224"

6  .changes/unreleased/Fixes-20240605-111652.yaml  (new file)
@@ -0,0 +1,6 @@
+kind: Fixes
+body: Fix issues with selectors and inline nodes
+time: 2024-06-05T11:16:52.187667-04:00
+custom:
+  Author: gshank
+  Issue: 8943 9269

6  .changes/unreleased/Fixes-20240607-134648.yaml  (new file)
@@ -0,0 +1,6 @@
+kind: Fixes
+body: Fix snapshot config to work in yaml files
+time: 2024-06-07T13:46:48.383215-04:00
+custom:
+  Author: gshank
+  Issue: "4000"

6  .changes/unreleased/Fixes-20240610-132130.yaml  (new file)
@@ -0,0 +1,6 @@
+kind: Fixes
+body: Improve handling of error when loading schema file list
+time: 2024-06-10T13:21:30.963371-04:00
+custom:
+  Author: gshank
+  Issue: "10284"

6  .changes/unreleased/Fixes-20240612-124256.yaml  (new file)
@@ -0,0 +1,6 @@
+kind: Fixes
+body: Saved Query node fail during skip
+time: 2024-06-12T12:42:56.329073-07:00
+custom:
+  Author: ChenyuLInx
+  Issue: "10029"

6  .changes/unreleased/Fixes-20240612-152139.yaml  (new file)
@@ -0,0 +1,6 @@
+kind: Fixes
+body: Implement state:modified for saved queries
+time: 2024-06-12T15:21:39.851426-04:00
+custom:
+  Author: gshank
+  Issue: "10294"
6  .changes/unreleased/Fixes-20240613-183117.yaml  (new file)
@@ -0,0 +1,6 @@
+kind: Fixes
+body: Don't warn on `unit_test` config paths that are properly used
+time: 2024-06-13T18:31:17.486497-07:00
+custom:
+  Author: QMalcolm
+  Issue: "10311"
6  .changes/unreleased/Fixes-20240624-171729.yaml  (new file)
@@ -0,0 +1,6 @@
+kind: Fixes
+body: Fix setting `silence` of `warn_error_options` via `dbt_project.yaml` flags
+time: 2024-06-24T17:17:29.464865-07:00
+custom:
+  Author: QMalcolm
+  Issue: "10160"

6  .changes/unreleased/Fixes-20240627-154448.yaml  (new file)
@@ -0,0 +1,6 @@
+kind: Fixes
+body: Limit data_tests deprecation to root_project
+time: 2024-06-27T15:44:48.579869-04:00
+custom:
+  Author: gshank
+  Issue: "9835"

6  .changes/unreleased/Under the Hood-20240529-102814.yaml  (new file)
@@ -0,0 +1,6 @@
+kind: Under the Hood
+body: Enable record filtering by type.
+time: 2024-05-29T10:28:14.547624-05:00
+custom:
+  Author: emmyoop
+  Issue: "10240"

6  .changes/unreleased/Under the Hood-20240618-140652.yaml  (new file)
@@ -0,0 +1,6 @@
+kind: Under the Hood
+body: Remove IntermediateSnapshotNode
+time: 2024-06-18T14:06:52.618602-04:00
+custom:
+  Author: gshank
+  Issue: "10326"

6  .changes/unreleased/Under the Hood-20240701-131750.yaml  (new file)
@@ -0,0 +1,6 @@
+kind: Under the Hood
+body: Additional logging for skipped ephemeral models
+time: 2024-07-01T13:17:50.827788-04:00
+custom:
+  Author: gshank
+  Issue: "10389"
3  .github/ISSUE_TEMPLATE/config.yml  (vendored)
@@ -1,5 +1,8 @@
 blank_issues_enabled: false
 contact_links:
   - name: Documentation
     url: https://github.com/dbt-labs/docs.getdbt.com/issues/new/choose
     about: Problems and issues with dbt documentation
   - name: Ask the community for help
     url: https://github.com/dbt-labs/docs.getdbt.com/discussions
     about: Need help troubleshooting? Check out our guide on how to ask
21  .github/workflows/release.yml  (vendored)
@@ -247,3 +247,24 @@ jobs:

     secrets:
       SLACK_WEBHOOK_URL: ${{ secrets.SLACK_DEV_CORE_ALERTS }}
+
+  testing-slack-notification:
+    # sends notifications to #slackbot-test
+    name: Testing - Slack Notification
+    if: ${{ failure() && inputs.test_run && !inputs.nightly_release }}
+
+    needs:
+      [
+        bump-version-generate-changelog,
+        build-test-package,
+        github-release,
+        pypi-release,
+        docker-release,
+      ]
+
+    uses: dbt-labs/dbt-release/.github/workflows/slack-post-notification.yml@main
+    with:
+      status: "failure"
+
+    secrets:
+      SLACK_WEBHOOK_URL: ${{ secrets.SLACK_TESTING_WEBHOOK_URL }}
@@ -1,4 +1,4 @@
 [settings]
 profile=black
 extend_skip_glob=.github/*,third-party-stubs/*,scripts/*
-known_first_party=dbt,dbt_adapters,dbt_common,dbt_extractor,dbt_semantic_interface
+known_first_party=dbt,dbt_adapters,dbt_common,dbt_extractor,dbt_semantic_interfaces
@@ -20,10 +20,12 @@ repos:
         - "markdown"
     - id: check-case-conflict
   - repo: https://github.com/pycqa/isort
-    rev: 5.12.0
+    # rev must match what's in dev-requirements.txt
+    rev: 5.13.2
     hooks:
       - id: isort
   - repo: https://github.com/psf/black
+    # rev must match what's in dev-requirements.txt
     rev: 22.3.0
     hooks:
       - id: black

@@ -34,6 +36,7 @@ repos:
       - "--check"
       - "--diff"
   - repo: https://github.com/pycqa/flake8
+    # rev must match what's in dev-requirements.txt
    rev: 4.0.1
     hooks:
       - id: flake8

@@ -41,6 +44,7 @@ repos:
         alias: flake8-check
         stages: [manual]
   - repo: https://github.com/pre-commit/mirrors-mypy
+    # rev must match what's in dev-requirements.txt
    rev: v1.4.1
     hooks:
       - id: mypy
26  codecov.yml
@@ -1,6 +1,7 @@
 ignore:
   - ".github"
+  - ".changes"

 coverage:
   status:
     project:

@@ -11,3 +12,28 @@ coverage:
     default:
       target: auto
       threshold: 80%
+
+comment:
+  layout: "header, diff, flags, components" # show component info in the PR comment
+
+component_management:
+  default_rules: # default rules that will be inherited by all components
+    statuses:
+      - type: project # in this case every component that doesn't have a status defined will have a project type one
+        target: auto
+        threshold: 0.1%
+      - type: patch
+        target: 80%
+  individual_components:
+    - component_id: unittests
+      name: "Unit Tests"
+      flag_regexes:
+        - "unit"
+      statuses:
+        - type: patch
+          target: 80%
+          threshold: 5%
+    - component_id: integrationtests
+      name: "Integration Tests"
+      flag_regexes:
+        - "integration"
@@ -38,6 +38,7 @@ from dbt.artifacts.resources.v1.macro import Macro, MacroArgument, MacroDependsOn
 from dbt.artifacts.resources.v1.metric import (
     ConstantPropertyInput,
     ConversionTypeParams,
+    CumulativeTypeParams,
     Metric,
     MetricConfig,
     MetricInput,
@@ -2,13 +2,6 @@ import time
 from dataclasses import dataclass, field
 from typing import Any, Dict, List, Literal, Optional

-from dbt_semantic_interfaces.references import MeasureReference, MetricReference
-from dbt_semantic_interfaces.type_enums import (
-    ConversionCalculationType,
-    MetricType,
-    TimeGranularity,
-)
-
 from dbt.artifacts.resources.base import GraphResource
 from dbt.artifacts.resources.types import NodeType
 from dbt.artifacts.resources.v1.components import DependsOn, RefArgs

@@ -18,6 +11,13 @@ from dbt.artifacts.resources.v1.semantic_layer_components import (
 )
 from dbt_common.contracts.config.base import BaseConfig, CompareBehavior, MergeBehavior
 from dbt_common.dataclass_schema import dbtClassMixin
+from dbt_semantic_interfaces.references import MeasureReference, MetricReference
+from dbt_semantic_interfaces.type_enums import (
+    ConversionCalculationType,
+    MetricType,
+    PeriodAggregation,
+    TimeGranularity,
+)

 """
 The following classes are dataclasses which are used to construct the Metric

@@ -80,6 +80,13 @@ class ConversionTypeParams(dbtClassMixin):
     constant_properties: Optional[List[ConstantPropertyInput]] = None


+@dataclass
+class CumulativeTypeParams(dbtClassMixin):
+    window: Optional[MetricTimeWindow] = None
+    grain_to_date: Optional[TimeGranularity] = None
+    period_agg: PeriodAggregation = PeriodAggregation.FIRST
+
+
 @dataclass
 class MetricTypeParams(dbtClassMixin):
     measure: Optional[MetricInputMeasure] = None

@@ -91,6 +98,7 @@ class MetricTypeParams(dbtClassMixin):
     grain_to_date: Optional[TimeGranularity] = None
     metrics: Optional[List[MetricInput]] = None
     conversion_type_params: Optional[ConversionTypeParams] = None
+    cumulative_type_params: Optional[CumulativeTypeParams] = None


 @dataclass
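A minimal construction sketch for the new dataclass, assuming a dbt-core build containing this commit plus dbt-semantic-interfaces are installed (both import paths appear in the hunks above):

import​ is not needed beyond these two lines:

from dbt.artifacts.resources import CumulativeTypeParams
from dbt_semantic_interfaces.type_enums import PeriodAggregation

# window and grain_to_date default to None; period_agg defaults to FIRST,
# matching the dataclass definition in the hunk above.
params = CumulativeTypeParams()
assert params.period_agg == PeriodAggregation.FIRST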
@@ -4,10 +4,6 @@ import time
 from dataclasses import dataclass, field
 from typing import Any, Dict, List, Literal, Optional

-from dbt_semantic_interfaces.type_enums.export_destination_type import (
-    ExportDestinationType,
-)
-
 from dbt.artifacts.resources.base import GraphResource
 from dbt.artifacts.resources.types import NodeType
 from dbt.artifacts.resources.v1.components import DependsOn, RefArgs

@@ -17,6 +13,9 @@ from dbt.artifacts.resources.v1.semantic_layer_components import (
 )
 from dbt_common.contracts.config.base import BaseConfig, CompareBehavior, MergeBehavior
 from dbt_common.dataclass_schema import dbtClassMixin
+from dbt_semantic_interfaces.type_enums.export_destination_type import (
+    ExportDestinationType,
+)


 @dataclass
@@ -1,13 +1,12 @@
 from dataclasses import dataclass
 from typing import List, Sequence, Tuple

+from dbt_common.dataclass_schema import dbtClassMixin
 from dbt_semantic_interfaces.call_parameter_sets import FilterCallParameterSets
 from dbt_semantic_interfaces.parsing.where_filter.where_filter_parser import (
     WhereFilterParser,
 )

-from dbt_common.dataclass_schema import dbtClassMixin
-

 @dataclass
 class WhereFilter(dbtClassMixin):
@@ -2,6 +2,11 @@ import time
 from dataclasses import dataclass, field
 from typing import Any, Dict, List, Optional, Sequence

+from dbt.artifacts.resources import SourceFileMetadata
+from dbt.artifacts.resources.base import GraphResource
+from dbt.artifacts.resources.v1.components import DependsOn, RefArgs
+from dbt_common.contracts.config.base import BaseConfig, CompareBehavior, MergeBehavior
+from dbt_common.dataclass_schema import dbtClassMixin
 from dbt_semantic_interfaces.references import (
     DimensionReference,
     EntityReference,

@@ -17,12 +22,6 @@ from dbt_semantic_interfaces.type_enums import (
     TimeGranularity,
 )

-from dbt.artifacts.resources import SourceFileMetadata
-from dbt.artifacts.resources.base import GraphResource
-from dbt.artifacts.resources.v1.components import DependsOn, RefArgs
-from dbt_common.contracts.config.base import BaseConfig, CompareBehavior, MergeBehavior
-from dbt_common.dataclass_schema import dbtClassMixin
-

 """
 The classes in this file are dataclasses which are used to construct the Semantic
 Model node in dbt-core. Additionally, these classes need to at a minimum support
@@ -18,39 +18,35 @@ class SnapshotConfig(NodeConfig):
     # Not using Optional because of serialization issues with a Union of str and List[str]
     check_cols: Union[str, List[str], None] = None

-    @classmethod
-    def validate(cls, data):
-        super().validate(data)
-        # Note: currently you can't just set these keys in schema.yml because this validation
-        # will fail when parsing the snapshot node.
-        if not data.get("strategy") or not data.get("unique_key") or not data.get("target_schema"):
+    def final_validate(self):
+        if not self.strategy or not self.unique_key or not self.target_schema:
             raise ValidationError(
                 "Snapshots must be configured with a 'strategy', 'unique_key', "
                 "and 'target_schema'."
             )
-        if data.get("strategy") == "check":
-            if not data.get("check_cols"):
+        if self.strategy == "check":
+            if not self.check_cols:
                 raise ValidationError(
                     "A snapshot configured with the check strategy must "
                     "specify a check_cols configuration."
                 )
-            if isinstance(data["check_cols"], str) and data["check_cols"] != "all":
+            if isinstance(self.check_cols, str) and self.check_cols != "all":
                 raise ValidationError(
-                    f"Invalid value for 'check_cols': {data['check_cols']}. "
+                    f"Invalid value for 'check_cols': {self.check_cols}. "
                     "Expected 'all' or a list of strings."
                 )
-        elif data.get("strategy") == "timestamp":
-            if not data.get("updated_at"):
+        elif self.strategy == "timestamp":
+            if not self.updated_at:
                 raise ValidationError(
                     "A snapshot configured with the timestamp strategy "
                     "must specify an updated_at configuration."
                 )
-            if data.get("check_cols"):
+            if self.check_cols:
                 raise ValidationError("A 'timestamp' snapshot should not have 'check_cols'")
         # If the strategy is not 'check' or 'timestamp' it's a custom strategy,
         # formerly supported with GenericSnapshotConfig

-        if data.get("materialized") and data.get("materialized") != "snapshot":
+        if self.materialized and self.materialized != "snapshot":
             raise ValidationError("A snapshot must have a materialized value of 'snapshot'")

     # Called by "calculate_node_config_dict" in ContextConfigGenerator
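The rules above now run as a post-configuration final_validate pass instead of a parse-time classmethod, which is what makes snapshot config in YAML files workable (issue "4000" in the changelog). A dependency-free sketch of the same rule set, with field names taken from the diff:

def validate_snapshot_config(cfg: dict) -> None:
    # The three always-required keys.
    if not all(cfg.get(k) for k in ("strategy", "unique_key", "target_schema")):
        raise ValueError("Snapshots must be configured with a 'strategy', 'unique_key', and 'target_schema'.")
    if cfg.get("strategy") == "check" and not cfg.get("check_cols"):
        raise ValueError("check strategy requires a check_cols configuration")
    if cfg.get("strategy") == "timestamp":
        if not cfg.get("updated_at"):
            raise ValueError("timestamp strategy requires an updated_at configuration")
        if cfg.get("check_cols"):
            raise ValueError("a 'timestamp' snapshot should not have 'check_cols'")

# Passes: a complete timestamp-strategy config.
validate_snapshot_config(
    {"strategy": "timestamp", "unique_key": "id", "target_schema": "snapshots", "updated_at": "updated_at"}
)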
@@ -1,2 +1,11 @@
 # alias to latest
 from dbt.artifacts.schemas.catalog.v1.catalog import *  # noqa
+from dbt_common.contracts.metadata import (
+    CatalogKey,
+    CatalogTable,
+    ColumnMap,
+    ColumnMetadata,
+    StatsDict,
+    StatsItem,
+    TableMetadata,
+)
@@ -1,71 +1,18 @@
 from dataclasses import dataclass, field
 from datetime import datetime
-from typing import Any, Dict, List, NamedTuple, Optional, Union
+from typing import Any, Dict, List, Optional, Union

 from dbt.artifacts.schemas.base import (
     ArtifactMixin,
     BaseArtifactMetadata,
     schema_version,
 )
+from dbt_common.contracts.metadata import CatalogTable
 from dbt_common.dataclass_schema import dbtClassMixin
 from dbt_common.utils.formatting import lowercase

 Primitive = Union[bool, str, float, None]
 PrimitiveDict = Dict[str, Primitive]

-CatalogKey = NamedTuple(
-    "CatalogKey", [("database", Optional[str]), ("schema", str), ("name", str)]
-)
-
-
-@dataclass
-class StatsItem(dbtClassMixin):
-    id: str
-    label: str
-    value: Primitive
-    include: bool
-    description: Optional[str] = None
-
-
-StatsDict = Dict[str, StatsItem]
-
-
-@dataclass
-class ColumnMetadata(dbtClassMixin):
-    type: str
-    index: int
-    name: str
-    comment: Optional[str] = None
-
-
-ColumnMap = Dict[str, ColumnMetadata]
-
-
-@dataclass
-class TableMetadata(dbtClassMixin):
-    type: str
-    schema: str
-    name: str
-    database: Optional[str] = None
-    comment: Optional[str] = None
-    owner: Optional[str] = None
-
-
-@dataclass
-class CatalogTable(dbtClassMixin):
-    metadata: TableMetadata
-    columns: ColumnMap
-    stats: StatsDict
-    # the same table with two unique IDs will just be listed two times
-    unique_id: Optional[str] = None
-
-    def key(self) -> CatalogKey:
-        return CatalogKey(
-            lowercase(self.metadata.database),
-            self.metadata.schema.lower(),
-            self.metadata.name.lower(),
-        )


 @dataclass
 class CatalogMetadata(BaseArtifactMetadata):
@@ -57,6 +57,7 @@ def convert_config(config_name, config_value):
         ret = WarnErrorOptions(
             include=config_value.get("include", []),
             exclude=config_value.get("exclude", []),
+            silence=config_value.get("silence", []),
             valid_error_names=ALL_EVENT_NAMES,
         )
     return ret

@@ -289,6 +290,10 @@ class Flags:
             params_assigned_from_default, ["WARN_ERROR", "WARN_ERROR_OPTIONS"]
         )

+        # Handle arguments mutually exclusive with INLINE
+        self._assert_mutually_exclusive(params_assigned_from_default, ["SELECT", "INLINE"])
+        self._assert_mutually_exclusive(params_assigned_from_default, ["SELECTOR", "INLINE"])
+
         # Support lower cased access for legacy code.
         params = set(
             x for x in dir(self) if not callable(getattr(self, x)) and not x.startswith("__")

@@ -315,7 +320,9 @@ class Flags:
         """
         set_flag = None
         for flag in group:
-            flag_set_by_user = flag.lower() not in params_assigned_from_default
+            flag_set_by_user = (
+                hasattr(self, flag) and flag.lower() not in params_assigned_from_default
+            )
             if flag_set_by_user and set_flag:
                 raise DbtUsageException(
                     f"{flag.lower()}: not allowed with argument {set_flag.lower()}"
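A dependency-free sketch of the dict shape convert_config now consumes when warn_error_options is set via dbt_project.yml flags; the three list-valued keys mirror the .get() calls in the first hunk above:

def convert_warn_error_options(config_value: dict) -> dict:
    # "silence" is the key this fix wires through; absent keys default to [].
    return {
        "include": config_value.get("include", []),
        "exclude": config_value.get("exclude", []),
        "silence": config_value.get("silence", []),
    }

print(convert_warn_error_options({"include": "all", "silence": ["Deprecations"]}))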
@@ -274,6 +274,7 @@ def docs_generate(ctx, **kwargs):
 @click.pass_context
 @global_flags
 @p.browser
+@p.host
 @p.port
 @p.profiles_dir
 @p.project_dir

@@ -135,6 +135,14 @@ full_refresh = click.option(
     is_flag=True,
 )

+host = click.option(
+    "--host",
+    envvar="DBT_HOST",
+    help="host to serve dbt docs on",
+    type=click.STRING,
+    default="127.0.0.1",
+)
+
 indirect_selection = click.option(
     "--indirect-selection",
     envvar="DBT_INDIRECT_SELECTION",
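A usage sketch for the new option; the flag name, the DBT_HOST env var, and the 127.0.0.1 default all come from the hunk above, while the invocation itself is illustrative:

import subprocess

# Bind the docs site to all interfaces instead of the new loopback default;
# exporting DBT_HOST=0.0.0.0 would have the same effect.
subprocess.run(["dbt", "docs", "serve", "--host", "0.0.0.0", "--port", "8080"])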
@@ -41,7 +41,13 @@ from dbt_common.events.functions import LOG_VERSION, fire_event
 from dbt_common.events.helpers import get_json_string_utcnow
 from dbt_common.exceptions import DbtBaseException as DbtException
 from dbt_common.invocation import reset_invocation_id
-from dbt_common.record import Recorder, RecorderMode, get_record_mode_from_env
+from dbt_common.record import (
+    Recorder,
+    RecorderMode,
+    get_record_mode_from_env,
+    get_record_types_from_dict,
+    get_record_types_from_env,
+)
 from dbt_common.utils import cast_dict_to_dict_of_strings

@@ -101,13 +107,23 @@ def preflight(func):

 def setup_record_replay():
     rec_mode = get_record_mode_from_env()
+    rec_types = get_record_types_from_env()

     recorder: Optional[Recorder] = None
     if rec_mode == RecorderMode.REPLAY:
-        recording_path = os.environ["DBT_REPLAY"]
-        recorder = Recorder(RecorderMode.REPLAY, recording_path)
+        previous_recording_path = os.environ.get("DBT_RECORDER_FILE_PATH")
+        recorder = Recorder(
+            RecorderMode.REPLAY, types=rec_types, previous_recording_path=previous_recording_path
+        )
+    elif rec_mode == RecorderMode.DIFF:
+        previous_recording_path = os.environ.get("DBT_RECORDER_FILE_PATH")
+        # ensure types match the previous recording
+        types = get_record_types_from_dict(previous_recording_path)
+        recorder = Recorder(
+            RecorderMode.DIFF, types=types, previous_recording_path=previous_recording_path
+        )
     elif rec_mode == RecorderMode.RECORD:
-        recorder = Recorder(RecorderMode.RECORD)
+        recorder = Recorder(RecorderMode.RECORD, types=rec_types)

     get_invocation_context().recorder = recorder

@@ -116,7 +132,10 @@ def tear_down_record_replay():
     recorder = get_invocation_context().recorder
     if recorder is not None:
         if recorder.mode == RecorderMode.RECORD:
-            recorder.write("recording.json")
+            recorder.write()
+        if recorder.mode == RecorderMode.DIFF:
+            recorder.write()
+            recorder.write_diffs(diff_file_name="recording_diffs.json")
         elif recorder.mode == RecorderMode.REPLAY:
             recorder.write_diffs("replay_diffs.json")
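A hedged sketch of driving the reworked record/replay/diff machinery from the environment. DBT_RECORDER_FILE_PATH is read verbatim in the hunk above; the variable selecting the mode is whatever get_record_mode_from_env() consults, assumed here to be DBT_RECORDER_MODE:

import os
import subprocess

env = dict(os.environ)
env["DBT_RECORDER_MODE"] = "diff"                 # assumption: the name checked by get_record_mode_from_env
env["DBT_RECORDER_FILE_PATH"] = "recording.json"  # previous recording, per the diff
subprocess.run(["dbt", "parse"], env=env)
# Per tear_down_record_replay above, DIFF mode writes a fresh recording
# plus recording_diffs.json comparing it against the previous one.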
@@ -15,5 +15,6 @@ DEPENDENCIES_FILE_NAME = "dependencies.yml"
 PACKAGE_LOCK_FILE_NAME = "package-lock.yml"
 MANIFEST_FILE_NAME = "manifest.json"
 SEMANTIC_MANIFEST_FILE_NAME = "semantic_manifest.json"
+TIME_SPINE_MODEL_NAME = "metricflow_time_spine"
 PARTIAL_PARSE_FILE_NAME = "partial_parse.msgpack"
 PACKAGE_LOCK_HASH_KEY = "sha1_hash"
@@ -4,7 +4,7 @@
 ## Artifacts

 ### Generating JSON schemas
-A helper script, `sripts/collect-artifact-schema.py` is available to generate json schemas corresponding to versioned artifacts (`ArtifactMixin`s).
+A helper script, `scripts/collect-artifact-schema.py` is available to generate json schemas corresponding to versioned artifacts (`ArtifactMixin`s).

 This script is necessary to run when a new artifact schema version is created, or when changes are made to existing artifact versions, and writes json schema to `schema/dbt/<artifact>/v<version>.json`.
@@ -29,6 +29,7 @@ from dbt.adapters.exceptions import (
     DuplicateMacroInPackageError,
     DuplicateMaterializationNameError,
 )
+from dbt.adapters.factory import get_adapter_package_names

 # to preserve import paths
 from dbt.artifacts.resources import BaseResource, DeferRelation, NodeVersion

@@ -720,9 +721,6 @@ class MacroMethods:
         filter: Optional[Callable[[MacroCandidate], bool]] = None,
     ) -> CandidateList:
         """Find macros by their name."""
-        # avoid an import cycle
-        from dbt.adapters.factory import get_adapter_package_names
-
         candidates: CandidateList = CandidateList()

         macros_by_name = self.get_macros_by_name()

@@ -988,6 +986,7 @@ class Manifest(MacroMethods, dbtClassMixin):
             self.metrics.values(),
             self.semantic_models.values(),
             self.saved_queries.values(),
+            self.unit_tests.values(),
         )
         for resource in all_resources:
             resource_type_plural = resource.resource_type.pluralize()

@@ -1094,6 +1093,7 @@ class Manifest(MacroMethods, dbtClassMixin):
             metrics=cls._map_resources_to_map_nodes(writable_manifest.metrics),
             groups=cls._map_resources_to_map_nodes(writable_manifest.groups),
             semantic_models=cls._map_resources_to_map_nodes(writable_manifest.semantic_models),
+            saved_queries=cls._map_resources_to_map_nodes(writable_manifest.saved_queries),
             selectors={
                 selector_id: selector
                 for selector_id, selector in writable_manifest.selectors.items()
@@ -1,8 +1,7 @@
 from typing import Any, Dict, Iterator, List

-from dbt_semantic_interfaces.type_enums import MetricType
-
 from dbt.contracts.graph.manifest import Manifest, Metric
+from dbt_semantic_interfaces.type_enums import MetricType

 DERIVED_METRICS = [MetricType.DERIVED, MetricType.RATIO]
 BASE_METRICS = [MetricType.SIMPLE, MetricType.CUMULATIVE, MetricType.CONVERSION]
@@ -39,12 +39,6 @@ class UnitTestNodeConfig(NodeConfig):
     expected_sql: Optional[str] = None


-@dataclass
-class EmptySnapshotConfig(NodeConfig):
-    materialized: str = "snapshot"
-    unique_key: Optional[str] = None  # override NodeConfig unique_key definition
-
-
 RESOURCE_TYPES: Dict[NodeType, Type[BaseConfig]] = {
     NodeType.Metric: MetricConfig,
     NodeType.SemanticModel: SemanticModelConfig,

@@ -62,7 +56,6 @@ RESOURCE_TYPES: Dict[NodeType, Type[BaseConfig]] = {
 # base resource types are like resource types, except nothing has mandatory
 # configs.
 BASE_RESOURCE_TYPES: Dict[NodeType, Type[BaseConfig]] = RESOURCE_TYPES.copy()
-BASE_RESOURCE_TYPES.update({NodeType.Snapshot: EmptySnapshotConfig})


 def get_config_for(resource_type: NodeType, base=False) -> Type[BaseConfig]:
@@ -19,6 +19,8 @@ from typing import (
 from mashumaro.types import SerializableType

 from dbt import deprecations
+from dbt.adapters.base import ConstraintSupport
+from dbt.adapters.factory import get_adapter_constraint_support
 from dbt.artifacts.resources import Analysis as AnalysisResource
 from dbt.artifacts.resources import (
     BaseResource,

@@ -57,7 +59,7 @@ from dbt.artifacts.resources import Snapshot as SnapshotResource
 from dbt.artifacts.resources import SourceDefinition as SourceDefinitionResource
 from dbt.artifacts.resources import SqlOperation as SqlOperationResource
 from dbt.artifacts.resources import UnitTestDefinition as UnitTestDefinitionResource
-from dbt.contracts.graph.model_config import EmptySnapshotConfig, UnitTestNodeConfig
+from dbt.contracts.graph.model_config import UnitTestNodeConfig
 from dbt.contracts.graph.node_args import ModelNodeArgs
 from dbt.contracts.graph.unparsed import (
     HasYamlMetadata,

@@ -469,6 +471,13 @@ class ModelNode(ModelResource, CompiledNode):
     def is_latest_version(self) -> bool:
         return self.version is not None and self.version == self.latest_version

+    @property
+    def is_past_deprecation_date(self) -> bool:
+        return (
+            self.deprecation_date is not None
+            and self.deprecation_date < datetime.now().astimezone()
+        )
+
     @property
     def search_name(self):
         if self.version is None:

@@ -570,6 +579,42 @@ class ModelNode(ModelResource, CompiledNode):
         data = contract_state.encode("utf-8")
         self.contract.checksum = hashlib.new("sha256", data).hexdigest()

+    def same_contract_removed(self) -> bool:
+        """
+        self: the removed (deleted, renamed, or disabled) model node
+        """
+        # If the contract wasn't previously enforced, no contract change has occurred
+        if self.contract.enforced is False:
+            return True
+
+        # Removed node is past its deprecation_date, so deletion does not constitute a contract change
+        if self.is_past_deprecation_date:
+            return True
+
+        # Disabled, deleted, or renamed node with previously enforced contract.
+        if not self.config.enabled:
+            breaking_change = f"Contracted model '{self.unique_id}' was disabled."
+        else:
+            breaking_change = f"Contracted model '{self.unique_id}' was deleted or renamed."
+
+        if self.version is None:
+            warn_or_error(
+                UnversionedBreakingChange(
+                    breaking_changes=[breaking_change],
+                    model_name=self.name,
+                    model_file_path=self.original_file_path,
+                ),
+                node=self,
+            )
+            return False
+        else:
+            raise (
+                ContractBreakingChangeError(
+                    breaking_changes=[breaking_change],
+                    node=self,
+                )
+            )
+
     def same_contract(self, old, adapter_type=None) -> bool:
         # If the contract wasn't previously enforced:
         if old.contract.enforced is False and self.contract.enforced is False:

@@ -601,10 +646,6 @@ class ModelNode(ModelResource, CompiledNode):
             # Breaking change: the contract was previously enforced, and it no longer is
             contract_enforced_disabled = True

-        # TODO: this avoid the circular imports but isn't ideal
-        from dbt.adapters.base import ConstraintSupport
-        from dbt.adapters.factory import get_adapter_constraint_support
-
         constraint_support = get_adapter_constraint_support(adapter_type)
         column_constraints_exist = False

@@ -1000,19 +1041,6 @@ class UnitTestFileFixture(BaseNode):
 # ====================================


-@dataclass
-class IntermediateSnapshotNode(CompiledNode):
-    # at an intermediate stage in parsing, where we've built something better
-    # than an unparsed node for rendering in parse mode, it's pretty possible
-    # that we won't have critical snapshot-related information that is only
-    # defined in config blocks. To fix that, we have an intermediate type that
-    # uses a regular node config, which the snapshot parser will then convert
-    # into a full ParsedSnapshotNode after rendering. Note: it currently does
-    # not work to set snapshot config in schema files because of the validation.
-    resource_type: Literal[NodeType.Snapshot]
-    config: EmptySnapshotConfig = field(default_factory=EmptySnapshotConfig)
-
-
 @dataclass
 class SnapshotNode(SnapshotResource, CompiledNode):
     @classmethod

@@ -1092,7 +1120,7 @@ class UnpatchedSourceDefinition(BaseNode):
     def get_source_representation(self):
         return f'source("{self.source.name}", "{self.table.name}")'

-    def validate_data_tests(self):
+    def validate_data_tests(self, is_root_project: bool):
         """
         sources parse tests differently than models, so we need to do some validation
         here where it's done in the PatchParser for other nodes

@@ -1103,11 +1131,12 @@ class UnpatchedSourceDefinition(BaseNode):
                 "Invalid test config: cannot have both 'tests' and 'data_tests' defined"
             )
         if self.tests:
-            deprecations.warn(
-                "project-test-config",
-                deprecated_path="tests",
-                exp_path="data_tests",
-            )
+            if is_root_project:
+                deprecations.warn(
+                    "project-test-config",
+                    deprecated_path="tests",
+                    exp_path="data_tests",
+                )
             self.data_tests.extend(self.tests)
             self.tests.clear()

@@ -1118,11 +1147,12 @@ class UnpatchedSourceDefinition(BaseNode):
                     "Invalid test config: cannot have both 'tests' and 'data_tests' defined"
                 )
             if column.tests:
-                deprecations.warn(
-                    "project-test-config",
-                    deprecated_path="tests",
-                    exp_path="data_tests",
-                )
+                if is_root_project:
+                    deprecations.warn(
+                        "project-test-config",
+                        deprecated_path="tests",
+                        exp_path="data_tests",
+                    )
                 column.data_tests.extend(column.tests)
                 column.tests.clear()

@@ -1140,7 +1170,6 @@ class UnpatchedSourceDefinition(BaseNode):
         return [] if self.table.columns is None else self.table.columns

     def get_tests(self) -> Iterator[Tuple[Dict[str, Any], Optional[UnparsedColumn]]]:
-        self.validate_data_tests()
         for data_test in self.data_tests:
             yield normalize_test(data_test), None

@@ -1521,7 +1550,6 @@ class SavedQuery(NodeInfoMixin, GraphNode, SavedQueryResource):
         return self.group == old.group

     def same_exports(self, old: "SavedQuery") -> bool:
-        # TODO: This isn't currently used in `same_contents` (nor called anywhere else)
         if len(self.exports) != len(old.exports):
             return False

@@ -1551,6 +1579,7 @@ class SavedQuery(NodeInfoMixin, GraphNode, SavedQueryResource):
             and self.same_label(old)
             and self.same_config(old)
             and self.same_group(old)
+            and self.same_exports(old)
             and True
         )
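A condensed, dependency-free sketch of the decision table same_contract_removed implements (the real method fires UnversionedBreakingChange or raises ContractBreakingChangeError, as shown above):

def removal_outcome(enforced: bool, past_deprecation_date: bool, versioned: bool) -> str:
    # Unenforced or already-deprecated contracts: removal is not a breaking change.
    if not enforced or past_deprecation_date:
        return "ok"
    # Enforced contract: unversioned models warn, versioned models hard-error.
    return "error" if versioned else "warning"

assert removal_outcome(enforced=False, past_deprecation_date=False, versioned=True) == "ok"
assert removal_outcome(enforced=True, past_deprecation_date=False, versioned=False) == "warning"
assert removal_outcome(enforced=True, past_deprecation_date=False, versioned=True) == "error"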
@@ -1,3 +1,9 @@
+from dbt.constants import TIME_SPINE_MODEL_NAME
+from dbt.events.types import SemanticValidationFailure
+from dbt.exceptions import ParsingError
+from dbt_common.clients.system import write_file
+from dbt_common.events.base_types import EventLevel
+from dbt_common.events.functions import fire_event
 from dbt_semantic_interfaces.implementations.metric import PydanticMetric
 from dbt_semantic_interfaces.implementations.project_configuration import (
     PydanticProjectConfiguration,

@@ -15,12 +21,6 @@ from dbt_semantic_interfaces.validations.semantic_manifest_validator import (
     SemanticManifestValidator,
 )

-from dbt.events.types import SemanticValidationFailure
-from dbt.exceptions import ParsingError
-from dbt_common.clients.system import write_file
-from dbt_common.events.base_types import EventLevel
-from dbt_common.events.functions import fire_event
-

 class SemanticManifest:
     def __init__(self, manifest) -> None:

@@ -82,8 +82,7 @@ class SemanticManifest:
         # Look for time-spine table model and create time spine table configuration
         if self.manifest.semantic_models:
             # Get model for time_spine_table
-            time_spine_model_name = "metricflow_time_spine"
-            model = self.manifest.ref_lookup.find(time_spine_model_name, None, None, self.manifest)
+            model = self.manifest.ref_lookup.find(TIME_SPINE_MODEL_NAME, None, None, self.manifest)
             if not model:
                 raise ParsingError(
                     "The semantic layer requires a 'metricflow_time_spine' model in the project, but none was found. "
@@ -4,8 +4,6 @@ from dataclasses import dataclass, field
 from pathlib import Path
 from typing import Any, Dict, List, Literal, Optional, Sequence, Union

-from dbt_semantic_interfaces.type_enums import ConversionCalculationType
-
 # trigger the PathEncoder
 import dbt_common.helper_types  # noqa:F401
 from dbt import deprecations

@@ -39,6 +37,10 @@ from dbt_common.dataclass_schema import (
     dbtClassMixin,
 )
 from dbt_common.exceptions import DbtInternalError
+from dbt_semantic_interfaces.type_enums import (
+    ConversionCalculationType,
+    PeriodAggregation,
+)


 @dataclass

@@ -532,6 +534,13 @@ class UnparsedConversionTypeParams(dbtClassMixin):
     constant_properties: Optional[List[ConstantPropertyInput]] = None


+@dataclass
+class UnparsedCumulativeTypeParams(dbtClassMixin):
+    window: Optional[str] = None
+    grain_to_date: Optional[str] = None
+    period_agg: str = PeriodAggregation.FIRST.value
+
+
 @dataclass
 class UnparsedMetricTypeParams(dbtClassMixin):
     measure: Optional[Union[UnparsedMetricInputMeasure, str]] = None

@@ -542,6 +551,7 @@ class UnparsedMetricTypeParams(dbtClassMixin):
     grain_to_date: Optional[str] = None  # str is really a TimeGranularity Enum
     metrics: Optional[List[Union[UnparsedMetricInput, str]]] = None
     conversion_type_params: Optional[UnparsedConversionTypeParams] = None
+    cumulative_type_params: Optional[UnparsedCumulativeTypeParams] = None


 @dataclass
@@ -1903,7 +1903,19 @@ message EndOfRunSummaryMsg {
   EndOfRunSummary data = 2;
 }

-// Skipped Z031, Z032, Z033
+// Skipped Z031, Z032
+
+// Z033
+message MarkSkippedChildren {
+  string unique_id = 1;
+  string status = 2;
+  RunResultMsg run_result = 3;
+}
+
+message MarkSkippedChildrenMsg {
+  CoreEventInfo info = 1;
+  MarkSkippedChildren data = 2;
+}

 // Z034
 message LogSkipBecauseError {

@@ -1911,6 +1923,7 @@ message LogSkipBecauseError {
   string relation = 2;
   int32 index = 3;
   int32 total = 4;
+  string status = 5;
 }

 message LogSkipBecauseErrorMsg {
File diff suppressed because one or more lines are too long
@@ -1856,7 +1856,21 @@ class EndOfRunSummary(InfoLevel):
         return message


-# Skipped Z031, Z032, Z033
+# Skipped Z031, Z032
+
+
+class MarkSkippedChildren(DebugLevel):
+    def code(self) -> str:
+        return "Z033"
+
+    def message(self) -> str:
+        msg = (
+            f"Marking all children of '{self.unique_id}' to be skipped "
+            f"because of status '{self.status}'. "
+        )
+        if self.run_result.message:
+            msg = msg + f" Reason: {self.run_result.message}."
+        return msg


 class LogSkipBecauseError(ErrorLevel):

@@ -1864,7 +1878,7 @@ class LogSkipBecauseError(ErrorLevel):
         return "Z034"

     def message(self) -> str:
-        msg = f"SKIP relation {self.schema}.{self.relation} due to ephemeral model error"
+        msg = f"SKIP relation {self.schema}.{self.relation} due to ephemeral model status '{self.status}'"
         return format_fancy_output_line(
             msg=msg, status=red("ERROR SKIP"), index=self.index, total=self.total
         )
@@ -68,6 +68,7 @@ def get_flag_dict():
         "target_path",
         "log_path",
         "invocation_command",
+        "empty",
     }
     return {key: getattr(GLOBAL_FLAGS, key.upper(), None) for key in flag_attr}
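A self-contained sketch of the getattr pattern get_flag_dict uses, with a stand-in flags object, showing the new "empty" key surfacing (the changelog entry above ties this to flags.EMPTY in the Jinja context):

from argparse import Namespace

GLOBAL_FLAGS = Namespace(EMPTY=True, TARGET_PATH=None)  # stand-in for dbt's real flags object

def get_flag_dict():
    flag_attr = {"target_path", "empty"}  # illustrative subset of the real attribute set above
    return {key: getattr(GLOBAL_FLAGS, key.upper(), None) for key in flag_attr}

print(get_flag_dict())  # {'target_path': None, 'empty': True} (key order may vary)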
@@ -25,8 +25,15 @@ class GraphQueue:
     the same time, as there is an unlocked race!
     """

-    def __init__(self, graph: nx.DiGraph, manifest: Manifest, selected: Set[UniqueId]) -> None:
-        self.graph = graph
+    def __init__(
+        self,
+        graph: nx.DiGraph,
+        manifest: Manifest,
+        selected: Set[UniqueId],
+        preserve_edges: bool = True,
+    ) -> None:
+        # 'create_empty_copy' returns a copy of the graph G with all of the edges removed, and leaves nodes intact.
+        self.graph = graph if preserve_edges else nx.classes.function.create_empty_copy(graph)
         self.manifest = manifest
         self._selected = selected
         # store the queue as a priority queue.
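A quick illustration of the networkx helper behind the new preserve_edges=False path: every node survives, every dependency edge is dropped, so the queue can hand out work with no ordering constraints between nodes.

import networkx as nx

g = nx.DiGraph()
g.add_edges_from([("model.a", "model.b"), ("model.b", "model.c")])

flat = nx.classes.function.create_empty_copy(g)
assert set(flat.nodes) == {"model.a", "model.b", "model.c"}
assert flat.number_of_edges() == 0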
@@ -319,7 +319,7 @@ class NodeSelector(MethodManager):

         return filtered_nodes

-    def get_graph_queue(self, spec: SelectionSpec) -> GraphQueue:
+    def get_graph_queue(self, spec: SelectionSpec, preserve_edges: bool = True) -> GraphQueue:
         """Returns a queue over nodes in the graph that tracks progress of
         dependencies.
         """

@@ -330,7 +330,7 @@ class NodeSelector(MethodManager):
         # Construct a new graph using the selected_nodes
         new_graph = self.full_graph.get_subset_graph(selected_nodes)
         # should we give a way here for consumers to mutate the graph?
-        return GraphQueue(new_graph.graph, self.manifest, selected_nodes)
+        return GraphQueue(new_graph.graph, self.manifest, selected_nodes, preserve_edges)


 class ResourceTypeSelector(NodeSelector):
@@ -109,7 +109,7 @@ def is_selected_node(fqn: List[str], node_selector: str, is_versioned: bool) ->


 SelectorTarget = Union[
-    SourceDefinition, ManifestNode, Exposure, Metric, SemanticModel, UnitTestDefinition
+    SourceDefinition, ManifestNode, Exposure, Metric, SemanticModel, UnitTestDefinition, SavedQuery
 ]

@@ -202,6 +202,7 @@ class SelectorMethod(metaclass=abc.ABCMeta):
             self.metric_nodes(included_nodes),
             self.unit_tests(included_nodes),
             self.semantic_model_nodes(included_nodes),
+            self.saved_query_nodes(included_nodes),
         )

     def configurable_nodes(

@@ -680,7 +681,8 @@ class StateSelectorMethod(SelectorMethod):
         self, old: Optional[SelectorTarget], new: SelectorTarget, adapter_type: str
     ) -> bool:
         if isinstance(
-            new, (SourceDefinition, Exposure, Metric, SemanticModel, UnitTestDefinition)
+            new,
+            (SourceDefinition, Exposure, Metric, SemanticModel, UnitTestDefinition, SavedQuery),
         ):
             # these all overwrite `same_contents`
             different_contents = not new.same_contents(old)  # type: ignore

@@ -719,7 +721,9 @@ class StateSelectorMethod(SelectorMethod):
     ) -> Callable[[Optional[SelectorTarget], SelectorTarget], bool]:
         # get a function that compares two selector target based on compare method provided
         def check_modified_contract(old: Optional[SelectorTarget], new: SelectorTarget) -> bool:
-            if hasattr(new, compare_method):
+            if new is None and hasattr(old, compare_method + "_removed"):
+                return getattr(old, compare_method + "_removed")()
+            elif hasattr(new, compare_method):
                 # when old body does not exist or old and new are not the same
                 return not old or not getattr(new, compare_method)(old, adapter_type)  # type: ignore
             else:

@@ -773,6 +777,8 @@ class StateSelectorMethod(SelectorMethod):
             previous_node = SemanticModel.from_resource(manifest.semantic_models[unique_id])
         elif unique_id in manifest.unit_tests:
             previous_node = UnitTestDefinition.from_resource(manifest.unit_tests[unique_id])
+        elif unique_id in manifest.saved_queries:
+            previous_node = SavedQuery.from_resource(manifest.saved_queries[unique_id])

         keyword_args = {}
         if checker.__name__ in [

@@ -785,6 +791,22 @@ class StateSelectorMethod(SelectorMethod):
             if checker(previous_node, node, **keyword_args):  # type: ignore
                 yield unique_id

+        # checkers that can handle removed nodes
+        if checker.__name__ in ["check_modified_contract"]:
+            # ignore included_nodes, since those cannot contain removed nodes
+            for previous_unique_id, previous_node in manifest.nodes.items():
+                # detect removed (deleted, renamed, or disabled) nodes
+                removed_node = None
+                if previous_unique_id in self.manifest.disabled.keys():
+                    removed_node = self.manifest.disabled[previous_unique_id][0]
+                elif previous_unique_id not in self.manifest.nodes.keys():
+                    removed_node = previous_node
+
+                if removed_node:
+                    # do not yield -- removed nodes should never be selected for downstream execution
+                    # as they are not part of the current project's manifest.nodes
+                    checker(removed_node, None, **keyword_args)  # type: ignore
+

 class ResultSelectorMethod(SelectorMethod):
     def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[UniqueId]:
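Taken together with same_contract_removed in nodes.py above, state comparison now exercises contract checks for nodes that vanished from the project. An illustrative invocation (the artifact directory name is hypothetical):

import subprocess

# Deleting or renaming a contracted model now surfaces a warning or error here,
# instead of slipping past state:modified unnoticed.
subprocess.run(["dbt", "ls", "--select", "state:modified", "--state", "prod-artifacts/"])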
@@ -32,7 +32,6 @@ from dbt_common.dataclass_schema import ValidationError
 FinalValue = TypeVar("FinalValue", bound=BaseNode)
 IntermediateValue = TypeVar("IntermediateValue", bound=BaseNode)

-IntermediateNode = TypeVar("IntermediateNode", bound=Any)
 FinalNode = TypeVar("FinalNode", bound=ManifestNode)
@@ -118,7 +117,7 @@ class RelationUpdate:

 class ConfiguredParser(
     Parser[FinalNode],
-    Generic[ConfiguredBlockType, IntermediateNode, FinalNode],
+    Generic[ConfiguredBlockType, FinalNode],
 ):
     def __init__(
         self,

@@ -144,7 +143,7 @@ class ConfiguredParser(
         pass

     @abc.abstractmethod
-    def parse_from_dict(self, dict, validate=True) -> IntermediateNode:
+    def parse_from_dict(self, dict, validate=True) -> FinalNode:
         pass

     @abc.abstractproperty

@@ -208,7 +207,7 @@ class ConfiguredParser(
         fqn: List[str],
         name=None,
         **kwargs,
-    ) -> IntermediateNode:
+    ) -> FinalNode:
         """Create the node that will be passed in to the parser context for
         "rendering". Some information may be partial, as it'll be updated by
         config() and any ref()/source() calls discovered during rendering.

@@ -253,10 +252,10 @@ class ConfiguredParser(
         )
         raise DictParseError(exc, node=node)

-    def _context_for(self, parsed_node: IntermediateNode, config: ContextConfig) -> Dict[str, Any]:
+    def _context_for(self, parsed_node: FinalNode, config: ContextConfig) -> Dict[str, Any]:
         return generate_parser_model_context(parsed_node, self.root_project, self.manifest, config)

-    def render_with_context(self, parsed_node: IntermediateNode, config: ContextConfig):
+    def render_with_context(self, parsed_node: FinalNode, config: ContextConfig):
         # Given the parsed node and a ContextConfig to use during parsing,
         # render the node's sql with macro capture enabled.
         # Note: this mutates the config object when config calls are rendered.

@@ -271,7 +270,7 @@ class ConfiguredParser(
     # updating the config with new config passed in, then re-creating the
     # config from the dict in the node.
     def update_parsed_node_config_dict(
-        self, parsed_node: IntermediateNode, config_dict: Dict[str, Any]
+        self, parsed_node: FinalNode, config_dict: Dict[str, Any]
     ) -> None:
         # Overwrite node config
         final_config_dict = parsed_node.config.to_dict(omit_none=True)

@@ -281,7 +280,7 @@ class ConfiguredParser(
         parsed_node.config = parsed_node.config.from_dict(final_config_dict)

     def update_parsed_node_relation_names(
-        self, parsed_node: IntermediateNode, config_dict: Dict[str, Any]
+        self, parsed_node: FinalNode, config_dict: Dict[str, Any]
     ) -> None:

         # These call the RelationUpdate callable to go through generate_name macros

@@ -300,7 +299,7 @@ class ConfiguredParser(

     def update_parsed_node_config(
         self,
-        parsed_node: IntermediateNode,
+        parsed_node: FinalNode,
         config: ContextConfig,
         context=None,
         patch_config_dict=None,

@@ -334,6 +333,7 @@ class ConfiguredParser(
         # If we have access in the config, copy to node level
         if parsed_node.resource_type == NodeType.Model and config_dict.get("access", None):
             if AccessType.is_valid(config_dict["access"]):
+                assert hasattr(parsed_node, "access")
                 parsed_node.access = AccessType(config_dict["access"])
             else:
                 raise InvalidAccessTypeError(

@@ -360,7 +360,9 @@ class ConfiguredParser(
         if "contract" in config_dict and config_dict["contract"]:
             contract_dct = config_dict["contract"]
             Contract.validate(contract_dct)
-            parsed_node.contract = Contract.from_dict(contract_dct)
+            # Seed node has contract config (from NodeConfig) but no contract in SeedNode
+            if hasattr(parsed_node, "contract"):
+                parsed_node.contract = Contract.from_dict(contract_dct)

         # unrendered_config is used to compare the original database/schema/alias
         # values and to handle 'same_config' and 'same_contents' calls

@@ -382,6 +384,7 @@ class ConfiguredParser(

         # at this point, we've collected our hooks. Use the node context to
         # render each hook and collect refs/sources
+        assert hasattr(parsed_node.config, "pre_hook") and hasattr(parsed_node.config, "post_hook")
         hooks = list(itertools.chain(parsed_node.config.pre_hook, parsed_node.config.post_hook))
         # skip context rebuilding if there aren't any hooks
         if not hooks:

@@ -413,7 +416,7 @@ class ConfiguredParser(
         self._mangle_hooks(config_dict)
         return config_dict

-    def render_update(self, node: IntermediateNode, config: ContextConfig) -> None:
+    def render_update(self, node: FinalNode, config: ContextConfig) -> None:
         try:
             context = self.render_with_context(node, config)
             self.update_parsed_node_config(node, config, context=context)

@@ -462,25 +465,23 @@ class ConfiguredParser(
         pass

     @abc.abstractmethod
-    def transform(self, node: IntermediateNode) -> FinalNode:
+    def transform(self, node: FinalNode) -> FinalNode:
         pass


 class SimpleParser(
-    ConfiguredParser[ConfiguredBlockType, FinalNode, FinalNode],
+    ConfiguredParser[ConfiguredBlockType, FinalNode],
     Generic[ConfiguredBlockType, FinalNode],
 ):
     def transform(self, node):
         return node


-class SQLParser(
-    ConfiguredParser[FileBlock, IntermediateNode, FinalNode], Generic[IntermediateNode, FinalNode]
-):
+class SQLParser(ConfiguredParser[FileBlock, FinalNode], Generic[FinalNode]):
     def parse_file(self, file_block: FileBlock) -> None:
         self.parse_node(file_block)


-class SimpleSQLParser(SQLParser[FinalNode, FinalNode]):
+class SimpleSQLParser(SQLParser[FinalNode]):
     def transform(self, node):
         return node
@@ -114,7 +114,8 @@ class TestBuilder(Generic[Testable]):
        self.package_name: str = package_name
        self.target: Testable = target
        self.version: Optional[NodeVersion] = version
+       self.render_ctx: Dict[str, Any] = render_ctx
+       self.column_name: Optional[str] = column_name
        self.args["model"] = self.build_model_str()

        match = self.TEST_NAME_PATTERN.match(test_name)

@@ -125,39 +126,12 @@ class TestBuilder(Generic[Testable]):
        self.name: str = groups["test_name"]
        self.namespace: str = groups["test_namespace"]
        self.config: Dict[str, Any] = {}
+       # Process legacy args
+       self.config.update(self._process_legacy_args())

-       # This code removes keys identified as config args from the test entry
-       # dictionary. The keys remaining in the 'args' dictionary will be
-       # "kwargs", or keyword args that are passed to the test macro.
-       # The "kwargs" are not rendered into strings until compilation time.
-       # The "configs" are rendered here (since they were not rendered back
-       # in the 'get_key_dicts' methods in the schema parsers).
-       for key in self.CONFIG_ARGS:
-           value = self.args.pop(key, None)
-           # 'modifier' config could be either top level arg or in config
-           if value and "config" in self.args and key in self.args["config"]:
-               raise SameKeyNestedError()
-           if not value and "config" in self.args:
-               value = self.args["config"].pop(key, None)
-           if isinstance(value, str):
-               try:
-                   value = get_rendered(value, render_ctx, native=True)
-               except UndefinedMacroError as e:
-                   raise CustomMacroPopulatingConfigValueError(
-                       target_name=self.target.name,
-                       column_name=column_name,
-                       name=self.name,
-                       key=key,
-                       err_msg=e.msg,
-                   )
-           if value is not None:
-               self.config[key] = value
-
-       if "config" in self.args:
-           del self.args["config"]
+       # Process config args if present
+       self.config.update(self._render_values(self.args.pop("config", {})))

        if self.namespace is not None:
            self.package_name = self.namespace

@@ -182,6 +156,36 @@ class TestBuilder(Generic[Testable]):
        if short_name != full_name and "alias" not in self.config:
            self.config["alias"] = short_name

+   def _process_legacy_args(self):
+       config = {}
+       for key in self.CONFIG_ARGS:
+           value = self.args.pop(key, None)
+           if value and "config" in self.args and key in self.args["config"]:
+               raise SameKeyNestedError()
+           if not value and "config" in self.args:
+               value = self.args["config"].pop(key, None)
+           config[key] = value
+
+       return self._render_values(config)
+
+   def _render_values(self, config: Dict[str, Any]) -> Dict[str, Any]:
+       rendered_config = {}
+       for key, value in config.items():
+           if isinstance(value, str):
+               try:
+                   value = get_rendered(value, self.render_ctx, native=True)
+               except UndefinedMacroError as e:
+                   raise CustomMacroPopulatingConfigValueError(
+                       target_name=self.target.name,
+                       column_name=self.column_name,
+                       name=self.name,
+                       key=key,
+                       err_msg=e.msg,
+                   )
+           if value is not None:
+               rendered_config[key] = value
+       return rendered_config

    def _bad_type(self) -> TypeError:
        return TypeError('invalid target type "{}"'.format(type(self.target)))
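A note on the refactor above: `_process_legacy_args` preserves the long-standing precedence rule that a test config key may be given either as a top-level keyword or under `config:`, but not both at once. A minimal standalone sketch of that rule follows, with hypothetical names (`SameKeyNestedError` here is a local stand-in, and the key list is an illustrative subset, not dbt's actual CONFIG_ARGS):

# Sketch of the top-level vs nested 'config:' precedence used above.
from typing import Any, Dict, Iterable


class SameKeyNestedError(Exception):
    """Stand-in for dbt's error when a key appears both top-level and nested."""


def split_config(args: Dict[str, Any], config_keys: Iterable[str]) -> Dict[str, Any]:
    config = {}
    for key in config_keys:
        value = args.pop(key, None)
        # A key given both top-level and under 'config:' is an error.
        if value and "config" in args and key in args["config"]:
            raise SameKeyNestedError(key)
        if not value and "config" in args:
            value = args["config"].pop(key, None)
        if value is not None:
            config[key] = value
    return config


# 'severity' given top-level, 'where' nested; both end up in config,
# and whatever remains in args becomes kwargs for the test macro.
args = {"severity": "warn", "config": {"where": "id > 0"}, "values": [1, 2]}
print(split_config(args, ["severity", "where"]))  # {'severity': 'warn', 'where': 'id > 0'}
print(args)  # {'config': {}, 'values': [1, 2]}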
@@ -10,8 +10,6 @@ from itertools import chain
from typing import Any, Callable, Dict, List, Mapping, Optional, Set, Tuple, Type, Union

import msgpack
-from dbt_semantic_interfaces.enum_extension import assert_values_exhausted
-from dbt_semantic_interfaces.type_enums import MetricType

import dbt.deprecations
import dbt.exceptions

@@ -119,6 +117,8 @@ from dbt_common.events.functions import fire_event, get_invocation_id, warn_or_e
from dbt_common.events.types import Note
from dbt_common.exceptions.base import DbtValidationError
from dbt_common.helper_types import PathSet
+from dbt_semantic_interfaces.enum_extension import assert_values_exhausted
+from dbt_semantic_interfaces.type_enums import MetricType

PERF_INFO_FILE_NAME = "perf_info.json"

@@ -467,6 +467,7 @@ class ManifestLoader:
            self.process_model_inferred_primary_keys()
            self.check_valid_group_config()
            self.check_valid_access_property()
+           self.check_valid_snapshot_config()

            semantic_manifest = SemanticManifest(self.manifest)
            if not semantic_manifest.validate():

@@ -570,25 +571,21 @@ class ManifestLoader:

    def check_for_model_deprecations(self):
        for node in self.manifest.nodes.values():
-           if isinstance(node, ModelNode):
-               if (
-                   node.deprecation_date
-                   and node.deprecation_date < datetime.datetime.now().astimezone()
-               ):
-                   warn_or_error(
-                       DeprecatedModel(
-                           model_name=node.name,
-                           model_version=version_to_str(node.version),
-                           deprecation_date=node.deprecation_date.isoformat(),
-                       )
-                   )
+           if isinstance(node, ModelNode) and node.is_past_deprecation_date:
+               warn_or_error(
+                   DeprecatedModel(
+                       model_name=node.name,
+                       model_version=version_to_str(node.version),
+                       deprecation_date=node.deprecation_date.isoformat(),
+                   )
+               )

            resolved_refs = self.manifest.resolve_refs(node, self.root_project.project_name)
            resolved_model_refs = [r for r in resolved_refs if isinstance(r, ModelNode)]
            node.depends_on
            for resolved_ref in resolved_model_refs:
                if resolved_ref.deprecation_date:
-                   if resolved_ref.deprecation_date < datetime.datetime.now().astimezone():
+                   if resolved_ref.is_past_deprecation_date:
                        event_cls = DeprecatedReference
                    else:
                        event_cls = UpcomingReferenceDeprecation
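The hunk above moves the date comparison behind an `is_past_deprecation_date` property on the node; the property itself is not shown in this diff. A plausible sketch of what it encapsulates, stated as an assumption rather than the verbatim dbt-core implementation:

import datetime
from dataclasses import dataclass
from typing import Optional


@dataclass
class ModelNodeSketch:
    # Hypothetical stand-in for dbt's ModelNode; only the relevant field.
    deprecation_date: Optional[datetime.datetime] = None

    @property
    def is_past_deprecation_date(self) -> bool:
        # Assumed behavior, mirroring the inline check this refactor replaced:
        # False when no date is set, otherwise compare against now() in the
        # local timezone.
        return (
            self.deprecation_date is not None
            and self.deprecation_date < datetime.datetime.now().astimezone()
        )


node = ModelNodeSketch(datetime.datetime(2023, 1, 1).astimezone())
print(node.is_past_deprecation_date)  # True for any date in the past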
@@ -808,8 +805,12 @@ class ManifestLoader:
        plugin_model_nodes = pm.get_nodes().models
        for node_arg in plugin_model_nodes.values():
            node = ModelNode.from_args(node_arg)
-           # node may already exist from package or running project - in which case we should avoid clobbering it with an external node
-           if node.unique_id not in self.manifest.nodes:
+           # node may already exist from package or running project (even if it is disabled),
+           # in which case we should avoid clobbering it with an external node
+           if (
+               node.unique_id not in self.manifest.nodes
+               and node.unique_id not in self.manifest.disabled
+           ):
                self.manifest.add_node_nofile(node)
                manifest_nodes_modified = True

@@ -1035,47 +1036,6 @@ class ManifestLoader:
        query_header_context = generate_query_header_context(adapter.config, macro_manifest)
        self.macro_hook(query_header_context)

-   # This creates a MacroManifest which contains the macros in
-   # the adapter. Only called by the load_macros call from the
-   # adapter.
-   def create_macro_manifest(self):
-       for project in self.all_projects.values():
-           # what is the manifest passed in actually used for?
-           macro_parser = MacroParser(project, self.manifest)
-           for path in macro_parser.get_paths():
-               source_file = load_source_file(path, ParseFileType.Macro, project.project_name, {})
-               block = FileBlock(source_file)
-               # This does not add the file to the manifest.files,
-               # but that shouldn't be necessary here.
-               macro_parser.parse_file(block)
-       macro_manifest = MacroManifest(self.manifest.macros)
-       return macro_manifest
-
-   # This is called by the adapter code only, to create the
-   # MacroManifest that's stored in the adapter.
-   # 'get_full_manifest' uses a persistent ManifestLoader while this
-   # creates a temporary ManifestLoader and throws it away.
-   # Not sure when this would actually get used except in tests.
-   # The ManifestLoader loads macros with other files, then copies
-   # into the adapter MacroManifest.
-   @classmethod
-   def load_macros(
-       cls,
-       root_config: RuntimeConfig,
-       macro_hook: Callable[[Manifest], Any],
-       base_macros_only=False,
-   ) -> Manifest:
-       # base_only/base_macros_only: for testing only,
-       # allows loading macros without running 'dbt deps' first
-       projects = root_config.load_dependencies(base_only=base_macros_only)
-
-       # This creates a loader object, including result,
-       # and then throws it away, returning only the
-       # manifest
-       loader = cls(root_config, projects, macro_hook)
-
-       return loader.create_macro_manifest()

    # Create tracking event for saving performance info
    def track_project_load(self):
        invocation_id = get_invocation_id()

@@ -1345,6 +1305,16 @@ class ManifestLoader:
            materialization=node.get_materialization(),
        )

+   def check_valid_snapshot_config(self):
+       # Snapshot config can be set in either SQL files or yaml files,
+       # so we need to validate afterward.
+       for node in self.manifest.nodes.values():
+           if node.resource_type != NodeType.Snapshot:
+               continue
+           if node.created_at < self.started_at:
+               continue
+           node.config.final_validate()

    def write_perf_info(self, target_path: str):
        path = os.path.join(target_path, PERF_INFO_FILE_NAME)
        write_file(path, json.dumps(self._perf_info, cls=dbt.utils.JSONEncoder, indent=4))
@@ -1,16 +1,8 @@
from typing import Any, Dict, List, Optional, Union

-from dbt_semantic_interfaces.type_enums import (
-   AggregationType,
-   ConversionCalculationType,
-   DimensionType,
-   EntityType,
-   MetricType,
-   TimeGranularity,
-)
-
from dbt.artifacts.resources import (
    ConversionTypeParams,
+   CumulativeTypeParams,
    Dimension,
    DimensionTypeParams,
    Entity,

@@ -42,6 +34,7 @@ from dbt.context.providers import (
from dbt.contracts.graph.nodes import Exposure, Group, Metric, SavedQuery, SemanticModel
from dbt.contracts.graph.unparsed import (
    UnparsedConversionTypeParams,
+   UnparsedCumulativeTypeParams,
    UnparsedDimension,
    UnparsedDimensionTypeParams,
    UnparsedEntity,

@@ -64,6 +57,15 @@ from dbt.parser.common import YamlBlock
from dbt.parser.schemas import ParseResult, SchemaParser, YamlReader
from dbt_common.dataclass_schema import ValidationError
from dbt_common.exceptions import DbtInternalError
+from dbt_semantic_interfaces.type_enums import (
+   AggregationType,
+   ConversionCalculationType,
+   DimensionType,
+   EntityType,
+   MetricType,
+   PeriodAggregation,
+   TimeGranularity,
+)


def parse_where_filter(

@@ -221,9 +223,19 @@ class MetricParser(YamlReader):

        return input_measures

-   def _get_time_window(
-       self,
-       unparsed_window: Optional[str],
+   def _get_period_agg(self, unparsed_period_agg: str) -> PeriodAggregation:
+       return PeriodAggregation(unparsed_period_agg)
+
+   def _get_optional_grain_to_date(
+       self, unparsed_grain_to_date: Optional[str]
+   ) -> Optional[TimeGranularity]:
+       if not unparsed_grain_to_date:
+           return None
+
+       return TimeGranularity(unparsed_grain_to_date)
+
+   def _get_optional_time_window(
+       self, unparsed_window: Optional[str]
    ) -> Optional[MetricTimeWindow]:
        if unparsed_window is not None:
            parts = unparsed_window.split(" ")

@@ -277,7 +289,7 @@ class MetricParser(YamlReader):
            name=unparsed.name,
            filter=parse_where_filter(unparsed.filter),
            alias=unparsed.alias,
-           offset_window=self._get_time_window(unparsed.offset_window),
+           offset_window=self._get_optional_time_window(unparsed.offset_window),
            offset_to_grain=offset_to_grain,
        )

@@ -311,11 +323,48 @@ class MetricParser(YamlReader):
            conversion_measure=self._get_input_measure(unparsed.conversion_measure),
            entity=unparsed.entity,
            calculation=ConversionCalculationType(unparsed.calculation),
-           window=self._get_time_window(unparsed.window),
+           window=self._get_optional_time_window(unparsed.window),
            constant_properties=unparsed.constant_properties,
        )

-   def _get_metric_type_params(self, type_params: UnparsedMetricTypeParams) -> MetricTypeParams:
+   def _get_optional_cumulative_type_params(
+       self, unparsed_metric: UnparsedMetric
+   ) -> Optional[CumulativeTypeParams]:
+       unparsed_type_params = unparsed_metric.type_params
+       if unparsed_metric.type.lower() == MetricType.CUMULATIVE.value:
+           if not unparsed_type_params.cumulative_type_params:
+               unparsed_type_params.cumulative_type_params = UnparsedCumulativeTypeParams()
+
+           if (
+               unparsed_type_params.window
+               and not unparsed_type_params.cumulative_type_params.window
+           ):
+               unparsed_type_params.cumulative_type_params.window = unparsed_type_params.window
+           if (
+               unparsed_type_params.grain_to_date
+               and not unparsed_type_params.cumulative_type_params.grain_to_date
+           ):
+               unparsed_type_params.cumulative_type_params.grain_to_date = (
+                   unparsed_type_params.grain_to_date
+               )
+
+           return CumulativeTypeParams(
+               window=self._get_optional_time_window(
+                   unparsed_type_params.cumulative_type_params.window
+               ),
+               grain_to_date=self._get_optional_grain_to_date(
+                   unparsed_type_params.cumulative_type_params.grain_to_date
+               ),
+               period_agg=self._get_period_agg(
+                   unparsed_type_params.cumulative_type_params.period_agg
+               ),
+           )
+
+       return None
+
+   def _get_metric_type_params(self, unparsed_metric: UnparsedMetric) -> MetricTypeParams:
+       type_params = unparsed_metric.type_params

        grain_to_date: Optional[TimeGranularity] = None
        if type_params.grain_to_date is not None:
            grain_to_date = TimeGranularity(type_params.grain_to_date)

@@ -325,12 +374,15 @@ class MetricParser(YamlReader):
            numerator=self._get_optional_metric_input(type_params.numerator),
            denominator=self._get_optional_metric_input(type_params.denominator),
            expr=str(type_params.expr) if type_params.expr is not None else None,
-           window=self._get_time_window(type_params.window),
+           window=self._get_optional_time_window(type_params.window),
            grain_to_date=grain_to_date,
            metrics=self._get_metric_inputs(type_params.metrics),
            conversion_type_params=self._get_optional_conversion_type_params(
                type_params.conversion_type_params
-           )
+           ),
+           cumulative_type_params=self._get_optional_cumulative_type_params(
+               unparsed_metric=unparsed_metric,
+           ),
            # input measures are calculated via metric processing post parsing
            # input_measures=?,
        )

@@ -380,7 +432,7 @@ class MetricParser(YamlReader):
            description=unparsed.description,
            label=unparsed.label,
            type=MetricType(unparsed.type),
-           type_params=self._get_metric_type_params(unparsed.type_params),
+           type_params=self._get_metric_type_params(unparsed),
            filter=parse_where_filter(unparsed.filter),
            meta=unparsed.meta,
            tags=unparsed.tags,
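The `_get_optional_time_window` helper above parses window strings such as "7 days"; the body of the parse is elided in this diff (the last visible line is `parts = unparsed_window.split(" ")`). A standalone sketch of the assumed parsing behavior, with a hypothetical stand-in for dbt's MetricTimeWindow:

from dataclasses import dataclass
from typing import Optional


@dataclass
class MetricTimeWindowSketch:
    # Hypothetical stand-in for dbt's MetricTimeWindow (count + granularity).
    count: int
    granularity: str


def get_optional_time_window(unparsed_window: Optional[str]) -> Optional[MetricTimeWindowSketch]:
    # Assumed format: "<count> <granularity>", e.g. "7 days" or "1 month".
    if unparsed_window is None:
        return None
    parts = unparsed_window.split(" ")
    if len(parts) != 2:
        raise ValueError(f"Invalid window: {unparsed_window!r}")
    count, granularity = parts
    # Strip a trailing plural 's' ("days" -> "day"); an assumption about
    # how the granularity token is normalized before enum lookup.
    return MetricTimeWindowSketch(int(count), granularity.rstrip("s"))


print(get_optional_time_window("7 days"))  # MetricTimeWindowSketch(count=7, granularity='day')
print(get_optional_time_window(None))      # None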
@@ -117,6 +117,11 @@ def yaml_from_file(source_file: SchemaSourceFile) -> Optional[Dict[str, Any]]:
    if contents is None:
        return contents

+   if not isinstance(contents, dict):
+       raise DbtValidationError(
+           f"Contents of file '{source_file.original_file_path}' are not valid. Dictionary expected."
+       )

    # When loaded_at_field is defined as None or null, it shows up in
    # the dict but when it is not defined, it does not show up in the dict
    # We need to capture this to be able to override source level settings later.

@@ -539,37 +544,44 @@ class PatchParser(YamlReader, Generic[NonSourceTarget, Parsed]):
    def normalize_access_attribute(self, data, path):
        return self.normalize_attribute(data, path, "access")

+   @property
+   def is_root_project(self):
+       if self.root_project.project_name == self.project.project_name:
+           return True
+       return False

    def validate_data_tests(self, data):
        # Rename 'tests' -> 'data_tests' at both model-level and column-level
        # Raise a validation error if the user has defined both names
-       def validate_and_rename(data):
+       def validate_and_rename(data, is_root_project: bool):
            if data.get("tests"):
                if "tests" in data and "data_tests" in data:
                    raise ValidationError(
                        "Invalid test config: cannot have both 'tests' and 'data_tests' defined"
                    )
-               deprecations.warn(
-                   "project-test-config",
-                   deprecated_path="tests",
-                   exp_path="data_tests",
-               )
+               if is_root_project:
+                   deprecations.warn(
+                       "project-test-config",
+                       deprecated_path="tests",
+                       exp_path="data_tests",
+                   )
                data["data_tests"] = data.pop("tests")

        # model-level tests
-       validate_and_rename(data)
+       validate_and_rename(data, self.is_root_project)

        # column-level tests
        if data.get("columns"):
            for column in data["columns"]:
-               validate_and_rename(column)
+               validate_and_rename(column, self.is_root_project)

        # versioned models
        if data.get("versions"):
            for version in data["versions"]:
-               validate_and_rename(version)
+               validate_and_rename(version, self.is_root_project)
                if version.get("columns"):
                    for column in version["columns"]:
-                       validate_and_rename(column)
+                       validate_and_rename(column, self.is_root_project)

    def patch_node_config(self, node, patch):
        if "access" in patch.config:
@@ -1,7 +1,7 @@
import os
from typing import List

-from dbt.contracts.graph.nodes import IntermediateSnapshotNode, SnapshotNode
+from dbt.contracts.graph.nodes import SnapshotNode
from dbt.exceptions import SnapshopConfigError
from dbt.node_types import NodeType
from dbt.parser.base import SQLParser

@@ -10,11 +10,11 @@ from dbt.utils import split_path
from dbt_common.dataclass_schema import ValidationError


-class SnapshotParser(SQLParser[IntermediateSnapshotNode, SnapshotNode]):
-   def parse_from_dict(self, dct, validate=True) -> IntermediateSnapshotNode:
+class SnapshotParser(SQLParser[SnapshotNode]):
+   def parse_from_dict(self, dct, validate=True) -> SnapshotNode:
        if validate:
-           IntermediateSnapshotNode.validate(dct)
-       return IntermediateSnapshotNode.from_dict(dct)
+           SnapshotNode.validate(dct)
+       return SnapshotNode.from_dict(dct)

    @property
    def resource_type(self) -> NodeType:

@@ -54,18 +54,10 @@ class SnapshotParser(SQLParser[SnapshotNode]):
        fqn.append(name)
        return fqn

-   def transform(self, node: IntermediateSnapshotNode) -> SnapshotNode:
+   def transform(self, node: SnapshotNode) -> SnapshotNode:
        try:
-           # The config_call_dict is not serialized, because normally
-           # it is not needed after parsing. But since the snapshot node
-           # does this extra to_dict, save and restore it, to keep
-           # the model config when there is also schema config.
-           config_call_dict = node.config_call_dict
-           dct = node.to_dict(omit_none=True)
-           parsed_node = SnapshotNode.from_dict(dct)
-           parsed_node.config_call_dict = config_call_dict
-           self.set_snapshot_attributes(parsed_node)
-           return parsed_node
+           self.set_snapshot_attributes(node)
+           return node
        except ValidationError as exc:
            raise SnapshopConfigError(exc, node)
@@ -226,6 +226,8 @@ class SourcePatcher:
        return generic_test_parser

    def get_source_tests(self, target: UnpatchedSourceDefinition) -> Iterable[GenericTestNode]:
+       is_root_project = True if self.root_project.project_name == target.package_name else False
+       target.validate_data_tests(is_root_project)
        for data_test, column in target.get_tests():
            yield self.parse_source_test(
                target=target,
@@ -410,13 +410,14 @@ class BaseRunner(metaclass=ABCMeta):
        return self.skip_cause.node.is_ephemeral_model

    def on_skip(self):
-       schema_name = self.node.schema
+       schema_name = getattr(self.node, "schema", "")
        node_name = self.node.name

        error_message = None
        if not self.node.is_ephemeral_model:
            # if this model was skipped due to an upstream ephemeral model
            # failure, print a special 'error skip' message.
+           # Include skip_cause NodeStatus
            if self._skip_caused_by_ephemeral_failure():
                fire_event(
                    LogSkipBecauseError(

@@ -424,8 +425,10 @@ class BaseRunner(metaclass=ABCMeta):
                        relation=node_name,
                        index=self.node_index,
                        total=self.num_nodes,
+                       status=self.skip_cause.status,
                    )
                )
                # skip_cause here should be the run_result from the ephemeral model
                print_run_result_error(result=self.skip_cause, newline=False)
                if self.skip_cause is None:  # mypy appeasement
                    raise DbtInternalError(
@@ -10,7 +10,7 @@ from dbt.graph import ResourceTypeSelector
from dbt.node_types import REFABLE_NODE_TYPES
from dbt.task.base import BaseRunner, resource_types_from_args
from dbt.task.run import _validate_materialization_relations_dict
-from dbt.task.runnable import GraphRunnableTask
+from dbt.task.runnable import GraphRunnableMode, GraphRunnableTask
from dbt_common.dataclass_schema import dbtClassMixin
from dbt_common.exceptions import CompilationError, DbtInternalError

@@ -94,6 +94,9 @@ class CloneTask(GraphRunnableTask):
    def raise_on_first_error(self):
        return False

+   def get_run_mode(self) -> GraphRunnableMode:
+       return GraphRunnableMode.Independent

    def _get_deferred_manifest(self) -> Optional[Manifest]:
        # Unlike other commands, 'clone' always requires a state manifest
        # Load previous state, regardless of whether --defer flag has been set
@@ -104,6 +104,12 @@ class CompileTask(GraphRunnableTask):
            )
            sql_node = block_parser.parse_remote(self.args.inline, "inline_query")
            process_node(self.config, self.manifest, sql_node)
+           # Special hack to remove disabled, if it's there. This would only happen
+           # if all models are disabled in dbt_project
+           if sql_node.config.enabled is False:
+               sql_node.config.enabled = True
+               self.manifest.disabled.pop(sql_node.unique_id)
            self.manifest.nodes[sql_node.unique_id] = sql_node
            # keep track of the node added to the manifest
            self._inline_node_id = sql_node.unique_id
        except CompilationError as exc:
File diff suppressed because one or more lines are too long
@@ -16,11 +16,12 @@ class ServeTask(ConfiguredTask):
        shutil.copyfile(DOCS_INDEX_FILE_PATH, "index.html")

        port = self.args.port
+       host = self.args.host

        if self.args.browser:
            webbrowser.open_new_tab(f"http://localhost:{port}")

-       with socketserver.TCPServer(("127.0.0.1", port), SimpleHTTPRequestHandler) as httpd:
+       with socketserver.TCPServer((host, port), SimpleHTTPRequestHandler) as httpd:
            click.echo(f"Serving docs at {port}")
            click.echo(f"To access from your browser, navigate to: http://localhost:{port}")
            click.echo("\n\n")
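The change above threads the new --host flag through to the TCP bind address instead of hardcoding the loopback interface. A minimal standalone sketch of the same pattern using only the standard library (the 127.0.0.1 default matches the flag's default; binding 0.0.0.0 exposes the docs site to the network, which is why loopback stays the default):

import socketserver
from http.server import SimpleHTTPRequestHandler


def serve_docs(host: str = "127.0.0.1", port: int = 8080) -> None:
    # Bind to the requested interface; "0.0.0.0" listens on all interfaces.
    with socketserver.TCPServer((host, port), SimpleHTTPRequestHandler) as httpd:
        print(f"Serving docs at http://localhost:{port}")
        httpd.serve_forever()


# Equivalent CLI usage once a dbt version carrying this change is installed:
#   dbt docs serve --host 0.0.0.0 --port 8080
if __name__ == "__main__":
    serve_docs()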
@@ -13,6 +13,7 @@ from dbt.events.types import (
    StatsLine,
)
from dbt.node_types import NodeType
+from dbt_common.events.base_types import EventLevel
from dbt_common.events.format import pluralize
from dbt_common.events.functions import fire_event
from dbt_common.events.types import Formatting

@@ -68,14 +69,13 @@ def print_run_status_line(results) -> None:


def print_run_result_error(result, newline: bool = True, is_warning: bool = False) -> None:
-   if newline:
-       fire_event(Formatting(""))
-
    # set node_info for logging events
    node_info = None
    if hasattr(result, "node") and result.node:
        node_info = result.node.node_info
    if result.status == NodeStatus.Fail or (is_warning and result.status == NodeStatus.Warn):
+       if newline:
+           fire_event(Formatting(""))
        if is_warning:
            fire_event(
                RunResultWarning(

@@ -112,8 +112,13 @@ def print_run_result_error(result, newline: bool = True, is_warning: bool = Fals
            fire_event(
                CheckNodeTestFailure(relation_name=result.node.relation_name, node_info=node_info)
            )

+   elif result.status == NodeStatus.Skipped and result.message is not None:
+       if newline:
+           fire_event(Formatting(""), level=EventLevel.DEBUG)
+       fire_event(RunResultError(msg=result.message), level=EventLevel.DEBUG)
    elif result.message is not None:
+       if newline:
+           fire_event(Formatting(""))
        fire_event(RunResultError(msg=result.message, node_info=node_info))
@@ -5,7 +5,7 @@ from concurrent.futures import as_completed
from datetime import datetime
from multiprocessing.dummy import Pool as ThreadPool
from pathlib import Path
-from typing import AbstractSet, Dict, Iterable, List, Optional, Set, Tuple
+from typing import AbstractSet, Dict, Iterable, List, Optional, Set, Tuple, Union

import dbt.exceptions
import dbt.tracking

@@ -31,6 +31,7 @@ from dbt.events.types import (
    EndRunResult,
    GenericExceptionOnRun,
    LogCancelLine,
+   MarkSkippedChildren,
    NodeFinished,
    NodeStart,
    NothingToDo,

@@ -48,6 +49,7 @@ from dbt.graph import (
from dbt.parser.manifest import write_manifest
from dbt.task.base import BaseRunner, ConfiguredTask
from dbt_common.context import _INVOCATION_CONTEXT_VAR, get_invocation_context
+from dbt_common.dataclass_schema import StrEnum
from dbt_common.events.contextvars import log_contextvars, task_contextvars
from dbt_common.events.functions import fire_event, warn_or_error
from dbt_common.events.types import Formatting

@@ -58,6 +60,11 @@ from .printer import print_run_end_messages, print_run_result_error

RESULT_FILE_NAME = "run_results.json"


+class GraphRunnableMode(StrEnum):
+   Topological = "topological"
+   Independent = "independent"


class GraphRunnableTask(ConfiguredTask):
    MARK_DEPENDENT_ERRORS_STATUSES = [NodeStatus.Error]

@@ -102,7 +109,11 @@ class GraphRunnableTask(ConfiguredTask):

    def get_selection_spec(self) -> SelectionSpec:
        default_selector_name = self.config.get_default_selector_name()
-       if self.args.selector:
+       spec: Union[SelectionSpec, bool]
+       if hasattr(self.args, "inline") and self.args.inline:
+           # We want an empty selection spec.
+           spec = parse_difference(None, None)
+       elif self.args.selector:
            # use pre-defined selector (--selector)
            spec = self.config.get_selector(self.args.selector)
        elif not (self.selection_arg or self.exclusion_arg) and default_selector_name:

@@ -135,7 +146,15 @@ class GraphRunnableTask(ConfiguredTask):
        selector = self.get_node_selector()
        # Following uses self.selection_arg and self.exclusion_arg
        spec = self.get_selection_spec()
-       return selector.get_graph_queue(spec)
+
+       preserve_edges = True
+       if self.get_run_mode() == GraphRunnableMode.Independent:
+           preserve_edges = False
+
+       return selector.get_graph_queue(spec, preserve_edges)
+
+   def get_run_mode(self) -> GraphRunnableMode:
+       return GraphRunnableMode.Topological

    def _runtime_initialize(self):
        self.compile_manifest()

@@ -418,6 +437,13 @@ class GraphRunnableTask(ConfiguredTask):
    ) -> None:
        if self.graph is None:
            raise DbtInternalError("graph is None in _mark_dependent_errors")
+       fire_event(
+           MarkSkippedChildren(
+               unique_id=node_id,
+               status=result.status,
+               run_result=result.to_msg_dict(),
+           )
+       )
        for dep_node_id in self.graph.get_dependent_nodes(UniqueId(node_id)):
            self._skipped_children[dep_node_id] = cause
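The GraphRunnableMode enum above is what lets dbt clone run maximally parallel: a task that overrides get_run_mode() to Independent gets a graph queue with edges dropped, so nodes no longer wait on their parents. A toy sketch of the idea follows (stdlib only; the dict-based queue is a stand-in, not dbt's GraphQueue):

from enum import Enum


class GraphRunnableMode(str, Enum):
    Topological = "topological"
    Independent = "independent"


def build_queue(edges, nodes, mode):
    # In Topological mode, keep edges: children are scheduled only after
    # their parents finish. In Independent mode, drop them: everything is
    # immediately schedulable and can run fully in parallel.
    preserve_edges = mode is GraphRunnableMode.Topological
    return {"nodes": list(nodes), "edges": list(edges) if preserve_edges else []}


nodes = ["model_a", "model_b"]
edges = [("model_a", "model_b")]
print(build_queue(edges, nodes, GraphRunnableMode.Topological))  # keeps the dependency
print(build_queue(edges, nodes, GraphRunnableMode.Independent))  # no edges -> all parallel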
@@ -126,6 +126,8 @@ class TestRunner(CompileRunner):
    def execute_data_test(self, data_test: TestNode, manifest: Manifest) -> TestResultData:
        context = generate_runtime_model_context(data_test, self.config, manifest)

+       hook_ctx = self.adapter.pre_model_hook(context)

        materialization_macro = manifest.find_materialization_macro_by_name(
            self.config.project_name, data_test.get_materialization(), self.adapter.type()
        )

@@ -142,8 +144,12 @@ class TestRunner(CompileRunner):

        # generate materialization macro
        macro_func = MacroGenerator(materialization_macro, context)
-       # execute materialization macro
-       macro_func()
+       try:
+           # execute materialization macro
+           macro_func()
+       finally:
+           self.adapter.post_model_hook(context, hook_ctx)

        # load results from context
        # could eventually be returned directly by materialization
        result = context["load_result"]("main")

@@ -198,6 +204,8 @@ class TestRunner(CompileRunner):
        # materialization, not compile the node.compiled_code
        context = generate_runtime_model_context(unit_test_node, self.config, unit_test_manifest)

+       hook_ctx = self.adapter.pre_model_hook(context)

        materialization_macro = unit_test_manifest.find_materialization_macro_by_name(
            self.config.project_name, unit_test_node.get_materialization(), self.adapter.type()
        )

@@ -215,14 +223,16 @@ class TestRunner(CompileRunner):

        # generate materialization macro
        macro_func = MacroGenerator(materialization_macro, context)
-       # execute materialization macro
        try:
+           # execute materialization macro
            macro_func()
        except DbtBaseException as e:
            raise DbtRuntimeError(
                f"An error occurred during execution of unit test '{unit_test_def.name}'. "
                f"There may be an error in the unit test definition: check the data types.\n {e}"
            )
+       finally:
+           self.adapter.post_model_hook(context, hook_ctx)

        # load results from context
        # could eventually be returned directly by materialization

@@ -338,16 +348,17 @@ class TestRunner(CompileRunner):
    def _get_daff_diff(
        self, expected: "agate.Table", actual: "agate.Table", ordered: bool = False
    ) -> daff.TableDiff:

-       expected_daff_table = daff.PythonTableView(list_rows_from_table(expected))
-       actual_daff_table = daff.PythonTableView(list_rows_from_table(actual))
-
-       alignment = daff.Coopy.compareTables(expected_daff_table, actual_daff_table).align()
-       result = daff.PythonTableView([])
+       # Sort expected and actual inputs prior to creating daff diff to ensure order insensitivity
+       # https://github.com/paulfitz/daff/issues/200
+       expected_daff_table = daff.PythonTableView(list_rows_from_table(expected, sort=True))
+       actual_daff_table = daff.PythonTableView(list_rows_from_table(actual, sort=True))

        flags = daff.CompareFlags()
        flags.ordered = ordered

+       alignment = daff.Coopy.compareTables(expected_daff_table, actual_daff_table, flags).align()
+       result = daff.PythonTableView([])

        diff = daff.TableDiff(alignment, flags)
        diff.hilite(result)
        return diff

@@ -408,10 +419,25 @@ def json_rows_from_table(table: "agate.Table") -> List[Dict[str, Any]]:


# This was originally in agate_helper, but that was moved out into dbt_common
-def list_rows_from_table(table: "agate.Table") -> List[Any]:
-   "Convert a table to a list of lists, where the first element represents the header"
-   rows = [[col.name for col in table.columns]]
+def list_rows_from_table(table: "agate.Table", sort: bool = False) -> List[Any]:
+   """
+   Convert given table to a list of lists, where the first element represents the header
+
+   By default, sort is False and no sort order is applied to the non-header rows of the given table.
+
+   If sort is True, sort the non-header rows hierarchically, treating None values as lower in order.
+   Examples:
+   * [['a','b','c'],[4,5,6],[1,2,3]] -> [['a','b','c'],[1,2,3],[4,5,6]]
+   * [['a','b','c'],[4,5,6],[1,null,3]] -> [['a','b','c'],[1,null,3],[4,5,6]]
+   * [['a','b','c'],[4,5,6],[null,2,3]] -> [['a','b','c'],[4,5,6],[null,2,3]]
+   """
+   header = [col.name for col in table.columns]
+
+   rows = []
    for row in table.rows:
        rows.append(list(row.values()))

-   return rows
+   if sort:
+       rows = sorted(rows, key=lambda x: [(elem is None, elem) for elem in x])
+
+   return [header] + rows
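The sort key above, [(elem is None, elem) for elem in x], is a standard Python trick for sorting rows that may contain None: tuples starting with False (non-None) compare before tuples starting with True (None), so None cells sort after any real value in the same column position (despite the docstring's "lower in order" phrasing, its examples show None sorting last). A quick standalone check:

rows = [[4, 5, 6], [1, None, 3], [None, 2, 3], [1, 2, 3]]

# (elem is None, elem) sorts real values first and None cells last within
# each column position; equal prefixes fall through to the next column.
sorted_rows = sorted(rows, key=lambda x: [(elem is None, elem) for elem in x])
print(sorted_rows)
# [[1, 2, 3], [1, None, 3], [4, 5, 6], [None, 2, 3]]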
core/dbt/tests/fixtures/project.py (vendored, 41 changes)
@@ -282,8 +282,8 @@ def adapter(
    project_root,
    profiles_root,
    profiles_yml,
-   dbt_project_yml,
    clean_up_logging,
+   dbt_project_yml,
):
    # The profiles.yml and dbt_project.yml should already be written out
    args = Namespace(

@@ -385,7 +385,20 @@ def analyses():

# Write out the files provided by models, macros, properties, snapshots, seeds, tests, analyses
@pytest.fixture(scope="class")
-def project_files(project_root, models, macros, snapshots, properties, seeds, tests, analyses):
+def project_files(
+   project_root,
+   models,
+   macros,
+   snapshots,
+   properties,
+   seeds,
+   tests,
+   analyses,
+   selectors_yml,
+   dependencies_yml,
+   packages_yml,
+   dbt_project_yml,
+):
    write_project_files(project_root, "models", {**models, **properties})
    write_project_files(project_root, "macros", macros)
    write_project_files(project_root, "snapshots", snapshots)

@@ -515,12 +528,8 @@ def initialization(environment) -> None:
    enable_test_caching()


-# This is the main fixture that is used in all functional tests. It pulls in the other
-# fixtures that are necessary to set up a dbt project, and saves some of the information
-# in a TestProjInfo class, which it returns, so that individual test cases do not have
-# to pull in the other fixtures individually to access their information.
@pytest.fixture(scope="class")
-def project(
+def project_setup(
    initialization,
    clean_up_logging,
    project_root,

@@ -528,12 +537,7 @@ def project_setup(
    request,
    unique_schema,
    profiles_yml,
-   dbt_project_yml,
-   packages_yml,
-   dependencies_yml,
-   selectors_yml,
    adapter,
-   project_files,
    shared_data_dir,
    test_data_dir,
    logs_dir,

@@ -587,3 +591,16 @@ def project_setup(
        pass
    os.chdir(orig_cwd)
    cleanup_event_logger()


+# This is the main fixture that is used in all functional tests. It pulls in the other
+# fixtures that are necessary to set up a dbt project, and saves some of the information
+# in a TestProjInfo class, which it returns, so that individual test cases do not have
+# to pull in the other fixtures individually to access their information.
+# The order of arguments here determines which step runs first.
+@pytest.fixture(scope="class")
+def project(
+   project_setup: TestProjInfo,
+   project_files,
+):
+   return project_setup
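The new project fixture above relies on pytest generally instantiating same-scope argument fixtures in declaration order, so project_setup completes before project_files writes files. A minimal illustration of that ordering behavior, with hypothetical fixtures rather than dbt's:

import pytest

order = []


@pytest.fixture
def setup():
    order.append("setup")


@pytest.fixture
def files():
    order.append("files")


@pytest.fixture
def combined(setup, files):
    # pytest resolves argument fixtures left to right here, so "setup"
    # is recorded before "files".
    return order


def test_fixture_order(combined):
    assert combined == ["setup", "files"]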
@@ -90,7 +90,6 @@ def run_dbt(
        args.extend(["--project-dir", project_dir])
    if profiles_dir and "--profiles-dir" not in args:
        args.extend(["--profiles-dir", profiles_dir])

    dbt = dbtRunner()
    res = dbt.invoke(args)
@@ -69,9 +69,9 @@ setup(
        # Accept patches but avoid automatically updating past a set minor version range.
        "dbt-extractor>=0.5.0,<=0.6",
        "minimal-snowplow-tracker>=0.0.2,<0.1",
-       "dbt-semantic-interfaces>=0.5.1,<0.6",
+       "dbt-semantic-interfaces>=0.6.1,<0.7",
        # Minor versions for these are expected to be backwards-compatible
-       "dbt-common>=1.1.0,<2.0",
+       "dbt-common>=1.3.0,<2.0",
        "dbt-adapters>=1.1.1,<2.0",
        # ----
        # Expect compatibility with all new versions of these packages, so lower bounds only.
@@ -2,16 +2,20 @@ git+https://github.com/dbt-labs/dbt-adapters.git@main
git+https://github.com/dbt-labs/dbt-adapters.git@main#subdirectory=dbt-tests-adapter
git+https://github.com/dbt-labs/dbt-common.git@main
git+https://github.com/dbt-labs/dbt-postgres.git@main
-black>=24.3.0,<25.0
+# black must match what's in .pre-commit-config.yaml to be sure local env matches CI
+black==22.3.0
bumpversion
ddtrace==2.3.0
docutils
-flake8
+# flake8 must match what's in .pre-commit-config.yaml to be sure local env matches CI
+flake8==4.0.1
flaky
freezegun>=1.4.0,<1.5
hypothesis
ipdb
-isort>=5.12,<6
+# isort must match what's in .pre-commit-config.yaml to be sure local env matches CI
+isort==5.13.2
+# mypy must match what's in .pre-commit-config.yaml to be sure local env matches CI
+mypy==1.4.1
pip-tools
pre-commit
@@ -12,7 +12,7 @@
    },
    "dbt_version": {
        "type": "string",
-       "default": "1.8.0a1"
+       "default": "1.9.0a1"
    },
    "generated_at": {
        "type": "string"
@@ -13,7 +13,7 @@
    },
    "dbt_version": {
        "type": "string",
-       "default": "1.8.0b3"
+       "default": "1.9.0a1"
    },
    "generated_at": {
        "type": "string"

@@ -4388,18 +4388,10 @@
        "default": null
    },
    "primary_key": {
-       "anyOf": [
-           {
-               "type": "array",
-               "items": {
-                   "type": "string"
-               }
-           },
-           {
-               "type": "null"
-           }
-       ],
-       "default": null
+       "type": "array",
+       "items": {
+           "type": "string"
+       }
    }
},
"additionalProperties": false,

@@ -8257,6 +8249,12 @@
    },
    "granularity": {
        "enum": [
+           "nanosecond",
+           "microsecond",
+           "millisecond",
+           "second",
+           "minute",
+           "hour",
            "day",
            "week",
            "month",

@@ -8281,6 +8279,12 @@
    "anyOf": [
        {
            "enum": [
+               "nanosecond",
+               "microsecond",
+               "millisecond",
+               "second",
+               "minute",
+               "hour",
                "day",
                "week",
                "month",

@@ -8371,6 +8375,12 @@
    },
    "granularity": {
        "enum": [
+           "nanosecond",
+           "microsecond",
+           "millisecond",
+           "second",
+           "minute",
+           "hour",
            "day",
            "week",
            "month",

@@ -8395,6 +8405,12 @@
    "anyOf": [
        {
            "enum": [
+               "nanosecond",
+               "microsecond",
+               "millisecond",
+               "second",
+               "minute",
+               "hour",
                "day",
                "week",
                "month",

@@ -8442,6 +8458,12 @@
    },
    "granularity": {
        "enum": [
+           "nanosecond",
+           "microsecond",
+           "millisecond",
+           "second",
+           "minute",
+           "hour",
            "day",
            "week",
            "month",

@@ -8466,6 +8488,12 @@
    "anyOf": [
        {
            "enum": [
+               "nanosecond",
+               "microsecond",
+               "millisecond",
+               "second",
+               "minute",
+               "hour",
                "day",
                "week",
                "month",

@@ -8546,6 +8574,12 @@
    },
    "granularity": {
        "enum": [
+           "nanosecond",
+           "microsecond",
+           "millisecond",
+           "second",
+           "minute",
+           "hour",
            "day",
            "week",
            "month",

@@ -8570,6 +8604,12 @@
    "anyOf": [
        {
            "enum": [
+               "nanosecond",
+               "microsecond",
+               "millisecond",
+               "second",
+               "minute",
+               "hour",
                "day",
                "week",
                "month",

@@ -8769,6 +8809,12 @@
    },
    "granularity": {
        "enum": [
+           "nanosecond",
+           "microsecond",
+           "millisecond",
+           "second",
+           "minute",
+           "hour",
            "day",
            "week",
            "month",

@@ -8830,6 +8876,89 @@
                }
            ],
            "default": null
        },
+       "cumulative_type_params": {
+           "anyOf": [
+               {
+                   "type": "object",
+                   "title": "CumulativeTypeParams",
+                   "properties": {
+                       "window": {
+                           "anyOf": [
+                               {
+                                   "type": "object",
+                                   "title": "MetricTimeWindow",
+                                   "properties": {
+                                       "count": {
+                                           "type": "integer"
+                                       },
+                                       "granularity": {
+                                           "enum": [
+                                               "nanosecond",
+                                               "microsecond",
+                                               "millisecond",
+                                               "second",
+                                               "minute",
+                                               "hour",
+                                               "day",
+                                               "week",
+                                               "month",
+                                               "quarter",
+                                               "year"
+                                           ]
+                                       }
+                                   },
+                                   "additionalProperties": false,
+                                   "required": [
+                                       "count",
+                                       "granularity"
+                                   ]
+                               },
+                               {
+                                   "type": "null"
+                               }
+                           ],
+                           "default": null
+                       },
+                       "grain_to_date": {
+                           "anyOf": [
+                               {
+                                   "enum": [
+                                       "nanosecond",
+                                       "microsecond",
+                                       "millisecond",
+                                       "second",
+                                       "minute",
+                                       "hour",
+                                       "day",
+                                       "week",
+                                       "month",
+                                       "quarter",
+                                       "year"
+                                   ]
+                               },
+                               {
+                                   "type": "null"
+                               }
+                           ],
+                           "default": null
+                       },
+                       "period_agg": {
+                           "enum": [
+                               "first",
+                               "last",
+                               "average"
+                           ],
+                           "default": "first"
+                       }
+                   },
+                   "additionalProperties": false
+               },
+               {
+                   "type": "null"
+               }
+           ],
+           "default": null
+       }
    },
    "additionalProperties": false

@@ -13453,18 +13582,10 @@
        "default": null
    },
    "primary_key": {
-       "anyOf": [
-           {
-               "type": "array",
-               "items": {
-                   "type": "string"
-               }
-           },
-           {
-               "type": "null"
-           }
-       ],
-       "default": null
+       "type": "array",
+       "items": {
+           "type": "string"
+       }
    }
},
"additionalProperties": false,

@@ -17104,6 +17225,12 @@
    },
    "granularity": {
        "enum": [
+           "nanosecond",
+           "microsecond",
+           "millisecond",
+           "second",
+           "minute",
+           "hour",
            "day",
            "week",
            "month",

@@ -17128,6 +17255,12 @@
    "anyOf": [
        {
            "enum": [
+               "nanosecond",
+               "microsecond",
+               "millisecond",
+               "second",
+               "minute",
+               "hour",
                "day",
                "week",
                "month",

@@ -17218,6 +17351,12 @@
    },
    "granularity": {
        "enum": [
+           "nanosecond",
+           "microsecond",
+           "millisecond",
+           "second",
+           "minute",
+           "hour",
            "day",
            "week",
            "month",

@@ -17242,6 +17381,12 @@
    "anyOf": [
        {
            "enum": [
+               "nanosecond",
+               "microsecond",
+               "millisecond",
+               "second",
+               "minute",
+               "hour",
                "day",
                "week",
                "month",

@@ -17289,6 +17434,12 @@
    },
    "granularity": {
        "enum": [
+           "nanosecond",
+           "microsecond",
+           "millisecond",
+           "second",
+           "minute",
+           "hour",
            "day",
            "week",
            "month",

@@ -17313,6 +17464,12 @@
    "anyOf": [
        {
            "enum": [
+               "nanosecond",
+               "microsecond",
+               "millisecond",
+               "second",
+               "minute",
+               "hour",
                "day",
                "week",
                "month",

@@ -17393,6 +17550,12 @@
    },
    "granularity": {
        "enum": [
+           "nanosecond",
+           "microsecond",
+           "millisecond",
+           "second",
+           "minute",
+           "hour",
            "day",
            "week",
            "month",

@@ -17417,6 +17580,12 @@
    "anyOf": [
        {
            "enum": [
+               "nanosecond",
+               "microsecond",
+               "millisecond",
+               "second",
+               "minute",
+               "hour",
                "day",
                "week",
                "month",

@@ -17616,6 +17785,12 @@
    },
    "granularity": {
        "enum": [
+           "nanosecond",
+           "microsecond",
+           "millisecond",
+           "second",
+           "minute",
+           "hour",
            "day",
            "week",
            "month",

@@ -17677,6 +17852,89 @@
                }
            ],
            "default": null
        },
+       "cumulative_type_params": {
+           "anyOf": [
+               {
+                   "type": "object",
+                   "title": "CumulativeTypeParams",
+                   "properties": {
+                       "window": {
+                           "anyOf": [
+                               {
+                                   "type": "object",
+                                   "title": "MetricTimeWindow",
+                                   "properties": {
+                                       "count": {
+                                           "type": "integer"
+                                       },
+                                       "granularity": {
+                                           "enum": [
+                                               "nanosecond",
+                                               "microsecond",
+                                               "millisecond",
+                                               "second",
+                                               "minute",
+                                               "hour",
+                                               "day",
+                                               "week",
+                                               "month",
+                                               "quarter",
+                                               "year"
+                                           ]
+                                       }
+                                   },
+                                   "additionalProperties": false,
+                                   "required": [
+                                       "count",
+                                       "granularity"
+                                   ]
+                               },
+                               {
+                                   "type": "null"
+                               }
+                           ],
+                           "default": null
+                       },
+                       "grain_to_date": {
+                           "anyOf": [
+                               {
+                                   "enum": [
+                                       "nanosecond",
+                                       "microsecond",
+                                       "millisecond",
+                                       "second",
+                                       "minute",
+                                       "hour",
+                                       "day",
+                                       "week",
+                                       "month",
+                                       "quarter",
+                                       "year"
+                                   ]
+                               },
+                               {
+                                   "type": "null"
+                               }
+                           ],
+                           "default": null
+                       },
+                       "period_agg": {
+                           "enum": [
+                               "first",
+                               "last",
+                               "average"
+                           ],
+                           "default": "first"
+                       }
+                   },
+                   "additionalProperties": false
+               },
+               {
+                   "type": "null"
+               }
+           ],
+           "default": null
+       }
    },
    "additionalProperties": false

@@ -17930,26 +18188,7 @@
    "type": "string"
},
"resource_type": {
-   "enum": [
-       "model",
-       "analysis",
-       "test",
-       "snapshot",
-       "operation",
-       "seed",
-       "rpc",
-       "sql_operation",
-       "doc",
-       "source",
-       "macro",
-       "exposure",
-       "metric",
-       "group",
-       "saved_query",
-       "semantic_model",
-       "unit_test",
-       "fixture"
-   ]
+   "const": "saved_query"
},
"package_name": {
    "type": "string"

@@ -18741,6 +18980,12 @@
    "properties": {
        "time_granularity": {
            "enum": [
+               "nanosecond",
+               "microsecond",
+               "millisecond",
+               "second",
+               "minute",
+               "hour",
                "day",
                "week",
                "month",

@@ -19468,26 +19713,7 @@
    "type": "string"
},
"resource_type": {
-   "enum": [
-       "model",
-       "analysis",
-       "test",
-       "snapshot",
-       "operation",
-       "seed",
-       "rpc",
-       "sql_operation",
-       "doc",
-       "source",
-       "macro",
-       "exposure",
-       "metric",
-       "group",
-       "saved_query",
-       "semantic_model",
-       "unit_test",
-       "fixture"
-   ]
+   "const": "saved_query"
},
"package_name": {
    "type": "string"

@@ -20286,6 +20512,12 @@
    "properties": {
        "time_granularity": {
            "enum": [
+               "nanosecond",
+               "microsecond",
+               "millisecond",
+               "second",
+               "minute",
+               "hour",
                "day",
                "week",
                "month",

@@ -12,7 +12,7 @@
    },
    "dbt_version": {
        "type": "string",
-       "default": "1.8.0a1"
+       "default": "1.9.0a1"
    },
    "generated_at": {
        "type": "string"

@@ -12,7 +12,7 @@
    },
    "dbt_version": {
        "type": "string",
-       "default": "1.8.0a1"
+       "default": "1.9.0a1"
    },
    "generated_at": {
        "type": "string"
@@ -1,6 +1,5 @@
import pytest

-from dbt.tests.fixtures.project import write_project_files
from dbt.tests.util import check_relations_equal, run_dbt

tests__get_columns_in_relation_sql = """

@@ -73,16 +72,6 @@ class BaseAdapterMethod:
        "model.sql": models__model_sql,
    }

-   @pytest.fixture(scope="class")
-   def project_files(
-       self,
-       project_root,
-       tests,
-       models,
-   ):
-       write_project_files(project_root, "tests", tests)
-       write_project_files(project_root, "models", models)

    @pytest.fixture(scope="class")
    def project_config_update(self):
        return {
@@ -4,7 +4,8 @@ from pathlib import Path
import pytest
import yaml

-from dbt.exceptions import ProjectContractError
+from dbt.cli.main import dbtRunner
+from dbt.exceptions import DbtProjectError, ProjectContractError
from dbt.tests.util import run_dbt, update_config_file, write_config_file

simple_model_sql = """

@@ -118,3 +119,51 @@ class TestProjectDbtCloudConfigString:
        with pytest.raises(ProjectContractError) as excinfo:
            run_dbt()
        assert expected_err in str(excinfo.value)


+class TestVersionSpecifierChecksComeBeforeYamlValidation:
+   def test_version_specifier_checks_before_yaml_validation(self, project) -> None:
+       runner = dbtRunner()
+
+       # if no version specifier error, we should get a yaml validation error
+       config_update = {"this-is-not-a-valid-key": "my-value-for-invalid-key"}
+       update_config_file(config_update, "dbt_project.yml")
+       result = runner.invoke(["parse"])
+       assert result.exception is not None
+       assert isinstance(result.exception, ProjectContractError)
+       assert "Additional properties are not allowed" in str(result.exception)
+
+       # add bad version specifier, and assert we get the error for that
+       update_config_file({"require-dbt-version": [">0.0.0", "<=0.0.1"]}, "dbt_project.yml")
+       result = runner.invoke(["parse"])
+       assert result.exception is not None
+       assert isinstance(result.exception, DbtProjectError)
+       assert "This version of dbt is not supported" in str(result.exception)
+
+
+class TestArchiveNotAllowed:
+   """At one point in time we supported an 'archive' key in projects, but no longer"""
+
+   def test_archive_not_allowed(self, project):
+       runner = dbtRunner()
+
+       config_update = {
+           "archive": {
+               "source_schema": "a",
+               "target_schema": "b",
+               "tables": [
+                   {
+                       "source_table": "seed",
+                       "target_table": "archive_actual",
+                       "updated_at": "updated_at",
+                       "unique_key": """id || '-' || first_name""",
+                   },
+               ],
+           }
+       }
+       update_config_file(config_update, "dbt_project.yml")
+
+       result = runner.invoke(["parse"])
+       assert result.exception is not None
+       assert isinstance(result.exception, ProjectContractError)
+       assert "Additional properties are not allowed" in str(result.exception)
@@ -2,7 +2,6 @@ import os

import pytest

-from dbt.exceptions import ParsingError
from dbt.tests.util import (
    check_relations_equal,
    run_dbt,

@@ -120,7 +119,7 @@ class TestInvalidSnapshotsMaterializationProj(object):
        snapshots_dir = os.path.join(project.project_root, "snapshots")
        write_file(simple_snapshot, snapshots_dir, "mysnapshot.sql")

-       with pytest.raises(ParsingError):
+       with pytest.raises(ValidationError):
            run_dbt()
@@ -249,3 +249,23 @@ class TestSchemaFileConfigs:
        write_file(extra_alt__untagged2_yml, project.project_root, "models", "untagged.yml")
        with pytest.raises(CompilationError):
            run_dbt(["run"])


+list_schema_yml = """
+- name: my_name
+- name: alt_name
+"""
+
+
+class TestListSchemaFile:
+   @pytest.fixture(scope="class")
+   def models(self):
+       return {
+           "my_model.sql": "select 1 as id",
+           "schema.yml": list_schema_yml,
+       }
+
+   def test_list_schema(self, project):
+       with pytest.raises(ParsingError) as excinfo:
+           run_dbt(["run"])
+       assert "Dictionary expected" in str(excinfo.value)
@@ -53,9 +53,8 @@ class TestWarnErrorOptionsFromCLI:
        assert_deprecation_warning(result, catcher)

        catcher.flush()
-       runner.invoke(
-           ["run", "--warn-error-options", "{'include': 'all', 'silence': ['DeprecatedModel']}"]
-       )
+       result = runner.invoke(["run", "--warn-error-options", "{'silence': ['DeprecatedModel']}"])
+       assert result.success
        assert len(catcher.caught_events) == 0

    def test_can_raise_warning_to_error(

@@ -131,13 +130,12 @@ class TestWarnErrorOptionsFromProject:
        result = runner.invoke(["run"])
        assert_deprecation_warning(result, catcher)

-       silence_options = {
-           "flags": {"warn_error_options": {"include": "all", "silence": ["DeprecatedModel"]}}
-       }
+       silence_options = {"flags": {"warn_error_options": {"silence": ["DeprecatedModel"]}}}
        update_config_file(silence_options, project_root, "dbt_project.yml")

        catcher.flush()
-       runner.invoke(["run"])
+       result = runner.invoke(["run"])
+       assert result.success
        assert len(catcher.caught_events) == 0

    def test_can_raise_warning_to_error(
tests/functional/data_tests/test_hooks.py (new file, 111 lines)
@@ -0,0 +1,111 @@
from unittest import mock

import pytest

from dbt.tests.util import run_dbt, run_dbt_and_capture
from dbt_common.exceptions import CompilationError

orders_csv = """order_id,order_date,customer_id
1,2024-06-01,1001
2,2024-06-02,1002
3,2024-06-03,1003
4,2024-06-04,1004
"""


orders_model_sql = """
with source as (
    select
        order_id,
        order_date,
        customer_id
    from {{ ref('seed_orders') }}
),
final as (
    select
        order_id,
        order_date,
        customer_id
    from source
)
select * from final
"""


orders_test_sql = """
select *
from {{ ref('orders') }}
where order_id is null
"""


class BaseSingularTestHooks:
    @pytest.fixture(scope="class")
    def seeds(self):
        return {"seed_orders.csv": orders_csv}

    @pytest.fixture(scope="class")
    def models(self):
        return {"orders.sql": orders_model_sql}

    @pytest.fixture(scope="class")
    def tests(self):
        return {"orders_test.sql": orders_test_sql}


class TestSingularTestPreHook(BaseSingularTestHooks):
    def test_data_test_runs_adapter_pre_hook_pass(self, project):
        results = run_dbt(["seed"])
        assert len(results) == 1

        results = run_dbt(["run"])
        assert len(results) == 1

        mock_pre_model_hook = mock.Mock()
        with mock.patch.object(type(project.adapter), "pre_model_hook", mock_pre_model_hook):
            results = run_dbt(["test"], expect_pass=True)
            assert len(results) == 1
            mock_pre_model_hook.assert_called_once()

    def test_data_test_runs_adapter_pre_hook_fails(self, project):
        results = run_dbt(["seed"])
        assert len(results) == 1

        results = run_dbt(["run"])
        assert len(results) == 1

        mock_pre_model_hook = mock.Mock()
        mock_pre_model_hook.side_effect = CompilationError("exception from adapter.pre_model_hook")
        with mock.patch.object(type(project.adapter), "pre_model_hook", mock_pre_model_hook):
            (_, log_output) = run_dbt_and_capture(["test"], expect_pass=False)
            assert "exception from adapter.pre_model_hook" in log_output


class TestSingularTestPostHook(BaseSingularTestHooks):
    def test_data_test_runs_adapter_post_hook_pass(self, project):
        results = run_dbt(["seed"])
        assert len(results) == 1

        results = run_dbt(["run"])
        assert len(results) == 1

        mock_post_model_hook = mock.Mock()
        with mock.patch.object(type(project.adapter), "post_model_hook", mock_post_model_hook):
            results = run_dbt(["test"], expect_pass=True)
            assert len(results) == 1
            mock_post_model_hook.assert_called_once()

    def test_data_test_runs_adapter_post_hook_fails(self, project):
        results = run_dbt(["seed"])
        assert len(results) == 1

        results = run_dbt(["run"])
        assert len(results) == 1

        mock_post_model_hook = mock.Mock()
        mock_post_model_hook.side_effect = CompilationError(
            "exception from adapter.post_model_hook"
        )
        with mock.patch.object(type(project.adapter), "post_model_hook", mock_post_model_hook):
            (_, log_output) = run_dbt_and_capture(["test"], expect_pass=False)
            assert "exception from adapter.post_model_hook" in log_output
@@ -36,6 +36,9 @@ class TestDbtRunner:
         res = dbt.invoke(["deps", "--warn-error", "--warn-error-options", '{"include": "all"}'])
         assert type(res.exception) == DbtUsageException
 
+        res = dbt.invoke(["compile", "--select", "models", "--inline", "select 1 as id"])
+        assert type(res.exception) == DbtUsageException
+
     def test_invalid_command(self, dbt: dbtRunner) -> None:
         res = dbt.invoke(["invalid-command"])
         assert type(res.exception) == DbtUsageException
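For reference, the dbt object in this test is a dbtRunner from dbt's programmatic API, and the same flag-conflict check can be reproduced outside the test suite. A sketch:

from dbt.cli.main import dbtRunner

runner = dbtRunner()
# --select and --inline are mutually exclusive, as exercised above
res = runner.invoke(["compile", "--select", "models", "--inline", "select 1 as id"])
if not res.success:
    # for invalid flag combinations, res.exception is a DbtUsageException
    print(type(res.exception).__name__)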
@@ -108,6 +108,25 @@ models:
      data_type: text
"""

disabled_contract_schema_yml = """
version: 2
models:
  - name: table_model
    config:
      contract:
        enforced: True
      enabled: False
    columns:
      - name: id
        data_type: integer
        data_tests:
          - unique:
              severity: error
          - not_null
      - name: name
        data_type: text
"""

modified_contract_schema_yml = """
version: 2
models:
@@ -126,7 +145,7 @@ models:
       data_type: text
 """
 
-disabled_contract_schema_yml = """
+unenforced_contract_schema_yml = """
 version: 2
 models:
   - name: table_model
@@ -144,6 +163,25 @@ models:
      data_type: text
"""

disabled_unenforced_contract_schema_yml = """
version: 2
models:
  - name: table_model
    config:
      contract:
        enforced: False
      enabled: False
    columns:
      - name: id
        data_type: integer
        data_tests:
          - unique:
              severity: error
          - not_null
      - name: name
        data_type: text
"""

versioned_no_contract_schema_yml = """
version: 2
models:
@@ -182,6 +220,27 @@ models:
      data_type: text
"""

disabled_versioned_contract_schema_yml = """
version: 2
models:
  - name: table_model
    config:
      contract:
        enforced: True
      enabled: False
    versions:
      - v: 1
    columns:
      - name: id
        data_type: integer
        data_tests:
          - unique:
              severity: error
          - not_null
      - name: name
        data_type: text
"""

versioned_modified_contract_schema_yml = """
version: 2
models:
@@ -202,7 +261,28 @@ models:
       data_type: text
 """
 
-versioned_disabled_contract_schema_yml = """
+disabled_versioned_unenforced_contract_schema_yml = """
+version: 2
+models:
+  - name: table_model
+    config:
+      contract:
+        enforced: False
+      enabled: False
+    versions:
+      - v: 1
+    columns:
+      - name: id
+        data_type: integer
+        data_tests:
+          - unique:
+              severity: error
+          - not_null
+      - name: name
+        data_type: text
+"""
+
+versioned_unenforced_contract_schema_yml = """
 version: 2
 models:
   - name: table_model
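The renames in these fixtures separate two knobs that the old "disabled" naming conflated: unenforcing a model's contract (config.contract.enforced) versus disabling the model itself (config.enabled). A stripped-down sketch of the two, as hypothetical fixtures that are not part of this diff:

# "unenforced": the model still builds, but its contract no longer applies
minimal_unenforced_schema_yml = """
version: 2
models:
  - name: table_model
    config:
      contract:
        enforced: False
"""

# "disabled": the model is removed from the enabled graph entirely
minimal_disabled_schema_yml = """
version: 2
models:
  - name: table_model
    config:
      enabled: False
"""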
@@ -8,6 +8,7 @@ import pytest
 from dbt.exceptions import CompilationError, ContractBreakingChangeError
 from dbt.tests.util import (
     get_manifest,
+    rm_file,
     run_dbt,
     run_dbt_and_capture,
     update_config_file,
@@ -17,6 +18,9 @@ from tests.functional.defer_state.fixtures import (
     constraint_schema_yml,
     contract_schema_yml,
     disabled_contract_schema_yml,
+    disabled_unenforced_contract_schema_yml,
+    disabled_versioned_contract_schema_yml,
+    disabled_versioned_unenforced_contract_schema_yml,
     ephemeral_model_sql,
     exposures_yml,
     infinite_macros_sql,
@@ -33,10 +37,11 @@ from tests.functional.defer_state.fixtures import (
     table_model_now_incremental_sql,
     table_model_now_view_sql,
     table_model_sql,
+    unenforced_contract_schema_yml,
     versioned_contract_schema_yml,
-    versioned_disabled_contract_schema_yml,
     versioned_modified_contract_schema_yml,
     versioned_no_contract_schema_yml,
+    versioned_unenforced_contract_schema_yml,
     view_model_now_table_sql,
     view_model_sql,
 )
@@ -507,7 +512,7 @@ class TestChangedContractUnversioned(BaseModifiedState):
     MODEL_UNIQUE_ID = "model.test.table_model"
     CONTRACT_SCHEMA_YML = contract_schema_yml
     MODIFIED_SCHEMA_YML = modified_contract_schema_yml
-    DISABLED_SCHEMA_YML = disabled_contract_schema_yml
+    UNENFORCED_SCHEMA_YML = unenforced_contract_schema_yml
     NO_CONTRACT_SCHEMA_YML = no_contract_schema_yml
 
     def test_changed_contract(self, project):
@@ -570,8 +575,8 @@ class TestChangedContractUnversioned(BaseModifiedState):
         expected_warning = "While comparing to previous project state, dbt detected a breaking change to an unversioned model"
         expected_change = "Contract enforcement was removed"
 
-        # Now disable the contract. Should throw a warning - force warning into an error.
-        write_file(self.DISABLED_SCHEMA_YML, "models", "schema.yml")
+        # Now unenforce the contract. Should throw a warning - force warning into an error.
+        write_file(self.UNENFORCED_SCHEMA_YML, "models", "schema.yml")
         with pytest.raises(CompilationError):
             _, logs = run_dbt_and_capture(
                 [
@@ -591,7 +596,7 @@ class TestChangedContractVersioned(BaseModifiedState):
     MODEL_UNIQUE_ID = "model.test.table_model.v1"
     CONTRACT_SCHEMA_YML = versioned_contract_schema_yml
     MODIFIED_SCHEMA_YML = versioned_modified_contract_schema_yml
-    DISABLED_SCHEMA_YML = versioned_disabled_contract_schema_yml
+    UNENFORCED_SCHEMA_YML = versioned_unenforced_contract_schema_yml
     NO_CONTRACT_SCHEMA_YML = versioned_no_contract_schema_yml
 
     def test_changed_contract_versioned(self, project):
@@ -643,12 +648,138 @@ class TestChangedContractVersioned(BaseModifiedState):
         with pytest.raises(ContractBreakingChangeError):
             results = run_dbt(["run", "--models", "state:modified.contract", "--state", "./state"])
 
-        # Now disable the contract. Should raise an error.
-        write_file(self.DISABLED_SCHEMA_YML, "models", "schema.yml")
+        # Now unenforce the contract. Should raise an error.
+        write_file(self.UNENFORCED_SCHEMA_YML, "models", "schema.yml")
         with pytest.raises(ContractBreakingChangeError):
             results = run_dbt(["run", "--models", "state:modified.contract", "--state", "./state"])
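A note on the two failure modes above: the versioned case raises ContractBreakingChangeError directly, while the unversioned case only warns, so a test that needs a hard failure promotes the warning using the same --warn-error-options value shown earlier in this diff. A sketch:

# Sketch: force all warnings into errors so the unenforced-contract warning
# surfaces as a CompilationError, mirroring the pattern in the tests above.
with pytest.raises(CompilationError):
    run_dbt(
        [
            "run",
            "--warn-error-options",
            '{"include": "all"}',
            "--models",
            "state:modified.contract",
            "--state",
            "./state",
        ]
    )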
class TestDeleteUnversionedContractedModel(BaseModifiedState):
    MODEL_UNIQUE_ID = "model.test.table_model"
    CONTRACT_SCHEMA_YML = contract_schema_yml

    def test_delete_unversioned_contracted_model(self, project):
        # ensure table_model is contracted
        write_file(self.CONTRACT_SCHEMA_YML, "models", "schema.yml")
        self.run_and_save_state()

        # delete the unversioned contracted model
        rm_file(project.project_root, "models", "table_model.sql")

        # since the model is unversioned, this raises a warning but not an error
        _, logs = run_dbt_and_capture(
            ["run", "--models", "state:modified.contract", "--state", "./state"]
        )

        expected_warning = "While comparing to previous project state, dbt detected a breaking change to an unversioned model"
        expected_change = "Contracted model 'model.test.table_model' was deleted or renamed"
        assert expected_warning in logs
        assert expected_change in logs


class TestDeleteVersionedContractedModel(BaseModifiedState):
    MODEL_UNIQUE_ID = "model.test.table_model.v1"
    CONTRACT_SCHEMA_YML = versioned_contract_schema_yml

    def test_delete_versioned_contracted_model(self, project):
        # ensure table_model is versioned + contracted
        write_file(self.CONTRACT_SCHEMA_YML, "models", "schema.yml")
        self.run_and_save_state()

        # delete the versioned contracted model
        rm_file(project.project_root, "models", "table_model.sql")

        # since the model is versioned, this raises an error
        with pytest.raises(ContractBreakingChangeError) as e:
            run_dbt(["run", "--models", "state:modified.contract", "--state", "./state"])

        assert "Contracted model 'model.test.table_model.v1' was deleted or renamed." in str(
            e.value
        )


class TestDisableUnversionedContractedModel(BaseModifiedState):
    MODEL_UNIQUE_ID = "model.test.table_model"
    CONTRACT_SCHEMA_YML = contract_schema_yml
    DISABLED_CONTRACT_SCHEMA_YML = disabled_contract_schema_yml

    def test_disable_unversioned_contracted_model(self, project):
        # ensure table_model is contracted and enabled
        write_file(self.CONTRACT_SCHEMA_YML, "models", "schema.yml")
        self.run_and_save_state()

        # disable the unversioned + contracted model
        write_file(self.DISABLED_CONTRACT_SCHEMA_YML, "models", "schema.yml")

        # since the model is unversioned, this raises a warning but not an error
        _, logs = run_dbt_and_capture(
            ["run", "--models", "state:modified.contract", "--state", "./state"]
        )

        expected_warning = "While comparing to previous project state, dbt detected a breaking change to an unversioned model"
        expected_change = "Contracted model 'model.test.table_model' was disabled"
        assert expected_warning in logs
        assert expected_change in logs


class TestDisableVersionedContractedModel(BaseModifiedState):
    MODEL_UNIQUE_ID = "model.test.table_model.v1"
    CONTRACT_SCHEMA_YML = versioned_contract_schema_yml
    DISABLED_CONTRACT_SCHEMA_YML = disabled_versioned_contract_schema_yml

    def test_disable_versioned_contracted_model(self, project):
        # ensure table_model is versioned + contracted
        write_file(self.CONTRACT_SCHEMA_YML, "models", "schema.yml")
        self.run_and_save_state()

        # disable the versioned + contracted model
        write_file(self.DISABLED_CONTRACT_SCHEMA_YML, "models", "schema.yml")

        # since the model is versioned, this raises an error
        with pytest.raises(ContractBreakingChangeError) as e:
            run_dbt(["run", "--models", "state:modified.contract", "--state", "./state"])

        assert "Contracted model 'model.test.table_model.v1' was disabled." in str(e.value)


class TestDisableUnversionedUncontractedModel(BaseModifiedState):
    MODEL_UNIQUE_ID = "model.test.table_model"
    NO_CONTRACT_SCHEMA_YML = unenforced_contract_schema_yml
    DISABLED_NO_CONTRACT_SCHEMA_YML = disabled_unenforced_contract_schema_yml

    def test_disable_unversioned_uncontracted_model(self, project):
        # ensure table_model is not contracted
        write_file(self.NO_CONTRACT_SCHEMA_YML, "models", "schema.yml")
        self.run_and_save_state()

        # disable the uncontracted model
        write_file(self.DISABLED_NO_CONTRACT_SCHEMA_YML, "models", "schema.yml")

        # since the contract was never enforced, no warning or error is raised
        _, logs = run_dbt_and_capture(
            ["run", "--models", "state:modified.contract", "--state", "./state"]
        )

        assert "breaking change" not in logs.lower()


class TestDisableVersionedUncontractedModel(BaseModifiedState):
    MODEL_UNIQUE_ID = "model.test.table_model.v1"
    NO_CONTRACT_SCHEMA_YML = versioned_unenforced_contract_schema_yml
    DISABLED_NO_CONTRACT_SCHEMA_YML = disabled_versioned_unenforced_contract_schema_yml

    def test_disable_versioned_uncontracted_model(self, project):
        # ensure table_model is not contracted
        write_file(self.NO_CONTRACT_SCHEMA_YML, "models", "schema.yml")
        self.run_and_save_state()

        # disable the uncontracted model
        write_file(self.DISABLED_NO_CONTRACT_SCHEMA_YML, "models", "schema.yml")

        # since the contract was never enforced, no warning or error is raised
        run_dbt_and_capture(["run", "--models", "state:modified.contract", "--state", "./state"])


class TestChangedConstraintUnversioned(BaseModifiedState):
    def test_changed_constraint(self, project):
        self.run_and_save_state()
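BaseModifiedState.run_and_save_state is defined earlier in this module and is not shown in the diff; a minimal sketch of what such a helper does (hypothetical body, assuming dbt's default target/ layout):

import os
import shutil


def run_and_save_state(self):
    # build the project, then snapshot the manifest so later invocations can
    # compare against it via --state ./state (run_dbt from dbt.tests.util)
    results = run_dbt(["run"])
    assert len(results) >= 1
    os.makedirs("state", exist_ok=True)
    shutil.copyfile("target/manifest.json", "state/manifest.json")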
@@ -39,7 +39,7 @@ class TestDepsOptions(object):
 - package: fivetran/fivetran_utils
   version: 0.4.7
 - package: dbt-labs/dbt_utils
-  version: 1.1.1
+  version: 1.2.0
 sha1_hash: 71304bca2138cf8004070b3573a1e17183c0c1a8
 """
 )
@@ -56,7 +56,7 @@ sha1_hash: 71304bca2138cf8004070b3573a1e17183c0c1a8
 - package: fivetran/fivetran_utils
   version: 0.4.7
 - package: dbt-labs/dbt_utils
-  version: 1.1.1
+  version: 1.2.0
 sha1_hash: 71304bca2138cf8004070b3573a1e17183c0c1a8
 """
 )
@@ -11,7 +11,13 @@ import dbt.config
 import dbt.exceptions
 import dbt_common.exceptions
 import dbt_common.semver as semver
-from dbt.tests.util import check_relations_equal, run_dbt, run_dbt_and_capture
+from dbt import deprecations
+from dbt.tests.util import (
+    check_relations_equal,
+    run_dbt,
+    run_dbt_and_capture,
+    write_file,
+)
 from tests.functional.utils import up_one
 
 # todo: make self.unique_schema a fixture
@@ -353,3 +359,35 @@ class TestSimpleDependencyDuplicateName(BaseDependencyTest):

        # needed to avoid compilation errors from duplicate package names in test autocleanup
        run_dbt(["clean"])


source_with_tests = """
sources:
  - name: my_source
    schema: invalid_schema
    tables:
      - name: my_table
  - name: seed_source
    schema: "{{ var('schema_override', target.schema) }}"
    tables:
      - name: "seed"
        identifier: "seed_subpackage_generate_alias_name"
        columns:
          - name: id
            tests:
              - unique
              - not_null
"""


class TestDependencyTestsConfig(BaseDependencyTest):
    def test_dependency_tests_config(self, project):
        run_dbt(["deps"])
        # Write a file to local_dependency with a "tests" config
        write_file(
            source_with_tests, project.project_root, "local_dependency", "models", "schema.yml"
        )
        run_dbt(["parse"])
        # Check that project-test-config is NOT in active deprecations, since "tests" is only
        # in a dependent project.
        assert "project-test-config" not in deprecations.active_deprecations
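For contrast, the positive case would assert that the deprecation does fire when a "tests" config lives in the root project rather than a dependency. A hypothetical sketch, with the deprecation name taken from the assertion above:

# Sketch only: assumes the root project's schema.yml uses a "tests:" config.
run_dbt(["parse"])
assert "project-test-config" in deprecations.active_deprecations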
0
tests/functional/fixtures/__init__.py
Normal file
32
tests/functional/fixtures/happy_path_fixture.py
Normal file
@@ -0,0 +1,32 @@
import os
from distutils.dir_util import copy_tree

import pytest


def delete_files_in_directory(directory_path):
    try:
        with os.scandir(directory_path) as entries:
            for entry in entries:
                if entry.is_file():
                    os.unlink(entry.path)
        print("All files deleted successfully.")
    except OSError:
        print("Error occurred while deleting files.")


@pytest.fixture(scope="class")
def happy_path_project_files(project_root):
    # copy fixture files to the project root
    delete_files_in_directory(project_root)
    copy_tree(
        os.path.dirname(os.path.realpath(__file__)) + "/happy_path_project", str(project_root)
    )


# We run project_setup first because it writes out a dbt_project.yml.
# That file is then overwritten by the files in happy_path_project.
@pytest.fixture(scope="class")
def happy_path_project(project_setup, happy_path_project_files):
    # A fixture that gives functional tests the project living in happy_path_project
    return project_setup
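One caveat on the fixture above: distutils was deprecated in Python 3.10 and removed in 3.12, so copy_tree will eventually need replacing. A sketch of an equivalent copy using only shutil (illustrative, not part of this diff):

import os
import shutil


def copy_happy_path_project(project_root: str) -> None:
    # dirs_exist_ok=True mirrors copy_tree's merge-into-existing behavior
    src = os.path.join(os.path.dirname(os.path.realpath(__file__)), "happy_path_project")
    shutil.copytree(src, str(project_root), dirs_exist_ok=True)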
@@ -0,0 +1 @@
select 4 as id
17
tests/functional/fixtures/happy_path_project/dbt_project.yml
Normal file
@@ -0,0 +1,17 @@
analysis-paths:
  - analyses
config-version: 2
flags:
  send_anonymous_usage_stats: false
macro-paths:
  - macros
name: test
profile: test
seed-paths:
  - seeds
seeds:
  quote_columns: false
snapshot-paths:
  - snapshots
test-paths:
  - tests
@@ -0,0 +1,7 @@
{% macro cool_macro() %}
wow!
{% endmacro %}

{% macro other_cool_macro(a, b) %}
cool!
{% endmacro %}
@@ -0,0 +1,3 @@
{% docs my_docs %}
some docs
{% enddocs %}
@@ -0,0 +1,5 @@
{{ config(materialized='ephemeral') }}

select
    1 as id,
    {{ dbt.date_trunc('day', dbt.current_timestamp()) }} as created_at
@@ -0,0 +1,12 @@
{{
    config(
        materialized = "incremental",
        incremental_strategy = "delete+insert",
    )
}}

select * from {{ ref('seed') }}

{% if is_incremental() %}
where a > (select max(a) from {{ this }})
{% endif %}
@@ -0,0 +1,7 @@
metrics:
  - name: total_outer
    type: simple
    description: The total count of outer
    label: Total Outer
    type_params:
      measure: total_outer_count
Some files were not shown because too many files have changed in this diff.