Mirror of https://github.com/dbt-labs/dbt-core (synced 2025-12-17 19:31:34 +00:00)

Compare commits: 23 commits, arieldbt/p ... jerco/pyth
| Author | SHA1 | Date |
|---|---|---|
|  | 75782a88ae |  |
|  | 3562637984 |  |
|  | 17aca39e1c |  |
|  | 59744f18bb |  |
|  | f1326f526c |  |
|  | 834ac716fd |  |
|  | 0487b96098 |  |
|  | dbd36f06e4 |  |
|  | 38ada8a68e |  |
|  | e58edaab2d |  |
|  | c202e005cd |  |
|  | 8129862b3c |  |
|  | 4e8aa007cf |  |
|  | fe88bfabbf |  |
|  | 5328a64df2 |  |
|  | 87c9974be1 |  |
|  | f3f509da92 |  |
|  | 5e8dcec2c5 |  |
|  | 56783446db |  |
|  | 207cc0383d |  |
|  | 49ecd6a6a4 |  |
|  | c109f39d82 |  |
|  | fd778dceb5 |  |
.changes/unreleased/Docs-20220920-152040.yaml (new file, 7 lines)

```yaml
kind: Docs
body: Refer to exposures by their label by default.
time: 2022-09-20T15:20:40.652948-05:00
custom:
  Author: emmyoop
  Issue: "306"
  PR: "307"
```
.changes/unreleased/Features-20220716-142116.yaml (new file, 7 lines)

```yaml
kind: Features
body: merge_exclude_columns for incremental materialization
time: 2022-07-16T14:21:16.592519-05:00
custom:
  Author: dave-connors-3
  Issue: "5260"
  PR: "5457"
```
.changes/unreleased/Features-20220912-222227.yaml (new file, 7 lines)

```yaml
kind: Features
body: Array macros
time: 2022-09-12T22:22:27.475515-06:00
custom:
  Author: graciegoheen dbeatty10
  Issue: "5520"
  PR: "5823"
```
.changes/unreleased/Features-20220919-112903.yaml (new file, 7 lines)

```yaml
kind: Features
body: add -fr flag shorthand
time: 2022-09-19T11:29:03.774678-05:00
custom:
  Author: dave-connors-3
  Issue: "5878"
  PR: "5879"
```
.changes/unreleased/Features-20220921-145222.yaml (new file, 7 lines)

```yaml
kind: Features
body: Support .dbtignore in project root to ignore certain files being read by dbt
time: 2022-09-21T14:52:22.131627-07:00
custom:
  Author: ChenyuLInx
  Issue: "5733"
  PR: "5897"
```
.changes/unreleased/Features-20220926-130627.yaml (new file, 9 lines)

```yaml
kind: Features
body: This conditionally no-ops warehouse connection at compile depending on an env
  var, disabling introspection/queries during compilation only. This is a temporary
  solution to more complex permissions requirements for the semantic layer.
time: 2022-09-26T13:06:27.591061-05:00
custom:
  Author: racheldaniel
  Issue: "5936"
  PR: "5926"
```
.changes/unreleased/Fixes-20220822-194238.yaml (new file, 7 lines)

```yaml
kind: Fixes
body: Fix typos of comments in core/dbt/adapters/
time: 2022-08-22T19:42:38.593923+09:00
custom:
  Author: yoiki
  Issue: "5690"
  PR: "5693"
```
.changes/unreleased/Fixes-20220922-083926.yaml (new file, 7 lines)

```yaml
kind: Fixes
body: shorthand for full refresh should be one character
time: 2022-09-22T08:39:26.948671-05:00
custom:
  Author: dave-connors-3
  Issue: "5878"
  PR: "5908"
```
.changes/unreleased/Fixes-20220923-143226.yaml (new file, 7 lines)

```yaml
kind: Fixes
body: Fix macro resolution order during static analysis for custom generic tests
time: 2022-09-23T14:32:26.857376+02:00
custom:
  Author: jtcohen6
  Issue: "5720"
  PR: "5907"
```
.changes/unreleased/Fixes-20220923-174504.yaml (new file, 7 lines)

```yaml
kind: Fixes
body: Fix race condition when invoking dbt via lib.py concurrently
time: 2022-09-23T17:45:04.405026-04:00
custom:
  Author: drewbanin
  Issue: "5919"
  PR: "5921"
```
.changes/unreleased/Under the Hood-20220916-154712.yaml (new file, 7 lines)

```yaml
kind: Under the Hood
body: Adding validation for metric expression attribute
time: 2022-09-16T15:47:12.799002-05:00
custom:
  Author: callum-mcdata
  Issue: "5871"
  PR: "5873"
```
.changes/unreleased/Under the Hood-20220920-144842.yaml (new file, 7 lines)

```yaml
kind: Under the Hood
body: Profiling and Adapter Management work with Click CLI
time: 2022-09-20T14:48:42.070256-05:00
custom:
  Author: iknox-fa
  Issue: "5531"
  PR: "5892"
```
.changes/unreleased/Under the Hood-20220923-133525.yaml (new file, 7 lines)

```yaml
kind: Under the Hood
body: Reparse references to deleted metric
time: 2022-09-23T13:35:25.681656-04:00
custom:
  Author: gshank
  Issue: "5444"
  PR: "5920"
```
```diff
@@ -44,7 +44,7 @@ custom:
 footerFormat: |
   {{- $contributorDict := dict }}
   {{- /* any names added to this list should be all lowercase for later matching purposes */}}
-  {{- $core_team := list "emmyoop" "nathaniel-may" "gshank" "leahwicz" "chenyulinx" "stu-k" "iknox-fa" "versusfacit" "mcknight-42" "jtcohen6" "dependabot[bot]" "snyk-bot" }}
+  {{- $core_team := list "peterallenwebb" "emmyoop" "nathaniel-may" "gshank" "leahwicz" "chenyulinx" "stu-k" "iknox-fa" "versusfacit" "mcknight-42" "jtcohen6" "dependabot[bot]" "snyk-bot" }}
   {{- range $change := .Changes }}
   {{- $authorList := splitList " " $change.Custom.Author }}
   {{- /* loop through all authors for a PR */}}
```
.github/workflows/jira-transition.yml (vendored, 3 lines changed)

```diff
@@ -15,6 +15,9 @@ on:
   issues:
     types: [closed, deleted, reopened]
 
+# no special access is needed
+permissions: read-all
+
 jobs:
   call-label-action:
     uses: dbt-labs/jira-actions/.github/workflows/jira-transition.yml@main
```
.github/workflows/release.yml (vendored, 3 lines changed)

```diff
@@ -20,6 +20,9 @@ on:
       description: 'The release version number (i.e. 1.0.0b1)'
       required: true
 
+permissions:
+  contents: write # this is the permission that allows creating a new release
+
 defaults:
   run:
     shell: bash
```
.github/workflows/schema-check.yml (vendored, 3 lines changed)

```diff
@@ -21,6 +21,9 @@ on:
       - "*.latest"
       - "releases/*"
 
+# no special access is needed
+permissions: read-all
+
 env:
   LATEST_SCHEMA_PATH: ${{ github.workspace }}/new_schemas
   SCHEMA_DIFF_ARTIFACT: ${{ github.workspace }}//schema_schanges.txt
```
.github/workflows/stale.yml (vendored, 4 lines changed)

```diff
@@ -3,6 +3,10 @@ on:
   schedule:
     - cron: "30 1 * * *"
 
+permissions:
+  issues: write
+  pull-requests: write
+
 jobs:
   stale:
     runs-on: ubuntu-latest
```
.github/workflows/version-bump.yml (vendored, 4 lines changed)

```diff
@@ -20,6 +20,10 @@ on:
       description: 'The version number to bump to (ex. 1.2.0, 1.3.0b1)'
       required: true
 
+permissions:
+  contents: write
+  pull-requests: write
+
 jobs:
   bump:
     runs-on: ubuntu-latest
```
```diff
@@ -384,7 +384,7 @@ class RelationsCache:
         relation = self.relations.pop(old_key)
         new_key = new_relation.key()
 
-        # relaton has to rename its innards, so it needs the _CachedRelation.
+        # relation has to rename its innards, so it needs the _CachedRelation.
         relation.rename(new_relation)
         # update all the relations that refer to it
         for cached in self.relations.values():
```
```diff
@@ -1,23 +1,17 @@
 import threading
 from contextlib import contextmanager
 from importlib import import_module
-from typing import Type, Dict, Any, List, Optional, Set
 from pathlib import Path
+from typing import Any, Dict, List, Optional, Set, Type
 
-from dbt.exceptions import RuntimeException, InternalException
-from dbt.include.global_project import (
-    PACKAGE_PATH as GLOBAL_PROJECT_PATH,
-    PROJECT_NAME as GLOBAL_PROJECT_NAME,
-)
+from dbt.adapters.base.plugin import AdapterPlugin
+from dbt.adapters.protocol import AdapterConfig, AdapterProtocol, RelationProtocol
+from dbt.contracts.connection import AdapterRequiredConfig, Credentials
 from dbt.events.functions import fire_event
 from dbt.events.types import AdapterImportError, PluginLoadError
-from dbt.contracts.connection import Credentials, AdapterRequiredConfig
-from dbt.adapters.protocol import (
-    AdapterProtocol,
-    AdapterConfig,
-    RelationProtocol,
-)
-from dbt.adapters.base.plugin import AdapterPlugin
+from dbt.exceptions import InternalException, RuntimeException
+from dbt.include.global_project import PACKAGE_PATH as GLOBAL_PROJECT_PATH
+from dbt.include.global_project import PROJECT_NAME as GLOBAL_PROJECT_NAME
 
 Adapter = AdapterProtocol
@@ -217,3 +211,12 @@ def get_adapter_package_names(name: Optional[str]) -> List[str]:
 
 def get_adapter_type_names(name: Optional[str]) -> List[str]:
     return FACTORY.get_adapter_type_names(name)
+
+
+@contextmanager
+def adapter_management():
+    reset_adapters()
+    try:
+        yield
+    finally:
+        cleanup_connections()
```
```diff
@@ -88,7 +88,7 @@ class AdapterProtocol(  # type: ignore[misc]
     ],
 ):
     # N.B. Technically these are ClassVars, but mypy doesn't support putting type vars in a
-    # ClassVar due to the restirctiveness of PEP-526
+    # ClassVar due to the restrictiveness of PEP-526
     # See: https://github.com/python/mypy/issues/5144
     AdapterSpecificConfigs: Type[AdapterConfig_T]
     Column: Type[Column_T]
```
```diff
@@ -1,11 +1,12 @@
 import inspect  # This is temporary for RAT-ing
 import sys
 from copy import copy
 from pprint import pformat as pf  # This is temporary for RAT-ing
 
 import click
+from dbt.adapters.factory import adapter_management
 from dbt.cli import params as p
 from dbt.cli.flags import Flags
 from dbt.profiler import profiler
 
 
 def cli_runner():
@@ -51,9 +52,19 @@ def cli(ctx, **kwargs):
     """An ELT tool for managing your SQL transformations and data models.
     For more documentation on these commands, visit: docs.getdbt.com
     """
-    if kwargs.get("version", False):
+    incomplete_flags = Flags()
+
+    # Profiling
+    if incomplete_flags.RECORD_TIMING_INFO:
+        ctx.with_resource(profiler(enable=True, outfile=incomplete_flags.RECORD_TIMING_INFO))
+
+    # Adapter management
+    ctx.with_resource(adapter_management())
+
+    # Version info
+    if incomplete_flags.VERSION:
         click.echo(f"`version` called\n ctx.params: {pf(ctx.params)}")
-        sys.exit()
+        return
     else:
         del ctx.params["version"]
```
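The two `ctx.with_resource(...)` calls above are the whole lifecycle integration: click enters the context manager and schedules its teardown for when the command exits, even on error. A minimal standalone sketch of that pattern (the `adapter_management` here is a toy stand-in, not dbt's):

```python
import click
from contextlib import contextmanager


@contextmanager
def adapter_management():
    # stand-in for dbt.adapters.factory.adapter_management
    print("reset adapters")
    try:
        yield
    finally:
        print("cleanup connections")


@click.command()
@click.pass_context
def cli(ctx):
    # click calls the context manager's __exit__ when the command finishes
    ctx.with_resource(adapter_management())
    click.echo("command body runs with adapters registered")


if __name__ == "__main__":
    cli()
```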
```diff
@@ -2,6 +2,8 @@ from pathlib import Path, PurePath
 
 import click
+from dbt.cli.option_types import YAML
+from dbt.cli.resolvers import default_project_dir, default_profiles_dir
 
 
 # TODO: The name (reflected in flags) is a correction!
 # The original name was `SEND_ANONYMOUS_USAGE_STATS` and used an env var called "DBT_SEND_ANONYMOUS_USAGE_STATS"
@@ -97,6 +99,7 @@ fail_fast = click.option(
 
 full_refresh = click.option(
     "--full-refresh",
+    "-f",
     envvar="DBT_FULL_REFRESH",
     help="If specified, dbt will drop incremental models and fully-recalculate the incremental table from the model definition.",
     is_flag=True,
@@ -218,16 +221,16 @@ profiles_dir = click.option(
     "--profiles-dir",
     envvar="DBT_PROFILES_DIR",
     help="Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/",
-    default=None,
-    type=click.Path(),
+    default=default_profiles_dir(),
+    type=click.Path(exists=True),
 )
 
 project_dir = click.option(
     "--project-dir",
     envvar=None,
     help="Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.",
-    default=None,
-    type=click.Path(),
+    default=default_project_dir(),
+    type=click.Path(exists=True),
 )
 
 quiet = click.option(
@@ -241,7 +244,7 @@ record_timing_info = click.option(
     "-r",
     envvar=None,
     help="When this option is passed, dbt will output low-level timing stats to the specified file. Example: `--record-timing-info output.profile`",
-    is_flag=True,
+    type=click.Path(exists=False),
 )
 
 resource_type = click.option(
```
core/dbt/cli/resolvers.py (new file, 11 lines)

```python
from pathlib import Path


def default_project_dir():
    paths = list(Path.cwd().parents)
    paths.insert(0, Path.cwd())
    return next((x for x in paths if (x / "dbt_project.yml").exists()), Path.cwd())


def default_profiles_dir():
    return Path.cwd() if (Path.cwd() / "profiles.yml").exists() else Path.home() / ".dbt"
```
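A quick illustration of what `default_project_dir()` does: starting from the current working directory, it returns the nearest directory (cwd included) that contains a `dbt_project.yml`, falling back to cwd. The temp-directory layout below is hypothetical:

```python
import os
import tempfile
from pathlib import Path

root = Path(tempfile.mkdtemp())
(root / "dbt_project.yml").touch()          # project marker at the root
nested = root / "models" / "staging"
nested.mkdir(parents=True)
os.chdir(nested)

paths = [Path.cwd(), *Path.cwd().parents]   # cwd first, then each ancestor
found = next((p for p in paths if (p / "dbt_project.yml").exists()), Path.cwd())
assert found.resolve() == root.resolve()    # nearest ancestor holding the marker wins
```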
```diff
@@ -12,6 +12,7 @@ import tarfile
 import requests
 import stat
 from typing import Type, NoReturn, List, Optional, Dict, Any, Tuple, Callable, Union
+from pathspec import PathSpec  # type: ignore
 
 from dbt.events.functions import fire_event
 from dbt.events.types import (
@@ -36,6 +37,7 @@ def find_matching(
     root_path: str,
     relative_paths_to_search: List[str],
     file_pattern: str,
+    ignore_spec: Optional[PathSpec] = None,
 ) -> List[Dict[str, Any]]:
     """
     Given an absolute `root_path`, a list of relative paths to that
@@ -57,19 +59,30 @@ def find_matching(
     reobj = re.compile(regex, re.IGNORECASE)
 
     for relative_path_to_search in relative_paths_to_search:
+        # potential speedup for ignore_spec
+        # if ignore_spec.matches(relative_path_to_search):
+        #     continue
         absolute_path_to_search = os.path.join(root_path, relative_path_to_search)
         walk_results = os.walk(absolute_path_to_search)
 
         for current_path, subdirectories, local_files in walk_results:
+            # potential speedup for ignore_spec
+            # relative_dir = os.path.relpath(current_path, root_path) + os.sep
+            # if ignore_spec.match(relative_dir):
+            #     continue
             for local_file in local_files:
                 absolute_path = os.path.join(current_path, local_file)
                 relative_path = os.path.relpath(absolute_path, absolute_path_to_search)
+                relative_path_to_root = os.path.join(relative_path_to_search, relative_path)
+
                 modification_time = 0.0
                 try:
                     modification_time = os.path.getmtime(absolute_path)
                 except OSError:
                     fire_event(SystemErrorRetrievingModTime(path=absolute_path))
-                if reobj.match(local_file):
+                if reobj.match(local_file) and (
+                    not ignore_spec or not ignore_spec.match_file(relative_path_to_root)
+                ):
                     matching.append(
                         {
                             "searched_path": relative_path_to_search,
```
```diff
@@ -109,9 +109,15 @@ class MacroResolver:
 
     def get_macro(self, local_package, macro_name):
         local_package_macros = {}
+        # If the macro is explicitly prefixed with an internal namespace
+        # (e.g. 'dbt.some_macro'), look there first
         if local_package in self.internal_package_names:
             local_package_macros = self.internal_packages[local_package]
+        # If the macro is explicitly prefixed with a different package name
+        # (e.g. 'dbt_utils.some_macro'), look there first
         if local_package not in self.internal_package_names and local_package in self.packages:
             local_package_macros = self.packages[local_package]
-        # First: search the local packages for this macro
+        # First: search the specified package for this macro
         if macro_name in local_package_macros:
             return local_package_macros[macro_name]
         # Now look up in the standard search order
```
```diff
@@ -277,11 +277,13 @@ class SchemaSourceFile(BaseSourceFile):
             self.tests[key][name] = []
         self.tests[key][name].append(node_unique_id)
 
+    # this is only used in unit tests
     def remove_tests(self, yaml_key, name):
         if yaml_key in self.tests:
             if name in self.tests[yaml_key]:
                 del self.tests[yaml_key][name]
 
+    # this is only used in tests (unit + functional)
     def get_tests(self, yaml_key, name):
         if yaml_key in self.tests:
             if name in self.tests[yaml_key]:
```
```diff
@@ -33,6 +33,7 @@ from dbt.contracts.graph.parsed import (
     ParsedMacro,
     ParsedDocumentation,
     ParsedSourceDefinition,
     ParsedGenericTestNode,
+    ParsedExposure,
     ParsedMetric,
     HasUniqueID,
@@ -1112,8 +1113,13 @@ class Manifest(MacroMethods, DataClassMessagePackMixin, dbtClassMixin):
     def add_disabled(self, source_file: AnySourceFile, node: CompileResultNode, test_from=None):
         self.add_disabled_nofile(node)
         if isinstance(source_file, SchemaSourceFile):
-            assert test_from
-            source_file.add_test(node.unique_id, test_from)
+            if isinstance(node, ParsedGenericTestNode):
+                assert test_from
+                source_file.add_test(node.unique_id, test_from)
+            if isinstance(node, ParsedMetric):
+                source_file.metrics.append(node.unique_id)
+            if isinstance(node, ParsedExposure):
+                source_file.exposures.append(node.unique_id)
         else:
             source_file.nodes.append(node.unique_id)
```
```diff
@@ -824,8 +824,8 @@ class ParsedMetric(UnparsedBaseNode, HasUniqueID, HasFqn):
     description: str
     label: str
     calculation_method: str
+    expression: str
     timestamp: str
-    expression: str
     filters: List[MetricFilter]
     time_grains: List[str]
     dimensions: List[str]
```

```diff
@@ -487,8 +487,8 @@ class UnparsedMetric(dbtClassMixin, Replaceable):
     label: str
     calculation_method: str
     timestamp: str
+    expression: str
     description: str = ""
-    expression: Union[str, int] = ""
     time_grains: List[str] = field(default_factory=list)
     dimensions: List[str] = field(default_factory=list)
     window: Optional[MetricTime] = None
```
```diff
@@ -50,3 +50,31 @@
     {{ return(result) }}
 
 {% endmacro %}
+
+
+{% macro get_merge_update_columns(merge_update_columns, merge_exclude_columns, dest_columns) %}
+  {{ return(adapter.dispatch('get_merge_update_columns', 'dbt')(merge_update_columns, merge_exclude_columns, dest_columns)) }}
+{% endmacro %}
+
+{% macro default__get_merge_update_columns(merge_update_columns, merge_exclude_columns, dest_columns) %}
+  {%- set default_cols = dest_columns | map(attribute="quoted") | list -%}
+
+  {%- if merge_update_columns and merge_exclude_columns -%}
+    {{ exceptions.raise_compiler_error(
+        'Model cannot specify merge_update_columns and merge_exclude_columns. Please update model to use only one config'
+    )}}
+  {%- elif merge_update_columns -%}
+    {%- set update_columns = merge_update_columns -%}
+  {%- elif merge_exclude_columns -%}
+    {%- set update_columns = [] -%}
+    {%- for column in dest_columns -%}
+      {% if column.column | lower not in merge_exclude_columns | map("lower") | list %}
+        {%- do update_columns.append(column.quoted) -%}
+      {% endif %}
+    {%- endfor -%}
+  {%- else -%}
+    {%- set update_columns = default_cols -%}
+  {%- endif -%}
+
+  {{ return(update_columns) }}
+
+{% endmacro %}
```

```diff
@@ -5,7 +5,9 @@
 {% macro default__get_merge_sql(target, source, unique_key, dest_columns, predicates) -%}
     {%- set predicates = [] if predicates is none else [] + predicates -%}
     {%- set dest_cols_csv = get_quoted_csv(dest_columns | map(attribute="name")) -%}
-    {%- set update_columns = config.get('merge_update_columns', default = dest_columns | map(attribute="quoted") | list) -%}
+    {%- set merge_update_columns = config.get('merge_update_columns') -%}
+    {%- set merge_exclude_columns = config.get('merge_exclude_columns') -%}
+    {%- set update_columns = get_merge_update_columns(merge_update_columns, merge_exclude_columns, dest_columns) -%}
    {%- set sql_header = config.get('sql_header', none) -%}
 
    {% if unique_key %}
```
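For readers skimming the Jinja, here is the same column-selection precedence restated as plain Python (a sketch only: columns are simplified to strings, whereas the real macro works on column objects and their quoted names):

```python
def get_merge_update_columns(merge_update_columns, merge_exclude_columns, dest_columns):
    # specifying both configs is a hard error, mirroring raise_compiler_error
    if merge_update_columns and merge_exclude_columns:
        raise ValueError(
            "Model cannot specify merge_update_columns and merge_exclude_columns. "
            "Please update model to use only one config"
        )
    if merge_update_columns:                  # explicit allow-list wins
        return merge_update_columns
    if merge_exclude_columns:                 # otherwise subtract the exclude-list
        excluded = {c.lower() for c in merge_exclude_columns}
        return [c for c in dest_columns if c.lower() not in excluded]
    return list(dest_columns)                 # default: update every destination column


assert get_merge_update_columns(None, ["updated_at"], ["id", "name", "updated_at"]) == ["id", "name"]
```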
```diff
@@ -1,3 +1,26 @@
+{% macro build_dbt_relation_obj(model) %}
+
+class dbtRelation:
+    """
+    dbt.ref('model_a').rel -> 'database.schema.model_a'
+    str(dbt.ref('model_a')) -> same
+
+    dbt.ref('model_a').df -> DataFrame pointing to 'database.schema.model_a'
+    dbt.ref('model_a')() -> same
+
+    Could we make this return .df for just dbt.ref('model_a'),
+    with no add'l func call, or is that impossible with Python classes ???
+    """
+    def __init__(self, relation_name, dbt_load_df_function):
+        self.rel = relation_name
+        self.df = dbt_load_df_function(relation_name)
+    def __str__(self):
+        # return the attribute set in __init__ (the scraped diff referenced a
+        # nonexistent self.relation_name here, which would raise AttributeError)
+        return self.rel
+    def __call__(self):
+        return self.df
+
+{% endmacro %}
+
 {% macro build_ref_function(model) %}
 
 {%- set ref_dict = {} -%}
@@ -6,10 +29,10 @@
 {%- do ref_dict.update({_ref | join("."): resolved.quote(database=False, schema=False, identifier=False) | string}) -%}
 {%- endfor -%}
 
-def ref(*args,dbt_load_df_function):
+def ref(*args, dbt_load_df_function):
     refs = {{ ref_dict | tojson }}
     key = ".".join(args)
-    return dbt_load_df_function(refs[key])
+    return dbtRelation(refs[key], dbt_load_df_function)
 
 {% endmacro %}
@@ -24,7 +47,7 @@ def ref(*args, dbt_load_df_function):
 def source(*args, dbt_load_df_function):
     sources = {{ source_dict | tojson }}
     key = ".".join(args)
-    return dbt_load_df_function(sources[key])
+    return dbtRelation(sources[key], dbt_load_df_function)
 
 {% endmacro %}
@@ -47,6 +70,7 @@ config_dict = {{ config_dict }}
 # COMMAND ----------
 # this part is dbt logic for get ref work, do not modify
 
+{{ build_dbt_relation_obj(model ) }}
 {{ build_ref_function(model ) }}
 {{ build_source_function(model ) }}
 {{ build_config_dict(model) }}
```
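Once rendered into the Python model script, the class above is ordinary Python. A standalone sketch with a stand-in loader shows the call patterns the docstring advertises (`fake_load` is a placeholder for dbt's `dbt_load_df_function`):

```python
class dbtRelation:
    def __init__(self, relation_name, dbt_load_df_function):
        self.rel = relation_name                      # relation name as a string
        self.df = dbt_load_df_function(relation_name)  # eagerly loaded DataFrame

    def __str__(self):
        return self.rel

    def __call__(self):
        return self.df


def fake_load(name):
    return f"<DataFrame for {name}>"  # stand-in loader for the sketch


ref = dbtRelation("database.schema.model_a", fake_load)
assert str(ref) == "database.schema.model_a"
assert ref() == ref.df == "<DataFrame for database.schema.model_a>"
```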
```diff
@@ -0,0 +1,8 @@
+{% macro array_append(array, new_element) -%}
+    {{ return(adapter.dispatch('array_append', 'dbt')(array, new_element)) }}
+{%- endmacro %}
+
+{# new_element must be the same data type as elements in array to match postgres functionality #}
+{% macro default__array_append(array, new_element) -%}
+    array_append({{ array }}, {{ new_element }})
+{%- endmacro %}
```

```diff
@@ -0,0 +1,7 @@
+{% macro array_concat(array_1, array_2) -%}
+    {{ return(adapter.dispatch('array_concat', 'dbt')(array_1, array_2)) }}
+{%- endmacro %}
+
+{% macro default__array_concat(array_1, array_2) -%}
+    array_cat({{ array_1 }}, {{ array_2 }})
+{%- endmacro %}
```

```diff
@@ -0,0 +1,12 @@
+{% macro array_construct(inputs=[], data_type=api.Column.translate_type('integer')) -%}
+    {{ return(adapter.dispatch('array_construct', 'dbt')(inputs, data_type)) }}
+{%- endmacro %}
+
+{# all inputs must be the same data type to match postgres functionality #}
+{% macro default__array_construct(inputs, data_type) -%}
+    {% if inputs|length > 0 %}
+    array[ {{ inputs|join(' , ') }} ]
+    {% else %}
+    array[]::{{data_type}}[]
+    {% endif %}
+{%- endmacro %}
```
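As a cross-check on the empty-array branch, here is the rendering logic of `default__array_construct` restated as a small Python string-builder (illustration only; `integer` is just this sketch's default type, and the real macro dispatches per adapter):

```python
def default_array_construct(inputs, data_type="integer"):
    if inputs:
        # non-empty arrays: literal array[...] syntax
        return f"array[ {' , '.join(str(i) for i in inputs)} ]"
    # empty arrays need an explicit cast so the database can infer a type
    return f"array[]::{data_type}[]"


print(default_array_construct([1, 2, 3]))  # array[ 1 , 2 , 3 ]
print(default_array_construct([]))         # array[]::integer[]
```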
File diff suppressed because one or more lines are too long
core/dbt/lib.py (118 lines changed)

```diff
@@ -1,10 +1,64 @@
 # TODO: this file is one big TODO
+import os
+from dbt.contracts.results import RunningStatus, collect_timing_info
+from dbt.events.functions import fire_event
+from dbt.events.types import NodeCompiling, NodeExecuting
 from dbt.exceptions import RuntimeException
 from dbt import flags
-from collections import namedtuple
+from dbt.task.sql import SqlCompileRunner
+from dataclasses import dataclass
 
-RuntimeArgs = namedtuple("RuntimeArgs", "project_dir profiles_dir single_threaded profile target")
+
+@dataclass
+class RuntimeArgs:
+    project_dir: str
+    profiles_dir: str
+    single_threaded: bool
+    profile: str
+    target: str
+
+
+class SqlCompileRunnerNoIntrospection(SqlCompileRunner):
+    def compile_and_execute(self, manifest, ctx):
+        """
+        This version of this method does not connect to the data warehouse.
+        As a result, introspective queries at compilation will not be supported
+        and will throw an error.
+
+        TODO: This is a temporary solution to more complex permissions requirements
+        for the semantic layer, and thus largely duplicates the code in the parent class
+        method. Once conditional credential usage is enabled, this should be removed.
+        """
+        result = None
+        ctx.node._event_status["node_status"] = RunningStatus.Compiling
+        fire_event(
+            NodeCompiling(
+                node_info=ctx.node.node_info,
+                unique_id=ctx.node.unique_id,
+            )
+        )
+        with collect_timing_info("compile") as timing_info:
+            # if we fail here, we still have a compiled node to return
+            # this has the benefit of showing a build path for the errant
+            # model
+            ctx.node = self.compile(manifest)
+        ctx.timing.append(timing_info)
+
+        # for ephemeral nodes, we only want to compile, not run
+        if not ctx.node.is_ephemeral_model:
+            ctx.node._event_status["node_status"] = RunningStatus.Executing
+            fire_event(
+                NodeExecuting(
+                    node_info=ctx.node.node_info,
+                    unique_id=ctx.node.unique_id,
+                )
+            )
+            with collect_timing_info("execute") as timing_info:
+                result = self.run(ctx.node, manifest)
+                ctx.node = result.node
+
+            ctx.timing.append(timing_info)
+
+        return result
 
 
 def get_dbt_config(project_dir, args=None, single_threaded=False):
```
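The `RuntimeArgs` change above swaps a positional namedtuple for a keyword-constructed dataclass, which is what the rewritten `get_dbt_config` below relies on. A before/after sketch with placeholder paths:

```python
from collections import namedtuple
from dataclasses import dataclass

OldArgs = namedtuple("OldArgs", "project_dir profiles_dir")
old = OldArgs("/proj", "/profiles")  # positional: easy to transpose fields silently


@dataclass
class NewArgs:
    project_dir: str
    profiles_dir: str


new = NewArgs(project_dir="/proj", profiles_dir="/profiles")  # self-documenting
assert (old.project_dir, old.profiles_dir) == (new.project_dir, new.profiles_dir)
```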
```diff
@@ -17,27 +71,30 @@ def get_dbt_config(project_dir, args=None, single_threaded=False):
     else:
         profiles_dir = flags.DEFAULT_PROFILES_DIR
 
-    profile = args.profile if hasattr(args, "profile") else None
-    target = args.target if hasattr(args, "target") else None
-
-    # Construct a phony config
-    config = RuntimeConfig.from_args(
-        RuntimeArgs(project_dir, profiles_dir, single_threaded, profile, target)
+    runtime_args = RuntimeArgs(
+        project_dir=project_dir,
+        profiles_dir=profiles_dir,
+        single_threaded=single_threaded,
+        profile=getattr(args, "profile", None),
+        target=getattr(args, "target", None),
     )
-    # Clear previously registered adapters--
-    # this fixes cacheing behavior on the dbt-server
+
+    # Construct a RuntimeConfig from phony args
+    config = RuntimeConfig.from_args(runtime_args)
+
+    # Set global flags from arguments
     flags.set_from_args(args, config)
-    dbt.adapters.factory.reset_adapters()
-    # Load the relevant adapter
+
+    # This is idempotent, so we can call it repeatedly
     dbt.adapters.factory.register_adapter(config)
-    # Set invocation id
+
+    # Make sure we have a valid invocation_id
     dbt.events.functions.set_invocation_id()
 
     return config
 
 
 def get_task_by_type(type):
     # TODO: we need to tell dbt-server what tasks are available
     from dbt.task.run import RunTask
     from dbt.task.list import ListTask
     from dbt.task.seed import SeedTask
```
```diff
@@ -70,16 +127,13 @@ def create_task(type, args, manifest, config):
     def no_op(*args, **kwargs):
         pass
 
-    # TODO: yuck, let's rethink tasks a little
     task = task(args, config)
-
-    # Wow! We can monkeypatch taskCls.load_manifest to return _our_ manifest
     task.load_manifest = no_op
     task.manifest = manifest
     return task
 
 
-def _get_operation_node(manifest, project_path, sql):
+def _get_operation_node(manifest, project_path, sql, node_name):
     from dbt.parser.manifest import process_node
     from dbt.parser.sql import SqlBlockParser
     import dbt.adapters.factory
@@ -92,26 +146,33 @@ def _get_operation_node(manifest, project_path, sql):
     )
 
     adapter = dbt.adapters.factory.get_adapter(config)
-    # TODO : This needs a real name?
-    sql_node = block_parser.parse_remote(sql, "name")
+    sql_node = block_parser.parse_remote(sql, node_name)
     process_node(config, manifest, sql_node)
     return config, sql_node, adapter
 
 
-def compile_sql(manifest, project_path, sql):
-    from dbt.task.sql import SqlCompileRunner
-
-    config, node, adapter = _get_operation_node(manifest, project_path, sql)
-    runner = SqlCompileRunner(config, adapter, node, 1, 1)
+def compile_sql(manifest, project_path, sql, node_name="query"):
+    config, node, adapter = _get_operation_node(manifest, project_path, sql, node_name)
+    allow_introspection = str(os.environ.get("__DBT_ALLOW_INTROSPECTION", "1")).lower() in (
+        "true",
+        "1",
+        "on",
+    )
+
+    if allow_introspection:
+        runner = SqlCompileRunner(config, adapter, node, 1, 1)
+    else:
+        runner = SqlCompileRunnerNoIntrospection(config, adapter, node, 1, 1)
     return runner.safe_run(manifest)
 
 
-def execute_sql(manifest, project_path, sql):
+def execute_sql(manifest, project_path, sql, node_name="query"):
     from dbt.task.sql import SqlExecuteRunner
 
-    config, node, adapter = _get_operation_node(manifest, project_path, sql)
+    config, node, adapter = _get_operation_node(manifest, project_path, sql, node_name)
 
     runner = SqlExecuteRunner(config, adapter, node, 1, 1)
-    # TODO: use same interface for runner
 
     return runner.safe_run(manifest)
@@ -128,5 +189,4 @@ def deserialize_manifest(manifest_msgpack):
 
 
 def serialize_manifest(manifest):
-    # TODO: what should this take as an arg?
     return manifest.to_msgpack()
```
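A hedged usage sketch of the new toggle: `__DBT_ALLOW_INTROSPECTION` is read inside `compile_sql` on every call, so a caller can opt out of warehouse connections per invocation via the environment. The project path and node name below are placeholders, not defaults:

```python
import os

from dbt.lib import compile_sql


def compile_without_introspection(manifest, sql: str):
    # Any introspective query in `sql` will now raise instead of opening a
    # warehouse connection (SqlCompileRunnerNoIntrospection is selected).
    os.environ["__DBT_ALLOW_INTROSPECTION"] = "0"
    try:
        return compile_sql(manifest, "/path/to/project", sql, node_name="my_query")
    finally:
        os.environ["__DBT_ALLOW_INTROSPECTION"] = "1"  # restore the default
```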
```diff
@@ -1,4 +1,5 @@
+from typing import List
 
 from dbt.logger import log_cache_events, log_manager
 
 import argparse
@@ -43,7 +44,12 @@ import dbt.tracking
 
 from dbt.utils import ExitCodes, args_to_dict
 from dbt.config.profile import read_user_config
-from dbt.exceptions import InternalException, NotImplementedException, FailedToConnectException
+from dbt.exceptions import (
+    Exception as dbtException,
+    InternalException,
+    NotImplementedException,
+    FailedToConnectException,
+)
 
 
 class DBTVersion(argparse.Action):
@@ -143,7 +149,8 @@ def main(args=None):
 
     except BaseException as e:
         fire_event(MainEncounteredError(exc=str(e)))
-        fire_event(MainStackTrace(stack_trace=traceback.format_exc()))
+        if not isinstance(e, dbtException):
+            fire_event(MainStackTrace(stack_trace=traceback.format_exc()))
         exit_code = ExitCodes.UnhandledError.value
 
     sys.exit(exit_code)
@@ -618,6 +625,7 @@ def _add_table_mutability_arguments(*subparsers):
     for sub in subparsers:
         sub.add_argument(
             "--full-refresh",
+            "-f",
             action="store_true",
             help="""
             If specified, dbt will drop incremental models and
```
```diff
@@ -20,9 +20,7 @@ class MacroParser(BaseParser[ParsedMacro]):
     # from the normal parsing flow.
     def get_paths(self) -> List[FilePath]:
         return filesystem_search(
-            project=self.project,
-            relative_dirs=self.project.macro_paths,
-            extension=".sql",
+            project=self.project, relative_dirs=self.project.macro_paths, extension=".sql"
         )
 
     @property
```
```diff
@@ -86,11 +86,12 @@ class PythonParseVisitor(ast.NodeVisitor):
     def _safe_eval(self, node):
         try:
             return ast.literal_eval(node)
-        except (SyntaxError, ValueError, TypeError) as exc:
-            msg = validator_error_message(exc)
-            raise ParsingException(msg, node=self.dbt_node) from exc
-        except (MemoryError, RecursionError) as exc:
-            msg = validator_error_message(exc)
+        except (SyntaxError, ValueError, TypeError, MemoryError, RecursionError) as exc:
+            msg = validator_error_message(
+                f"Error when trying to literal_eval an arg to dbt.ref(), dbt.source(), dbt.config() or dbt.config.get() \n{exc}\n"
+                "https://docs.python.org/3/library/ast.html#ast.literal_eval\n"
+                "In dbt python model, `dbt.ref`, `dbt.source`, `dbt.config`, `dbt.config.get` function args only support Python literal structures"
+            )
             raise ParsingException(msg, node=self.dbt_node) from exc
 
     def _get_call_literals(self, node):
```
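The tightened exception handling leans on the contract of `ast.literal_eval`: it evaluates Python literal structures only and raises rather than executing code, which is why a variable argument to `dbt.ref()` cannot be statically resolved. A short demonstration:

```python
import ast

assert ast.literal_eval("'orders'") == "orders"
assert ast.literal_eval("['a', 'b']") == ["a", "b"]
assert ast.literal_eval("{'key': 1}") == {"key": 1}

try:
    # a function call is not a literal, so something like
    # dbt.ref(model_name) with a runtime variable cannot be evaluated
    ast.literal_eval("get_model_name()")
except ValueError as exc:
    print(f"rejected: {exc}")
```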
```diff
@@ -245,6 +245,22 @@ class PartialParsing:
         if "overrides" in source:
             self.remove_source_override_target(source)
 
+    def delete_disabled(self, unique_id, file_id):
+        # This node/metric/exposure is disabled. Find it and remove it from disabled dictionary.
+        for dis_index, dis_node in enumerate(self.saved_manifest.disabled[unique_id]):
+            if dis_node.file_id == file_id:
+                node = dis_node
+                index = dis_index
+                break
+        # Remove node from disabled
+        del self.saved_manifest.disabled[unique_id][index]
+        # if all nodes were removed for the unique id, delete the unique_id
+        # from the disabled dict
+        if not self.saved_manifest.disabled[unique_id]:
+            self.saved_manifest.disabled.pop(unique_id)
+
+        return node
+
     # Deletes for all non-schema files
     def delete_from_saved(self, file_id):
         # Look at all things touched by file, remove those
@@ -319,15 +335,7 @@ class PartialParsing:
             and unique_id in self.saved_manifest.disabled
         ):
             # This node is disabled. Find the node and remove it from disabled dictionary.
-            for dis_index, dis_node in enumerate(self.saved_manifest.disabled[unique_id]):
-                if dis_node.file_id == source_file.file_id:
-                    node = dis_node
-                    break
-            if dis_node:
-                # Remove node from disabled and unique_id from disabled dict if necessary
-                del self.saved_manifest.disabled[unique_id][dis_index]
-                if not self.saved_manifest.disabled[unique_id]:
-                    self.saved_manifest.disabled.pop(unique_id)
+            node = self.delete_disabled(unique_id, source_file.file_id)
         else:
             # Has already been deleted by another action
             return
@@ -885,34 +893,40 @@ class PartialParsing:
             self.add_to_pp_files(self.saved_files[macro_file_id])
 
     # exposures are created only from schema files, so just delete
-    # the exposure.
+    # the exposure or the disabled exposure.
     def delete_schema_exposure(self, schema_file, exposure_dict):
         exposure_name = exposure_dict["name"]
         exposures = schema_file.exposures.copy()
         for unique_id in exposures:
-            exposure = self.saved_manifest.exposures[unique_id]
+            if unique_id in self.saved_manifest.exposures:
+                exposure = self.saved_manifest.exposures[unique_id]
                 if exposure.name == exposure_name:
                     self.deleted_manifest.exposures[unique_id] = self.saved_manifest.exposures.pop(
                         unique_id
                     )
                     schema_file.exposures.remove(unique_id)
                     fire_event(PartialParsingDeletedExposure(unique_id=unique_id))
+            elif unique_id in self.saved_manifest.disabled:
+                self.delete_disabled(unique_id, schema_file.file_id)
 
-    # metric are created only from schema files, so just delete
-    # the metric.
+    # metrics are created only from schema files, but also can be referred to by other nodes
     def delete_schema_metric(self, schema_file, metric_dict):
         metric_name = metric_dict["name"]
         metrics = schema_file.metrics.copy()
         for unique_id in metrics:
-            metric = self.saved_manifest.metrics[unique_id]
+            if unique_id in self.saved_manifest.metrics:
+                metric = self.saved_manifest.metrics[unique_id]
                 if metric.name == metric_name:
+                    # Need to find everything that referenced this metric and schedule for parsing
+                    if unique_id in self.saved_manifest.child_map:
+                        self.schedule_nodes_for_parsing(self.saved_manifest.child_map[unique_id])
                     self.deleted_manifest.metrics[unique_id] = self.saved_manifest.metrics.pop(
                         unique_id
                     )
                    schema_file.metrics.remove(unique_id)
                    fire_event(PartialParsingDeletedMetric(id=unique_id))
+            elif unique_id in self.saved_manifest.disabled:
+                self.delete_disabled(unique_id, schema_file.file_id)
 
     def get_schema_element(self, elem_list, elem_name):
         for element in elem_list:
```
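For orientation, the `disabled` mapping that `delete_disabled` mutates is keyed by unique_id, with one list entry per file that defines a disabled copy of that node. A simplified stand-in (the node objects here are placeholders with only the `file_id` attribute the method inspects):

```python
from types import SimpleNamespace

disabled = {
    "metric.test.number_of_people": [
        SimpleNamespace(file_id="models/people_metrics.yml"),
        SimpleNamespace(file_id="models/other_metrics.yml"),
    ]
}


def delete_disabled(unique_id, file_id):
    nodes = disabled[unique_id]
    # like the real method, assumes a matching entry exists
    index, node = next((i, n) for i, n in enumerate(nodes) if n.file_id == file_id)
    del nodes[index]
    if not nodes:  # drop the key once the last disabled entry is gone
        disabled.pop(unique_id)
    return node


node = delete_disabled("metric.test.number_of_people", "models/people_metrics.yml")
assert node.file_id == "models/people_metrics.yml"
```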
```diff
@@ -1,3 +1,5 @@
 import os
+import pathspec  # type: ignore
+import pathlib
 from dbt.clients.system import load_file_contents
 from dbt.contracts.files import (
@@ -107,9 +109,9 @@ def load_seed_source_file(match: FilePath, project_name) -> SourceFile:
 
 # Use the FilesystemSearcher to get a bunch of FilePaths, then turn
 # them into a bunch of FileSource objects
-def get_source_files(project, paths, extension, parse_file_type, saved_files):
+def get_source_files(project, paths, extension, parse_file_type, saved_files, ignore_spec):
     # file path list
-    fp_list = filesystem_search(project, paths, extension)
+    fp_list = filesystem_search(project, paths, extension, ignore_spec)
     # file block list
     fb_list = []
     for fp in fp_list:
@@ -129,42 +131,84 @@ def get_source_files(project, paths, extension, parse_file_type, saved_files):
     return fb_list
 
 
-def read_files_for_parser(project, files, dirs, extensions, parse_ft, saved_files):
+def read_files_for_parser(project, files, dirs, extensions, parse_ft, saved_files, ignore_spec):
     parser_files = []
     for extension in extensions:
-        source_files = get_source_files(project, dirs, extension, parse_ft, saved_files)
+        source_files = get_source_files(
+            project, dirs, extension, parse_ft, saved_files, ignore_spec
+        )
         for sf in source_files:
             files[sf.file_id] = sf
             parser_files.append(sf.file_id)
     return parser_files
 
 
+def generate_dbt_ignore_spec(project_root):
+    ignore_file_path = os.path.join(project_root, ".dbtignore")
+
+    ignore_spec = None
+    if os.path.exists(ignore_file_path):
+        with open(ignore_file_path) as f:
+            ignore_spec = pathspec.PathSpec.from_lines(pathspec.patterns.GitWildMatchPattern, f)
+    return ignore_spec
+
+
 # This needs to read files for multiple projects, so the 'files'
 # dictionary needs to be passed in. What determines the order of
 # the various projects? Is the root project always last? Do the
 # non-root projects need to be done separately in order?
 def read_files(project, files, parser_files, saved_files):
 
+    dbt_ignore_spec = generate_dbt_ignore_spec(project.project_root)
     project_files = {}
 
     project_files["MacroParser"] = read_files_for_parser(
-        project, files, project.macro_paths, [".sql"], ParseFileType.Macro, saved_files
+        project,
+        files,
+        project.macro_paths,
+        [".sql"],
+        ParseFileType.Macro,
+        saved_files,
+        dbt_ignore_spec,
     )
 
     project_files["ModelParser"] = read_files_for_parser(
-        project, files, project.model_paths, [".sql", ".py"], ParseFileType.Model, saved_files
+        project,
+        files,
+        project.model_paths,
+        [".sql", ".py"],
+        ParseFileType.Model,
+        saved_files,
+        dbt_ignore_spec,
    )
 
     project_files["SnapshotParser"] = read_files_for_parser(
-        project, files, project.snapshot_paths, [".sql"], ParseFileType.Snapshot, saved_files
+        project,
+        files,
+        project.snapshot_paths,
+        [".sql"],
+        ParseFileType.Snapshot,
+        saved_files,
+        dbt_ignore_spec,
     )
 
     project_files["AnalysisParser"] = read_files_for_parser(
-        project, files, project.analysis_paths, [".sql"], ParseFileType.Analysis, saved_files
+        project,
+        files,
+        project.analysis_paths,
+        [".sql"],
+        ParseFileType.Analysis,
+        saved_files,
+        dbt_ignore_spec,
     )
 
     project_files["SingularTestParser"] = read_files_for_parser(
-        project, files, project.test_paths, [".sql"], ParseFileType.SingularTest, saved_files
+        project,
+        files,
+        project.test_paths,
+        [".sql"],
+        ParseFileType.SingularTest,
+        saved_files,
+        dbt_ignore_spec,
     )
 
     # all generic tests within /tests must be nested under a /generic subfolder
@@ -175,14 +219,27 @@ def read_files(project, files, parser_files, saved_files):
         [".sql"],
         ParseFileType.GenericTest,
         saved_files,
+        dbt_ignore_spec,
     )
 
     project_files["SeedParser"] = read_files_for_parser(
-        project, files, project.seed_paths, [".csv"], ParseFileType.Seed, saved_files
+        project,
+        files,
+        project.seed_paths,
+        [".csv"],
+        ParseFileType.Seed,
+        saved_files,
+        dbt_ignore_spec,
    )
 
     project_files["DocumentationParser"] = read_files_for_parser(
-        project, files, project.docs_paths, [".md"], ParseFileType.Documentation, saved_files
+        project,
+        files,
+        project.docs_paths,
+        [".md"],
+        ParseFileType.Documentation,
+        saved_files,
+        dbt_ignore_spec,
     )
 
     project_files["SchemaParser"] = read_files_for_parser(
@@ -192,6 +249,7 @@ def read_files(project, files, parser_files, saved_files):
         [".yml", ".yaml"],
         ParseFileType.Schema,
         saved_files,
+        dbt_ignore_spec,
     )
 
     # Store the parser files for this particular project
```
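`.dbtignore` patterns are gitignore-style, parsed by the same `pathspec` call as `generate_dbt_ignore_spec` above. A sketch of what a given ignore file would exclude; the patterns below are illustrative, not defaults shipped with dbt:

```python
import pathspec

ignore_lines = [
    "not-a-dbt-file.py",     # ignore one file by name
    "another-non-dbt-dir/",  # ignore a whole directory
    "**/*.xyz",              # ignore by extension anywhere in the project
]
spec = pathspec.PathSpec.from_lines(pathspec.patterns.GitWildMatchPattern, ignore_lines)

assert spec.match_file("models/another-non-dbt-dir/file.sql")
assert spec.match_file("analyses/deep/nested/file.xyz")
assert not spec.match_file("models/people.sql")  # regular files are still read
```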
```diff
@@ -1029,7 +1029,7 @@ class ExposureParser(YamlReader):
         if parsed.config.enabled:
             self.manifest.add_exposure(self.yaml.file, parsed)
         else:
-            self.manifest.add_disabled_nofile(parsed)
+            self.manifest.add_disabled(self.yaml.file, parsed)
 
     def _generate_exposure_config(
         self, target: UnparsedExposure, fqn: List[str], package_name: str, rendered: bool
@@ -1144,7 +1144,7 @@ class MetricParser(YamlReader):
         if parsed.config.enabled:
             self.manifest.add_metric(self.yaml.file, parsed)
         else:
-            self.manifest.add_disabled_nofile(parsed)
+            self.manifest.add_disabled(self.yaml.file, parsed)
 
     def _generate_metric_config(
         self, target: UnparsedMetric, fqn: List[str], package_name: str, rendered: bool
```
```diff
@@ -1,6 +1,7 @@
 import os
 from dataclasses import dataclass
-from typing import List, Callable, Iterable, Set, Union, Iterator, TypeVar, Generic
+from typing import List, Callable, Iterable, Set, Union, Iterator, TypeVar, Generic, Optional
+from pathspec import PathSpec  # type: ignore
 
 from dbt.clients.jinja import extract_toplevel_blocks, BlockTag
 from dbt.clients.system import find_matching
@@ -61,11 +62,16 @@ class FullBlock(FileBlock):
         return self.block.full_block
 
 
-def filesystem_search(project: Project, relative_dirs: List[str], extension: str):
+def filesystem_search(
+    project: Project,
+    relative_dirs: List[str],
+    extension: str,
+    ignore_spec: Optional[PathSpec] = None,
+):
     ext = "[!.#~]*" + extension
     root = project.project_root
     file_path_list = []
-    for result in find_matching(root, relative_dirs, ext):
+    for result in find_matching(root, relative_dirs, ext, ignore_spec):
         if "searched_path" not in result or "relative_path" not in result:
             raise InternalException("Invalid result from find_matching: {}".format(result))
         file_match = FilePath(
```
```diff
@@ -63,7 +63,7 @@ class SourcePatcher:
                 self.sources[unpatched.unique_id] = unpatched
                 continue
             # returns None if there is no patch
-            patch = self.get_patch_for(unpatched)  # type: ignore[unreachable]  # CT-564 / GH 5169
+            patch = self.get_patch_for(unpatched)
 
             # returns unpatched if there is no patch
             patched = self.patch_source(unpatched, patch)
@@ -213,8 +213,8 @@ class SourcePatcher:
         self,
         unpatched: UnpatchedSourceDefinition,
     ) -> Optional[SourcePatch]:
-        if isinstance(unpatched, ParsedSourceDefinition):  # type: ignore[unreachable]  # CT-564 / GH 5169
-            return None  # type: ignore[unreachable]  # CT-564 / GH 5169
+        if isinstance(unpatched, ParsedSourceDefinition):
+            return None
         key = (unpatched.package_name, unpatched.source.name)
         patch: Optional[SourcePatch] = self.manifest.source_patches.get(key)
         if patch is None:
```
```diff
@@ -62,6 +62,7 @@ setup(
         "dbt-extractor~=0.4.1",
         "typing-extensions>=3.7.4",
         "werkzeug>=1,<3",
+        "pathspec~=0.9.0",
         # the following are all to match snowflake-connector-python
         "requests<3.0.0",
         "idna>=2.5,<4",
```
```diff
@@ -0,0 +1,21 @@
+{%
+    set metric_list = [
+        metric('number_of_people'),
+        metric('collective_tenure')
+    ]
+%}
+
+{% if not execute %}
+
+    {% set metric_names = [] %}
+    {% for m in metric_list %}
+        {% do metric_names.append(m.metric_name) %}
+    {% endfor %}
+
+    -- this config does nothing, but it lets us check these values
+    {{ config(metric_names = metric_names) }}
+
+{% endif %}
+
+
+select 1 as fun
```
```diff
@@ -0,0 +1,17 @@
+version: 2
+
+metrics:
+
+  - model: "ref('people')"
+    name: number_of_people
+    description: Total count of people
+    label: "Number of people"
+    calculation_method: count
+    expression: "*"
+    timestamp: created_at
+    time_grains: [day, week, month]
+    dimensions:
+      - favorite_color
+      - loves_dbt
+    meta:
+      my_meta: 'replaced'
```
```diff
@@ -52,7 +52,7 @@ class BasePPTest(DBTIntegrationTest):
 class MetricsTest(BasePPTest):
 
     @use_profile('postgres')
-    def test_postgres_env_vars_models(self):
+    def test_postgres_metrics(self):
         self.setup_directories()
         # initial run
         self.copy_file('test-files/people.sql', 'models/people.sql')
@@ -89,3 +89,18 @@ class MetricsTest(BasePPTest):
         expected_depends_on_nodes = ['model.test.people']
         self.assertEqual(metric_people.depends_on.nodes, expected_depends_on_nodes)
 
+        # Add model referring to metric
+        self.copy_file('test-files/metric_model_a.sql', 'models/metric_model_a.sql')
+        results = self.run_dbt(["run"])
+        manifest = get_manifest()
+        model_a = manifest.nodes['model.test.metric_model_a']
+        expected_depends_on_nodes = ['metric.test.number_of_people', 'metric.test.collective_tenure']
+        self.assertEqual(model_a.depends_on.nodes, expected_depends_on_nodes)
+
+        # Then delete a metric
+        self.copy_file('test-files/people_metrics3.yml', 'models/people_metrics.yml')
+        with self.assertRaises(CompilationException):
+            # We use "parse" here and not "run" because we're checking that the CompilationException
+            # occurs at parse time, not compilation
+            results = self.run_dbt(["parse"])
```
```diff
@@ -59,7 +59,7 @@ class GraphTest(unittest.TestCase):
 
         # Create file filesystem searcher
         self.filesystem_search = patch('dbt.parser.read_files.filesystem_search')
-        def mock_filesystem_search(project, relative_dirs, extension):
+        def mock_filesystem_search(project, relative_dirs, extension, ignore_spec):
             if 'sql' not in extension:
                 return []
             if 'models' not in relative_dirs:
```
test/unit/test_lib.py (new file, 63 lines)

```python
import os
import unittest
from unittest import mock
from dbt.contracts.results import RunningStatus
from dbt.lib import compile_sql
from dbt.adapters.postgres import Plugin

from test.unit.utils import clear_plugin, inject_adapter


class MockContext:
    def __init__(self, node):
        self.timing = []
        self.node = mock.MagicMock()
        self.node._event_status = {
            "node_status": RunningStatus.Started
        }
        self.node.is_ephemeral_model = True

def noop_ephemeral_result(*args):
    return None

class TestSqlCompileRunnerNoIntrospection(unittest.TestCase):
    def setUp(self):
        self.manifest = {'mock': 'manifest'}
        self.adapter = Plugin.adapter({})
        self.adapter.connection_for = mock.MagicMock()
        self.ephemeral_result = lambda: None
        inject_adapter(self.adapter, Plugin)

    def tearDown(self):
        clear_plugin(Plugin)

    @mock.patch('dbt.lib._get_operation_node')
    @mock.patch('dbt.task.sql.GenericSqlRunner.compile')
    @mock.patch('dbt.task.sql.GenericSqlRunner.ephemeral_result', noop_ephemeral_result)
    @mock.patch('dbt.task.base.ExecutionContext', MockContext)
    def test__compile_and_execute__with_connection(self, mock_compile, mock_get_node):
        """
        By default, env var for allowing introspection is true, and calling this
        method should defer to the parent method.
        """
        mock_get_node.return_value = ({}, None, self.adapter)
        compile_sql(self.manifest, 'some/path', None)

        mock_compile.assert_called_once_with(self.manifest)
        self.adapter.connection_for.assert_called_once()


    @mock.patch('dbt.lib._get_operation_node')
    @mock.patch('dbt.task.sql.GenericSqlRunner.compile')
    @mock.patch('dbt.task.sql.GenericSqlRunner.ephemeral_result', noop_ephemeral_result)
    @mock.patch('dbt.task.base.ExecutionContext', MockContext)
    def test__compile_and_execute__without_connection(self, mock_compile, mock_get_node):
        """
        Ensure that compile is called but does not attempt warehouse connection
        """
        with mock.patch.dict(os.environ, {"__DBT_ALLOW_INTROSPECTION": "0"}):
            mock_get_node.return_value = ({}, None, self.adapter)
            compile_sql(self.manifest, 'some/path', None)

        mock_compile.assert_called_once_with(self.manifest)
        self.adapter.connection_for.assert_not_called()
```
```diff
@@ -713,6 +713,24 @@ def model(dbt, session):
         with self.assertRaises(CompilationException):
             self.parser.parse_file(block)
 
+    def test_parse_ref_with_non_string(self):
+        py_code = """
+def model(dbt, session):
+
+    model_names = ["orders", "customers"]
+    models = []
+
+    for model_name in model_names:
+        models.extend(dbt.ref(model_name))
+
+    return models[0]
+"""
+        block = self.file_block_for(py_code, 'nested/py_model.py')
+        self.parser.manifest.files[block.file.file_id] = block.file
+        with self.assertRaises(ParsingException):
+            self.parser.parse_file(block)
+
 
 class StaticModelParserTest(BaseParserTest):
     def setUp(self):
```
```diff
@@ -4,6 +4,7 @@ import stat
 import unittest
 import tarfile
 import io
+import pathspec
 from pathlib import Path
 from tempfile import mkdtemp, NamedTemporaryFile
@@ -11,6 +12,7 @@ from dbt.exceptions import ExecutableError, WorkingDirectoryError
 import dbt.clients.system
 
 
+
 class SystemClient(unittest.TestCase):
     def setUp(self):
         super().setUp()
@@ -151,7 +153,9 @@ class TestFindMatching(unittest.TestCase):
             file_path = os.path.dirname(named_file.name)
             relative_path = os.path.basename(file_path)
             out = dbt.clients.system.find_matching(
-                self.base_dir, [relative_path], '*.sql'
+                self.base_dir,
+                [relative_path],
+                '*.sql',
             )
             expected_output = [{
                 'searched_path': relative_path,
@@ -166,7 +170,9 @@ class TestFindMatching(unittest.TestCase):
             file_path = os.path.dirname(named_file.name)
             relative_path = os.path.basename(file_path)
             out = dbt.clients.system.find_matching(
-                self.base_dir, [relative_path], '*.sql'
+                self.base_dir,
+                [relative_path],
+                '*.sql'
             )
             expected_output = [{
                 'searched_path': relative_path,
@@ -180,7 +186,25 @@ class TestFindMatching(unittest.TestCase):
         with NamedTemporaryFile(
             prefix='sql-files', suffix='.SQLT', dir=self.tempdir
         ):
-            out = dbt.clients.system.find_matching(self.tempdir, [''], '*.sql')
+            out = dbt.clients.system.find_matching(
+                self.tempdir,
+                [''],
+                '*.sql'
+            )
             self.assertEqual(out, [])
 
+    def test_ignore_spec(self):
+        with NamedTemporaryFile(
+            prefix='sql-files', suffix='.sql', dir=self.tempdir
+        ):
+            out = dbt.clients.system.find_matching(
+                self.tempdir,
+                [''],
+                '*.sql',
+                pathspec.PathSpec.from_lines(
+                    pathspec.patterns.GitWildMatchPattern, "sql-files*".splitlines()
+                )
+            )
+            self.assertEqual(out, [])
+
     def tearDown(self):
```
93
tests/adapter/dbt/tests/adapter/python_model/test_spark.py
Normal file
93
tests/adapter/dbt/tests/adapter/python_model/test_spark.py
Normal file
@@ -0,0 +1,93 @@
import pytest
from dbt.tests.util import run_dbt

PANDAS_MODEL = """
import pandas as pd

def model(dbt, session):
    dbt.config(
        materialized="table",
    )

    df = pd.DataFrame(
        {'City': ['Buenos Aires', 'Brasilia', 'Santiago', 'Bogota', 'Caracas'],
         'Country': ['Argentina', 'Brazil', 'Chile', 'Colombia', 'Venezuela'],
         'Latitude': [-34.58, -15.78, -33.45, 4.60, 10.48],
         'Longitude': [-58.66, -47.91, -70.66, -74.08, -66.86]}
    )

    return df
"""

PYSPARK_MODEL = """
def model(dbt, session):
    dbt.config(
        materialized="table",
    )

    df = spark.createDataFrame(
        [
            ("Buenos Aires", "Argentina", -34.58, -58.66),
            ("Brasilia", "Brazil", -15.78, -47.91),
            ("Santiago", "Chile", -33.45, -70.66),
            ("Bogota", "Colombia", 4.60, -74.08),
            ("Caracas", "Venezuela", 10.48, -66.86),
        ],
        ["City", "Country", "Latitude", "Longitude"]
    )

    return df
"""

PANDAS_ON_SPARK_MODEL = """
import pyspark.pandas as ps


def model(dbt, session):
    dbt.config(
        materialized="table",
    )

    df = ps.DataFrame(
        {'City': ['Buenos Aires', 'Brasilia', 'Santiago', 'Bogota', 'Caracas'],
         'Country': ['Argentina', 'Brazil', 'Chile', 'Colombia', 'Venezuela'],
         'Latitude': [-34.58, -15.78, -33.45, 4.60, 10.48],
         'Longitude': [-58.66, -47.91, -70.66, -74.08, -66.86]}
    )

    return df
"""

KOALAS_MODEL = """
import databricks.koalas as ks


def model(dbt, session):
    dbt.config(
        materialized="table",
    )

    df = ks.DataFrame(
        {'City': ['Buenos Aires', 'Brasilia', 'Santiago', 'Bogota', 'Caracas'],
         'Country': ['Argentina', 'Brazil', 'Chile', 'Colombia', 'Venezuela'],
         'Latitude': [-34.58, -15.78, -33.45, 4.60, 10.48],
         'Longitude': [-58.66, -47.91, -70.66, -74.08, -66.86]}
    )

    return df
"""


class BasePySparkTests:
    @pytest.fixture(scope="class")
    def models(self):
        return {
            "pandas_df.py": PANDAS_MODEL,
            "pyspark_df.py": PYSPARK_MODEL,
            "pandas_on_spark_df.py": PANDAS_ON_SPARK_MODEL,
            "koalas_df.py": KOALAS_MODEL,
        }

    def test_different_dataframes(self, project):
        # test
        results = run_dbt(["run"])
        assert len(results) == 4
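All four fixture strings above follow the same contract for dbt Python models. A minimal sketch of that shape (illustrative only; in practice the model function is executed by the adapter inside the warehouse):

import pandas as pd


def model(dbt, session):
    # Configure the materialization, then return any supported DataFrame type.
    dbt.config(materialized="table")
    return pd.DataFrame({"id": [1, 2, 3]})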
22
tests/adapter/dbt/tests/adapter/utils/base_array_utils.py
Normal file
@@ -0,0 +1,22 @@
from dbt.tests.adapter.utils.base_utils import BaseUtils
from dbt.tests.util import run_dbt, check_relations_equal, get_relation_columns


class BaseArrayUtils(BaseUtils):
    def assert_columns_equal(self, project, expected_cols, actual_cols):
        assert (
            expected_cols == actual_cols
        ), f"Type difference detected: {expected_cols} vs. {actual_cols}"

    def test_expected_actual(self, project):
        run_dbt(["build"])

        # check contents equal
        check_relations_equal(project.adapter, ["expected", "actual"])

        # check types equal
        expected_cols = get_relation_columns(project.adapter, "expected")
        actual_cols = get_relation_columns(project.adapter, "actual")
        print(f"Expected: {expected_cols}")
        print(f"Actual: {actual_cols}")
        self.assert_columns_equal(project, expected_cols, actual_cols)
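Because assert_columns_equal is a plain method, an adapter whose metadata layer reports array types differently can relax it while inheriting the rest of the test. A hypothetical sketch (the subclass name and relaxed assertion are illustrative, not from the diff):

from dbt.tests.adapter.utils.base_array_utils import BaseArrayUtils


class BaseArrayUtilsRelaxed(BaseArrayUtils):
    def assert_columns_equal(self, project, expected_cols, actual_cols):
        # Hypothetical relaxation: only require the same column count,
        # for warehouses that report parameterized array types.
        assert len(expected_cols) == len(actual_cols)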
@@ -0,0 +1,12 @@
# array_append

models__array_append_expected_sql = """
select 1 as id, {{ array_construct([1,2,3,4]) }} as array_col union all
select 2 as id, {{ array_construct([4]) }} as array_col
"""


models__array_append_actual_sql = """
select 1 as id, {{ array_append(array_construct([1,2,3]), 4) }} as array_col union all
select 2 as id, {{ array_append(array_construct([]), 4) }} as array_col
"""

@@ -0,0 +1,14 @@
# array_concat

models__array_concat_expected_sql = """
select 1 as id, {{ array_construct([1,2,3,4,5,6]) }} as array_col union all
select 2 as id, {{ array_construct([2]) }} as array_col union all
select 3 as id, {{ array_construct([3]) }} as array_col
"""


models__array_concat_actual_sql = """
select 1 as id, {{ array_concat(array_construct([1,2,3]), array_construct([4,5,6])) }} as array_col union all
select 2 as id, {{ array_concat(array_construct([]), array_construct([2])) }} as array_col union all
select 3 as id, {{ array_concat(array_construct([3]), array_construct([])) }} as array_col
"""

@@ -0,0 +1,12 @@
# array_construct

models__array_construct_expected_sql = """
select 1 as id, {{ array_construct([1,2,3]) }} as array_col union all
select 2 as id, {{ array_construct([]) }} as array_col
"""


models__array_construct_actual_sql = """
select 1 as id, {{ array_construct([1,2,3]) }} as array_col union all
select 2 as id, {{ array_construct([]) }} as array_col
"""
19
tests/adapter/dbt/tests/adapter/utils/test_array_append.py
Normal file
@@ -0,0 +1,19 @@
import pytest
from dbt.tests.adapter.utils.base_array_utils import BaseArrayUtils
from dbt.tests.adapter.utils.fixture_array_append import (
    models__array_append_actual_sql,
    models__array_append_expected_sql,
)


class BaseArrayAppend(BaseArrayUtils):
    @pytest.fixture(scope="class")
    def models(self):
        return {
            "actual.sql": models__array_append_actual_sql,
            "expected.sql": models__array_append_expected_sql,
        }


class TestArrayAppend(BaseArrayAppend):
    pass
19
tests/adapter/dbt/tests/adapter/utils/test_array_concat.py
Normal file
@@ -0,0 +1,19 @@
import pytest
from dbt.tests.adapter.utils.base_array_utils import BaseArrayUtils
from dbt.tests.adapter.utils.fixture_array_concat import (
    models__array_concat_actual_sql,
    models__array_concat_expected_sql,
)


class BaseArrayConcat(BaseArrayUtils):
    @pytest.fixture(scope="class")
    def models(self):
        return {
            "actual.sql": models__array_concat_actual_sql,
            "expected.sql": models__array_concat_expected_sql,
        }


class TestArrayConcat(BaseArrayConcat):
    pass
@@ -0,0 +1,19 @@
import pytest
from dbt.tests.adapter.utils.base_array_utils import BaseArrayUtils
from dbt.tests.adapter.utils.fixture_array_construct import (
    models__array_construct_actual_sql,
    models__array_construct_expected_sql,
)


class BaseArrayConstruct(BaseArrayUtils):
    @pytest.fixture(scope="class")
    def models(self):
        return {
            "actual.sql": models__array_construct_actual_sql,
            "expected.sql": models__array_construct_expected_sql,
        }


class TestArrayConstruct(BaseArrayConstruct):
    pass
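Adapter repositories opt in to these shared array tests by subclassing the Base* classes above. A hypothetical example for some adapter's test suite (the subclass name is illustrative, not from the diff):

from dbt.tests.adapter.utils.test_array_append import BaseArrayAppend


class TestArrayAppendMyAdapter(BaseArrayAppend):
    # Models and assertions are inherited from the base class; an adapter
    # only overrides the pieces that behave differently on its warehouse.
    pass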
@@ -0,0 +1,67 @@
import pytest

from datetime import datetime
from datetime import timezone
from datetime import timedelta

from dbt.tests.util import run_dbt
from dbt.tests.util import relation_from_name


models__current_ts_sql = """
select {{ dbt.current_timestamp() }} as current_ts_column
"""


def is_aware(dt: datetime) -> bool:
    return dt.tzinfo is not None and dt.tzinfo.utcoffset(dt) is not None


def is_naive(dt: datetime) -> bool:
    return not is_aware(dt)


class BaseCurrentTimestamp:
    @pytest.fixture(scope="class")
    def models(self):
        return {
            "current_ts.sql": models__current_ts_sql,
        }

    @pytest.fixture(scope="class")
    def current_timestamp(self, project):
        run_dbt(["build"])
        relation = relation_from_name(project.adapter, "current_ts")
        result = project.run_sql(f"select current_ts_column from {relation}", fetch="one")
        sql_timestamp = result[0] if result is not None else None
        return sql_timestamp

    def test_current_timestamp_matches_utc(self, current_timestamp):
        sql_timestamp = current_timestamp
        now_utc = self.utcnow_matching_type(sql_timestamp)
        # Plenty of wiggle room if clocks aren't perfectly sync'd, etc
        tolerance = timedelta(minutes=1)
        assert (sql_timestamp > (now_utc - tolerance)) and (
            sql_timestamp < (now_utc + tolerance)
        ), f"SQL timestamp {sql_timestamp.isoformat()} is not close enough to Python UTC {now_utc.isoformat()}"

    def utcnow_matching_type(self, dt: datetime) -> datetime:
        """
        Current UTC datetime with the same timezone-awareness (or naiveness) as the input.
        """
        return datetime.now(timezone.utc) if is_aware(dt) else datetime.utcnow()


class BaseCurrentTimestampAware(BaseCurrentTimestamp):
    def test_current_timestamp_type(self, current_timestamp):
        assert is_aware(current_timestamp)


class BaseCurrentTimestampNaive(BaseCurrentTimestamp):
    def test_current_timestamp_type(self, current_timestamp):
        assert is_naive(current_timestamp)


# Use either BaseCurrentTimestampAware or BaseCurrentTimestampNaive but not both
class TestCurrentTimestamp(BaseCurrentTimestampAware):
    pass
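The aware/naive split above follows the standard library's definition: a datetime is aware only when tzinfo is set and returns a non-None UTC offset. A standalone illustration (not part of the diff):

from datetime import datetime, timezone

aware = datetime.now(timezone.utc)  # tzinfo set, utcoffset() is not None
naive = datetime.utcnow()           # tzinfo is None

assert aware.tzinfo is not None and aware.tzinfo.utcoffset(aware) is not None
assert naive.tzinfo is None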
6
tests/fixtures/jaffle_shop.py
vendored
@@ -379,8 +379,14 @@ class JaffleShopProject:
            "customers.sql": customers_sql,
            "docs.md": docs_md,
            "orders.sql": orders_sql,
+            "ignored_model1.sql": "select 1 as id",
+            "ignored_model2.sql": "select 1 as id",
            "overview.md": overview_md,
            "schema.yml": schema_yml,
+            "ignore_folder": {
+                "model1.sql": "select 1 as id",
+                "model2.sql": "select 1 as id",
+            },
            "staging": {
                "schema.yml": staging_schema_yml,
                "stg_customers.sql": staging_stg_customers_sql,
@@ -1,4 +1,4 @@
-from dbt.tests.util import run_dbt, get_manifest, run_dbt_and_capture
+from dbt.tests.util import run_dbt, get_manifest, run_dbt_and_capture, write_file

from tests.fixtures.jaffle_shop import JaffleShopProject

@@ -6,6 +6,8 @@ from tests.fixtures.jaffle_shop import JaffleShopProject

class TestBasic(JaffleShopProject):
    def test_basic(self, project):
+        # test .dbtignore works
+        write_file("models/ignore*.sql\nignore_folder", project.project_root, ".dbtignore")
        # Create the data from seeds
        results = run_dbt(["seed"])
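With that .dbtignore in place, the ignored models never enter the manifest at all. A hypothetical continuation of test_basic (not in the diff) showing the observable effect:

results = run_dbt(["run"])
# None of the built nodes should come from the ignored files or folder.
assert all("ignored" not in result.node.name for result in results)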
@@ -138,20 +138,20 @@ class TestRuntimeMaterialization:
        project,
    ):
        # initial full-refresh should have no effect
-        results = run_dbt(["run", "--full-refresh"])
+        results = run_dbt(["run", "-f"])
        assert len(results) == 3

        check_relations_equal(project.adapter, ["seed", "view", "incremental", "materialized"])

        # adds one record to the incremental model. full-refresh should truncate then re-run
        project.run_sql(invalidate_incremental_sql)
-        results = run_dbt(["run", "--full-refresh"])
+        results = run_dbt(["run", "-f"])
        assert len(results) == 3
        check_relations_equal(project.adapter, ["seed", "incremental"])

        project.run_sql(update_sql)

-        results = run_dbt(["run", "--full-refresh"])
+        results = run_dbt(["run", "-f"])
        assert len(results) == 3

        check_relations_equal(project.adapter, ["seed", "view", "incremental", "materialized"])

@@ -181,7 +181,7 @@ class TestRuntimeMaterialization:
        project.run_sql(create_incremental__dbt_tmp_sql)
        assert len(results) == 1

-        results = run_dbt(["run", "--model", "incremental", "--full-refresh"])
+        results = run_dbt(["run", "--model", "incremental", "-f"])
        assert len(results) == 1

        check_table_does_not_exist(project.adapter, "incremental__dbt_tmp")
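The tests above now exercise -f as the single-character shorthand for --full-refresh. Illustrative only: how such an alias is typically declared with argparse (dbt's actual parser wiring lives elsewhere in core and may differ):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("-f", "--full-refresh", action="store_true")

assert parser.parse_args(["-f"]).full_refresh
assert parser.parse_args(["--full-refresh"]).full_refresh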
@@ -188,6 +188,42 @@ class TestInvalidMetricMissingModel:
        run_dbt(["run"])


+invalid_metrics__missing_expression_yml = """
+version: 2
+metrics:
+  - name: number_of_people
+    label: "Number of people"
+    model: "ref(people)"
+    description: Total count of people
+    calculation_method: count
+    timestamp: created_at
+    time_grains: [day, week, month]
+    dimensions:
+      - favorite_color
+      - loves_dbt
+    meta:
+        my_meta: 'testing'
+"""
+
+
+class TestInvalidMetricMissingExpression:
+    @pytest.fixture(scope="class")
+    def models(self):
+        return {
+            "people_metrics.yml": invalid_metrics__missing_expression_yml,
+            "people.sql": models__people_sql,
+        }
+
+    # tests that we get a ParsingException with a missing expression
+    def test_simple_metric(
+        self,
+        project,
+    ):
+        # initial run
+        with pytest.raises(ParsingException):
+            run_dbt(["run"])


names_with_spaces_metrics_yml = """
version: 2
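The new test relies on pytest.raises to assert that parsing fails fast. A minimal standalone sketch of that pattern, with a stand-in error in place of dbt's ParsingException:

import pytest


def parse_metric(definition):
    # Stand-in for dbt's metric parser: an expression is required.
    if "expression" not in definition:
        raise ValueError("metrics must define an expression")


def test_missing_expression():
    with pytest.raises(ValueError):
        parse_metric({"name": "number_of_people"})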
181
tests/functional/partial_parsing/test_pp_disabled_config.py
Normal file
@@ -0,0 +1,181 @@
import pytest
from dbt.tests.util import run_dbt, write_file, get_manifest

model_one_sql = """
select 1 as fun
"""

schema1_yml = """
version: 2

models:
  - name: model_one

metrics:

  - name: number_of_people
    label: "Number of people"
    description: Total count of people
    model: "ref('model_one')"
    calculation_method: count
    expression: "*"
    timestamp: created_at
    time_grains: [day, week, month]
    dimensions:
      - favorite_color
      - loves_dbt
    meta:
        my_meta: 'testing'

exposures:
  - name: proxy_for_dashboard
    description: "My Exposure"
    type: "dashboard"
    owner:
      name: "Dashboard Tester"
      email: "tester@dashboard.com"
    depends_on:
      - ref("model_one")
"""

schema2_yml = """
version: 2

models:
  - name: model_one

metrics:

  - name: number_of_people
    label: "Number of people"
    description: Total count of people
    config:
      enabled: false
    model: "ref('model_one')"
    calculation_method: count
    expression: "*"
    timestamp: created_at
    time_grains: [day, week, month]
    dimensions:
      - favorite_color
      - loves_dbt
    meta:
        my_meta: 'testing'

exposures:
  - name: proxy_for_dashboard
    description: "My Exposure"
    config:
      enabled: false
    type: "dashboard"
    owner:
      name: "Dashboard Tester"
      email: "tester@dashboard.com"
    depends_on:
      - ref("model_one")
"""

schema3_yml = """
version: 2

models:
  - name: model_one

metrics:

  - name: number_of_people
    label: "Number of people"
    description: Total count of people
    model: "ref('model_one')"
    calculation_method: count
    expression: "*"
    timestamp: created_at
    time_grains: [day, week, month]
    dimensions:
      - favorite_color
      - loves_dbt
    meta:
        my_meta: 'testing'
"""

schema4_yml = """
version: 2

models:
  - name: model_one

exposures:
  - name: proxy_for_dashboard
    description: "My Exposure"
    config:
      enabled: false
    type: "dashboard"
    owner:
      name: "Dashboard Tester"
      email: "tester@dashboard.com"
    depends_on:
      - ref("model_one")
"""


class TestDisabled:
    @pytest.fixture(scope="class")
    def models(self):
        return {
            "model_one.sql": model_one_sql,
            "schema.yml": schema1_yml,
        }

    def test_pp_disabled(self, project):
        expected_exposure = "exposure.test.proxy_for_dashboard"
        expected_metric = "metric.test.number_of_people"

        run_dbt(["seed"])
        results = run_dbt(["run"])
        assert len(results) == 1

        manifest = get_manifest(project.project_root)
        assert expected_exposure in manifest.exposures
        assert expected_metric in manifest.metrics
        assert expected_exposure not in manifest.disabled
        assert expected_metric not in manifest.disabled

        # Update schema file with disabled metric and exposure
        write_file(schema2_yml, project.project_root, "models", "schema.yml")
        results = run_dbt(["--partial-parse", "run"])
        assert len(results) == 1
        manifest = get_manifest(project.project_root)
        assert expected_exposure not in manifest.exposures
        assert expected_metric not in manifest.metrics
        assert expected_exposure in manifest.disabled
        assert expected_metric in manifest.disabled

        # Update schema file with enabled metric and exposure
        write_file(schema1_yml, project.project_root, "models", "schema.yml")
        results = run_dbt(["--partial-parse", "run"])
        assert len(results) == 1
        manifest = get_manifest(project.project_root)
        assert expected_exposure in manifest.exposures
        assert expected_metric in manifest.metrics
        assert expected_exposure not in manifest.disabled
        assert expected_metric not in manifest.disabled

        # Update schema file - remove exposure, enable metric
        write_file(schema3_yml, project.project_root, "models", "schema.yml")
        results = run_dbt(["--partial-parse", "run"])
        assert len(results) == 1
        manifest = get_manifest(project.project_root)
        assert expected_exposure not in manifest.exposures
        assert expected_metric in manifest.metrics
        assert expected_exposure not in manifest.disabled
        assert expected_metric not in manifest.disabled

        # Update schema file - add back exposure, remove metric
        write_file(schema4_yml, project.project_root, "models", "schema.yml")
        results = run_dbt(["--partial-parse", "run"])
        assert len(results) == 1
        manifest = get_manifest(project.project_root)
        assert expected_exposure not in manifest.exposures
        assert expected_metric not in manifest.metrics
        assert expected_exposure in manifest.disabled
        assert expected_metric not in manifest.disabled
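The assertions above track one invariant across partial-parse cycles: a node lives in exactly one of the active collections or manifest.disabled, never both. A conceptual sketch of that bookkeeping (not dbt's real implementation):

def set_enabled(active, disabled, unique_id, enabled):
    # Move the node between the active and disabled collections.
    source, target = (disabled, active) if enabled else (active, disabled)
    if unique_id in source:
        target[unique_id] = source.pop(unique_id)


active = {"metric.test.number_of_people": object()}
disabled = {}
set_enabled(active, disabled, "metric.test.number_of_people", enabled=False)
assert "metric.test.number_of_people" in disabled and not active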
@@ -1,7 +1,3 @@
-import pytest
-from dbt.tests.fixtures.project import write_project_files
wrong_specification_block__schema_yml = """
version: 2
models:

@@ -1245,264 +1241,31 @@ select 1 as "Id"

"""

+alt_local_utils__macros__type_timestamp_sql = """
+{%- macro type_timestamp() -%}
+    {{ return(adapter.dispatch('type_timestamp', 'local_utils')()) }}
+{%- endmacro -%}

-@pytest.fixture(scope="class")
-def wrong_specification_block():
-    return {"schema.yml": wrong_specification_block__schema_yml}
+{% macro default__type_timestamp() %}
+    {{ return(adapter.dispatch('type_timestamp', 'dbt')()) }}
+{% endmacro %}
+"""

+macro_resolution_order_macros__my_custom_test_sql = """
+{% test my_custom_test(model) %}
+    select cast(current_timestamp as {{ dbt.type_timestamp() }})
+    limit 0
+{% endtest %}
+"""

-@pytest.fixture(scope="class")
-def test_context_where_subq_models():
-    return {
-        "schema.yml": test_context_where_subq_models__schema_yml,
-        "model_a.sql": test_context_where_subq_models__model_a_sql,
-    }
+macro_resolution_order_models__my_model_sql = """
+select 1 as id
+"""

-@pytest.fixture(scope="class")
-def test_utils():
-    return {
-        "dbt_project.yml": test_utils__dbt_project_yml,
-        "macros": {
-            "current_timestamp.sql": test_utils__macros__current_timestamp_sql,
-            "custom_test.sql": test_utils__macros__custom_test_sql,
-        },
-    }

-@pytest.fixture(scope="class")
-def local_dependency():
-    return {
-        "dbt_project.yml": local_dependency__dbt_project_yml,
-        "macros": {"equality.sql": local_dependency__macros__equality_sql},
-    }

-@pytest.fixture(scope="class")
-def case_sensitive_models():
-    return {
-        "schema.yml": case_sensitive_models__schema_yml,
-        "lowercase.sql": case_sensitive_models__lowercase_sql,
-    }

-@pytest.fixture(scope="class")
-def test_context_macros():
-    return {
-        "my_test.sql": test_context_macros__my_test_sql,
-        "test_my_datediff.sql": test_context_macros__test_my_datediff_sql,
-        "custom_schema_tests.sql": test_context_macros__custom_schema_tests_sql,
-    }

-@pytest.fixture(scope="class")
-def test_context_models_namespaced():
-    return {
-        "schema.yml": test_context_models_namespaced__schema_yml,
-        "model_c.sql": test_context_models_namespaced__model_c_sql,
-        "model_b.sql": test_context_models_namespaced__model_b_sql,
-        "model_a.sql": test_context_models_namespaced__model_a_sql,
-    }

-@pytest.fixture(scope="class")
-def macros_v2():
-    return {
-        "override_get_test_macros_fail": {
-            "get_test_sql.sql": macros_v2__override_get_test_macros_fail__get_test_sql_sql
-        },
-        "macros": {"tests.sql": macros_v2__macros__tests_sql},
-        "override_get_test_macros": {
-            "get_test_sql.sql": macros_v2__override_get_test_macros__get_test_sql_sql
-        },
-        "custom-configs": {"test.sql": macros_v2__custom_configs__test_sql},
-    }

-@pytest.fixture(scope="class")
-def test_context_macros_namespaced():
-    return {
-        "my_test.sql": test_context_macros_namespaced__my_test_sql,
-        "custom_schema_tests.sql": test_context_macros_namespaced__custom_schema_tests_sql,
-    }

-@pytest.fixture(scope="class")
-def seeds():
-    return {"some_seed.csv": seeds__some_seed_csv}

-@pytest.fixture(scope="class")
-def test_context_models():
-    return {
-        "schema.yml": test_context_models__schema_yml,
-        "model_c.sql": test_context_models__model_c_sql,
-        "model_b.sql": test_context_models__model_b_sql,
-        "model_a.sql": test_context_models__model_a_sql,
-    }

-@pytest.fixture(scope="class")
-def name_collision():
-    return {
-        "schema.yml": name_collision__schema_yml,
-        "base.sql": name_collision__base_sql,
-        "base_extension.sql": name_collision__base_extension_sql,
-    }

-@pytest.fixture(scope="class")
-def dupe_tests_collide():
-    return {
-        "schema.yml": dupe_generic_tests_collide__schema_yml,
-        "model_a.sql": dupe_generic_tests_collide__model_a,
-    }

-@pytest.fixture(scope="class")
-def custom_generic_test_config_custom_macros():
-    return {
-        "schema.yml": custom_generic_test_config_custom_macro__schema_yml,
-        "model_a.sql": custom_generic_test_config_custom_macro__model_a,
-    }

-@pytest.fixture(scope="class")
-def custom_generic_test_names():
-    return {
-        "schema.yml": custom_generic_test_names__schema_yml,
-        "model_a.sql": custom_generic_test_names__model_a,
-    }

-@pytest.fixture(scope="class")
-def custom_generic_test_names_alt_format():
-    return {
-        "schema.yml": custom_generic_test_names_alt_format__schema_yml,
-        "model_a.sql": custom_generic_test_names_alt_format__model_a,
-    }

-@pytest.fixture(scope="class")
-def test_context_where_subq_macros():
-    return {"custom_generic_test.sql": test_context_where_subq_macros__custom_generic_test_sql}

-@pytest.fixture(scope="class")
-def invalid_schema_models():
-    return {
-        "schema.yml": invalid_schema_models__schema_yml,
-        "model.sql": invalid_schema_models__model_sql,
-    }

-@pytest.fixture(scope="class")
-def all_models():
-    return {
-        "render_test_cli_arg_models": {
-            "schema.yml": models_v2__render_test_cli_arg_models__schema_yml,
-            "model.sql": models_v2__render_test_cli_arg_models__model_sql,
-        },
-        "override_get_test_models": {
-            "schema.yml": models_v2__override_get_test_models__schema_yml,
-            "my_model_warning.sql": models_v2__override_get_test_models__my_model_warning_sql,
-            "my_model_pass.sql": models_v2__override_get_test_models__my_model_pass_sql,
-            "my_model_failure.sql": models_v2__override_get_test_models__my_model_failure_sql,
-        },
-        "models": {
-            "schema.yml": models_v2__models__schema_yml,
-            "table_summary.sql": models_v2__models__table_summary_sql,
-            "table_failure_summary.sql": models_v2__models__table_failure_summary_sql,
-            "table_disabled.sql": models_v2__models__table_disabled_sql,
-            "table_failure_null_relation.sql": models_v2__models__table_failure_null_relation_sql,
-            "table_failure_copy.sql": models_v2__models__table_failure_copy_sql,
-            "table_copy.sql": models_v2__models__table_copy_sql,
-        },
-        "malformed": {
-            "schema.yml": models_v2__malformed__schema_yml,
-            "table_summary.sql": models_v2__malformed__table_summary_sql,
-            "table_copy.sql": models_v2__malformed__table_copy_sql,
-        },
-        "override_get_test_models_fail": {
-            "schema.yml": models_v2__override_get_test_models_fail__schema_yml,
-            "my_model.sql": models_v2__override_get_test_models_fail__my_model_sql,
-        },
-        "custom-configs": {
-            "schema.yml": models_v2__custom_configs__schema_yml,
-            "table_copy_another_one.sql": models_v2__custom_configs__table_copy_another_one_sql,
-            "table_copy.sql": models_v2__custom_configs__table_copy_sql,
-            "table.copy.with.dots.sql": models_v2__custom_configs__table_copy_with_dots_sql,
-        },
-        "render_test_configured_arg_models": {
-            "schema.yml": models_v2__render_test_configured_arg_models__schema_yml,
-            "model.sql": models_v2__render_test_configured_arg_models__model_sql,
-        },
-        "custom": {
-            "schema.yml": models_v2__custom__schema_yml,
-            "table_copy.sql": models_v2__custom__table_copy_sql,
-        },
-        "limit_null": {
-            "schema.yml": models_v2__limit_null__schema_yml,
-            "table_warning_limit_null.sql": models_v2__limit_null__table_warning_limit_null_sql,
-            "table_limit_null.sql": models_v2__limit_null__table_limit_null_sql,
-            "table_failure_limit_null.sql": models_v2__limit_null__table_failure_limit_null_sql,
-        },
-    }

-@pytest.fixture(scope="class")
-def local_utils():
-    return {
-        "dbt_project.yml": local_utils__dbt_project_yml,
-        "macros": {
-            "datediff.sql": local_utils__macros__datediff_sql,
-            "current_timestamp.sql": local_utils__macros__current_timestamp_sql,
-            "custom_test.sql": local_utils__macros__custom_test_sql,
-        },
-    }

-@pytest.fixture(scope="class")
-def ephemeral():
-    return {
-        "schema.yml": ephemeral__schema_yml,
-        "ephemeral.sql": ephemeral__ephemeral_sql,
-    }

-@pytest.fixture(scope="class")
-def quote_required_models():
-    return {
-        "schema.yml": quote_required_models__schema_yml,
-        "model_again.sql": quote_required_models__model_again_sql,
-        "model_noquote.sql": quote_required_models__model_noquote_sql,
-        "model.sql": quote_required_models__model_sql,
-    }

-@pytest.fixture(scope="class")
-def project_files(
-    project_root,
-    test_utils,
-    local_dependency,
-    test_context_macros,
-    macros_v2,
-    test_context_macros_namespaced,
-    seeds,
-    test_context_where_subq_macros,
-    models,
-    local_utils,
-):
-    write_project_files(project_root, "test_utils", test_utils)
-    write_project_files(project_root, "local_dependency", local_dependency)
-    write_project_files(project_root, "test-context-macros", test_context_macros)
-    write_project_files(project_root, "macros-v2", macros_v2)
-    write_project_files(
-        project_root, "test-context-macros-namespaced", test_context_macros_namespaced
-    )
-    write_project_files(project_root, "seeds", seeds)
-    write_project_files(
-        project_root, "test-context-where-subq-macros", test_context_where_subq_macros
-    )
-    write_project_files(project_root, "models", models)
-    write_project_files(project_root, "local_utils", local_utils)

+macro_resolution_order_models__config_yml = """
+version: 2
+models:
+  - name: my_model
+    tests:
+      - my_custom_test
+"""
@@ -5,31 +5,94 @@ import re
from dbt.tests.util import run_dbt, write_file
from dbt.tests.fixtures.project import write_project_files
from tests.fixtures.dbt_integration_project import dbt_integration_project  # noqa: F401
-from tests.functional.schema_tests.fixtures import (  # noqa: F401
-    wrong_specification_block,
-    test_context_where_subq_models,
-    test_utils,
-    local_dependency,
-    case_sensitive_models,
-    test_context_macros,
-    test_context_models_namespaced,
-    macros_v2,
-    test_context_macros_namespaced,
-    seeds,
-    test_context_models,
-    name_collision,
-    dupe_tests_collide,
-    custom_generic_test_config_custom_macros,
-    custom_generic_test_names,
-    custom_generic_test_names_alt_format,
-    test_context_where_subq_macros,
-    invalid_schema_models,
-    all_models,
-    local_utils,
-    ephemeral,
-    quote_required_models,
-    project_files,
+from tests.functional.schema_tests.fixtures import (
+    wrong_specification_block__schema_yml,
+    test_context_where_subq_models__schema_yml,
+    test_context_where_subq_models__model_a_sql,
+    test_utils__dbt_project_yml,
+    test_utils__macros__current_timestamp_sql,
+    test_utils__macros__custom_test_sql,
+    local_dependency__dbt_project_yml,
+    local_dependency__macros__equality_sql,
+    case_sensitive_models__schema_yml,
+    case_sensitive_models__lowercase_sql,
+    test_context_macros__my_test_sql,
+    test_context_macros__test_my_datediff_sql,
+    test_context_macros__custom_schema_tests_sql,
+    test_context_models_namespaced__schema_yml,
+    test_context_models_namespaced__model_c_sql,
+    test_context_models_namespaced__model_b_sql,
+    test_context_models_namespaced__model_a_sql,
+    macros_v2__override_get_test_macros_fail__get_test_sql_sql,
+    macros_v2__macros__tests_sql,
+    macros_v2__custom_configs__test_sql,
+    macros_v2__override_get_test_macros__get_test_sql_sql,
+    test_context_macros_namespaced__my_test_sql,
+    test_context_macros_namespaced__custom_schema_tests_sql,
+    seeds__some_seed_csv,
+    test_context_models__schema_yml,
+    test_context_models__model_c_sql,
+    test_context_models__model_b_sql,
+    test_context_models__model_a_sql,
+    name_collision__schema_yml,
+    name_collision__base_sql,
+    name_collision__base_extension_sql,
+    dupe_generic_tests_collide__schema_yml,
+    dupe_generic_tests_collide__model_a,
+    custom_generic_test_config_custom_macro__schema_yml,
+    custom_generic_test_config_custom_macro__model_a,
+    custom_generic_test_names__schema_yml,
+    custom_generic_test_names__model_a,
+    custom_generic_test_names_alt_format__schema_yml,
+    custom_generic_test_names_alt_format__model_a,
+    test_context_where_subq_macros__custom_generic_test_sql,
+    invalid_schema_models__schema_yml,
+    invalid_schema_models__model_sql,
+    models_v2__models__schema_yml,
+    models_v2__models__table_summary_sql,
+    models_v2__models__table_failure_summary_sql,
+    models_v2__models__table_disabled_sql,
+    models_v2__models__table_failure_null_relation_sql,
+    models_v2__models__table_failure_copy_sql,
+    models_v2__models__table_copy_sql,
+    models_v2__limit_null__schema_yml,
+    models_v2__limit_null__table_warning_limit_null_sql,
+    models_v2__limit_null__table_limit_null_sql,
+    models_v2__limit_null__table_failure_limit_null_sql,
+    models_v2__override_get_test_models__schema_yml,
+    models_v2__override_get_test_models__my_model_warning_sql,
+    models_v2__override_get_test_models__my_model_pass_sql,
+    models_v2__override_get_test_models__my_model_failure_sql,
+    models_v2__override_get_test_models_fail__schema_yml,
+    models_v2__override_get_test_models_fail__my_model_sql,
+    models_v2__malformed__schema_yml,
+    models_v2__malformed__table_summary_sql,
+    models_v2__malformed__table_copy_sql,
+    models_v2__custom_configs__schema_yml,
+    models_v2__custom_configs__table_copy_another_one_sql,
+    models_v2__custom_configs__table_copy_sql,
+    models_v2__custom_configs__table_copy_with_dots_sql,
+    models_v2__custom__schema_yml,
+    models_v2__custom__table_copy_sql,
+    models_v2__render_test_cli_arg_models__schema_yml,
+    models_v2__render_test_cli_arg_models__model_sql,
+    models_v2__render_test_configured_arg_models__schema_yml,
+    models_v2__render_test_configured_arg_models__model_sql,
+    local_utils__dbt_project_yml,
+    local_utils__macros__datediff_sql,
+    local_utils__macros__current_timestamp_sql,
+    local_utils__macros__custom_test_sql,
+    ephemeral__schema_yml,
+    ephemeral__ephemeral_sql,
+    quote_required_models__schema_yml,
+    quote_required_models__model_again_sql,
+    quote_required_models__model_noquote_sql,
+    quote_required_models__model_sql,
+    case_sensitive_models__uppercase_SQL,
+    macro_resolution_order_macros__my_custom_test_sql,
+    macro_resolution_order_models__config_yml,
+    macro_resolution_order_models__my_model_sql,
+    alt_local_utils__macros__type_timestamp_sql,
)
from dbt.exceptions import ParsingException, CompilationException
from dbt.contracts.results import TestStatus
@@ -42,8 +105,16 @@ class TestSchemaTests:
        project.run_sql_file(os.path.join(project.test_data_dir, "seed_failure.sql"))

    @pytest.fixture(scope="class")
-    def models(self, all_models):  # noqa: F811
-        return all_models["models"]
+    def models(self):
+        return {
+            "schema.yml": models_v2__models__schema_yml,
+            "table_summary.sql": models_v2__models__table_summary_sql,
+            "table_failure_summary.sql": models_v2__models__table_failure_summary_sql,
+            "table_disabled.sql": models_v2__models__table_disabled_sql,
+            "table_failure_null_relation.sql": models_v2__models__table_failure_null_relation_sql,
+            "table_failure_copy.sql": models_v2__models__table_failure_copy_sql,
+            "table_copy.sql": models_v2__models__table_copy_sql,
+        }

    def assertTestFailed(self, result):
        assert result.status == "fail"

@@ -117,8 +188,13 @@ class TestLimitedSchemaTests:
        project.run_sql_file(os.path.join(project.test_data_dir, "seed.sql"))

    @pytest.fixture(scope="class")
-    def models(self, all_models):  # noqa: F811
-        return all_models["limit_null"]
+    def models(self):
+        return {
+            "schema.yml": models_v2__limit_null__schema_yml,
+            "table_warning_limit_null.sql": models_v2__limit_null__table_warning_limit_null_sql,
+            "table_limit_null.sql": models_v2__limit_null__table_limit_null_sql,
+            "table_failure_limit_null.sql": models_v2__limit_null__table_failure_limit_null_sql,
+        }

    def assertTestFailed(self, result):
        assert result.status == "fail"

@@ -163,8 +239,13 @@ class TestLimitedSchemaTests:
class TestDefaultBoolType:
    # test with default True/False in get_test_sql macro
    @pytest.fixture(scope="class")
-    def models(self, all_models):  # noqa: F811
-        return all_models["override_get_test_models"]
+    def models(self):
+        return {
+            "schema.yml": models_v2__override_get_test_models__schema_yml,
+            "my_model_warning.sql": models_v2__override_get_test_models__my_model_warning_sql,
+            "my_model_pass.sql": models_v2__override_get_test_models__my_model_pass_sql,
+            "my_model_failure.sql": models_v2__override_get_test_models__my_model_failure_sql,
+        }

    def assertTestFailed(self, result):
        assert result.status == "fail"
@@ -207,10 +288,24 @@ class TestDefaultBoolType:


class TestOtherBoolType:
+    @pytest.fixture(scope="class", autouse=True)
+    def setUp(self, project_root):
+        macros_v2_file = {
+            "override_get_test_macros": {
+                "get_test_sql.sql": macros_v2__override_get_test_macros__get_test_sql_sql
+            },
+        }
+        write_project_files(project_root, "macros-v2", macros_v2_file)
+
    # test with expected 0/1 in custom get_test_sql macro
    @pytest.fixture(scope="class")
-    def models(self, all_models):  # noqa: F811
-        return all_models["override_get_test_models"]
+    def models(self):
+        return {
+            "schema.yml": models_v2__override_get_test_models__schema_yml,
+            "my_model_warning.sql": models_v2__override_get_test_models__my_model_warning_sql,
+            "my_model_pass.sql": models_v2__override_get_test_models__my_model_pass_sql,
+            "my_model_failure.sql": models_v2__override_get_test_models__my_model_failure_sql,
+        }

    @pytest.fixture(scope="class")
    def project_config_update(self):

@@ -260,10 +355,22 @@ class TestOtherBoolType:


class TestNonBoolType:
+    @pytest.fixture(scope="class", autouse=True)
+    def setUp(self, project_root):
+        macros_v2_file = {
+            "override_get_test_macros_fail": {
+                "get_test_sql.sql": macros_v2__override_get_test_macros_fail__get_test_sql_sql
+            },
+        }
+        write_project_files(project_root, "macros-v2", macros_v2_file)
+
    # test with invalid 'x'/'y' in custom get_test_sql macro
    @pytest.fixture(scope="class")
-    def models(self, all_models):  # noqa: F811
-        return all_models["override_get_test_models_fail"]
+    def models(self):
+        return {
+            "schema.yml": models_v2__override_get_test_models_fail__schema_yml,
+            "my_model.sql": models_v2__override_get_test_models_fail__my_model_sql,
+        }

    @pytest.fixture(scope="class")
    def project_config_update(self):
@@ -291,8 +398,12 @@ class TestMalformedSchemaTests:
        project.run_sql_file(os.path.join(project.test_data_dir, "seed.sql"))

    @pytest.fixture(scope="class")
-    def models(self, all_models):  # noqa: F811
-        return all_models["malformed"]
+    def models(self):
+        return {
+            "schema.yml": models_v2__malformed__schema_yml,
+            "table_summary.sql": models_v2__malformed__table_summary_sql,
+            "table_copy.sql": models_v2__malformed__table_copy_sql,
+        }

    def test_malformed_schema_will_break_run(
        self,

@@ -303,14 +414,22 @@ class TestMalformedSchemaTests:


class TestCustomConfigSchemaTests:
-    @pytest.fixture(scope="class")
-    def models(self, all_models):  # noqa: F811
-        return all_models["custom-configs"]
-
    @pytest.fixture(scope="class", autouse=True)
-    def setUp(self, project):
+    def setUp(self, project, project_root):
        project.run_sql_file(os.path.join(project.test_data_dir, "seed.sql"))

+        macros_v2_file = {"custom-configs": {"test.sql": macros_v2__custom_configs__test_sql}}
+        write_project_files(project_root, "macros-v2", macros_v2_file)
+
+    @pytest.fixture(scope="class")
+    def models(self):
+        return {
+            "schema.yml": models_v2__custom_configs__schema_yml,
+            "table_copy_another_one.sql": models_v2__custom_configs__table_copy_another_one_sql,
+            "table_copy.sql": models_v2__custom_configs__table_copy_sql,
+            "table.copy.with.dots.sql": models_v2__custom_configs__table_copy_with_dots_sql,
+        }

    @pytest.fixture(scope="class")
    def project_config_update(self):
        return {
@@ -333,8 +452,11 @@ class TestCustomConfigSchemaTests:

class TestHooksInTests:
    @pytest.fixture(scope="class")
-    def models(self, ephemeral):  # noqa: F811
-        return ephemeral
+    def models(self):
+        return {
+            "schema.yml": ephemeral__schema_yml,
+            "ephemeral.sql": ephemeral__ephemeral_sql,
+        }

    @pytest.fixture(scope="class")
    def project_config_update(self):

@@ -359,8 +481,11 @@ class TestHooksInTests:

class TestHooksForWhich:
    @pytest.fixture(scope="class")
-    def models(self, ephemeral):  # noqa: F811
-        return ephemeral
+    def models(self):
+        return {
+            "schema.yml": ephemeral__schema_yml,
+            "ephemeral.sql": ephemeral__ephemeral_sql,
+        }

    @pytest.fixture(scope="class")
    def project_config_update(self):
@@ -393,6 +518,17 @@ class TestCustomSchemaTests:
        write_project_files(project_root, "dbt_integration_project", dbt_integration_project)
        project.run_sql_file(os.path.join(project.test_data_dir, "seed.sql"))

+        local_dependency_files = {
+            "dbt_project.yml": local_dependency__dbt_project_yml,
+            "macros": {"equality.sql": local_dependency__macros__equality_sql},
+        }
+        write_project_files(project_root, "local_dependency", local_dependency_files)
+
+        macros_v2_file = {
+            "macros": {"tests.sql": macros_v2__macros__tests_sql},
+        }
+        write_project_files(project_root, "macros-v2", macros_v2_file)
+
    @pytest.fixture(scope="class")
    def packages(self):
        return {

@@ -417,8 +553,11 @@ class TestCustomSchemaTests:
        }

    @pytest.fixture(scope="class")
-    def models(self, all_models):  # noqa: F811
-        return all_models["custom"]
+    def models(self):
+        return {
+            "schema.yml": models_v2__custom__schema_yml,
+            "table_copy.sql": models_v2__custom__table_copy_sql,
+        }

    def test_schema_tests(
        self,

@@ -443,8 +582,13 @@ class TestCustomSchemaTests:

class TestQuotedSchemaTestColumns:
    @pytest.fixture(scope="class")
-    def models(self, quote_required_models):  # noqa: F811
-        return quote_required_models
+    def models(self):
+        return {
+            "schema.yml": quote_required_models__schema_yml,
+            "model_again.sql": quote_required_models__model_again_sql,
+            "model_noquote.sql": quote_required_models__model_noquote_sql,
+            "model.sql": quote_required_models__model_sql,
+        }

    def test_quote_required_column(
        self,
@@ -465,9 +609,19 @@ class TestQuotedSchemaTestColumns:


class TestCliVarsSchemaTests:
+    @pytest.fixture(scope="class", autouse=True)
+    def setUp(self, project_root):
+        macros_v2_file = {
+            "macros": {"tests.sql": macros_v2__macros__tests_sql},
+        }
+        write_project_files(project_root, "macros-v2", macros_v2_file)
+
    @pytest.fixture(scope="class")
-    def models(self, all_models):  # noqa: F811
-        return all_models["render_test_cli_arg_models"]
+    def models(self):
+        return {
+            "schema.yml": models_v2__render_test_cli_arg_models__schema_yml,
+            "model.sql": models_v2__render_test_cli_arg_models__model_sql,
+        }

    @pytest.fixture(scope="class")
    def project_config_update(self):

@@ -488,9 +642,19 @@ class TestCliVarsSchemaTests:


class TestConfiguredVarsSchemaTests:
+    @pytest.fixture(scope="class", autouse=True)
+    def setUp(self, project_root):
+        macros_v2_file = {
+            "macros": {"tests.sql": macros_v2__macros__tests_sql},
+        }
+        write_project_files(project_root, "macros-v2", macros_v2_file)
+
    @pytest.fixture(scope="class")
-    def models(self, all_models):  # noqa: F811
-        return all_models["render_test_configured_arg_models"]
+    def models(self):
+        return {
+            "schema.yml": models_v2__render_test_configured_arg_models__schema_yml,
+            "model.sql": models_v2__render_test_configured_arg_models__model_sql,
+        }

    @pytest.fixture(scope="class")
    def project_config_update(self):

@@ -512,8 +676,11 @@ class TestConfiguredVarsSchemaTests:

class TestSchemaCaseInsensitive:
    @pytest.fixture(scope="class")
-    def models(self, case_sensitive_models):  # noqa: F811
-        return case_sensitive_models
+    def models(self):
+        return {
+            "schema.yml": case_sensitive_models__schema_yml,
+            "lowercase.sql": case_sensitive_models__lowercase_sql,
+        }

    @pytest.fixture(scope="class", autouse=True)
    def setUP(self, project):
@@ -541,9 +708,33 @@ class TestSchemaCaseInsensitive:


class TestSchemaTestContext:
+    @pytest.fixture(scope="class", autouse=True)
+    def setUp(self, project_root):
+        local_utils_files = {
+            "dbt_project.yml": local_utils__dbt_project_yml,
+            "macros": {
+                "datediff.sql": local_utils__macros__datediff_sql,
+                "current_timestamp.sql": local_utils__macros__current_timestamp_sql,
+                "custom_test.sql": local_utils__macros__custom_test_sql,
+            },
+        }
+        write_project_files(project_root, "local_utils", local_utils_files)
+
+        test_context_macros_files = {
+            "my_test.sql": test_context_macros__my_test_sql,
+            "test_my_datediff.sql": test_context_macros__test_my_datediff_sql,
+            "custom_schema_tests.sql": test_context_macros__custom_schema_tests_sql,
+        }
+        write_project_files(project_root, "test-context-macros", test_context_macros_files)
+
    @pytest.fixture(scope="class")
-    def models(self, test_context_models):  # noqa: F811
-        return test_context_models
+    def models(self):
+        return {
+            "schema.yml": test_context_models__schema_yml,
+            "model_c.sql": test_context_models__model_c_sql,
+            "model_b.sql": test_context_models__model_b_sql,
+            "model_a.sql": test_context_models__model_a_sql,
+        }

    @pytest.fixture(scope="class")
    def project_config_update(self):

@@ -557,10 +748,7 @@ class TestSchemaTestContext:
    def packages(self):
        return {"packages": [{"local": "local_utils"}]}

-    def test_test_context_tests(
-        self,
-        project,
-    ):
+    def test_test_context_tests(self, project):
        # This test checks that the TestContext and TestMacroNamespace
        # are working correctly
        run_dbt(["deps"])
@@ -586,9 +774,43 @@ class TestSchemaTestContext:


class TestSchemaTestContextWithMacroNamespace:
+    @pytest.fixture(scope="class", autouse=True)
+    def setUp(self, project_root):
+        test_utils_files = {
+            "dbt_project.yml": test_utils__dbt_project_yml,
+            "macros": {
+                "current_timestamp.sql": test_utils__macros__current_timestamp_sql,
+                "custom_test.sql": test_utils__macros__custom_test_sql,
+            },
+        }
+        write_project_files(project_root, "test_utils", test_utils_files)
+
+        local_utils_files = {
+            "dbt_project.yml": local_utils__dbt_project_yml,
+            "macros": {
+                "datediff.sql": local_utils__macros__datediff_sql,
+                "current_timestamp.sql": local_utils__macros__current_timestamp_sql,
+                "custom_test.sql": local_utils__macros__custom_test_sql,
+            },
+        }
+        write_project_files(project_root, "local_utils", local_utils_files)
+
+        test_context_macros_namespaced_file = {
+            "my_test.sql": test_context_macros_namespaced__my_test_sql,
+            "custom_schema_tests.sql": test_context_macros_namespaced__custom_schema_tests_sql,
+        }
+        write_project_files(
+            project_root, "test-context-macros-namespaced", test_context_macros_namespaced_file
+        )
+
    @pytest.fixture(scope="class")
-    def models(self, test_context_models_namespaced):  # noqa: F811
-        return test_context_models_namespaced
+    def models(self):
+        return {
+            "schema.yml": test_context_models_namespaced__schema_yml,
+            "model_c.sql": test_context_models_namespaced__model_c_sql,
+            "model_b.sql": test_context_models_namespaced__model_b_sql,
+            "model_a.sql": test_context_models_namespaced__model_a_sql,
+        }

    @pytest.fixture(scope="class")
    def project_config_update(self):
@@ -640,8 +862,12 @@ class TestSchemaTestContextWithMacroNamespace:

class TestSchemaTestNameCollision:
    @pytest.fixture(scope="class")
-    def models(self, name_collision):  # noqa: F811
-        return name_collision
+    def models(self):
+        return {
+            "schema.yml": name_collision__schema_yml,
+            "base.sql": name_collision__base_sql,
+            "base_extension.sql": name_collision__base_extension_sql,
+        }

    def test_collision_test_names_get_hash(
        self,

@@ -666,8 +892,11 @@ class TestSchemaTestNameCollision:

class TestGenericTestsCollide:
    @pytest.fixture(scope="class")
-    def models(self, dupe_tests_collide):  # noqa: F811
-        return dupe_tests_collide
+    def models(self):
+        return {
+            "schema.yml": dupe_generic_tests_collide__schema_yml,
+            "model_a.sql": dupe_generic_tests_collide__model_a,
+        }

    def test_generic_test_collision(
        self,

@@ -681,8 +910,11 @@ class TestGenericTestsCollide:

class TestGenericTestsConfigCustomMacros:
    @pytest.fixture(scope="class")
-    def models(self, custom_generic_test_config_custom_macros):  # noqa: F811
-        return custom_generic_test_config_custom_macros
+    def models(self):
+        return {
+            "schema.yml": custom_generic_test_config_custom_macro__schema_yml,
+            "model_a.sql": custom_generic_test_config_custom_macro__model_a,
+        }

    def test_generic_test_config_custom_macros(
        self,

@@ -696,8 +928,11 @@ class TestGenericTestsConfigCustomMacros:

class TestGenericTestsCustomNames:
    @pytest.fixture(scope="class")
-    def models(self, custom_generic_test_names):  # noqa: F811
-        return custom_generic_test_names
+    def models(self):
+        return {
+            "schema.yml": custom_generic_test_names__schema_yml,
+            "model_a.sql": custom_generic_test_names__model_a,
+        }

    # users can define custom names for specific instances of generic tests
    def test_generic_tests_with_custom_names(

@@ -723,8 +958,11 @@ class TestGenericTestsCustomNames:

class TestGenericTestsCustomNamesAltFormat(TestGenericTestsCustomNames):
    @pytest.fixture(scope="class")
-    def models(self, custom_generic_test_names_alt_format):  # noqa: F811
-        return custom_generic_test_names_alt_format
+    def models(self):
+        return {
+            "schema.yml": custom_generic_test_names_alt_format__schema_yml,
+            "model_a.sql": custom_generic_test_names_alt_format__model_a,
+        }

    # exactly as above, just alternative format for yaml definition
    def test_collision_test_names_get_hash(
@@ -738,8 +976,11 @@ class TestGenericTestsCustomNamesAltFormat(TestGenericTestsCustomNames):

class TestInvalidSchema:
    @pytest.fixture(scope="class")
-    def models(self, invalid_schema_models):  # noqa: F811
-        return invalid_schema_models
+    def models(self):
+        return {
+            "schema.yml": invalid_schema_models__schema_yml,
+            "model.sql": invalid_schema_models__model_sql,
+        }

    def test_invalid_schema_file(
        self,

@@ -752,8 +993,12 @@ class TestInvalidSchema:

class TestWrongSpecificationBlock:
    @pytest.fixture(scope="class")
-    def models(self, wrong_specification_block):  # noqa: F811
-        return wrong_specification_block
+    def models(self):
+        return {"schema.yml": wrong_specification_block__schema_yml}
+
+    @pytest.fixture(scope="class")
+    def seeds(self):
+        return {"some_seed.csv": seeds__some_seed_csv}

    def test_wrong_specification_block(
        self,

@@ -777,9 +1022,21 @@ class TestWrongSpecificationBlock:


class TestSchemaTestContextWhereSubq:
+    @pytest.fixture(scope="class", autouse=True)
+    def setUp(self, project_root):
+        test_context_where_subq_macros_file = {
+            "custom_generic_test.sql": test_context_where_subq_macros__custom_generic_test_sql
+        }
+        write_project_files(
+            project_root, "test-context-where-subq-macros", test_context_where_subq_macros_file
+        )
+
    @pytest.fixture(scope="class")
-    def models(self, test_context_where_subq_models):  # noqa: F811
-        return test_context_where_subq_models
+    def models(self):
+        return {
+            "schema.yml": test_context_where_subq_models__schema_yml,
+            "model_a.sql": test_context_where_subq_models__model_a_sql,
+        }

    @pytest.fixture(scope="class")
    def project_config_update(self):
@@ -799,3 +1056,52 @@ class TestSchemaTestContextWhereSubq:

        results = run_dbt(["test"])
        assert len(results) == 1
+
+
+class TestCustomSchemaTestMacroResolutionOrder:
+    @pytest.fixture(scope="class", autouse=True)
+    def setUp(self, project_root):
+        alt_local_utils_file = {
+            "dbt_project.yml": local_utils__dbt_project_yml,
+            "macros": {
+                "datediff.sql": alt_local_utils__macros__type_timestamp_sql,
+            },
+        }
+        write_project_files(project_root, "alt_local_utils", alt_local_utils_file)
+
+        macros_resolution_order_file = {
+            "my_custom_test.sql": macro_resolution_order_macros__my_custom_test_sql,
+        }
+        write_project_files(
+            project_root, "macro_resolution_order_macros", macros_resolution_order_file
+        )
+
+    @pytest.fixture(scope="class")
+    def models(self):
+        return {
+            "schema.yml": macro_resolution_order_models__config_yml,
+            "my_model.sql": macro_resolution_order_models__my_model_sql,
+        }
+
+    @pytest.fixture(scope="class")
+    def project_config_update(self):
+        return {
+            "config-version": 2,
+            "macro-paths": ["macro_resolution_order_macros"],
+        }
+
+    @pytest.fixture(scope="class")
+    def packages(self):
+        return {"packages": [{"local": "alt_local_utils"}]}
+
+    def test_macro_resolution_test_namespace(
+        self,
+        project,
+    ):
+        # https://github.com/dbt-labs/dbt-core/issues/5720
+        # Previously, macros called as 'dbt.some_macro' would not correctly
+        # resolve to 'some_macro' from the 'dbt' namespace during static analysis,
+        # if 'some_macro' also existed in an installed package,
+        # leading to the macro being missing in the TestNamespace
+        run_dbt(["deps"])
+        run_dbt(["parse"])
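The regression test above pins down macro namespace resolution. A conceptual sketch of the lookup it exercises (not dbt's actual resolver): a call written with an explicit 'dbt.' prefix must resolve inside the dbt namespace, even when an installed package defines a macro with the same name:

def resolve_macro(namespace, name, macros):
    # macros maps (package, macro_name) -> unique_id
    try:
        return macros[(namespace, name)]
    except KeyError:
        raise KeyError(f"{namespace}.{name} not found in scope")


macros = {
    ("dbt", "type_timestamp"): "macro.dbt.type_timestamp",
    ("local_utils", "type_timestamp"): "macro.local_utils.type_timestamp",
}
# The package macro must not shadow the explicitly-qualified dbt macro:
assert resolve_macro("dbt", "type_timestamp", macros) == "macro.dbt.type_timestamp"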