forked from repo-mirrors/dbt-core

Compare commits (40 Commits): feature/ad ... jerco/pyth
Commit SHA1s:

93572b9291, 3562637984, 17aca39e1c, 59744f18bb, f1326f526c, 834ac716fd, 0487b96098, dbd36f06e4,
38ada8a68e, e58edaab2d, c202e005cd, 8129862b3c, 4e8aa007cf, fe88bfabbf, 5328a64df2, 87c9974be1,
f3f509da92, 5e8dcec2c5, 56783446db, 207cc0383d, 49ecd6a6a4, c109f39d82, fd778dceb5, e402241e0e,
a6c37c948d, fd886cb7dd, b089a471b7, ae294b643b, 0bd6df0d1b, 7b1d61c956, 646a0c704f, bbf4fc30a5,
6baaa2bcb0, 13a595722a, 3680b6ad0e, 4c29d48d1c, e00eb9aa3a, f5a94fc774, b98af4ce17, b0f8d3d2f1
.changes/unreleased/Docs-20220920-152040.yaml  (new file, +7)
@@ -0,0 +1,7 @@
kind: Docs
body: Refer to exposures by their label by default.
time: 2022-09-20T15:20:40.652948-05:00
custom:
  Author: emmyoop
  Issue: "306"
  PR: "307"

.changes/unreleased/Features-20220716-142116.yaml  (new file, +7)
@@ -0,0 +1,7 @@
kind: Features
body: merge_exclude_columns for incremental materialization
time: 2022-07-16T14:21:16.592519-05:00
custom:
  Author: dave-connors-3
  Issue: "5260"
  PR: "5457"

.changes/unreleased/Features-20220825-195023.yaml  (new file, +7)
@@ -0,0 +1,7 @@
kind: Features
body: Search current working directory for `profiles.yml`
time: 2022-08-25T19:50:23.940417-06:00
custom:
  Author: dbeatty10
  Issue: "5411"
  PR: "5717"

.changes/unreleased/Features-20220908-123650.yaml  (new file, +7)
@@ -0,0 +1,7 @@
kind: Features
body: Flags work with new Click CLI
time: 2022-09-08T12:36:50.386978-05:00
custom:
  Author: iknox-fa
  Issue: "5529"
  PR: "5790"

.changes/unreleased/Features-20220909-204643.yaml  (new file, +7)
@@ -0,0 +1,7 @@
kind: Features
body: Add metadata env method to ProviderContext class
time: 2022-09-09T20:46:43.889302+01:00
custom:
  Author: jared-rimmer
  Issue: "5522"
  PR: "5794"

.changes/unreleased/Features-20220912-222227.yaml  (new file, +7)
@@ -0,0 +1,7 @@
kind: Features
body: Array macros
time: 2022-09-12T22:22:27.475515-06:00
custom:
  Author: graciegoheen dbeatty10
  Issue: "5520"
  PR: "5823"

.changes/unreleased/Features-20220919-112903.yaml  (new file, +7)
@@ -0,0 +1,7 @@
kind: Features
body: add -fr flag shorthand
time: 2022-09-19T11:29:03.774678-05:00
custom:
  Author: dave-connors-3
  Issue: "5878"
  PR: "5879"

.changes/unreleased/Features-20220919-231414.yaml  (new file, +7)
@@ -0,0 +1,7 @@
kind: Features
body: add type_boolean as a data type macro
time: 2022-09-19T23:14:14.9871+01:00
custom:
  Author: jpmmcneill
  Issue: "5739"
  PR: "5875"

.changes/unreleased/Features-20220921-145222.yaml  (new file, +7)
@@ -0,0 +1,7 @@
kind: Features
body: Support .dbtignore in project root to ignore certain files being read by dbt
time: 2022-09-21T14:52:22.131627-07:00
custom:
  Author: ChenyuLInx
  Issue: "5733"
  PR: "5897"

.changes/unreleased/Features-20220926-130627.yaml  (new file, +9)
@@ -0,0 +1,9 @@
kind: Features
body: This conditionally no-ops warehouse connection at compile depending on an env
  var, disabling introspection/queries during compilation only. This is a temporary
  solution to more complex permissions requirements for the semantic layer.
time: 2022-09-26T13:06:27.591061-05:00
custom:
  Author: racheldaniel
  Issue: "5936"
  PR: "5926"

.changes/unreleased/Fixes-20220822-194238.yaml  (new file, +7)
@@ -0,0 +1,7 @@
kind: Fixes
body: Fix typos of comments in core/dbt/adapters/
time: 2022-08-22T19:42:38.593923+09:00
custom:
  Author: yoiki
  Issue: "5690"
  PR: "5693"

.changes/unreleased/Fixes-20220916-091723.yaml  (new file, +7)
@@ -0,0 +1,7 @@
kind: Fixes
body: Prevent event_history from holding references
time: 2022-09-16T09:17:23.273847-04:00
custom:
  Author: gshank
  Issue: "5848"
  PR: "5858"

.changes/unreleased/Fixes-20220920-181856.yaml  (new file, +7)
@@ -0,0 +1,7 @@
kind: Fixes
body: ConfigSelectorMethod should check for bools
time: 2022-09-20T18:18:56.630628+01:00
custom:
  Author: danielcmessias
  Issue: "5890"
  PR: "5889"

.changes/unreleased/Fixes-20220922-083926.yaml  (new file, +7)
@@ -0,0 +1,7 @@
kind: Fixes
body: shorthand for full refresh should be one character
time: 2022-09-22T08:39:26.948671-05:00
custom:
  Author: dave-connors-3
  Issue: "5878"
  PR: "5908"

.changes/unreleased/Fixes-20220923-143226.yaml  (new file, +7)
@@ -0,0 +1,7 @@
kind: Fixes
body: Fix macro resolution order during static analysis for custom generic tests
time: 2022-09-23T14:32:26.857376+02:00
custom:
  Author: jtcohen6
  Issue: "5720"
  PR: "5907"

.changes/unreleased/Fixes-20220923-174504.yaml  (new file, +7)
@@ -0,0 +1,7 @@
kind: Fixes
body: Fix race condition when invoking dbt via lib.py concurrently
time: 2022-09-23T17:45:04.405026-04:00
custom:
  Author: drewbanin
  Issue: "5919"
  PR: "5921"

.changes/unreleased/Under the Hood-20220912-190341.yaml  (new file, +7)
@@ -0,0 +1,7 @@
kind: Under the Hood
body: remove key as reserved keyword from test_bool_or
time: 2022-09-12T19:03:41.481601+02:00
custom:
  Author: sdebruyn
  Issue: "5817"
  PR: "5818"

.changes/unreleased/Under the Hood-20220913-111744.yaml  (new file, +7)
@@ -0,0 +1,7 @@
kind: Under the Hood
body: Compatibiltiy for metric attribute renaming
time: 2022-09-13T11:17:44.953536+02:00
custom:
  Author: jtcohen6 callum-mcdata
  Issue: "5807"
  PR: "5825"

.changes/unreleased/Under the Hood-20220914-132632.yaml  (new file, +7)
@@ -0,0 +1,7 @@
kind: Under the Hood
body: Add name validation for metrics
time: 2022-09-14T13:26:32.387524-05:00
custom:
  Author: emmyoop
  Issue: "5456"
  PR: "5841"

.changes/unreleased/Under the Hood-20220914-150058.yaml  (new file, +7)
@@ -0,0 +1,7 @@
kind: Under the Hood
body: Validate exposure name and add label
time: 2022-09-14T15:00:58.982982-05:00
custom:
  Author: emmyoop
  Issue: "5606"
  PR: "5844"

.changes/unreleased/Under the Hood-20220914-193933.yaml  (new file, +7)
@@ -0,0 +1,7 @@
kind: Under the Hood
body: remove source quoting setting in adapter tests
time: 2022-09-14T19:39:33.688385+02:00
custom:
  Author: sdebruyn
  Issue: "5836"
  PR: "5839"

.changes/unreleased/Under the Hood-20220916-154712.yaml  (new file, +7)
@@ -0,0 +1,7 @@
kind: Under the Hood
body: Adding validation for metric expression attribute
time: 2022-09-16T15:47:12.799002-05:00
custom:
  Author: callum-mcdata
  Issue: "5871"
  PR: "5873"

.changes/unreleased/Under the Hood-20220920-144842.yaml  (new file, +7)
@@ -0,0 +1,7 @@
kind: Under the Hood
body: Profiling and Adapter Management work with Click CLI
time: 2022-09-20T14:48:42.070256-05:00
custom:
  Author: iknox-fa
  Issue: "5531"
  PR: "5892"

.changes/unreleased/Under the Hood-20220923-133525.yaml  (new file, +7)
@@ -0,0 +1,7 @@
kind: Under the Hood
body: Reparse references to deleted metric
time: 2022-09-23T13:35:25.681656-04:00
custom:
  Author: gshank
  Issue: "5444"
  PR: "5920"
@@ -44,7 +44,7 @@ custom:
footerFormat: |
  {{- $contributorDict := dict }}
  {{- /* any names added to this list should be all lowercase for later matching purposes */}}
  {{- $core_team := list "emmyoop" "nathaniel-may" "gshank" "leahwicz" "chenyulinx" "stu-k" "iknox-fa" "versusfacit" "mcknight-42" "jtcohen6" "dependabot[bot]" "snyk-bot" }}
  {{- $core_team := list "peterallenwebb" "emmyoop" "nathaniel-may" "gshank" "leahwicz" "chenyulinx" "stu-k" "iknox-fa" "versusfacit" "mcknight-42" "jtcohen6" "dependabot[bot]" "snyk-bot" }}
  {{- range $change := .Changes }}
  {{- $authorList := splitList " " $change.Custom.Author }}
  {{- /* loop through all authors for a PR */}}
.github/workflows/jira-transition.yml  (vendored, +3)
@@ -15,6 +15,9 @@ on:
  issues:
    types: [closed, deleted, reopened]

# no special access is needed
permissions: read-all

jobs:
  call-label-action:
    uses: dbt-labs/jira-actions/.github/workflows/jira-transition.yml@main

.github/workflows/release.yml  (vendored, +3)
@@ -20,6 +20,9 @@ on:
      description: 'The release version number (i.e. 1.0.0b1)'
      required: true

permissions:
  contents: write # this is the permission that allows creating a new release

defaults:
  run:
    shell: bash

.github/workflows/schema-check.yml  (vendored, +3)
@@ -21,6 +21,9 @@ on:
      - "*.latest"
      - "releases/*"

# no special access is needed
permissions: read-all

env:
  LATEST_SCHEMA_PATH: ${{ github.workspace }}/new_schemas
  SCHEMA_DIFF_ARTIFACT: ${{ github.workspace }}//schema_schanges.txt

.github/workflows/stale.yml  (vendored, +4)
@@ -3,6 +3,10 @@ on:
  schedule:
    - cron: "30 1 * * *"

permissions:
  issues: write
  pull-requests: write

jobs:
  stale:
    runs-on: ubuntu-latest

.github/workflows/version-bump.yml  (vendored, +4)
@@ -20,6 +20,10 @@ on:
      description: 'The version number to bump to (ex. 1.2.0, 1.3.0b1)'
      required: true

permissions:
  contents: write
  pull-requests: write

jobs:
  bump:
    runs-on: ubuntu-latest

.gitignore  (vendored, +1)
@@ -95,6 +95,7 @@ venv/

# vscode
.vscode/
*.code-workspace

# poetry
pyproject.toml
@@ -6,7 +6,7 @@ exclude: ^test/

# Force all unspecified python hooks to run python 3.8
default_language_version:
  python: python3.8
  python: python3

repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
@@ -12,6 +12,7 @@ class Column:
        "TIMESTAMP": "TIMESTAMP",
        "FLOAT": "FLOAT",
        "INTEGER": "INT",
        "BOOLEAN": "BOOLEAN",
    }
    column: str
    dtype: str
@@ -1220,7 +1220,6 @@ class BaseAdapter(metaclass=AdapterMeta):
    def default_python_submission_method(self) -> str:
        raise NotImplementedError("default_python_submission_method is not specified")

    @available.parse_none
    @log_code_execution
    def submit_python_job(self, parsed_model: dict, compiled_code: str) -> AdapterResponse:
        submission_method = parsed_model["config"].get(
@@ -384,7 +384,7 @@ class RelationsCache:
        relation = self.relations.pop(old_key)
        new_key = new_relation.key()

        # relaton has to rename its innards, so it needs the _CachedRelation.
        # relation has to rename its innards, so it needs the _CachedRelation.
        relation.rename(new_relation)
        # update all the relations that refer to it
        for cached in self.relations.values():
@@ -1,23 +1,17 @@
import threading
from pathlib import Path
from contextlib import contextmanager
from importlib import import_module
from typing import Type, Dict, Any, List, Optional, Set
from pathlib import Path
from typing import Any, Dict, List, Optional, Set, Type

from dbt.exceptions import RuntimeException, InternalException
from dbt.include.global_project import (
    PACKAGE_PATH as GLOBAL_PROJECT_PATH,
    PROJECT_NAME as GLOBAL_PROJECT_NAME,
)
from dbt.adapters.base.plugin import AdapterPlugin
from dbt.adapters.protocol import AdapterConfig, AdapterProtocol, RelationProtocol
from dbt.contracts.connection import AdapterRequiredConfig, Credentials
from dbt.events.functions import fire_event
from dbt.events.types import AdapterImportError, PluginLoadError
from dbt.contracts.connection import Credentials, AdapterRequiredConfig
from dbt.adapters.protocol import (
    AdapterProtocol,
    AdapterConfig,
    RelationProtocol,
)
from dbt.adapters.base.plugin import AdapterPlugin

from dbt.exceptions import InternalException, RuntimeException
from dbt.include.global_project import PACKAGE_PATH as GLOBAL_PROJECT_PATH
from dbt.include.global_project import PROJECT_NAME as GLOBAL_PROJECT_NAME

Adapter = AdapterProtocol
@@ -64,7 +58,7 @@ class AdapterContainer:
            # if we failed to import the target module in particular, inform
            # the user about it via a runtime error
            if exc.name == "dbt.adapters." + name:
                fire_event(AdapterImportError(exc=exc))
                fire_event(AdapterImportError(exc=str(exc)))
                raise RuntimeException(f"Could not find adapter type {name}!")
            # otherwise, the error had to have come from some underlying
            # library. Log the stack trace.
@@ -217,3 +211,12 @@ def get_adapter_package_names(name: Optional[str]) -> List[str]:

def get_adapter_type_names(name: Optional[str]) -> List[str]:
    return FACTORY.get_adapter_type_names(name)


@contextmanager
def adapter_management():
    reset_adapters()
    try:
        yield
    finally:
        cleanup_connections()
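The new `adapter_management()` context manager pairs adapter setup with connection cleanup, and the Click CLI attaches it to the command context via `ctx.with_resource(adapter_management())` (see the `cli` group later in this diff). A minimal sketch of the same pattern, using stand-in functions rather than dbt's real `reset_adapters`/`cleanup_connections`:

```python
from contextlib import contextmanager


def reset_adapters():
    print("resetting adapter registry")   # stand-in for dbt's real reset_adapters()


def cleanup_connections():
    print("closing adapter connections")  # stand-in for dbt's real cleanup_connections()


@contextmanager
def adapter_management():
    reset_adapters()
    try:
        yield
    finally:
        cleanup_connections()


# Used directly, or handed to Click via ctx.with_resource() so cleanup runs when the CLI exits.
with adapter_management():
    print("run work that needs adapters here")
```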
@@ -88,7 +88,7 @@ class AdapterProtocol( # type: ignore[misc]
    ],
):
    # N.B. Technically these are ClassVars, but mypy doesn't support putting type vars in a
    # ClassVar due to the restirctiveness of PEP-526
    # ClassVar due to the restrictiveness of PEP-526
    # See: https://github.com/python/mypy/issues/5144
    AdapterSpecificConfigs: Type[AdapterConfig_T]
    Column: Type[Column_T]
core/dbt/cli/flags.py  (new file, +44)
@@ -0,0 +1,44 @@
# TODO Move this to /core/dbt/flags.py when we're ready to break things
import os
from dataclasses import dataclass
from multiprocessing import get_context
from pprint import pformat as pf

from click import get_current_context

if os.name != "nt":
    # https://bugs.python.org/issue41567
    import multiprocessing.popen_spawn_posix  # type: ignore  # noqa: F401


@dataclass(frozen=True)
class Flags:
    def __init__(self, ctx=None) -> None:

        if ctx is None:
            ctx = get_current_context()

        def assign_params(ctx):
            """Recursively adds all click params to flag object"""
            for param_name, param_value in ctx.params.items():
                # N.B. You have to use the base MRO method (object.__setattr__) to set attributes
                # when using frozen dataclasses.
                # https://docs.python.org/3/library/dataclasses.html#frozen-instances
                if hasattr(self, param_name):
                    raise Exception(f"Duplicate flag names found in click command: {param_name}")
                object.__setattr__(self, param_name.upper(), param_value)
            if ctx.parent:
                assign_params(ctx.parent)

        assign_params(ctx)

        # Hard coded flags
        object.__setattr__(self, "WHICH", ctx.info_name)
        object.__setattr__(self, "MP_CONTEXT", get_context("spawn"))

        # Support console DO NOT TRACK initiave
        if os.getenv("DO_NOT_TRACK", "").lower() in (1, "t", "true", "y", "yes"):
            object.__setattr__(self, "ANONYMOUS_USAGE_STATS", False)

    def __str__(self) -> str:
        return str(pf(self.__dict__))
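`Flags` walks the Click context chain and freezes every parameter into an upper-cased attribute. A small sketch of how a command callback could build and read it, assuming the module is importable as `dbt.cli.flags` (as in this branch); the group, command, and options here are illustrative, not dbt's real CLI:

```python
import click

from dbt.cli.flags import Flags  # the new module added above


@click.group()
@click.option("--use-colors/--no-use-colors", default=True)
def cli(**kwargs):
    pass


@cli.command("run")
@click.option("--fail-fast/--no-fail-fast", default=False)
def run(**kwargs):
    flags = Flags()               # reads the current Click context
    click.echo(flags.FAIL_FAST)   # this command's own param
    click.echo(flags.USE_COLORS)  # group-level params are folded in via ctx.parent
    click.echo(flags.WHICH)       # "run": the invoked command's info_name


if __name__ == "__main__":
    cli()  # e.g. `python demo.py --no-use-colors run --fail-fast`
```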
@@ -1,44 +1,70 @@
import click
from dbt.cli import params as p
import sys
import inspect  # This is temporary for RAT-ing
from copy import copy
from pprint import pformat as pf  # This is temporary for RAT-ing

# This is temporary for RAT-ing
import inspect
from pprint import pformat as pf
import click
from dbt.adapters.factory import adapter_management
from dbt.cli import params as p
from dbt.cli.flags import Flags
from dbt.profiler import profiler


def cli_runner():
    # Alias "list" to "ls"
    ls = copy(cli.commands["list"])
    ls.hidden = True
    cli.add_command(ls, "ls")

    # Run the cli
    cli()


# dbt
@click.group(
    context_settings={"help_option_names": ["-h", "--help"]},
    invoke_without_command=True,
    no_args_is_help=True,
    epilog="Specify one of these sub-commands and you can find more help from there.",
)
@click.pass_context
@p.version
@p.anonymous_usage_stats
@p.cache_selected_only
@p.debug
@p.enable_legacy_logger
@p.event_buffer_size
@p.fail_fast
@p.log_cache_events
@p.log_format
@p.macro_debugging
@p.partial_parse
@p.print
@p.printer_width
@p.quiet
@p.send_anonymous_usage_stats
@p.record_timing_info
@p.static_parser
@p.use_colors
@p.use_experimental_parser
@p.version
@p.version_check
@p.warn_error
@p.write_json
@p.event_buffer_size
@p.record_timing
def cli(ctx, **kwargs):
    """An ELT tool for managing your SQL transformations and data models.
    For more documentation on these commands, visit: docs.getdbt.com
    """
    if kwargs.get("version", False):
    incomplete_flags = Flags()

    # Profiling
    if incomplete_flags.RECORD_TIMING_INFO:
        ctx.with_resource(profiler(enable=True, outfile=incomplete_flags.RECORD_TIMING_INFO))

    # Adapter management
    ctx.with_resource(adapter_management())

    # Version info
    if incomplete_flags.VERSION:
        click.echo(f"`version` called\n ctx.params: {pf(ctx.params)}")
        sys.exit()
        return
    else:
        del ctx.params["version"]
@@ -46,26 +72,43 @@ def cli(ctx, **kwargs):
# dbt build
@cli.command("build")
@click.pass_context
@p.defer
@p.exclude
@p.fail_fast
@p.full_refresh
@p.indirect_selection
@p.log_path
@p.models
@p.profile
@p.profiles_dir
@p.project_dir
@p.selector
@p.show
@p.state
@p.store_failures
@p.target
@p.target_path
@p.threads
@p.vars
@p.version_check
def build(ctx, **kwargs):
    """Run all Seeds, Models, Snapshots, and tests in DAG order"""
    click.echo(
        f"`{inspect.stack()[0][3]}` called\n kwargs: {kwargs}\n ctx: {pf(ctx.parent.params)}"
    )
    flags = Flags()
    click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {flags}")


# dbt clean
@cli.command("clean")
@click.pass_context
@p.project_dir
@p.profiles_dir
@p.profile
@p.profiles_dir
@p.project_dir
@p.target
@p.vars
def clean(ctx, **kwargs):
    """Delete all folders in the clean-targets list (usually the dbt_packages and target directories.)"""
    click.echo(
        f"`{inspect.stack()[0][3]}` called\n kwargs: {kwargs}\n ctx: {pf(ctx.parent.params)}"
    )
    flags = Flags()
    click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {flags}")


# dbt docs
@@ -78,86 +121,82 @@ def docs(ctx, **kwargs):
# dbt docs generate
@docs.command("generate")
@click.pass_context
@p.version_check
@p.project_dir
@p.profiles_dir
@p.profile
@p.target
@p.vars
@p.compile_docs
@p.defer
@p.threads
@p.target_path
@p.exclude
@p.log_path
@p.models
@p.exclude
@p.profile
@p.profiles_dir
@p.project_dir
@p.selector
@p.state
@p.target
@p.target_path
@p.threads
@p.vars
@p.version_check
def docs_generate(ctx, **kwargs):
    """Generate the documentation website for your project"""
    click.echo(
        f"`{inspect.stack()[0][3]}` called\n kwargs: {kwargs}\n ctx: {pf(ctx.parent.parent.params)}"
    )
    flags = Flags()
    click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {flags}")


# dbt docs serve
@docs.command("serve")
@click.pass_context
@p.project_dir
@p.profiles_dir
@p.browser
@p.port
@p.profile
@p.profiles_dir
@p.project_dir
@p.target
@p.vars
@p.port
@p.browser
def docs_serve(ctx, **kwargs):
    """Serve the documentation website for your project"""
    click.echo(
        f"`{inspect.stack()[0][3]}` called\n kwargs: {kwargs}\n ctx: {pf(ctx.parent.parent.params)}"
    )
    flags = Flags()
    click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {flags}")


# dbt compile
@cli.command("compile")
@click.pass_context
@p.version_check
@p.project_dir
@p.profiles_dir
@p.profile
@p.target
@p.vars
@p.parse_only
@p.threads
@p.target_path
@p.defer
@p.exclude
@p.full_refresh
@p.log_path
@p.models
@p.exclude
@p.parse_only
@p.profile
@p.profiles_dir
@p.project_dir
@p.selector
@p.state
@p.defer
@p.full_refresh
@p.target
@p.target_path
@p.threads
@p.vars
@p.version_check
def compile(ctx, **kwargs):
    """Generates executable SQL from source, model, test, and analysis files. Compiled SQL files are written to the target/ directory."""
    click.echo(
        f"`{inspect.stack()[0][3]}` called\n kwargs: {kwargs}\n ctx: {pf(ctx.parent.params)}"
    )
    flags = Flags()
    click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {flags}")


# dbt debug
@cli.command("debug")
@click.pass_context
@p.version_check
@p.project_dir
@p.profiles_dir
@p.config_dir
@p.profile
@p.profiles_dir
@p.project_dir
@p.target
@p.vars
@p.config_dir
@p.version_check
def debug(ctx, **kwargs):
    """Show some helpful information about dbt for debugging. Not to be confused with the --debug option which increases verbosity."""
    click.echo(
        f"`{inspect.stack()[0][3]}` called\n kwargs: {kwargs}\n ctx: {pf(ctx.parent.params)}"
    )
    flags = Flags()
    click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {flags}")


# dbt deps
@@ -170,9 +209,8 @@ def debug(ctx, **kwargs):
@p.vars
def deps(ctx, **kwargs):
    """Pull the most recent version of the dependencies listed in packages.yml"""
    click.echo(
        f"`{inspect.stack()[0][3]}` called\n kwargs: {kwargs}\n ctx: {pf(ctx.parent.params)}"
    )
    flags = Flags()
    click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {flags}")


# dbt init
@@ -181,147 +219,139 @@ def deps(ctx, **kwargs):
@p.profile
@p.profiles_dir
@p.project_dir
@p.skip_profile_setup
@p.target
@p.vars
@p.skip_profile_setup
def init(ctx, **kwargs):
    """Initialize a new DBT project."""
    click.echo(
        f"`{inspect.stack()[0][3]}` called\n kwargs: {kwargs}\n ctx: {pf(ctx.parent.params)}"
    )
    flags = Flags()
    click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {flags}")


# dbt list
# dbt TODO: Figure out aliasing for ls (or just c/p?)
@cli.command("list")
@click.pass_context
@p.exclude
@p.indirect_selection
@p.models
@p.output
@p.output_keys
@p.profile
@p.profiles_dir
@p.project_dir
@p.target
@p.vars
@p.output
@p.ouptut_keys
@p.resource_type
@p.models
@p.indirect_selection
@p.exclude
@p.selector
@p.state
@p.target
@p.vars
def list(ctx, **kwargs):
    """List the resources in your project"""
    click.echo(
        f"`{inspect.stack()[0][3]}` called\n kwargs: {kwargs}\n ctx: {pf(ctx.parent.params)}"
    )
    flags = Flags()
    click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {flags}")


# dbt parse
@cli.command("parse")
@click.pass_context
@p.compile_parse
@p.log_path
@p.profile
@p.profiles_dir
@p.project_dir
@p.target
@p.vars
@p.write_manifest
@p.compile_parse
@p.threads
@p.target_path
@p.log_path
@p.threads
@p.vars
@p.version_check
@p.write_manifest
def parse(ctx, **kwargs):
    """Parses the project and provides information on performance"""
    click.echo(
        f"`{inspect.stack()[0][3]}` called\n kwargs: {kwargs}\n ctx: {pf(ctx.parent.params)}"
    )
    flags = Flags()
    click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {flags}")


# dbt run
@cli.command("run")
@click.pass_context
@p.defer
@p.exclude
@p.fail_fast
@p.version_check
@p.full_refresh
@p.log_path
@p.models
@p.profile
@p.profiles_dir
@p.project_dir
@p.target
@p.vars
@p.log_path
@p.target_path
@p.threads
@p.models
@p.exclude
@p.selector
@p.state
@p.defer
@p.full_refresh
@p.target
@p.target_path
@p.threads
@p.vars
@p.version_check
def run(ctx, **kwargs):
    """Compile SQL and execute against the current target database."""
    click.echo(
        f"`{inspect.stack()[0][3]}` called\n kwargs: {kwargs}\n ctx: {pf(ctx.parent.params)}"
    )
    flags = Flags()
    click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {flags}")


# dbt run operation
@cli.command("run-operation")
@click.pass_context
@p.args
@p.profile
@p.profiles_dir
@p.project_dir
@p.target
@p.vars
@p.args
def run_operation(ctx, **kwargs):
    """Run the named macro with any supplied arguments."""
    click.echo(
        f"`{inspect.stack()[0][3]}` called\n kwargs: {kwargs}\n ctx: {pf(ctx.parent.params)}"
    )
    flags = Flags()
    click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {flags}")


# dbt seed
@cli.command("seed")
@click.pass_context
@p.version_check
@p.exclude
@p.full_refresh
@p.log_path
@p.models
@p.profile
@p.profiles_dir
@p.project_dir
@p.selector
@p.show
@p.state
@p.target
@p.vars
@p.full_refresh
@p.log_path
@p.target_path
@p.threads
@p.models
@p.exclude
@p.selector
@p.state
@p.show
@p.vars
@p.version_check
def seed(ctx, **kwargs):
    """Load data from csv files into your data warehouse."""
    click.echo(
        f"`{inspect.stack()[0][3]}` called\n kwargs: {kwargs}\n ctx: {pf(ctx.parent.params)}"
    )
    flags = Flags()
    click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {flags}")


# dbt snapshot
@cli.command("snapshot")
@click.pass_context
@p.defer
@p.exclude
@p.models
@p.profile
@p.profiles_dir
@p.project_dir
@p.target
@p.vars
@p.threads
@p.models
@p.exclude
@p.selector
@p.state
@p.defer
@p.target
@p.threads
@p.vars
def snapshot(ctx, **kwargs):
    """Execute snapshots defined in your project"""
    click.echo(
        f"`{inspect.stack()[0][3]}` called\n kwargs: {kwargs}\n ctx: {pf(ctx.parent.params)}"
    )
    flags = Flags()
    click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {flags}")


# dbt source
@@ -334,51 +364,49 @@ def source(ctx, **kwargs):
# dbt source freshness
@source.command("freshness")
@click.pass_context
@p.exclude
@p.models
@p.output_path  # TODO: Is this ok to re-use? We have three different output params, how much can we consolidate?
@p.profile
@p.profiles_dir
@p.project_dir
@p.target
@p.vars
@p.threads
@p.models
@p.exclude
@p.selector
@p.state
@p.output_path  # TODO: Is this ok to re-use? We have three different output params, how much can we consolidate?
@p.target
@p.threads
@p.vars
def freshness(ctx, **kwargs):
    """Snapshots the current freshness of the project's sources"""
    click.echo(
        f"`{inspect.stack()[0][3]}` called\n kwargs: {kwargs}\n ctx: {pf(ctx.parent.parent.params)}"
    )
    flags = Flags()
    click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {flags}")


# dbt test
@cli.command("test")
@click.pass_context
@p.defer
@p.exclude
@p.fail_fast
@p.version_check
@p.store_failures
@p.indirect_selection
@p.log_path
@p.models
@p.profile
@p.profiles_dir
@p.project_dir
@p.target
@p.vars
@p.indirect_selection
@p.log_path
@p.target_path
@p.threads
@p.models
@p.exclude
@p.selector
@p.state
@p.defer
@p.store_failures
@p.target
@p.target_path
@p.threads
@p.vars
@p.version_check
def test(ctx, **kwargs):
    """Runs tests on data in deployed models. Run this after `dbt run`"""
    click.echo(
        f"`{inspect.stack()[0][3]}` called\n kwargs: {kwargs}\n ctx: {pf(ctx.parent.params)}"
    )
    flags = Flags()
    click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {flags}")


# Support running as a module
if __name__ == "__main__":
    cli()
    cli_runner()
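`cli_runner()` registers a hidden `ls` alias for the `list` command before invoking the group. A self-contained sketch of that aliasing pattern with a toy Click group (names are illustrative):

```python
from copy import copy

import click


@click.group()
def cli():
    """Toy group standing in for the dbt cli group."""


@cli.command("list")
def list_cmd():
    click.echo("listing resources")


# Same trick as cli_runner(): copy the command, hide the copy, register it as "ls".
ls = copy(cli.commands["list"])
ls.hidden = True
cli.add_command(ls, "ls")

if __name__ == "__main__":
    cli()  # `ls` now behaves exactly like `list` but stays out of --help
```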
core/dbt/cli/option_types.py  (new file, +33)
@@ -0,0 +1,33 @@
from click import ParamType
import yaml


class YAML(ParamType):
    """The Click YAML type. Converts YAML strings into objects."""

    name = "YAML"

    def convert(self, value, param, ctx):
        # assume non-string values are a problem
        if not isinstance(value, str):
            self.fail(f"Cannot load YAML from type {type(value)}", param, ctx)
        try:
            return yaml.load(value, Loader=yaml.Loader)
        except yaml.parser.ParserError:
            self.fail(f"String '{value}' is not valid YAML", param, ctx)


class Truthy(ParamType):
    """The Click Truthy type. Converts strings into a "truthy" type"""

    name = "TRUTHY"

    def convert(self, value, param, ctx):
        # assume non-string / non-None values are a problem
        if not isinstance(value, (str, None)):
            self.fail(f"Cannot load TRUTHY from type {type(value)}", param, ctx)

        if value is None or value.lower() in ("0", "false", "f"):
            return None
        else:
            return value
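A quick sketch of how the new `YAML` param type behaves once attached to an option such as `--vars` or `--args`; the command and default value below are illustrative:

```python
import click

from dbt.cli.option_types import YAML  # the new param type defined above


@click.command()
@click.option("--vars", type=YAML(), default="{}")
def show_vars(vars):
    # Click has already parsed the YAML string into a Python object here.
    click.echo(type(vars).__name__)
    click.echo(vars)


if __name__ == "__main__":
    # `python demo.py --vars '{my_variable: my_value}'` prints: dict / {'my_variable': 'my_value'}
    show_vars()
```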
@@ -1,56 +1,58 @@
import click
import yaml
from pathlib import Path, PurePath
from click import ParamType

import click
from dbt.cli.option_types import YAML
from dbt.cli.resolvers import default_project_dir, default_profiles_dir


class YAML(ParamType):
    """The Click YAML type. Converts YAML strings into objects."""

    name = "YAML"

    def convert(self, value, param, ctx):
        # assume non-string values are a problem
        if not isinstance(value, str):
            self.fail(f"Cannot load YAML from type {type(value)}", param, ctx)
        try:
            return yaml.load(value, Loader=yaml.Loader)
        except yaml.parser.ParserError:
            self.fail(f"String '{value}' is not valid YAML", param, ctx)

# TODO: The name (reflected in flags) is a correction!
# The original name was `SEND_ANONYMOUS_USAGE_STATS` and used an env var called "DBT_SEND_ANONYMOUS_USAGE_STATS"
# Both of which break existing naming conventions (doesn't match param flag).
# This will need to be fixed before use in the main codebase and communicated as a change to the community!
anonymous_usage_stats = click.option(
    "--anonymous-usage-stats/--no-anonymous-usage-stats",
    envvar="DBT_ANONYMOUS_USAGE_STATS",
    help="Send anonymous usage stats to dbt Labs.",
    default=True,
)

args = click.option(
    "--args",
    envvar=None,
    help="Supply arguments to the macro. This dictionary will be mapped to the keyword arguments defined in the selected macro. This argument should be a YAML string, eg. '{my_variable: my_value}'",
    type=YAML(),
)

browser = click.option(
    "--browser/--no-browser",
    envvar=None,
    help="Wether or not to open a local web browser after starting the server",
    default=True,
)

cache_selected_only = click.option(
    "--cache-selected-only/--no-cache-selected-only",
    envvar="DBT_CACHE_SELECTED_ONLY",
    help="Pre cache database objects relevant to selected resource only.",
    default=False,
)

compile_docs = click.option(
    "--compile/--no-compile",
    envvar=None,
    help="Wether or not to run 'dbt compile' as part of docs generation",
    default=True,
)

compile_parse = click.option(
    "--compile/--no-compile",
    envvar=None,
    help="TODO: No help text currently available",
    default=True,
)

config_dir = click.option(
    "--config-dir",
    envvar=None,
    help="If specified, DBT will show path information for this project",
    type=click.STRING,
)
@@ -58,44 +60,68 @@ config_dir = click.option(
debug = click.option(
    "--debug/--no-debug",
    "-d/ ",
    envvar="DBT_DEBUG",
    help="Display debug logging during dbt execution. Useful for debugging and making bug reports.",
    default=False,
)

# TODO: The env var and name (reflected in flags) are corrections!
# The original name was `DEFER_MODE` and used an env var called "DBT_DEFER_TO_STATE"
# Both of which break existing naming conventions.
# This will need to be fixed before use in the main codebase and communicated as a change to the community!
defer = click.option(
    "--defer/--no-defer",
    envvar="DBT_DEFER",
    help="If set, defer to the state variable for resolving unselected nodes.",
    default=True,
)

enable_legacy_logger = click.option(
    "--enable-legacy-logger/--no-enable-legacy-logger",
    envvar="DBT_ENABLE_LEGACY_LOGGER",
    hidden=True,
)

event_buffer_size = click.option(
    "--event-buffer-size",
    envvar="DBT_EVENT_BUFFER_SIZE",
    help="Sets the max number of events to buffer in EVENT_HISTORY.",
    default=100000,
    type=click.INT,
)

exclude = click.option("--exclude", help="Specify the nodes to exclude.")
exclude = click.option("--exclude", envvar=None, help="Specify the nodes to exclude.")

fail_fast = click.option(
    "--fail-fast/--no-fail-fast", "-x/ ", help="Stop execution on first failure.", default=False
    "--fail-fast/--no-fail-fast",
    "-x/ ",
    envvar="DBT_FAIL_FAST",
    help="Stop execution on first failure.",
)

full_refresh = click.option(
    "--full-refresh",
    "-f",
    envvar="DBT_FULL_REFRESH",
    help="If specified, dbt will drop incremental models and fully-recalculate the incremental table from the model definition.",
    is_flag=True,
)

indirect_selection = click.option(
    "--indirect_selection",
    "--indirect-selection",
    envvar="DBT_INDIRECT_SELECTION",
    help="Select all tests that are adjacent to selected resources, even if they those resources have been explicitly selected.",
    type=click.Choice(["eager", "cautious"], case_sensitive=False),
    default="eager",
)

log_cache_events = click.option(
    "--log-cache-events/--no-log-cache-events",
    help="Enable verbose adapter cache logging.",
    envvar="DBT_LOG_CACHE_EVENTS",
)

log_format = click.option(
    "--log-format",
    envvar="DBT_LOG_FORMAT",
    help="Specify the log format, overriding the command's default.",
    type=click.Choice(["text", "json", "default"], case_sensitive=False),
    default="default",
@@ -103,28 +129,42 @@ log_format = click.option(

log_path = click.option(
    "--log-path",
    envvar="DBT_LOG_PATH",
    help="Configure the 'log-path'. Only applies this setting for the current run. Overrides the 'DBT_LOG_PATH' if it is set.",
    type=click.Path(),
)

models = click.option("-m", "-s", help="Specify the nodes to include.", multiple=True)
macro_debugging = click.option(
    "--macro-debugging/--no-macro-debugging",
    envvar="DBT_MACRO_DEBUGGING",
    hidden=True,
)

models = click.option(
    "-m",
    "-s",
    "models",
    envvar=None,
    help="Specify the nodes to include.",
    multiple=True,
)

output = click.option(
    "--output",
    envvar=None,
    help="TODO: No current help text",
    type=click.Choice(["json", "name", "path", "selector"], case_sensitive=False),
    default="name",
)

ouptut_keys = click.option(
    "--output-keys",
    help="TODO: No current help text",
    default=False,
output_keys = click.option(
    "--output-keys", envvar=None, help="TODO: No current help text", type=click.STRING
)

output_path = click.option(
    "--output",
    "-o",
    envvar=None,
    help="Specify the output path for the json report. By default, outputs to 'target/sources.json'",
    type=click.Path(file_okay=True, dir_okay=False, writable=True),
    default=PurePath.joinpath(Path.cwd(), "target/sources.json"),
@@ -132,65 +172,84 @@ output_path = click.option(

parse_only = click.option(
    "--parse-only",
    envvar=None,
    help="TODO: No help text currently available",
    is_flag=True,
)

partial_parse = click.option(
    "--partial-parse/--no-partial-parse",
    envvar="DBT_PARTIAL_PARSE",
    help="Allow for partial parsing by looking for and writing to a pickle file in the target directory. This overrides the user configuration file.",
    default=True,
)

port = click.option(
    "--port", help="Specify the port number for the docs server", default=8080, type=click.INT
    "--port",
    envvar=None,
    help="Specify the port number for the docs server",
    default=8080,
    type=click.INT,
)

# TODO: The env var and name (reflected in flags) are corrections!
# The original name was `NO_PRINT` and used the env var `DBT_NO_PRINT`.
# Both of which break existing naming conventions.
# This will need to be fixed before use in the main codebase and communicated as a change to the community!
print = click.option(
    "--print/--no-print", help="Output all {{ print() }} macro calls.", default=True
    "--print/--no-print",
    envvar="DBT_PRINT",
    help="Output all {{ print() }} macro calls.",
    default=True,
)

printer_width = click.option(
    "--printer_width", help="Sets the width of terminal output", type=click.INT, default=80
    "--printer-width",
    envvar="DBT_PRINTER_WIDTH",
    help="Sets the width of terminal output",
    type=click.INT,
    default=80,
)

profile = click.option(
    "--profile",
    envvar=None,
    help="Which profile to load. Overrides setting in dbt_project.yml.",
)

profiles_dir = click.option(
    "--profiles-dir",
    help=f"Which directory to look in for the profiles.yml file. Default = {PurePath.joinpath(Path.home(), '.dbt')}",
    default=PurePath.joinpath(Path.home(), ".dbt"),
    type=click.Path(
        exists=True,
    ),
    envvar="DBT_PROFILES_DIR",
    help="Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/",
    default=default_profiles_dir(),
    type=click.Path(exists=True),
)

project_dir = click.option(
    "--project-dir",
    envvar=None,
    help="Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.",
    default=Path.cwd(),
    default=default_project_dir(),
    type=click.Path(exists=True),
)

quiet = click.option(
    "--quiet/--no-quiet",
    envvar="DBT_QUIET",
    help="Suppress all non-error logging to stdout. Does not affect {{ print() }} macro calls.",
    default=False,
)

record_timing = click.option(
    "-r",
record_timing_info = click.option(
    "--record-timing-info",
    "-r",
    envvar=None,
    help="When this option is passed, dbt will output low-level timing stats to the specified file. Example: `--record-timing-info output.profile`",
    is_flag=True,
    default=False,
    type=click.Path(exists=False),
)

resource_type = click.option(
    "--resource-type",
    envvar=None,
    help="TODO: No current help text",
    type=click.Choice(
        [
@@ -210,50 +269,63 @@ resource_type = click.option(
    default="default",
)

selector = click.option("--selector", help="The selector name to use, as defined in selectors.yml")

send_anonymous_usage_stats = click.option(
    "--anonymous-usage-stats/--no-anonymous-usage-stats",
    help="Send anonymous usage stats to dbt Labs.",
    default=True,
selector = click.option(
    "--selector", envvar=None, help="The selector name to use, as defined in selectors.yml"
)

show = click.option(
    "--show",
    help="Show a sample of the loaded data in the terminal",
    default=False,
    "--show", envvar=None, help="Show a sample of the loaded data in the terminal", is_flag=True
)

skip_profile_setup = click.option(
    "--skip-profile-setup",
    "-s",
    help="Skip interative profile setup.",
    default=False,
    "--skip-profile-setup", "-s", envvar=None, help="Skip interative profile setup.", is_flag=True
)

# TODO: The env var and name (reflected in flags) are corrections!
# The original name was `ARTIFACT_STATE_PATH` and used the env var `DBT_ARTIFACT_STATE_PATH`.
# Both of which break existing naming conventions.
# This will need to be fixed before use in the main codebase and communicated as a change to the community!
state = click.option(
    "--state",
    envvar="DBT_STATE",
    help="If set, use the given directory as the source for json files to compare with this project.",
    type=click.Path(
        dir_okay=True,
        exists=True,
        file_okay=False,
        readable=True,
        resolve_path=True,
    ),
)

static_parser = click.option(
    "--static-parser/--no-static-parser", help="Use the static parser.", default=True
    "--static-parser/--no-static-parser",
    envvar="DBT_STATIC_PARSER",
    help="Use the static parser.",
    default=True,
)

store_failures = click.option(
    "--store-failures", help="Store test results (failing rows) in the database", default=False
    "--store-failures",
    envvar="DBT_STORE_FAILURES",
    help="Store test results (failing rows) in the database",
    is_flag=True,
)

target = click.option("-t", "--target", help="Which target to load for the given profile")
target = click.option(
    "--target", "-t", envvar=None, help="Which target to load for the given profile"
)

target_path = click.option(
    "--target-path",
    envvar="DBT_TARGET_PATH",
    help="Configure the 'target-path'. Only applies this setting for the current run. Overrides the 'DBT_TARGET_PATH' if it is set.",
    type=click.Path(),
)

threads = click.option(
    "--threads",
    envvar=None,
    help="Specify number of threads to use while executing models. Overrides settings in profiles.yml.",
    default=1,
    type=click.INT,
@@ -261,44 +333,54 @@ threads = click.option(

use_colors = click.option(
    "--use-colors/--no-use-colors",
    envvar="DBT_USE_COLORS",
    help="Output is colorized by default and may also be set in a profile or at the command line.",
    default=True,
)

use_experimental_parser = click.option(
    "--use-experimental-parser/--no-use-experimental-parser",
    envvar="DBT_USE_EXPERIMENTAL_PARSER",
    help="Enable experimental parsing features.",
    default=False,
)

vars = click.option(
    "--vars",
    envvar=None,
    help="Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, eg. '{my_variable: my_value}'",
    type=YAML(),
)

version = click.option("--version", help="Show version information", is_flag=True, default=False)
version = click.option(
    "--version",
    envvar=None,
    help="Show version information",
    is_flag=True,
)

version_check = click.option(
    "--version-check/--no-version-check",
    envvar="DBT_VERSION_CHECK",
    help="Ensure dbt's version matches the one specified in the dbt_project.yml file ('require-dbt-version')",
    default=True,
)

warn_error = click.option(
    "--warn-error/--no-warn-error",
    envvar="DBT_WARN_ERROR",
    help="If dbt would normally warn, instead raise an exception. Examples include --models that selects nothing, deprecations, configurations with no associated models, invalid test configurations, and missing sources/refs in tests.",
    default=False,
)

write_json = click.option(
    "--write-json/--no-write-json",
    envvar="DBT_WRITE_JSON",
    help="Writing the manifest and run_results.json files to disk",
    default=True,
)

write_manifest = click.option(
    "--write-manifest/--no-write-manifest",
    envvar=None,
    help="TODO: No help text currently available",
    default=True,
)
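Most options above follow the same shape: a paired on/off flag, an optional `DBT_*` environment variable, and a default. A minimal, self-contained Click example of that pattern (the option and env var mirror `--print`/`DBT_PRINT` above, but the command itself is made up):

```python
import click

# Paired on/off flag backed by an env var, mirroring the `print` option above.
print_option = click.option(
    "--print/--no-print",
    "print_",  # bind to `print_` so the callback argument does not shadow the builtin
    envvar="DBT_PRINT",
    default=True,
    help="Output all {{ print() }} macro calls.",
)


@click.command()
@print_option
def demo(print_):
    click.echo(f"print enabled: {print_}")


if __name__ == "__main__":
    demo()  # try `python demo.py --no-print`, or set DBT_PRINT=0 in the environment
```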
core/dbt/cli/resolvers.py  (new file, +11)
@@ -0,0 +1,11 @@
from pathlib import Path


def default_project_dir():
    paths = list(Path.cwd().parents)
    paths.insert(0, Path.cwd())
    return next((x for x in paths if (x / "dbt_project.yml").exists()), Path.cwd())


def default_profiles_dir():
    return Path.cwd() if (Path.cwd() / "profiles.yml").exists() else Path.home() / ".dbt"
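These resolvers back the new defaults for `--project-dir` and `--profiles-dir`: the project directory is the nearest ancestor containing `dbt_project.yml`, and a `profiles.yml` in the current working directory now takes precedence over `~/.dbt`. A small sketch of that lookup against a throwaway directory tree (paths are created just for the example):

```python
import os
import tempfile
from pathlib import Path

from dbt.cli.resolvers import default_profiles_dir, default_project_dir

start = Path.cwd()
with tempfile.TemporaryDirectory() as tmp:
    project = Path(tmp) / "my_project"
    nested = project / "models" / "staging"
    nested.mkdir(parents=True)
    (project / "dbt_project.yml").touch()
    (project / "profiles.yml").touch()

    os.chdir(nested)
    print(default_project_dir())   # .../my_project: nearest ancestor with dbt_project.yml
    print(default_profiles_dir())  # ~/.dbt, because the current directory has no profiles.yml

    os.chdir(project)
    print(default_profiles_dir())  # .../my_project, because ./profiles.yml exists

    os.chdir(start)  # restore before the temporary directory is cleaned up
```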
@@ -27,6 +27,7 @@ from dbt.utils import (
from dbt.clients._jinja_blocks import BlockIterator, BlockData, BlockTag
from dbt.contracts.graph.compiled import CompiledGenericTestNode
from dbt.contracts.graph.parsed import ParsedGenericTestNode

from dbt.exceptions import (
    InternalException,
    raise_compiler_error,
@@ -305,13 +306,13 @@ class MacroGenerator(BaseMacroGenerator):
    @contextmanager
    def track_call(self):
        # This is only called from __call__
        if self.stack is None or self.node is None:
        if self.stack is None:
            yield
        else:
            unique_id = self.macro.unique_id
            depth = self.stack.depth
            # only mark depth=0 as a dependency
            if depth == 0:
            # only mark depth=0 as a dependency, when creating this dependency we don't pass in stack
            if depth == 0 and self.node:
                self.node.depends_on.add_macro(unique_id)
            self.stack.push(unique_id)
            try:
@@ -12,6 +12,7 @@ import tarfile
import requests
import stat
from typing import Type, NoReturn, List, Optional, Dict, Any, Tuple, Callable, Union
from pathspec import PathSpec  # type: ignore

from dbt.events.functions import fire_event
from dbt.events.types import (
@@ -36,6 +37,7 @@ def find_matching(
    root_path: str,
    relative_paths_to_search: List[str],
    file_pattern: str,
    ignore_spec: Optional[PathSpec] = None,
) -> List[Dict[str, Any]]:
    """
    Given an absolute `root_path`, a list of relative paths to that
@@ -57,19 +59,30 @@ def find_matching(
    reobj = re.compile(regex, re.IGNORECASE)

    for relative_path_to_search in relative_paths_to_search:
        # potential speedup for ignore_spec
        # if ignore_spec.matches(relative_path_to_search):
        #     continue
        absolute_path_to_search = os.path.join(root_path, relative_path_to_search)
        walk_results = os.walk(absolute_path_to_search)

        for current_path, subdirectories, local_files in walk_results:
            # potential speedup for ignore_spec
            # relative_dir = os.path.relpath(current_path, root_path) + os.sep
            # if ignore_spec.match(relative_dir):
            #     continue
            for local_file in local_files:
                absolute_path = os.path.join(current_path, local_file)
                relative_path = os.path.relpath(absolute_path, absolute_path_to_search)
                relative_path_to_root = os.path.join(relative_path_to_search, relative_path)

                modification_time = 0.0
                try:
                    modification_time = os.path.getmtime(absolute_path)
                except OSError:
                    fire_event(SystemErrorRetrievingModTime(path=absolute_path))
                if reobj.match(local_file):
                if reobj.match(local_file) and (
                    not ignore_spec or not ignore_spec.match_file(relative_path_to_root)
                ):
                    matching.append(
                        {
                            "searched_path": relative_path_to_search,
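`ignore_spec` is a `pathspec.PathSpec`, built from the project's `.dbtignore`, and `find_matching` now skips any file it matches. A standalone sketch of that filtering, constructing a spec directly from gitignore-style lines (patterns and paths are made up for the example):

```python
from pathspec import PathSpec

# Lines as they might appear in a .dbtignore file (made up for this example).
dbtignore_lines = [
    "ignored_models/",
    "*.py.bak",
]
ignore_spec = PathSpec.from_lines("gitwildmatch", dbtignore_lines)

candidates = [
    "models/my_model.sql",
    "ignored_models/old_model.sql",
    "models/scratch.py.bak",
]
kept = [path for path in candidates if not ignore_spec.match_file(path)]
print(kept)  # ['models/my_model.sql']
```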
@@ -164,7 +177,7 @@ def write_file(path: str, contents: str = "") -> bool:
            reason = "Path was possibly too long"
            # all our hard work and the path was still too long. Log and
            # continue.
            fire_event(SystemCouldNotWrite(path=path, reason=reason, exc=exc))
            fire_event(SystemCouldNotWrite(path=path, reason=reason, exc=str(exc)))
        else:
            raise
    return True
@@ -183,6 +183,7 @@ class Compiler:

        context = generate_runtime_model_context(node, self.config, manifest)
        context.update(extra_context)

        if isinstance(node, CompiledGenericTestNode):
            # for test nodes, add a special keyword args value to the context
            jinja.add_rendered_test_kwargs(context, node)
@@ -369,11 +370,6 @@ class Compiler:
        compiled_node = _compiled_type_for(node).from_dict(data)

        if compiled_node.language == ModelLanguage.python:
            # TODO could we also 'minify' this code at all? just aesthetic, not functional

            # quoating seems like something very specific to sql so far
            # for all python implementations we are seeing there's no quating.
            # TODO try to find better way to do this, given that
            original_quoting = self.config.quoting
            self.config.quoting = {key: False for key in original_quoting.keys()}
            context = self._create_node_context(compiled_node, manifest, extra_context)
@@ -384,7 +380,19 @@ class Compiler:
                node,
            )
            # we should NOT jinja render the python model's 'raw code'
            compiled_node.compiled_code = f"{node.raw_code}\n\n{postfix}"

            # if the user didn't specify an explicit `model(dbt, session)` function,
            # we're going to treat the user code as a "script" and wrap it in that function now.
            # TODO: this is the jankiest way of doing it, with zero AST magic
            if node.meta.get("missing_model_function") is True:
                raw_code_lines = node.raw_code.strip().split("\n")
                raw_code_lines[-1] = f"return {raw_code_lines[-1]}"
                raw_code_indented = "\n    ".join(raw_code_lines)
                model_code = f"def model(dbt, session):\n    {raw_code_indented}"
            else:
                model_code = node.raw_code

            compiled_node.compiled_code = f"{model_code}\n\n{postfix}"
            # restore quoting settings in the end since context is lazy evaluated
            self.config.quoting = original_quoting
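The wrapping step above turns a "script style" Python model (no explicit `model(dbt, session)` function) into one that has it, by indenting the user code and returning its last expression. A standalone rerun of that string transformation on a made-up model body:

```python
# Example "script style" model body; the last expression becomes the return value.
raw_code = """import pandas as pd

df = pd.DataFrame({"id": [1, 2, 3]})
df[df["id"] > 1]"""

raw_code_lines = raw_code.strip().split("\n")
raw_code_lines[-1] = f"return {raw_code_lines[-1]}"
raw_code_indented = "\n    ".join(raw_code_lines)
model_code = f"def model(dbt, session):\n    {raw_code_indented}"

print(model_code)
# def model(dbt, session):
#     import pandas as pd
#
#     df = pd.DataFrame({"id": [1, 2, 3]})
#     return df[df["id"] > 1]
```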
@@ -23,8 +23,6 @@ from .renderer import ProfileRenderer

DEFAULT_THREADS = 1

DEFAULT_PROFILES_DIR = os.path.join(os.path.expanduser("~"), ".dbt")

INVALID_PROFILE_MESSAGE = """
dbt encountered an error while trying to read your profiles.yml file.

@@ -44,7 +42,7 @@ defined in your profiles.yml file. You can find profiles.yml here:

{profiles_file}/profiles.yml
""".format(
    profiles_file=DEFAULT_PROFILES_DIR
    profiles_file=flags.DEFAULT_PROFILES_DIR
)
@@ -109,9 +109,15 @@ class MacroResolver:

def get_macro(self, local_package, macro_name):
local_package_macros = {}
# If the macro is explicitly prefixed with an internal namespace
# (e.g. 'dbt.some_macro'), look there first
if local_package in self.internal_package_names:
local_package_macros = self.internal_packages[local_package]
# If the macro is explicitly prefixed with a different package name
# (e.g. 'dbt_utils.some_macro'), look there first
if local_package not in self.internal_package_names and local_package in self.packages:
local_package_macros = self.packages[local_package]
# First: search the local packages for this macro
# First: search the specified package for this macro
if macro_name in local_package_macros:
return local_package_macros[macro_name]
# Now look up in the standard search order
@@ -4,6 +4,7 @@ from dbt.clients.jinja import MacroStack
from dbt.contracts.connection import AdapterRequiredConfig
from dbt.contracts.graph.manifest import Manifest
from dbt.context.macro_resolver import TestMacroNamespace
from .base import contextproperty

from .configured import ConfiguredContext
@@ -66,6 +67,10 @@ class ManifestContext(ConfiguredContext):
dct.update(self.namespace)
return dct

@contextproperty
def context_macro_stack(self):
return self.macro_stack

class QueryHeaderContext(ManifestContext):
def __init__(self, config: AdapterRequiredConfig, manifest: Manifest) -> None:
@@ -41,6 +41,7 @@ from dbt.contracts.graph.parsed import (
ParsedSourceDefinition,
)
from dbt.contracts.graph.metrics import MetricReference, ResolvedMetricReference
from dbt.contracts.util import get_metadata_env
from dbt.exceptions import (
CompilationException,
ParsingException,
@@ -710,6 +711,10 @@ class ProviderContext(ManifestContext):
self.model,
)

@contextproperty
def dbt_metadata_envs(self) -> Dict[str, str]:
return get_metadata_env()

@contextproperty
def invocation_args_dict(self):
return args_to_dict(self.config.args)
@@ -1244,6 +1249,19 @@ class ProviderContext(ManifestContext):
"""
return selected_resources.SELECTED_RESOURCES

@contextmember
def submit_python_job(self, parsed_model: Dict, compiled_code: str) -> AdapterResponse:
# Check macro_stack and that the unique id is for a materialization macro
if not (
self.context_macro_stack.depth == 2
and self.context_macro_stack.call_stack[1] == "macro.dbt.statement"
and "materialization" in self.context_macro_stack.call_stack[0]
):
raise RuntimeException(
f"submit_python_job is not intended to be called here, at model {parsed_model['alias']}, with macro call_stack {self.context_macro_stack.call_stack}."
)
return self.adapter.submit_python_job(parsed_model, compiled_code)

class MacroContext(ProviderContext):
"""Internally, macros can be executed like nodes, with some restrictions:
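Illustrative sketch (not from the diff above): a stand-in for get_metadata_env. The DBT_ENV_CUSTOM_ENV_ prefix and the prefix-stripping behavior are assumptions, not shown in this diff; only the fact that the context exposes the result as dbt_metadata_envs comes from the change above.

import os

# Hypothetical stand-in for dbt.contracts.util.get_metadata_env, assuming it
# filters os.environ by a metadata prefix and strips that prefix from the keys.
METADATA_ENV_PREFIX = "DBT_ENV_CUSTOM_ENV_"

def get_metadata_env_sketch() -> dict:
    return {
        key[len(METADATA_ENV_PREFIX):]: value
        for key, value in os.environ.items()
        if key.startswith(METADATA_ENV_PREFIX)
    }

os.environ["DBT_ENV_CUSTOM_ENV_RUN_ID"] = "abc123"
print(get_metadata_env_sketch())  # {'RUN_ID': 'abc123'}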
@@ -114,25 +114,34 @@ class FileHash(dbtClassMixin):
|
||||
|
||||
@dataclass
|
||||
class RemoteFile(dbtClassMixin):
|
||||
def __init__(self, language) -> None:
|
||||
if language == "sql":
|
||||
self.path_end = ".sql"
|
||||
elif language == "python":
|
||||
self.path_end = ".py"
|
||||
else:
|
||||
raise RuntimeError(f"Invalid language for remote File {language}")
|
||||
self.path = f"from remote system{self.path_end}"
|
||||
|
||||
@property
|
||||
def searched_path(self) -> str:
|
||||
return "from remote system"
|
||||
return self.path
|
||||
|
||||
@property
|
||||
def relative_path(self) -> str:
|
||||
return "from remote system"
|
||||
return self.path
|
||||
|
||||
@property
|
||||
def absolute_path(self) -> str:
|
||||
return "from remote system"
|
||||
return self.path
|
||||
|
||||
@property
|
||||
def original_file_path(self):
|
||||
return "from remote system"
|
||||
return self.path
|
||||
|
||||
@property
|
||||
def modification_time(self):
|
||||
return "from remote system"
|
||||
return self.path
|
||||
|
||||
|
||||
@dataclass
|
||||
@@ -202,9 +211,9 @@ class SourceFile(BaseSourceFile):
|
||||
# TODO: do this a different way. This remote file kludge isn't going
|
||||
# to work long term
|
||||
@classmethod
|
||||
def remote(cls, contents: str, project_name: str) -> "SourceFile":
|
||||
def remote(cls, contents: str, project_name: str, language: str) -> "SourceFile":
|
||||
self = cls(
|
||||
path=RemoteFile(),
|
||||
path=RemoteFile(language),
|
||||
checksum=FileHash.from_contents(contents),
|
||||
project_name=project_name,
|
||||
contents=contents,
|
||||
@@ -268,11 +277,13 @@ class SchemaSourceFile(BaseSourceFile):
|
||||
self.tests[key][name] = []
|
||||
self.tests[key][name].append(node_unique_id)
|
||||
|
||||
# this is only used in unit tests
|
||||
def remove_tests(self, yaml_key, name):
|
||||
if yaml_key in self.tests:
|
||||
if name in self.tests[yaml_key]:
|
||||
del self.tests[yaml_key][name]
|
||||
|
||||
# this is only used in tests (unit + functional)
|
||||
def get_tests(self, yaml_key, name):
|
||||
if yaml_key in self.tests:
|
||||
if name in self.tests[yaml_key]:
|
||||
|
||||
@@ -33,6 +33,7 @@ from dbt.contracts.graph.parsed import (
|
||||
ParsedMacro,
|
||||
ParsedDocumentation,
|
||||
ParsedSourceDefinition,
|
||||
ParsedGenericTestNode,
|
||||
ParsedExposure,
|
||||
ParsedMetric,
|
||||
HasUniqueID,
|
||||
@@ -1112,8 +1113,13 @@ class Manifest(MacroMethods, DataClassMessagePackMixin, dbtClassMixin):
|
||||
def add_disabled(self, source_file: AnySourceFile, node: CompileResultNode, test_from=None):
|
||||
self.add_disabled_nofile(node)
|
||||
if isinstance(source_file, SchemaSourceFile):
|
||||
assert test_from
|
||||
source_file.add_test(node.unique_id, test_from)
|
||||
if isinstance(node, ParsedGenericTestNode):
|
||||
assert test_from
|
||||
source_file.add_test(node.unique_id, test_from)
|
||||
if isinstance(node, ParsedMetric):
|
||||
source_file.metrics.append(node.unique_id)
|
||||
if isinstance(node, ParsedExposure):
|
||||
source_file.exposures.append(node.unique_id)
|
||||
else:
|
||||
source_file.nodes.append(node.unique_id)
|
||||
|
||||
|
||||
@@ -745,6 +745,7 @@ class ParsedExposure(UnparsedBaseNode, HasUniqueID, HasFqn):
owner: ExposureOwner
resource_type: NodeType = NodeType.Exposure
description: str = ""
label: Optional[str] = None
maturity: Optional[MaturityType] = None
meta: Dict[str, Any] = field(default_factory=dict)
tags: List[str] = field(default_factory=list)
@@ -770,6 +771,9 @@ class ParsedExposure(UnparsedBaseNode, HasUniqueID, HasFqn):
def same_description(self, old: "ParsedExposure") -> bool:
return self.description == old.description

def same_label(self, old: "ParsedExposure") -> bool:
return self.label == old.label

def same_maturity(self, old: "ParsedExposure") -> bool:
return self.maturity == old.maturity

@@ -801,6 +805,7 @@ class ParsedExposure(UnparsedBaseNode, HasUniqueID, HasFqn):
and self.same_maturity(old)
and self.same_url(old)
and self.same_description(old)
and self.same_label(old)
and self.same_depends_on(old)
and self.same_config(old)
and True
@@ -819,12 +824,12 @@ class ParsedMetric(UnparsedBaseNode, HasUniqueID, HasFqn):
description: str
label: str
calculation_method: str
timestamp: str
expression: str
timestamp: Optional[str]
filters: List[MetricFilter]
time_grains: List[str]
dimensions: List[str]
window: Optional[MetricTime]
window: Optional[MetricTime] = None
model: Optional[str] = None
model_unique_id: Optional[str] = None
resource_type: NodeType = NodeType.Metric
@@ -1,5 +1,13 @@
import re

from dbt import deprecations
from dbt.node_types import NodeType
from dbt.contracts.util import AdditionalPropertiesMixin, Mergeable, Replaceable
from dbt.contracts.util import (
AdditionalPropertiesMixin,
Mergeable,
Replaceable,
rename_metric_attr,
)

# trigger the PathEncoder
import dbt.helper_types # noqa:F401
@@ -429,6 +437,7 @@ class UnparsedExposure(dbtClassMixin, Replaceable):
type: ExposureType
owner: ExposureOwner
description: str = ""
label: Optional[str] = None
maturity: Optional[MaturityType] = None
meta: Dict[str, Any] = field(default_factory=dict)
tags: List[str] = field(default_factory=list)
@@ -436,6 +445,14 @@ class UnparsedExposure(dbtClassMixin, Replaceable):
depends_on: List[str] = field(default_factory=list)
config: Dict[str, Any] = field(default_factory=dict)

@classmethod
def validate(cls, data):
super(UnparsedExposure, cls).validate(data)
if "name" in data:
# name can only contain alphanumeric chars and underscores
if not (re.match(r"[\w-]+$", data["name"])):
deprecations.warn("exposure-name", exposure=data["name"])

@dataclass
class MetricFilter(dbtClassMixin, Replaceable):
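Illustrative sketch (not from the diff above): the exposure-name rule applied outside dbt, using the same regex as the validator.

import re

# Names restricted to letters, digits, underscores, and hyphens pass; anything
# else would trigger the 'exposure-name' deprecation warning above.
def exposure_name_ok(name: str) -> bool:
    return bool(re.match(r"[\w-]+$", name))

print(exposure_name_ok("weekly_kpi_dashboard"))   # True
print(exposure_name_ok("Weekly KPI Dashboard!"))  # False -> use 'label' for the friendly title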
@@ -470,8 +487,8 @@ class UnparsedMetric(dbtClassMixin, Replaceable):
label: str
calculation_method: str
timestamp: str
expression: str
description: str = ""
expression: Union[str, int] = ""
time_grains: List[str] = field(default_factory=list)
dimensions: List[str] = field(default_factory=list)
window: Optional[MetricTime] = None
@@ -483,14 +500,25 @@ class UnparsedMetric(dbtClassMixin, Replaceable):

@classmethod
def validate(cls, data):
data = rename_metric_attr(data, raise_deprecation_warning=True)
super(UnparsedMetric, cls).validate(data)
if "name" in data and " " in data["name"]:
raise ParsingException(f"Metrics name '{data['name']}' cannot contain spaces")
if "name" in data:
errors = []
if " " in data["name"]:
errors.append("cannot contain spaces")
# This handles failing queries due to too long metric names.
# It only occurs in BigQuery and Snowflake (Postgres/Redshift truncate)
if len(data["name"]) > 250:
errors.append("cannot contain more than 250 characters")
if not (re.match(r"^[A-Za-z]", data["name"])):
errors.append("must begin with a letter")
if not (re.match(r"[\w-]+$", data["name"])):
errors.append("must contain only letters, numbers and underscores")

if data.get("calculation_method") == "expression":
raise ValidationError(
"The metric calculation method expression has been deprecated and renamed to derived. Please update"
)
if errors:
raise ParsingException(
f"The metric name '{data['name']}' is invalid. It {', '.join(e for e in errors)}"
)

if data.get("model") is None and data.get("calculation_method") != "derived":
raise ValidationError("Non-derived metrics require a 'model' property")
@@ -236,6 +236,7 @@ class UserConfig(ExtensibleDbtClassMixin, Replaceable, UserConfigContract):
|
||||
static_parser: Optional[bool] = None
|
||||
indirect_selection: Optional[str] = None
|
||||
cache_selected_only: Optional[bool] = None
|
||||
event_buffer_size: Optional[int] = None
|
||||
|
||||
|
||||
@dataclass
|
||||
|
||||
@@ -4,6 +4,7 @@ from datetime import datetime
|
||||
from typing import List, Tuple, ClassVar, Type, TypeVar, Dict, Any, Optional
|
||||
|
||||
from dbt.clients.system import write_json, read_json
|
||||
from dbt import deprecations
|
||||
from dbt.exceptions import (
|
||||
InternalException,
|
||||
RuntimeException,
|
||||
@@ -207,13 +208,60 @@ def get_manifest_schema_version(dct: dict) -> int:
return int(schema_version.split(".")[-2][-1])

# we renamed these properties in v1.3
# this method allows us to be nice to the early adopters
def rename_metric_attr(data: dict, raise_deprecation_warning: bool = False) -> dict:
metric_name = data["name"]
if raise_deprecation_warning and (
"sql" in data.keys()
or "type" in data.keys()
or data.get("calculation_method") == "expression"
):
deprecations.warn("metric-attr-renamed", metric_name=metric_name)
duplicated_attribute_msg = """\n
The metric '{}' contains both the deprecated metric property '{}'
and the up-to-date metric property '{}'. Please remove the deprecated property.
"""
if "sql" in data.keys():
if "expression" in data.keys():
raise ValidationError(
duplicated_attribute_msg.format(metric_name, "sql", "expression")
)
else:
data["expression"] = data.pop("sql")
if "type" in data.keys():
if "calculation_method" in data.keys():
raise ValidationError(
duplicated_attribute_msg.format(metric_name, "type", "calculation_method")
)
else:
calculation_method = data.pop("type")
data["calculation_method"] = calculation_method
# we also changed "type: expression" -> "calculation_method: derived"
if data.get("calculation_method") == "expression":
data["calculation_method"] = "derived"
return data
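Illustrative sketch (not from the diff above): the effect of rename_metric_attr on an old-style metric dict; the sample metric is made up.

# Old-style (pre-1.3) metric attributes, as they might appear in a parsed YAML dict.
old_metric = {
    "name": "revenue",
    "type": "expression",        # renamed to calculation_method
    "sql": "price * quantity",   # renamed to expression
}

# After rename_metric_attr(old_metric) the dict would read:
# {
#     "name": "revenue",
#     "calculation_method": "derived",   # "type: expression" becomes "calculation_method: derived"
#     "expression": "price * quantity",
# }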
def rename_sql_attr(node_content: dict) -> dict:
if "raw_sql" in node_content:
node_content["raw_code"] = node_content.pop("raw_sql")
if "compiled_sql" in node_content:
node_content["compiled_code"] = node_content.pop("compiled_sql")
node_content["language"] = "sql"
return node_content

def upgrade_manifest_json(manifest: dict) -> dict:
for node_content in manifest.get("nodes", {}).values():
if "raw_sql" in node_content:
node_content["raw_code"] = node_content.pop("raw_sql")
if "compiled_sql" in node_content:
node_content["compiled_code"] = node_content.pop("compiled_sql")
node_content["language"] = "sql"
node_content = rename_sql_attr(node_content)
for disabled in manifest.get("disabled", {}).values():
# There can be multiple disabled nodes for the same unique_id
# so make sure all the nodes get the attr renamed
disabled = [rename_sql_attr(n) for n in disabled]
for metric_content in manifest.get("metrics", {}).values():
# handle attr renames + value translation ("expression" -> "derived")
metric_content = rename_metric_attr(metric_content)
return manifest
@@ -87,6 +87,30 @@ def renamed_method(old_name: str, new_name: str):
deprecations[dep.name] = dep

class MetricAttributesRenamed(DBTDeprecation):
_name = "metric-attr-renamed"
_description = """\
dbt-core v1.3 renamed attributes for metrics:
\n 'sql' -> 'expression'
\n 'type' -> 'calculation_method'
\n 'type: expression' -> 'calculation_method: derived'
\nThe old metric parameter names will be fully deprecated in v1.4.
\nPlease remove them from the metric definition of metric '{metric_name}'
\nRelevant issue here: https://github.com/dbt-labs/dbt-core/issues/5849
"""

class ExposureNameDeprecation(DBTDeprecation):
_name = "exposure-name"
_description = """\
Starting in v1.3, the 'name' of an exposure should contain only letters, numbers, and underscores.
Exposures support a new property, 'label', which may contain spaces, capital letters, and special characters.
{exposure} does not follow this pattern.
Please update the 'name', and use the 'label' property for a human-friendly title.
This will raise an error in a future version of dbt-core.
"""

def warn(name, *args, **kwargs):
if name not in deprecations:
# this should (hopefully) never happen
@@ -101,10 +125,12 @@ def warn(name, *args, **kwargs):
active_deprecations: Set[str] = set()

deprecations_list: List[DBTDeprecation] = [
ExposureNameDeprecation(),
ConfigSourcePathDeprecation(),
ConfigDataPathDeprecation(),
PackageInstallPathDeprecation(),
PackageRedirectDeprecation(),
MetricAttributesRenamed(),
]

deprecations: Dict[str, DBTDeprecation] = {d.name: d for d in deprecations_list}
@@ -1,7 +1,7 @@
|
||||
from colorama import Style
|
||||
import dbt.events.functions as this # don't worry I hate it too.
|
||||
from dbt.events.base_types import NoStdOut, Event, NoFile, ShowException, Cache
|
||||
from dbt.events.types import EventBufferFull, T_Event, MainReportVersion, EmptyLine
|
||||
from dbt.events.types import T_Event, MainReportVersion, EmptyLine, EventBufferFull
|
||||
import dbt.flags as flags
|
||||
from dbt.constants import SECRET_ENV_PREFIX
|
||||
|
||||
@@ -22,24 +22,16 @@ import threading
|
||||
from typing import Any, Dict, List, Optional, Union
|
||||
from collections import deque
|
||||
|
||||
global LOG_VERSION
|
||||
LOG_VERSION = 2
|
||||
|
||||
# create the global event history buffer with the default max size (10k)
|
||||
# python 3.7 doesn't support type hints on globals, but mypy requires them. hence the ignore.
|
||||
# TODO the flags module has not yet been resolved when this is created
|
||||
global EVENT_HISTORY
|
||||
EVENT_HISTORY = deque(maxlen=flags.EVENT_BUFFER_SIZE) # type: ignore
|
||||
EVENT_HISTORY = None
|
||||
|
||||
# create the global file logger with no configuration
|
||||
global FILE_LOG
|
||||
FILE_LOG = logging.getLogger("default_file")
|
||||
null_handler = logging.NullHandler()
|
||||
FILE_LOG.addHandler(null_handler)
|
||||
|
||||
# set up logger to go to stdout with defaults
|
||||
# setup_event_logger will be called once args have been parsed
|
||||
global STDOUT_LOG
|
||||
STDOUT_LOG = logging.getLogger("default_stdout")
|
||||
STDOUT_LOG.setLevel(logging.INFO)
|
||||
stdout_handler = logging.StreamHandler(sys.stdout)
|
||||
@@ -52,10 +44,6 @@ invocation_id: Optional[str] = None
|
||||
|
||||
|
||||
def setup_event_logger(log_path, level_override=None):
|
||||
# flags have been resolved, and log_path is known
|
||||
global EVENT_HISTORY
|
||||
EVENT_HISTORY = deque(maxlen=flags.EVENT_BUFFER_SIZE) # type: ignore
|
||||
|
||||
make_log_dir_if_missing(log_path)
|
||||
|
||||
this.format_json = flags.LOG_FORMAT == "json"
|
||||
@@ -271,14 +259,7 @@ def fire_event(e: Event) -> None:
if isinstance(e, Cache) and not flags.LOG_CACHE_EVENTS:
return

# if and only if the event history deque will be completely filled by this event
# fire warning that old events are now being dropped
global EVENT_HISTORY
if len(EVENT_HISTORY) == (flags.EVENT_BUFFER_SIZE - 1):
EVENT_HISTORY.append(e)
fire_event(EventBufferFull())
else:
EVENT_HISTORY.append(e)
add_to_event_history(e)

# backwards compatibility for plugins that require old logger (dbt-rpc)
if flags.ENABLE_LEGACY_LOGGER:
@@ -344,3 +325,20 @@ def get_ts_rfc3339() -> str:
ts = get_ts()
ts_rfc3339 = ts.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
return ts_rfc3339

def add_to_event_history(event):
if flags.EVENT_BUFFER_SIZE == 0:
return
global EVENT_HISTORY
if EVENT_HISTORY is None:
reset_event_history()
EVENT_HISTORY.append(event)
# We only set the EventBufferFull message for event buffers >= 10,000
if flags.EVENT_BUFFER_SIZE >= 10000 and len(EVENT_HISTORY) == (flags.EVENT_BUFFER_SIZE - 1):
fire_event(EventBufferFull())

def reset_event_history():
global EVENT_HISTORY
EVENT_HISTORY = deque(maxlen=flags.EVENT_BUFFER_SIZE)
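Illustrative sketch (not from the diff above): the lazily created event-history buffer as a standalone snippet, with a plain constant standing in for flags.EVENT_BUFFER_SIZE and a print standing in for the EventBufferFull warning.

from collections import deque

EVENT_BUFFER_SIZE = 10000   # stand-in for flags.EVENT_BUFFER_SIZE
EVENT_HISTORY = None        # created on first use, after flags are resolved

def reset_event_history():
    global EVENT_HISTORY
    EVENT_HISTORY = deque(maxlen=EVENT_BUFFER_SIZE)

def add_to_event_history(event):
    global EVENT_HISTORY
    if EVENT_BUFFER_SIZE == 0:      # buffering disabled entirely
        return
    if EVENT_HISTORY is None:
        reset_event_history()
    EVENT_HISTORY.append(event)
    if EVENT_BUFFER_SIZE >= 10000 and len(EVENT_HISTORY) == EVENT_BUFFER_SIZE - 1:
        print("warning: event buffer is full, old events will be dropped")

add_to_event_history({"name": "MainReportVersion"})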
@@ -103,11 +103,11 @@ class MainKeyboardInterrupt(InfoLevel):
|
||||
|
||||
@dataclass
|
||||
class MainEncounteredError(ErrorLevel):
|
||||
e: BaseException
|
||||
exc: str
|
||||
code: str = "Z002"
|
||||
|
||||
def message(self) -> str:
|
||||
return f"Encountered an error:\n{self.e}"
|
||||
return f"Encountered an error:\n{self.exc}"
|
||||
|
||||
|
||||
@dataclass
|
||||
@@ -382,7 +382,7 @@ class SystemErrorRetrievingModTime(ErrorLevel):
|
||||
class SystemCouldNotWrite(DebugLevel):
|
||||
path: str
|
||||
reason: str
|
||||
exc: Exception
|
||||
exc: str
|
||||
code: str = "Z005"
|
||||
|
||||
def message(self) -> str:
|
||||
@@ -762,7 +762,7 @@ class DumpAfterRenameSchema(DebugLevel, Cache):
|
||||
|
||||
@dataclass
|
||||
class AdapterImportError(InfoLevel):
|
||||
exc: Exception
|
||||
exc: str
|
||||
code: str = "E035"
|
||||
|
||||
def message(self) -> str:
|
||||
@@ -1008,7 +1008,7 @@ class PartialParsingNotEnabled(DebugLevel):
|
||||
@dataclass
|
||||
class ParsedFileLoadFailed(ShowException, DebugLevel):
|
||||
path: str
|
||||
exc: Exception
|
||||
exc: str
|
||||
code: str = "I029"
|
||||
|
||||
def message(self) -> str:
|
||||
@@ -1223,7 +1223,7 @@ class InvalidRefInTestNode(DebugLevel):
|
||||
|
||||
@dataclass
|
||||
class RunningOperationCaughtError(ErrorLevel):
|
||||
exc: Exception
|
||||
exc: str
|
||||
code: str = "Q001"
|
||||
|
||||
def message(self) -> str:
|
||||
@@ -1232,7 +1232,7 @@ class RunningOperationCaughtError(ErrorLevel):
|
||||
|
||||
@dataclass
|
||||
class RunningOperationUncaughtError(ErrorLevel):
|
||||
exc: Exception
|
||||
exc: str
|
||||
code: str = "FF01"
|
||||
|
||||
def message(self) -> str:
|
||||
@@ -1249,7 +1249,7 @@ class DbtProjectError(ErrorLevel):
|
||||
|
||||
@dataclass
|
||||
class DbtProjectErrorException(ErrorLevel):
|
||||
exc: Exception
|
||||
exc: str
|
||||
code: str = "A010"
|
||||
|
||||
def message(self) -> str:
|
||||
@@ -1266,7 +1266,7 @@ class DbtProfileError(ErrorLevel):
|
||||
|
||||
@dataclass
|
||||
class DbtProfileErrorException(ErrorLevel):
|
||||
exc: Exception
|
||||
exc: str
|
||||
code: str = "A012"
|
||||
|
||||
def message(self) -> str:
|
||||
@@ -1313,7 +1313,7 @@ https://docs.getdbt.com/docs/configure-your-profile
|
||||
|
||||
@dataclass
|
||||
class CatchableExceptionOnRun(ShowException, DebugLevel):
|
||||
exc: Exception
|
||||
exc: str
|
||||
code: str = "W002"
|
||||
|
||||
def message(self) -> str:
|
||||
@@ -1323,7 +1323,7 @@ class CatchableExceptionOnRun(ShowException, DebugLevel):
|
||||
@dataclass
|
||||
class InternalExceptionOnRun(DebugLevel):
|
||||
build_path: str
|
||||
exc: Exception
|
||||
exc: str
|
||||
code: str = "W003"
|
||||
|
||||
def message(self) -> str:
|
||||
@@ -1352,7 +1352,7 @@ class PrintDebugStackTrace(ShowException, DebugLevel):
|
||||
class GenericExceptionOnRun(ErrorLevel):
|
||||
build_path: Optional[str]
|
||||
unique_id: str
|
||||
exc: Exception
|
||||
exc: str
|
||||
code: str = "W004"
|
||||
|
||||
def message(self) -> str:
|
||||
@@ -1366,7 +1366,7 @@ class GenericExceptionOnRun(ErrorLevel):
|
||||
@dataclass
|
||||
class NodeConnectionReleaseError(ShowException, DebugLevel):
|
||||
node_name: str
|
||||
exc: Exception
|
||||
exc: str
|
||||
code: str = "W005"
|
||||
|
||||
def message(self) -> str:
|
||||
@@ -1700,7 +1700,7 @@ class SQLCompiledPath(InfoLevel):
|
||||
|
||||
@dataclass
|
||||
class SQlRunnerException(ShowException, DebugLevel):
|
||||
exc: Exception
|
||||
exc: str
|
||||
code: str = "Q006"
|
||||
|
||||
def message(self) -> str:
|
||||
@@ -2458,7 +2458,7 @@ class GeneralWarningMsg(WarnLevel):
|
||||
|
||||
@dataclass
|
||||
class GeneralWarningException(WarnLevel):
|
||||
exc: Exception
|
||||
exc: str
|
||||
log_fmt: str
|
||||
code: str = "Z047"
|
||||
|
||||
@@ -2479,7 +2479,7 @@ class EventBufferFull(WarnLevel):
|
||||
|
||||
@dataclass
|
||||
class RecordRetryException(DebugLevel):
|
||||
exc: Exception
|
||||
exc: str
|
||||
code: str = "M021"
|
||||
|
||||
def message(self) -> str:
|
||||
@@ -2495,7 +2495,7 @@ class RecordRetryException(DebugLevel):
|
||||
if 1 == 0:
|
||||
MainReportVersion(v="")
|
||||
MainKeyboardInterrupt()
|
||||
MainEncounteredError(e=BaseException(""))
|
||||
MainEncounteredError(exc="")
|
||||
MainStackTrace(stack_trace="")
|
||||
MainTrackingUserState(user_state="")
|
||||
ParsingStart()
|
||||
@@ -2524,7 +2524,7 @@ if 1 == 0:
|
||||
RegistryResponseMissingNestedKeys(response=""),
|
||||
RegistryResponseExtraNestedKeys(response=""),
|
||||
SystemErrorRetrievingModTime(path="")
|
||||
SystemCouldNotWrite(path="", reason="", exc=Exception(""))
|
||||
SystemCouldNotWrite(path="", reason="", exc="")
|
||||
SystemExecutingCmd(cmd=[""])
|
||||
SystemStdOutMsg(bmsg=b"")
|
||||
SystemStdErrMsg(bmsg=b"")
|
||||
@@ -2580,7 +2580,7 @@ if 1 == 0:
|
||||
DumpAfterAddGraph(Lazy.defer(lambda: dict()))
|
||||
DumpBeforeRenameSchema(Lazy.defer(lambda: dict()))
|
||||
DumpAfterRenameSchema(Lazy.defer(lambda: dict()))
|
||||
AdapterImportError(exc=Exception())
|
||||
AdapterImportError(exc="")
|
||||
PluginLoadError()
|
||||
SystemReportReturnCode(returncode=0)
|
||||
NewConnectionOpening(connection_state="")
|
||||
@@ -2603,7 +2603,7 @@ if 1 == 0:
|
||||
PartialParsingFailedBecauseNewProjectDependency()
|
||||
PartialParsingFailedBecauseHashChanged()
|
||||
PartialParsingDeletedMetric(id="")
|
||||
ParsedFileLoadFailed(path="", exc=Exception(""))
|
||||
ParsedFileLoadFailed(path="", exc="")
|
||||
PartialParseSaveFileNotFound()
|
||||
StaticParserCausedJinjaRendering(path="")
|
||||
UsingExperimentalParser(path="")
|
||||
@@ -2626,20 +2626,20 @@ if 1 == 0:
|
||||
PartialParsingDeletedExposure(unique_id="")
|
||||
InvalidDisabledSourceInTestNode(msg="")
|
||||
InvalidRefInTestNode(msg="")
|
||||
RunningOperationCaughtError(exc=Exception(""))
|
||||
RunningOperationUncaughtError(exc=Exception(""))
|
||||
RunningOperationCaughtError(exc="")
|
||||
RunningOperationUncaughtError(exc="")
|
||||
DbtProjectError()
|
||||
DbtProjectErrorException(exc=Exception(""))
|
||||
DbtProjectErrorException(exc="")
|
||||
DbtProfileError()
|
||||
DbtProfileErrorException(exc=Exception(""))
|
||||
DbtProfileErrorException(exc="")
|
||||
ProfileListTitle()
|
||||
ListSingleProfile(profile="")
|
||||
NoDefinedProfiles()
|
||||
ProfileHelpMessage()
|
||||
CatchableExceptionOnRun(exc=Exception(""))
|
||||
InternalExceptionOnRun(build_path="", exc=Exception(""))
|
||||
GenericExceptionOnRun(build_path="", unique_id="", exc=Exception(""))
|
||||
NodeConnectionReleaseError(node_name="", exc=Exception(""))
|
||||
CatchableExceptionOnRun(exc="")
|
||||
InternalExceptionOnRun(build_path="", exc="")
|
||||
GenericExceptionOnRun(build_path="", unique_id="", exc="")
|
||||
NodeConnectionReleaseError(node_name="", exc="")
|
||||
CheckCleanPath(path="")
|
||||
ConfirmCleanPath(path="")
|
||||
ProtectedCleanPath(path="")
|
||||
@@ -2847,6 +2847,6 @@ if 1 == 0:
|
||||
TrackingInitializeFailure()
|
||||
RetryExternalCall(attempt=0, max=0)
|
||||
GeneralWarningMsg(msg="", log_fmt="")
|
||||
GeneralWarningException(exc=Exception(""), log_fmt="")
|
||||
GeneralWarningException(exc="", log_fmt="")
|
||||
EventBufferFull()
|
||||
RecordRetryException(exc=Exception(""))
|
||||
RecordRetryException(exc="")
|
||||
|
||||
@@ -1098,7 +1098,7 @@ def warn_or_raise(exc, log_fmt=None):
|
||||
if flags.WARN_ERROR:
|
||||
raise exc
|
||||
else:
|
||||
fire_event(GeneralWarningException(exc=exc, log_fmt=log_fmt))
|
||||
fire_event(GeneralWarningException(exc=str(exc), log_fmt=log_fmt))
|
||||
|
||||
|
||||
def warn(msg, node=None):
|
||||
|
||||
@@ -10,7 +10,13 @@ from typing import Optional
# PROFILES_DIR must be set before the other flags
# It also gets set in main.py and in set_from_args because the rpc server
# doesn't go through exactly the same main arg processing.
DEFAULT_PROFILES_DIR = os.path.join(os.path.expanduser("~"), ".dbt")
GLOBAL_PROFILES_DIR = os.path.join(os.path.expanduser("~"), ".dbt")
LOCAL_PROFILES_DIR = os.getcwd()
# Use the current working directory if there is a profiles.yml file present there
if os.path.exists(Path(LOCAL_PROFILES_DIR) / Path("profiles.yml")):
DEFAULT_PROFILES_DIR = LOCAL_PROFILES_DIR
else:
DEFAULT_PROFILES_DIR = GLOBAL_PROFILES_DIR
PROFILES_DIR = os.path.expanduser(os.getenv("DBT_PROFILES_DIR", DEFAULT_PROFILES_DIR))

STRICT_MODE = False # Only here for backwards compatibility
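Illustrative sketch (not from the diff above): the profiles.yml lookup order these lines introduce, written as one standalone function.

import os
from pathlib import Path

def resolve_profiles_dir() -> str:
    # 1. an explicit DBT_PROFILES_DIR env var wins
    env_dir = os.getenv("DBT_PROFILES_DIR")
    if env_dir:
        return os.path.expanduser(env_dir)
    # 2. a profiles.yml in the current working directory
    if (Path(os.getcwd()) / "profiles.yml").exists():
        return os.getcwd()
    # 3. otherwise fall back to the global ~/.dbt directory
    return os.path.join(os.path.expanduser("~"), ".dbt")

print(resolve_profiles_dir())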
@@ -52,6 +58,7 @@ _NON_BOOLEAN_FLAGS = [
|
||||
|
||||
_NON_DBT_ENV_FLAGS = ["DO_NOT_TRACK"]
|
||||
|
||||
|
||||
# Global CLI defaults. These flags are set from three places:
|
||||
# CLI args, environment variables, and user_config (profiles.yml).
|
||||
# Environment variables use the pattern 'DBT_{flag name}', like DBT_PROFILES_DIR
|
||||
|
||||
@@ -356,8 +356,21 @@ class ConfigSelectorMethod(SelectorMethod):
except AttributeError:
continue
else:
if selector == value:
yield node
if isinstance(value, list):
if (
(selector in value)
or (CaseInsensitive(selector) == "true" and True in value)
or (CaseInsensitive(selector) == "false" and False in value)
):
yield node
else:
if (
(selector == value)
or (CaseInsensitive(selector) == "true" and value is True)
or (CaseInsensitive(selector) == "false")
and value is False
):
yield node

class ResourceTypeSelectorMethod(SelectorMethod):
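Illustrative sketch (not from the diff above): what the new list branch means for a config selector when the configured value is a list, such as tags.

value = ["nightly", "finance"]   # e.g. a model's tags config
selector = "nightly"

# Membership is checked for list-valued configs; equality for scalars.
matches = selector in value if isinstance(value, list) else selector == value
print(matches)  # True -> the node is yielded by the selector method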
@@ -12,7 +12,7 @@ The macro override naming method (spark__statement) only works for macros which
|
||||
{%- if language == 'sql'-%}
|
||||
{%- set res, table = adapter.execute(compiled_code, auto_begin=auto_begin, fetch=fetch_result) -%}
|
||||
{%- elif language == 'python' -%}
|
||||
{%- set res = adapter.submit_python_job(model, compiled_code) -%}
|
||||
{%- set res = submit_python_job(model, compiled_code) -%}
|
||||
{#-- TODO: What should table be for python models? --#}
|
||||
{%- set table = None -%}
|
||||
{%- else -%}
|
||||
|
||||
@@ -50,3 +50,31 @@
{{ return(result) }}

{% endmacro %}

{% macro get_merge_update_columns(merge_update_columns, merge_exclude_columns, dest_columns) %}
{{ return(adapter.dispatch('get_merge_update_columns', 'dbt')(merge_update_columns, merge_exclude_columns, dest_columns)) }}
{% endmacro %}

{% macro default__get_merge_update_columns(merge_update_columns, merge_exclude_columns, dest_columns) %}
{%- set default_cols = dest_columns | map(attribute="quoted") | list -%}

{%- if merge_update_columns and merge_exclude_columns -%}
{{ exceptions.raise_compiler_error(
'Model cannot specify merge_update_columns and merge_exclude_columns. Please update model to use only one config'
)}}
{%- elif merge_update_columns -%}
{%- set update_columns = merge_update_columns -%}
{%- elif merge_exclude_columns -%}
{%- set update_columns = [] -%}
{%- for column in dest_columns -%}
{% if column.column | lower not in merge_exclude_columns | map("lower") | list %}
{%- do update_columns.append(column.quoted) -%}
{% endif %}
{%- endfor -%}
{%- else -%}
{%- set update_columns = default_cols -%}
{%- endif -%}

{{ return(update_columns) }}

{% endmacro %}
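Illustrative sketch (not from the diff above): the macro's column-selection logic in plain Python, using bare column-name strings instead of adapter Column objects.

def get_merge_update_columns(merge_update_columns, merge_exclude_columns, dest_columns):
    if merge_update_columns and merge_exclude_columns:
        raise ValueError("specify only one of merge_update_columns / merge_exclude_columns")
    if merge_update_columns:
        return merge_update_columns
    if merge_exclude_columns:
        excluded = {c.lower() for c in merge_exclude_columns}
        return [c for c in dest_columns if c.lower() not in excluded]
    return dest_columns

print(get_merge_update_columns(None, ["updated_at"], ["id", "amount", "updated_at"]))
# ['id', 'amount']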
@@ -5,7 +5,9 @@
{% macro default__get_merge_sql(target, source, unique_key, dest_columns, predicates) -%}
{%- set predicates = [] if predicates is none else [] + predicates -%}
{%- set dest_cols_csv = get_quoted_csv(dest_columns | map(attribute="name")) -%}
{%- set update_columns = config.get('merge_update_columns', default = dest_columns | map(attribute="quoted") | list) -%}
{%- set merge_update_columns = config.get('merge_update_columns') -%}
{%- set merge_exclude_columns = config.get('merge_exclude_columns') -%}
{%- set update_columns = get_merge_update_columns(merge_update_columns, merge_exclude_columns, dest_columns) -%}
{%- set sql_header = config.get('sql_header', none) -%}

{% if unique_key %}
@@ -30,8 +30,8 @@ def source(*args, dbt_load_df_function):
|
||||
|
||||
{% macro build_config_dict(model) %}
|
||||
{%- set config_dict = {} -%}
|
||||
{%- for key in model.config.utilized -%}
|
||||
{# TODO: weird type testing with enum, would be much easier to write this logic in Python! #}
|
||||
{%- for key in model.config.config_keys_used -%}
|
||||
{# weird type testing with enum, would be much easier to write this logic in Python! #}
|
||||
{%- if key == 'language' -%}
|
||||
{%- set value = 'python' -%}
|
||||
{%- endif -%}
|
||||
@@ -56,8 +56,8 @@ class config:
|
||||
pass
|
||||
|
||||
@staticmethod
|
||||
def get(key):
|
||||
return config_dict.get(key)
|
||||
def get(key, default=None):
|
||||
return config_dict.get(key, default)
|
||||
|
||||
class this:
|
||||
"""dbt.this() or dbt.this.identifier"""
|
||||
|
||||
@@ -0,0 +1,8 @@
|
||||
{% macro array_append(array, new_element) -%}
|
||||
{{ return(adapter.dispatch('array_append', 'dbt')(array, new_element)) }}
|
||||
{%- endmacro %}
|
||||
|
||||
{# new_element must be the same data type as elements in array to match postgres functionality #}
|
||||
{% macro default__array_append(array, new_element) -%}
|
||||
array_append({{ array }}, {{ new_element }})
|
||||
{%- endmacro %}
|
||||
@@ -0,0 +1,7 @@
|
||||
{% macro array_concat(array_1, array_2) -%}
|
||||
{{ return(adapter.dispatch('array_concat', 'dbt')(array_1, array_2)) }}
|
||||
{%- endmacro %}
|
||||
|
||||
{% macro default__array_concat(array_1, array_2) -%}
|
||||
array_cat({{ array_1 }}, {{ array_2 }})
|
||||
{%- endmacro %}
|
||||
@@ -0,0 +1,12 @@
|
||||
{% macro array_construct(inputs=[], data_type=api.Column.translate_type('integer')) -%}
|
||||
{{ return(adapter.dispatch('array_construct', 'dbt')(inputs, data_type)) }}
|
||||
{%- endmacro %}
|
||||
|
||||
{# all inputs must be the same data type to match postgres functionality #}
|
||||
{% macro default__array_construct(inputs, data_type) -%}
|
||||
{% if inputs|length > 0 %}
|
||||
array[ {{ inputs|join(' , ') }} ]
|
||||
{% else %}
|
||||
array[]::{{data_type}}[]
|
||||
{% endif %}
|
||||
{%- endmacro %}
|
||||
@@ -59,7 +59,7 @@ The TIMESTAMP_* variation associated with TIMESTAMP is specified by the TIMESTAM
|
||||
{{ return(api.Column.translate_type("float")) }}
|
||||
{% endmacro %}
|
||||
|
||||
{# numeric ------------------------------------------------ #}
|
||||
{# numeric ------------------------------------------------- #}
|
||||
|
||||
{%- macro type_numeric() -%}
|
||||
{{ return(adapter.dispatch('type_numeric', 'dbt')()) }}
|
||||
@@ -115,3 +115,15 @@ the precision and scale explicitly.)
|
||||
|
||||
-- returns 'int' everywhere, except BigQuery, where it returns 'int64'
|
||||
-- (but BigQuery also now accepts 'int' as a valid alias for 'int64')
|
||||
|
||||
{# bool ------------------------------------------------- #}
|
||||
|
||||
{%- macro type_boolean() -%}
|
||||
{{ return(adapter.dispatch('type_boolean', 'dbt')()) }}
|
||||
{%- endmacro -%}
|
||||
|
||||
{%- macro default__type_boolean() -%}
|
||||
{{ return(api.Column.translate_type("boolean")) }}
|
||||
{%- endmacro -%}
|
||||
|
||||
-- returns 'boolean' everywhere. BigQuery accepts 'boolean' as a valid alias for 'bool'
|
||||
|
||||
File diff suppressed because one or more lines are too long

120 core/dbt/lib.py
@@ -1,10 +1,64 @@
|
||||
# TODO: this file is one big TODO
|
||||
import os
|
||||
from dbt.contracts.results import RunningStatus, collect_timing_info
|
||||
from dbt.events.functions import fire_event
|
||||
from dbt.events.types import NodeCompiling, NodeExecuting
|
||||
from dbt.exceptions import RuntimeException
|
||||
from dbt import flags
|
||||
from collections import namedtuple
|
||||
from dbt.task.sql import SqlCompileRunner
|
||||
from dataclasses import dataclass
|
||||
|
||||
RuntimeArgs = namedtuple("RuntimeArgs", "project_dir profiles_dir single_threaded profile target")
|
||||
|
||||
@dataclass
|
||||
class RuntimeArgs:
|
||||
project_dir: str
|
||||
profiles_dir: str
|
||||
single_threaded: bool
|
||||
profile: str
|
||||
target: str
|
||||
|
||||
|
||||
class SqlCompileRunnerNoIntrospection(SqlCompileRunner):
|
||||
def compile_and_execute(self, manifest, ctx):
|
||||
"""
|
||||
This version of this method does not connect to the data warehouse.
|
||||
As a result, introspective queries at compilation will not be supported
|
||||
and will throw an error.
|
||||
|
||||
TODO: This is a temporary solution to more complex permissions requirements
|
||||
for the semantic layer, and thus largely duplicates the code in the parent class
|
||||
method. Once conditional credential usage is enabled, this should be removed.
|
||||
"""
|
||||
result = None
|
||||
ctx.node._event_status["node_status"] = RunningStatus.Compiling
|
||||
fire_event(
|
||||
NodeCompiling(
|
||||
node_info=ctx.node.node_info,
|
||||
unique_id=ctx.node.unique_id,
|
||||
)
|
||||
)
|
||||
with collect_timing_info("compile") as timing_info:
|
||||
# if we fail here, we still have a compiled node to return
|
||||
# this has the benefit of showing a build path for the errant
|
||||
# model
|
||||
ctx.node = self.compile(manifest)
|
||||
ctx.timing.append(timing_info)
|
||||
|
||||
# for ephemeral nodes, we only want to compile, not run
|
||||
if not ctx.node.is_ephemeral_model:
|
||||
ctx.node._event_status["node_status"] = RunningStatus.Executing
|
||||
fire_event(
|
||||
NodeExecuting(
|
||||
node_info=ctx.node.node_info,
|
||||
unique_id=ctx.node.unique_id,
|
||||
)
|
||||
)
|
||||
with collect_timing_info("execute") as timing_info:
|
||||
result = self.run(ctx.node, manifest)
|
||||
ctx.node = result.node
|
||||
|
||||
ctx.timing.append(timing_info)
|
||||
|
||||
return result
|
||||
|
||||
|
||||
def get_dbt_config(project_dir, args=None, single_threaded=False):
|
||||
@@ -15,29 +69,32 @@ def get_dbt_config(project_dir, args=None, single_threaded=False):
|
||||
if os.getenv("DBT_PROFILES_DIR"):
|
||||
profiles_dir = os.getenv("DBT_PROFILES_DIR")
|
||||
else:
|
||||
profiles_dir = os.path.expanduser("~/.dbt")
|
||||
profiles_dir = flags.DEFAULT_PROFILES_DIR
|
||||
|
||||
profile = args.profile if hasattr(args, "profile") else None
|
||||
target = args.target if hasattr(args, "target") else None
|
||||
|
||||
# Construct a phony config
|
||||
config = RuntimeConfig.from_args(
|
||||
RuntimeArgs(project_dir, profiles_dir, single_threaded, profile, target)
|
||||
runtime_args = RuntimeArgs(
|
||||
project_dir=project_dir,
|
||||
profiles_dir=profiles_dir,
|
||||
single_threaded=single_threaded,
|
||||
profile=getattr(args, "profile", None),
|
||||
target=getattr(args, "target", None),
|
||||
)
|
||||
# Clear previously registered adapters--
|
||||
# this fixes cacheing behavior on the dbt-server
|
||||
|
||||
# Construct a RuntimeConfig from phony args
|
||||
config = RuntimeConfig.from_args(runtime_args)
|
||||
|
||||
# Set global flags from arguments
|
||||
flags.set_from_args(args, config)
|
||||
dbt.adapters.factory.reset_adapters()
|
||||
# Load the relevant adapter
|
||||
|
||||
# This is idempotent, so we can call it repeatedly
|
||||
dbt.adapters.factory.register_adapter(config)
|
||||
# Set invocation id
|
||||
|
||||
# Make sure we have a valid invocation_id
|
||||
dbt.events.functions.set_invocation_id()
|
||||
|
||||
return config
|
||||
|
||||
|
||||
def get_task_by_type(type):
|
||||
# TODO: we need to tell dbt-server what tasks are available
|
||||
from dbt.task.run import RunTask
|
||||
from dbt.task.list import ListTask
|
||||
from dbt.task.seed import SeedTask
|
||||
@@ -70,16 +127,13 @@ def create_task(type, args, manifest, config):
|
||||
def no_op(*args, **kwargs):
|
||||
pass
|
||||
|
||||
# TODO: yuck, let's rethink tasks a little
|
||||
task = task(args, config)
|
||||
|
||||
# Wow! We can monkeypatch taskCls.load_manifest to return _our_ manifest
|
||||
task.load_manifest = no_op
|
||||
task.manifest = manifest
|
||||
return task
|
||||
|
||||
|
||||
def _get_operation_node(manifest, project_path, sql):
|
||||
def _get_operation_node(manifest, project_path, sql, node_name):
|
||||
from dbt.parser.manifest import process_node
|
||||
from dbt.parser.sql import SqlBlockParser
|
||||
import dbt.adapters.factory
|
||||
@@ -92,26 +146,33 @@ def _get_operation_node(manifest, project_path, sql):
|
||||
)
|
||||
|
||||
adapter = dbt.adapters.factory.get_adapter(config)
|
||||
# TODO : This needs a real name?
|
||||
sql_node = block_parser.parse_remote(sql, "name")
|
||||
sql_node = block_parser.parse_remote(sql, node_name)
|
||||
process_node(config, manifest, sql_node)
|
||||
return config, sql_node, adapter
|
||||
|
||||
|
||||
def compile_sql(manifest, project_path, sql):
from dbt.task.sql import SqlCompileRunner
def compile_sql(manifest, project_path, sql, node_name="query"):
config, node, adapter = _get_operation_node(manifest, project_path, sql, node_name)
allow_introspection = str(os.environ.get("__DBT_ALLOW_INTROSPECTION", "1")).lower() in (
"true",
"1",
"on",
)

config, node, adapter = _get_operation_node(manifest, project_path, sql)
runner = SqlCompileRunner(config, adapter, node, 1, 1)
if allow_introspection:
runner = SqlCompileRunner(config, adapter, node, 1, 1)
else:
runner = SqlCompileRunnerNoIntrospection(config, adapter, node, 1, 1)
return runner.safe_run(manifest)

def execute_sql(manifest, project_path, sql):
def execute_sql(manifest, project_path, sql, node_name="query"):
from dbt.task.sql import SqlExecuteRunner

config, node, adapter = _get_operation_node(manifest, project_path, sql)
config, node, adapter = _get_operation_node(manifest, project_path, sql, node_name)

runner = SqlExecuteRunner(config, adapter, node, 1, 1)
# TODO: use same interface for runner

return runner.safe_run(manifest)
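Illustrative sketch (not from the diff above): the truthiness check used for __DBT_ALLOW_INTROSPECTION, as a standalone helper.

import os

def introspection_allowed() -> bool:
    # Defaults to "1" (allowed); "true" / "1" / "on" in any casing keep it enabled.
    return str(os.environ.get("__DBT_ALLOW_INTROSPECTION", "1")).lower() in ("true", "1", "on")

os.environ["__DBT_ALLOW_INTROSPECTION"] = "off"
print(introspection_allowed())  # False -> compile_sql would use the no-introspection runner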
@@ -128,5 +189,4 @@ def deserialize_manifest(manifest_msgpack):
|
||||
|
||||
|
||||
def serialize_manifest(manifest):
|
||||
# TODO: what should this take as an arg?
|
||||
return manifest.to_msgpack()
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
from typing import List
|
||||
|
||||
from dbt.logger import log_cache_events, log_manager
|
||||
|
||||
import argparse
|
||||
@@ -42,8 +43,13 @@ from dbt.adapters.factory import reset_adapters, cleanup_connections
|
||||
import dbt.tracking
|
||||
|
||||
from dbt.utils import ExitCodes, args_to_dict
|
||||
from dbt.config.profile import DEFAULT_PROFILES_DIR, read_user_config
|
||||
from dbt.exceptions import InternalException, NotImplementedException, FailedToConnectException
|
||||
from dbt.config.profile import read_user_config
|
||||
from dbt.exceptions import (
|
||||
Exception as dbtException,
|
||||
InternalException,
|
||||
NotImplementedException,
|
||||
FailedToConnectException,
|
||||
)
|
||||
|
||||
|
||||
class DBTVersion(argparse.Action):
|
||||
@@ -142,8 +148,9 @@ def main(args=None):
|
||||
exit_code = e.code
|
||||
|
||||
except BaseException as e:
|
||||
fire_event(MainEncounteredError(e=str(e)))
|
||||
fire_event(MainStackTrace(stack_trace=traceback.format_exc()))
|
||||
fire_event(MainEncounteredError(exc=str(e)))
|
||||
if not isinstance(e, dbtException):
|
||||
fire_event(MainStackTrace(stack_trace=traceback.format_exc()))
|
||||
exit_code = ExitCodes.UnhandledError.value
|
||||
|
||||
sys.exit(exit_code)
|
||||
@@ -201,7 +208,7 @@ def track_run(task):
|
||||
yield
|
||||
dbt.tracking.track_invocation_end(config=task.config, args=task.args, result_type="ok")
|
||||
except (NotImplementedException, FailedToConnectException) as e:
|
||||
fire_event(MainEncounteredError(e=str(e)))
|
||||
fire_event(MainEncounteredError(exc=str(e)))
|
||||
dbt.tracking.track_invocation_end(config=task.config, args=task.args, result_type="error")
|
||||
except Exception:
|
||||
dbt.tracking.track_invocation_end(config=task.config, args=task.args, result_type="error")
|
||||
@@ -258,10 +265,8 @@ def _build_base_subparser():
|
||||
dest="sub_profiles_dir", # Main cli arg precedes subcommand
|
||||
type=str,
|
||||
help="""
|
||||
Which directory to look in for the profiles.yml file. Default = {}
|
||||
""".format(
|
||||
DEFAULT_PROFILES_DIR
|
||||
),
|
||||
Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/
|
||||
""",
|
||||
)
|
||||
|
||||
base_subparser.add_argument(
|
||||
@@ -620,6 +625,7 @@ def _add_table_mutability_arguments(*subparsers):
|
||||
for sub in subparsers:
|
||||
sub.add_argument(
|
||||
"--full-refresh",
|
||||
"-f",
|
||||
action="store_true",
|
||||
help="""
|
||||
If specified, dbt will drop incremental models and
|
||||
@@ -1059,10 +1065,8 @@ def parse_args(args, cls=DBTArgumentParser):
|
||||
dest="profiles_dir",
|
||||
type=str,
|
||||
help="""
|
||||
Which directory to look in for the profiles.yml file. Default = {}
|
||||
""".format(
|
||||
DEFAULT_PROFILES_DIR
|
||||
),
|
||||
Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/
|
||||
""",
|
||||
)
|
||||
|
||||
p.add_argument(
|
||||
|
||||
@@ -20,9 +20,7 @@ class MacroParser(BaseParser[ParsedMacro]):
|
||||
# from the normal parsing flow.
|
||||
def get_paths(self) -> List[FilePath]:
|
||||
return filesystem_search(
|
||||
project=self.project,
|
||||
relative_dirs=self.project.macro_paths,
|
||||
extension=".sql",
|
||||
project=self.project, relative_dirs=self.project.macro_paths, extension=".sql"
|
||||
)
|
||||
|
||||
@property
|
||||
|
||||
@@ -660,7 +660,7 @@ class ManifestLoader:
|
||||
manifest.metadata.invocation_id = get_invocation_id()
|
||||
return manifest
|
||||
except Exception as exc:
|
||||
fire_event(ParsedFileLoadFailed(path=path, exc=exc))
|
||||
fire_event(ParsedFileLoadFailed(path=path, exc=str(exc)))
|
||||
reparse_reason = ReparseReason.load_file_failure
|
||||
else:
|
||||
fire_event(PartialParseSaveFileNotFound())
|
||||
|
||||
@@ -60,8 +60,8 @@ class PythonValidationVisitor(ast.NodeVisitor):
)

def check_error(self, node):
if self.num_model_def != 1:
raise ParsingException("dbt only allow one model defined per python file", node=node)
if self.num_model_def > 1:
raise ParsingException("dbt only allows one model defined per python file", node=node)
if len(self.dbt_errors) != 0:
raise ParsingException("\n".join(self.dbt_errors), node=node)

@@ -86,11 +86,12 @@ class PythonParseVisitor(ast.NodeVisitor):
def _safe_eval(self, node):
try:
return ast.literal_eval(node)
except (SyntaxError, ValueError, TypeError) as exc:
msg = validator_error_message(exc)
raise ParsingException(msg, node=self.dbt_node) from exc
except (MemoryError, RecursionError) as exc:
msg = validator_error_message(exc)
except (SyntaxError, ValueError, TypeError, MemoryError, RecursionError) as exc:
msg = validator_error_message(
f"Error when trying to literal_eval an arg to dbt.ref(), dbt.source(), dbt.config() or dbt.config.get() \n{exc}\n"
"https://docs.python.org/3/library/ast.html#ast.literal_eval\n"
"In dbt python model, `dbt.ref`, `dbt.source`, `dbt.config`, `dbt.config.get` function args only support Python literal structures"
)
raise ParsingException(msg, node=self.dbt_node) from exc

def _get_call_literals(self, node):
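Illustrative sketch (not from the diff above): what ast.literal_eval accepts and rejects, which is why arguments to dbt.ref(), dbt.source(), dbt.config(), and dbt.config.get() must be Python literal structures.

import ast

# Literals (strings, numbers, tuples, lists, dicts, sets, booleans, None) are fine.
print(ast.literal_eval("{'materialized': 'table', 'tags': ['nightly']}"))
# {'materialized': 'table', 'tags': ['nightly']}

try:
    ast.literal_eval("os.environ['USER']")   # attribute access / subscript, not a literal
except ValueError as exc:
    print(f"rejected: {exc}")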
@@ -112,7 +113,7 @@ class PythonParseVisitor(ast.NodeVisitor):
|
||||
return arg_literals, kwarg_literals
|
||||
|
||||
def visit_Call(self, node: ast.Call) -> None:
|
||||
# check weather the current call could be a dbt function call
|
||||
# check whether the current call could be a dbt function call
|
||||
if isinstance(node.func, ast.Attribute) and node.func.attr in dbt_function_key_words:
|
||||
func_name = self._flatten_attr(node.func)
|
||||
# check weather the current call really is a dbt function call
|
||||
@@ -203,18 +204,25 @@ class ModelParser(SimpleSQLParser[ParsedModelNode]):
dbtValidator.visit(tree)
dbtValidator.check_error(node)

# if the user didn't specify an explicit `model(dbt, session)` function,
# we're going to treat the user code as a "script" to be wrapped in that function at compile time.
# for now, we just need to recognize that fact, and save it to the node.
if dbtValidator.num_model_def == 0:
# TODO: this is silly, put this somewhere better (outside of user space)
node.meta["missing_model_function"] = True

dbtParser = PythonParseVisitor(node)
dbtParser.visit(tree)

config_keys_used = []
for (func, args, kwargs) in dbtParser.dbt_function_calls:
# TODO decide what we want to do with detected packages
# if func == "config":
# kwargs["detected_packages"] = dbtParser.packages
if func == "get":
context["config"](utilized=args)
config_keys_used.append(args[0])
continue

context[func](*args, **kwargs)
if config_keys_used:
# this is being used in macro build_config_dict
context["config"](config_keys_used=config_keys_used)

def render_update(self, node: ParsedModelNode, config: ContextConfig) -> None:
self.manifest._parsing_info.static_analysis_path_count += 1
@@ -245,6 +245,22 @@ class PartialParsing:
|
||||
if "overrides" in source:
|
||||
self.remove_source_override_target(source)
|
||||
|
||||
def delete_disabled(self, unique_id, file_id):
|
||||
# This node/metric/exposure is disabled. Find it and remove it from disabled dictionary.
|
||||
for dis_index, dis_node in enumerate(self.saved_manifest.disabled[unique_id]):
|
||||
if dis_node.file_id == file_id:
|
||||
node = dis_node
|
||||
index = dis_index
|
||||
break
|
||||
# Remove node from disabled
|
||||
del self.saved_manifest.disabled[unique_id][index]
|
||||
# if all nodes were removed for the unique id, delete the unique_id
|
||||
# from the disabled dict
|
||||
if not self.saved_manifest.disabled[unique_id]:
|
||||
self.saved_manifest.disabled.pop(unique_id)
|
||||
|
||||
return node
|
||||
|
||||
# Deletes for all non-schema files
|
||||
def delete_from_saved(self, file_id):
|
||||
# Look at all things touched by file, remove those
|
||||
@@ -319,15 +335,7 @@ class PartialParsing:
|
||||
and unique_id in self.saved_manifest.disabled
|
||||
):
|
||||
# This node is disabled. Find the node and remove it from disabled dictionary.
|
||||
for dis_index, dis_node in enumerate(self.saved_manifest.disabled[unique_id]):
|
||||
if dis_node.file_id == source_file.file_id:
|
||||
node = dis_node
|
||||
break
|
||||
if dis_node:
|
||||
# Remove node from disabled and unique_id from disabled dict if necessary
|
||||
del self.saved_manifest.disabled[unique_id][dis_index]
|
||||
if not self.saved_manifest.disabled[unique_id]:
|
||||
self.saved_manifest.disabled.pop(unique_id)
|
||||
node = self.delete_disabled(unique_id, source_file.file_id)
|
||||
else:
|
||||
# Has already been deleted by another action
|
||||
return
|
||||
@@ -885,34 +893,40 @@ class PartialParsing:
|
||||
self.add_to_pp_files(self.saved_files[macro_file_id])
|
||||
|
||||
# exposures are created only from schema files, so just delete
|
||||
# the exposure.
|
||||
# the exposure or the disabled exposure.
|
||||
def delete_schema_exposure(self, schema_file, exposure_dict):
|
||||
exposure_name = exposure_dict["name"]
|
||||
exposures = schema_file.exposures.copy()
|
||||
for unique_id in exposures:
|
||||
exposure = self.saved_manifest.exposures[unique_id]
|
||||
if unique_id in self.saved_manifest.exposures:
|
||||
exposure = self.saved_manifest.exposures[unique_id]
|
||||
if exposure.name == exposure_name:
|
||||
self.deleted_manifest.exposures[unique_id] = self.saved_manifest.exposures.pop(
|
||||
unique_id
|
||||
)
|
||||
schema_file.exposures.remove(unique_id)
|
||||
fire_event(PartialParsingDeletedExposure(unique_id=unique_id))
|
||||
elif unique_id in self.saved_manifest.disabled:
|
||||
self.delete_disabled(unique_id, schema_file.file_id)
|
||||
|
||||
# metric are created only from schema files, so just delete
|
||||
# the metric.
|
||||
# metrics are created only from schema files, but also can be referred to by other nodes
|
||||
def delete_schema_metric(self, schema_file, metric_dict):
|
||||
metric_name = metric_dict["name"]
|
||||
metrics = schema_file.metrics.copy()
|
||||
for unique_id in metrics:
|
||||
metric = self.saved_manifest.metrics[unique_id]
|
||||
if unique_id in self.saved_manifest.metrics:
|
||||
metric = self.saved_manifest.metrics[unique_id]
|
||||
if metric.name == metric_name:
|
||||
# Need to find everything that referenced this metric and schedule for parsing
|
||||
if unique_id in self.saved_manifest.child_map:
|
||||
self.schedule_nodes_for_parsing(self.saved_manifest.child_map[unique_id])
|
||||
self.deleted_manifest.metrics[unique_id] = self.saved_manifest.metrics.pop(
|
||||
unique_id
|
||||
)
|
||||
schema_file.metrics.remove(unique_id)
|
||||
fire_event(PartialParsingDeletedMetric(id=unique_id))
|
||||
elif unique_id in self.saved_manifest.disabled:
|
||||
self.delete_disabled(unique_id, schema_file.file_id)
|
||||
|
||||
def get_schema_element(self, elem_list, elem_name):
|
||||
for element in elem_list:
|
||||
|
||||
@@ -1,3 +1,5 @@
import os
import pathspec # type: ignore
import pathlib
from dbt.clients.system import load_file_contents
from dbt.contracts.files import (
@@ -107,9 +109,9 @@ def load_seed_source_file(match: FilePath, project_name) -> SourceFile:

# Use the FilesystemSearcher to get a bunch of FilePaths, then turn
# them into a bunch of FileSource objects
def get_source_files(project, paths, extension, parse_file_type, saved_files):
def get_source_files(project, paths, extension, parse_file_type, saved_files, ignore_spec):
# file path list
fp_list = filesystem_search(project, paths, extension)
fp_list = filesystem_search(project, paths, extension, ignore_spec)
# file block list
fb_list = []
for fp in fp_list:
@@ -129,42 +131,84 @@ def get_source_files(project, paths, extension, parse_file_type, saved_files):
return fb_list

def read_files_for_parser(project, files, dirs, extensions, parse_ft, saved_files):
def read_files_for_parser(project, files, dirs, extensions, parse_ft, saved_files, ignore_spec):
parser_files = []
for extension in extensions:
source_files = get_source_files(project, dirs, extension, parse_ft, saved_files)
source_files = get_source_files(
project, dirs, extension, parse_ft, saved_files, ignore_spec
)
for sf in source_files:
files[sf.file_id] = sf
parser_files.append(sf.file_id)
return parser_files

def generate_dbt_ignore_spec(project_root):
ignore_file_path = os.path.join(project_root, ".dbtignore")

ignore_spec = None
if os.path.exists(ignore_file_path):
with open(ignore_file_path) as f:
ignore_spec = pathspec.PathSpec.from_lines(pathspec.patterns.GitWildMatchPattern, f)
return ignore_spec
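Illustrative sketch (not from the diff above): .dbtignore patterns evaluated with the same pathspec gitwildmatch patterns used in generate_dbt_ignore_spec; the sample patterns and paths are made up.

import pathspec

# Patterns a user might put in a .dbtignore file at the project root.
ignore_lines = ["scratch/", "*.tmp.sql"]
spec = pathspec.PathSpec.from_lines(pathspec.patterns.GitWildMatchPattern, ignore_lines)

print(spec.match_file("scratch/old_model.sql"))   # True  -> skipped during file reading
print(spec.match_file("models/orders.sql"))       # False -> parsed as usual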
# This needs to read files for multiple projects, so the 'files'
# dictionary needs to be passed in. What determines the order of
# the various projects? Is the root project always last? Do the
# non-root projects need to be done separately in order?
def read_files(project, files, parser_files, saved_files):

dbt_ignore_spec = generate_dbt_ignore_spec(project.project_root)
project_files = {}

project_files["MacroParser"] = read_files_for_parser(
project, files, project.macro_paths, [".sql"], ParseFileType.Macro, saved_files
project,
files,
project.macro_paths,
[".sql"],
ParseFileType.Macro,
saved_files,
dbt_ignore_spec,
)

project_files["ModelParser"] = read_files_for_parser(
project, files, project.model_paths, [".sql", ".py"], ParseFileType.Model, saved_files
project,
files,
project.model_paths,
[".sql", ".py"],
ParseFileType.Model,
saved_files,
dbt_ignore_spec,
)

project_files["SnapshotParser"] = read_files_for_parser(
project, files, project.snapshot_paths, [".sql"], ParseFileType.Snapshot, saved_files
project,
files,
project.snapshot_paths,
[".sql"],
ParseFileType.Snapshot,
saved_files,
dbt_ignore_spec,
)

project_files["AnalysisParser"] = read_files_for_parser(
project, files, project.analysis_paths, [".sql"], ParseFileType.Analysis, saved_files
project,
files,
project.analysis_paths,
[".sql"],
ParseFileType.Analysis,
saved_files,
dbt_ignore_spec,
)

project_files["SingularTestParser"] = read_files_for_parser(
project, files, project.test_paths, [".sql"], ParseFileType.SingularTest, saved_files
project,
files,
project.test_paths,
[".sql"],
ParseFileType.SingularTest,
saved_files,
dbt_ignore_spec,
)

# all generic tests within /tests must be nested under a /generic subfolder
@@ -175,14 +219,27 @@ def read_files(project, files, parser_files, saved_files):
[".sql"],
ParseFileType.GenericTest,
saved_files,
dbt_ignore_spec,
)

project_files["SeedParser"] = read_files_for_parser(
project, files, project.seed_paths, [".csv"], ParseFileType.Seed, saved_files
project,
files,
project.seed_paths,
[".csv"],
ParseFileType.Seed,
saved_files,
dbt_ignore_spec,
)

project_files["DocumentationParser"] = read_files_for_parser(
project, files, project.docs_paths, [".md"], ParseFileType.Documentation, saved_files
project,
files,
project.docs_paths,
[".md"],
ParseFileType.Documentation,
saved_files,
dbt_ignore_spec,
)

project_files["SchemaParser"] = read_files_for_parser(
@@ -192,6 +249,7 @@ def read_files(project, files, parser_files, saved_files):
[".yml", ".yaml"],
ParseFileType.Schema,
saved_files,
dbt_ignore_spec,
)

# Store the parser files for this particular project
@@ -67,7 +67,8 @@ class SchemaYamlRenderer(BaseRenderer):
elif self._is_norender_key(keypath[0:]):
return False
elif self.key == "metrics":
if keypath[0] == "expression":
# back compat: "expression" is new name, "sql" is old name
if keypath[0] in ("expression", "sql"):
return False
elif self._is_norender_key(keypath[0:]):
return False
@@ -1010,6 +1010,7 @@ class ExposureParser(YamlReader):
meta=unparsed.meta,
tags=unparsed.tags,
description=unparsed.description,
label=unparsed.label,
owner=unparsed.owner,
maturity=unparsed.maturity,
config=config,
@@ -1028,7 +1029,7 @@ class ExposureParser(YamlReader):
if parsed.config.enabled:
self.manifest.add_exposure(self.yaml.file, parsed)
else:
self.manifest.add_disabled_nofile(parsed)
self.manifest.add_disabled(self.yaml.file, parsed)

def _generate_exposure_config(
self, target: UnparsedExposure, fqn: List[str], package_name: str, rendered: bool
@@ -1061,6 +1062,7 @@ class ExposureParser(YamlReader):
except (ValidationError, JSONValidationException) as exc:
msg = error_context(self.yaml.path, self.key, data, exc)
raise ParsingException(msg) from exc

self.parse_exposure(unparsed)


@@ -1142,7 +1144,7 @@ class MetricParser(YamlReader):
if parsed.config.enabled:
self.manifest.add_metric(self.yaml.file, parsed)
else:
self.manifest.add_disabled_nofile(parsed)
self.manifest.add_disabled(self.yaml.file, parsed)

def _generate_metric_config(
self, target: UnparsedMetric, fqn: List[str], package_name: str, rendered: bool
@@ -1,6 +1,7 @@
import os
from dataclasses import dataclass
from typing import List, Callable, Iterable, Set, Union, Iterator, TypeVar, Generic
from typing import List, Callable, Iterable, Set, Union, Iterator, TypeVar, Generic, Optional
from pathspec import PathSpec # type: ignore

from dbt.clients.jinja import extract_toplevel_blocks, BlockTag
from dbt.clients.system import find_matching
@@ -61,11 +62,16 @@ class FullBlock(FileBlock):
return self.block.full_block


def filesystem_search(project: Project, relative_dirs: List[str], extension: str):
def filesystem_search(
project: Project,
relative_dirs: List[str],
extension: str,
ignore_spec: Optional[PathSpec] = None,
):
ext = "[!.#~]*" + extension
root = project.project_root
file_path_list = []
for result in find_matching(root, relative_dirs, ext):
for result in find_matching(root, relative_dirs, ext, ignore_spec):
if "searched_path" not in result or "relative_path" not in result:
raise InternalException("Invalid result from find_matching: {}".format(result))
file_match = FilePath(
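Not part of the diff: a rough sketch of the effect of threading an Optional[PathSpec] through the search. The real filtering happens inside dbt's find_matching(); filter_ignored and the sample paths below are hypothetical.

from typing import List, Optional
from pathspec import PathSpec

def filter_ignored(relative_paths: List[str], ignore_spec: Optional[PathSpec]) -> List[str]:
    # Keep every path when no .dbtignore spec was built; otherwise drop matches.
    if ignore_spec is None:
        return relative_paths
    return [p for p in relative_paths if not ignore_spec.match_file(p)]

# Example: only the non-ignored path would survive the search.
# filter_ignored(["models/a.sql", "not-models/b.sql"], spec) -> ["models/a.sql"]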
@@ -63,7 +63,7 @@ class SourcePatcher:
self.sources[unpatched.unique_id] = unpatched
continue
# returns None if there is no patch
patch = self.get_patch_for(unpatched) # type: ignore[unreachable] # CT-564 / GH 5169
patch = self.get_patch_for(unpatched)

# returns unpatched if there is no patch
patched = self.patch_source(unpatched, patch)
@@ -213,8 +213,8 @@ class SourcePatcher:
self,
unpatched: UnpatchedSourceDefinition,
) -> Optional[SourcePatch]:
if isinstance(unpatched, ParsedSourceDefinition): # type: ignore[unreachable] # CT-564 / GH 5169
return None # type: ignore[unreachable] # CT-564 / GH 5169
if isinstance(unpatched, ParsedSourceDefinition):
return None
key = (unpatched.package_name, unpatched.source.name)
patch: Optional[SourcePatch] = self.manifest.source_patches.get(key)
if patch is None:
@@ -43,7 +43,7 @@ class SqlBlockParser(SimpleSQLParser[ParsedSqlNode]):
return os.path.join("sql", block.name)

def parse_remote(self, sql: str, name: str) -> ParsedSqlNode:
source_file = SourceFile.remote(sql, self.project.project_name)
source_file = SourceFile.remote(sql, self.project.project_name, "sql")
contents = SqlBlock(block_name=name, file=source_file)
return self.parse_node(contents)
@@ -108,13 +108,13 @@ class BaseTask(metaclass=ABCMeta):
config = cls.ConfigType.from_args(args)
except dbt.exceptions.DbtProjectError as exc:
fire_event(DbtProjectError())
fire_event(DbtProjectErrorException(exc=exc))
fire_event(DbtProjectErrorException(exc=str(exc)))

tracking.track_invalid_invocation(args=args, result_type=exc.result_type)
raise dbt.exceptions.RuntimeException("Could not run dbt") from exc
except dbt.exceptions.DbtProfileError as exc:
fire_event(DbtProfileError())
fire_event(DbtProfileErrorException(exc=exc))
fire_event(DbtProfileErrorException(exc=str(exc)))

all_profiles = read_profiles(flags.PROFILES_DIR).keys()

@@ -170,6 +170,7 @@ def get_nearest_project_dir(args):
def move_to_nearest_project_dir(args):
nearest_project_dir = get_nearest_project_dir(args)
os.chdir(nearest_project_dir)
return nearest_project_dir


class ConfiguredTask(BaseTask):
@@ -345,11 +346,11 @@ class BaseRunner(metaclass=ABCMeta):
if e.node is None:
e.add_node(ctx.node)

fire_event(CatchableExceptionOnRun(exc=e))
fire_event(CatchableExceptionOnRun(exc=str(e)))
return str(e)

def _handle_internal_exception(self, e, ctx):
fire_event(InternalExceptionOnRun(build_path=self.node.build_path, exc=e))
fire_event(InternalExceptionOnRun(build_path=self.node.build_path, exc=str(e)))
return str(e)

def _handle_generic_exception(self, e, ctx):
@@ -357,7 +358,7 @@ class BaseRunner(metaclass=ABCMeta):
GenericExceptionOnRun(
build_path=self.node.build_path,
unique_id=self.node.unique_id,
exc=str(e), # TODO: unstring this when serialization is fixed
exc=str(e),
)
)
fire_event(PrintDebugStackTrace())
@@ -413,7 +414,7 @@ class BaseRunner(metaclass=ABCMeta):
try:
self.adapter.release_connection()
except Exception as exc:
fire_event(NodeConnectionReleaseError(node_name=self.node.name, exc=exc))
fire_event(NodeConnectionReleaseError(node_name=self.node.name, exc=str(exc)))
return str(exc)

return None
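Not part of the diff: a minimal sketch of why these hunks stringify exceptions before attaching them to structured events; ExampleExceptionEvent is an invented stand-in, not a real dbt event class.

import json
from dataclasses import dataclass, asdict

@dataclass
class ExampleExceptionEvent:
    exc: str  # storing str(exc) keeps the event payload JSON-serializable

try:
    raise ValueError("boom")
except ValueError as e:
    event = ExampleExceptionEvent(exc=str(e))
    print(json.dumps(asdict(event)))  # {"exc": "boom"}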
@@ -21,10 +21,6 @@ from dbt.version import get_installed_version

from dbt.task.base import BaseTask, get_nearest_project_dir

PROFILE_DIR_MESSAGE = """To view your profiles.yml file, run:

{open_cmd} {profiles_dir}"""

ONLY_PROFILE_MESSAGE = """
A `dbt_project.yml` file was not found in this directory.
Using the only profile `{}`.
@@ -278,7 +278,9 @@ class ModelRunner(CompileRunner):

hook_ctx = self.adapter.pre_model_hook(context_config)
try:
result = MacroGenerator(materialization_macro, context)()
result = MacroGenerator(
materialization_macro, context, stack=context["context_macro_stack"]
)()
finally:
self.adapter.post_model_hook(context_config, hook_ctx)
@@ -55,11 +55,11 @@ class RunOperationTask(ManifestTask):
try:
self._run_unsafe()
except dbt.exceptions.Exception as exc:
fire_event(RunningOperationCaughtError(exc=exc))
fire_event(RunningOperationCaughtError(exc=str(exc)))
fire_event(PrintDebugStackTrace())
success = False
except Exception as exc:
fire_event(RunningOperationUncaughtError(exc=exc))
fire_event(RunningOperationUncaughtError(exc=str(exc)))
fire_event(PrintDebugStackTrace())
success = False
else:
@@ -22,7 +22,7 @@ class GenericSqlRunner(CompileRunner, Generic[SQLResult]):
CompileRunner.__init__(self, config, adapter, node, node_index, num_nodes)

def handle_exception(self, e, ctx):
fire_event(SQlRunnerException(exc=e))
fire_event(SQlRunnerException(exc=str(e)))
if isinstance(e, dbt.exceptions.Exception):
if isinstance(e, dbt.exceptions.RuntimeException):
e.add_node(ctx.node)
@@ -6,6 +6,7 @@ import warnings
from datetime import datetime
from typing import List
from contextlib import contextmanager
from dbt.adapters.factory import Adapter

from dbt.main import handle_and_check
from dbt.logger import log_manager
@@ -215,6 +216,7 @@ class TestProcessingException(Exception):
def run_sql_with_adapter(adapter, sql, fetch=None):
if sql.strip() == "":
return

# substitute schema and database in sql
kwargs = {
"schema": adapter.config.credentials.schema,
@@ -308,7 +310,7 @@ def check_relation_types(adapter, relation_to_type):
# by doing a separate call for each set of tables/relations.
# Wraps check_relations_equal_with_relations by creating relations
# from the list of names passed in.
def check_relations_equal(adapter, relation_names, compare_snapshot_cols=False):
def check_relations_equal(adapter, relation_names: List, compare_snapshot_cols=False):
if len(relation_names) < 2:
raise TestProcessingException(
"Not enough relations to compare",
@@ -325,7 +327,9 @@ def check_relations_equal(adapter, relation_names, compare_snapshot_cols=False):
# adapter.get_columns_in_relation
# adapter.get_rows_different_sql
# adapter.execute
def check_relations_equal_with_relations(adapter, relations, compare_snapshot_cols=False):
def check_relations_equal_with_relations(
adapter: Adapter, relations: List, compare_snapshot_cols=False
):

with get_connection(adapter):
basis, compares = relations[0], relations[1:]
@@ -334,12 +338,12 @@ def check_relations_equal_with_relations(adapter, relations, compare_snapshot_co
# (unless comparing "dbt_" snapshot columns is explicitly enabled)
column_names = [
c.name
for c in adapter.get_columns_in_relation(basis)
for c in adapter.get_columns_in_relation(basis) # type: ignore
if not c.name.lower().startswith("dbt_") or compare_snapshot_cols
]

for relation in compares:
sql = adapter.get_rows_different_sql(basis, relation, column_names=column_names)
sql = adapter.get_rows_different_sql(basis, relation, column_names=column_names) # type: ignore
_, tbl = adapter.execute(sql, fetch=True)
num_rows = len(tbl)
assert (
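Not part of the diff: a hedged usage sketch of check_relations_equal inside a dbt functional test, assuming the dbt.tests.util import path and the standard project fixture; the seed and model names are placeholders.

from dbt.tests.util import run_dbt, check_relations_equal

def test_model_matches_seed(project):
    run_dbt(["seed"])
    run_dbt(["run"])
    # Compares row contents of the two relations, ignoring dbt_* snapshot columns.
    check_relations_equal(project.adapter, ["seed", "my_model"])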
@@ -619,7 +619,7 @@ def _connection_exception_retry(fn, max_attempts: int, attempt: int = 0):
ReadError,
) as exc:
if attempt <= max_attempts - 1:
fire_event(RecordRetryException(exc=exc))
fire_event(RecordRetryException(exc=str(exc)))
fire_event(RetryExternalCall(attempt=attempt, max=max_attempts))
time.sleep(1)
return _connection_exception_retry(fn, max_attempts, attempt + 1)
@@ -62,6 +62,7 @@ setup(
"dbt-extractor~=0.4.1",
"typing-extensions>=3.7.4",
"werkzeug>=1,<3",
"pathspec~=0.9.0",
# the following are all to match snowflake-connector-python
"requests<3.0.0",
"idna>=2.5,<4",
@@ -244,7 +244,7 @@
|
||||
"generated_at": {
|
||||
"type": "string",
|
||||
"format": "date-time",
|
||||
"default": "2022-09-12T20:41:53.793012Z"
|
||||
"default": "2022-09-14T20:35:15.346636Z"
|
||||
},
|
||||
"invocation_id": {
|
||||
"oneOf": [
|
||||
@@ -255,7 +255,7 @@
|
||||
"type": "null"
|
||||
}
|
||||
],
|
||||
"default": "3e1f83a7-c7fd-44ed-bb2f-5e3fc9aee730"
|
||||
"default": "c59a8269-533c-4b78-a709-5094045afd4d"
|
||||
},
|
||||
"env": {
|
||||
"type": "object",
|
||||
@@ -523,7 +523,7 @@
|
||||
},
|
||||
"created_at": {
|
||||
"type": "number",
|
||||
"default": 1663015313.7976892
|
||||
"default": 1663187715.3517282
|
||||
},
|
||||
"config_call_dict": {
|
||||
"type": "object",
|
||||
@@ -1074,7 +1074,7 @@
|
||||
},
|
||||
"created_at": {
|
||||
"type": "number",
|
||||
"default": 1663015313.800915
|
||||
"default": 1663187715.35441
|
||||
},
|
||||
"config_call_dict": {
|
||||
"type": "object",
|
||||
@@ -1437,7 +1437,7 @@
|
||||
},
|
||||
"created_at": {
|
||||
"type": "number",
|
||||
"default": 1663015313.802708
|
||||
"default": 1663187715.356541
|
||||
},
|
||||
"config_call_dict": {
|
||||
"type": "object",
|
||||
@@ -1688,7 +1688,7 @@
|
||||
},
|
||||
"created_at": {
|
||||
"type": "number",
|
||||
"default": 1663015313.8044102
|
||||
"default": 1663187715.3582149
|
||||
},
|
||||
"config_call_dict": {
|
||||
"type": "object",
|
||||
@@ -1949,7 +1949,7 @@
|
||||
},
|
||||
"created_at": {
|
||||
"type": "number",
|
||||
"default": 1663015313.806144
|
||||
"default": 1663187715.359935
|
||||
},
|
||||
"config_call_dict": {
|
||||
"type": "object",
|
||||
@@ -2200,7 +2200,7 @@
|
||||
},
|
||||
"created_at": {
|
||||
"type": "number",
|
||||
"default": 1663015313.807632
|
||||
"default": 1663187715.361423
|
||||
},
|
||||
"config_call_dict": {
|
||||
"type": "object",
|
||||
@@ -2447,7 +2447,7 @@
|
||||
},
|
||||
"created_at": {
|
||||
"type": "number",
|
||||
"default": 1663015313.809396
|
||||
"default": 1663187715.363411
|
||||
},
|
||||
"config_call_dict": {
|
||||
"type": "object",
|
||||
@@ -2746,7 +2746,7 @@
|
||||
},
|
||||
"created_at": {
|
||||
"type": "number",
|
||||
"default": 1663015313.8124218
|
||||
"default": 1663187715.366584
|
||||
},
|
||||
"config_call_dict": {
|
||||
"type": "object",
|
||||
@@ -3162,7 +3162,7 @@
|
||||
},
|
||||
"created_at": {
|
||||
"type": "number",
|
||||
"default": 1663015313.814218
|
||||
"default": 1663187715.3682182
|
||||
},
|
||||
"config_call_dict": {
|
||||
"type": "object",
|
||||
@@ -3409,7 +3409,7 @@
|
||||
},
|
||||
"created_at": {
|
||||
"type": "number",
|
||||
"default": 1663015313.816299
|
||||
"default": 1663187715.369675
|
||||
},
|
||||
"config_call_dict": {
|
||||
"type": "object",
|
||||
@@ -3617,7 +3617,7 @@
|
||||
},
|
||||
"created_at": {
|
||||
"type": "number",
|
||||
"default": 1663015313.817742
|
||||
"default": 1663187715.370925
|
||||
},
|
||||
"config_call_dict": {
|
||||
"type": "object",
|
||||
@@ -3833,7 +3833,7 @@
|
||||
},
|
||||
"created_at": {
|
||||
"type": "number",
|
||||
"default": 1663015313.819065
|
||||
"default": 1663187715.372257
|
||||
},
|
||||
"config_call_dict": {
|
||||
"type": "object",
|
||||
@@ -4059,7 +4059,7 @@
|
||||
},
|
||||
"created_at": {
|
||||
"type": "number",
|
||||
"default": 1663015313.820469
|
||||
"default": 1663187715.373705
|
||||
},
|
||||
"config_call_dict": {
|
||||
"type": "object",
|
||||
@@ -4275,7 +4275,7 @@
|
||||
},
|
||||
"created_at": {
|
||||
"type": "number",
|
||||
"default": 1663015313.821846
|
||||
"default": 1663187715.375131
|
||||
},
|
||||
"config_call_dict": {
|
||||
"type": "object",
|
||||
@@ -4491,7 +4491,7 @@
|
||||
},
|
||||
"created_at": {
|
||||
"type": "number",
|
||||
"default": 1663015313.823106
|
||||
"default": 1663187715.376405
|
||||
},
|
||||
"config_call_dict": {
|
||||
"type": "object",
|
||||
@@ -4703,7 +4703,7 @@
|
||||
},
|
||||
"created_at": {
|
||||
"type": "number",
|
||||
"default": 1663015313.8250082
|
||||
"default": 1663187715.3784761
|
||||
},
|
||||
"config_call_dict": {
|
||||
"type": "object",
|
||||
@@ -4940,7 +4940,7 @@
|
||||
},
|
||||
"created_at": {
|
||||
"type": "number",
|
||||
"default": 1663015313.826428
|
||||
"default": 1663187715.380325
|
||||
},
|
||||
"config_call_dict": {
|
||||
"type": "object",
|
||||
@@ -5133,7 +5133,7 @@
|
||||
},
|
||||
"created_at": {
|
||||
"type": "number",
|
||||
"default": 1663015313.8290439
|
||||
"default": 1663187715.3832462
|
||||
},
|
||||
"config_call_dict": {
|
||||
"type": "object",
|
||||
@@ -5518,7 +5518,7 @@
|
||||
},
|
||||
"created_at": {
|
||||
"type": "number",
|
||||
"default": 1663015313.831739
|
||||
"default": 1663187715.3854342
|
||||
}
|
||||
},
|
||||
"additionalProperties": false,
|
||||
@@ -5633,7 +5633,7 @@
|
||||
"generated_at": {
|
||||
"type": "string",
|
||||
"format": "date-time",
|
||||
"default": "2022-09-12T20:41:53.788122Z"
|
||||
"default": "2022-09-14T20:35:15.341700Z"
|
||||
},
|
||||
"invocation_id": {
|
||||
"oneOf": [
|
||||
@@ -5644,7 +5644,7 @@
|
||||
"type": "null"
|
||||
}
|
||||
],
|
||||
"default": "3e1f83a7-c7fd-44ed-bb2f-5e3fc9aee730"
|
||||
"default": "c59a8269-533c-4b78-a709-5094045afd4d"
|
||||
},
|
||||
"env": {
|
||||
"type": "object",
|
||||
@@ -6002,7 +6002,7 @@
|
||||
},
|
||||
"created_at": {
|
||||
"type": "number",
|
||||
"default": 1663015313.832941
|
||||
"default": 1663187715.386226
|
||||
},
|
||||
"supported_languages": {
|
||||
"oneOf": [
|
||||
@@ -6178,6 +6178,16 @@
"type": "string",
"default": ""
},
"label": {
"oneOf": [
{
"type": "string"
},
{
"type": "null"
}
]
},
"maturity": {
"oneOf": [
{
@@ -6253,11 +6263,11 @@
},
"created_at": {
"type": "number",
"default": 1663015313.834493
"default": 1663187715.3878772
}
},
"additionalProperties": false,
"description": "ParsedExposure(fqn: List[str], unique_id: str, package_name: str, root_path: str, path: str, original_file_path: str, name: str, type: dbt.contracts.graph.unparsed.ExposureType, owner: dbt.contracts.graph.unparsed.ExposureOwner, resource_type: dbt.node_types.NodeType = <NodeType.Exposure: 'exposure'>, description: str = '', maturity: Union[dbt.contracts.graph.unparsed.MaturityType, NoneType] = None, meta: Dict[str, Any] = <factory>, tags: List[str] = <factory>, config: dbt.contracts.graph.model_config.ExposureConfig = <factory>, unrendered_config: Dict[str, Any] = <factory>, url: Union[str, NoneType] = None, depends_on: dbt.contracts.graph.parsed.DependsOn = <factory>, refs: List[List[str]] = <factory>, sources: List[List[str]] = <factory>, created_at: float = <factory>)"
"description": "ParsedExposure(fqn: List[str], unique_id: str, package_name: str, root_path: str, path: str, original_file_path: str, name: str, type: dbt.contracts.graph.unparsed.ExposureType, owner: dbt.contracts.graph.unparsed.ExposureOwner, resource_type: dbt.node_types.NodeType = <NodeType.Exposure: 'exposure'>, description: str = '', label: Union[str, NoneType] = None, maturity: Union[dbt.contracts.graph.unparsed.MaturityType, NoneType] = None, meta: Dict[str, Any] = <factory>, tags: List[str] = <factory>, config: dbt.contracts.graph.model_config.ExposureConfig = <factory>, unrendered_config: Dict[str, Any] = <factory>, url: Union[str, NoneType] = None, depends_on: dbt.contracts.graph.parsed.DependsOn = <factory>, refs: List[List[str]] = <factory>, sources: List[List[str]] = <factory>, created_at: float = <factory>)"
},
"ExposureOwner": {
"type": "object",
@@ -6486,7 +6496,7 @@
},
"created_at": {
"type": "number",
"default": 1663015313.836148
"default": 1663187715.38939
}
},
"additionalProperties": false,
@@ -1,7 +0,0 @@
{{
config(
materialized = "table"
)
}}

select * from {{ref('view_model')}}
@@ -1,7 +0,0 @@
{{
config(
materialized = "table"
)
}}

select a_field_that_does_not_exist from {{ this.schema }}.seed
@@ -1 +0,0 @@
select * from {{ref('invalid')}}
@@ -1,7 +0,0 @@
{{
config(
materialized = "table"
)
}}

select * from {{ this.schema }}.seed
@@ -1,7 +0,0 @@
{{
config(
materialized = "table"
)
}}

select * from {{ this.schema }}.seed
@@ -1,7 +0,0 @@
{{
config(
materialized = "view"
)
}}

select * from {{ this.schema }}.seed
@@ -1,5 +0,0 @@
select * from {{ref('table_a')}}

union all

select * from {{ref('table_b')}}
@@ -1,111 +0,0 @@
|
||||
create table {schema}.seed (
|
||||
id BIGSERIAL PRIMARY KEY,
|
||||
first_name VARCHAR(50),
|
||||
last_name VARCHAR(50),
|
||||
email VARCHAR(50),
|
||||
gender VARCHAR(50),
|
||||
ip_address VARCHAR(20)
|
||||
);
|
||||
|
||||
|
||||
insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values
|
||||
('Jack', 'Hunter', 'jhunter0@pbs.org', 'Male', '59.80.20.168'),
|
||||
('Kathryn', 'Walker', 'kwalker1@ezinearticles.com', 'Female', '194.121.179.35'),
|
||||
('Gerald', 'Ryan', 'gryan2@com.com', 'Male', '11.3.212.243'),
|
||||
('Bonnie', 'Spencer', 'bspencer3@ameblo.jp', 'Female', '216.32.196.175'),
|
||||
('Harold', 'Taylor', 'htaylor4@people.com.cn', 'Male', '253.10.246.136'),
|
||||
('Jacqueline', 'Griffin', 'jgriffin5@t.co', 'Female', '16.13.192.220'),
|
||||
('Wanda', 'Arnold', 'warnold6@google.nl', 'Female', '232.116.150.64'),
|
||||
('Craig', 'Ortiz', 'cortiz7@sciencedaily.com', 'Male', '199.126.106.13'),
|
||||
('Gary', 'Day', 'gday8@nih.gov', 'Male', '35.81.68.186'),
|
||||
('Rose', 'Wright', 'rwright9@yahoo.co.jp', 'Female', '236.82.178.100'),
|
||||
('Raymond', 'Kelley', 'rkelleya@fc2.com', 'Male', '213.65.166.67'),
|
||||
('Gerald', 'Robinson', 'grobinsonb@disqus.com', 'Male', '72.232.194.193'),
|
||||
('Mildred', 'Martinez', 'mmartinezc@samsung.com', 'Female', '198.29.112.5'),
|
||||
('Dennis', 'Arnold', 'darnoldd@google.com', 'Male', '86.96.3.250'),
|
||||
('Judy', 'Gray', 'jgraye@opensource.org', 'Female', '79.218.162.245'),
|
||||
('Theresa', 'Garza', 'tgarzaf@epa.gov', 'Female', '21.59.100.54'),
|
||||
('Gerald', 'Robertson', 'grobertsong@csmonitor.com', 'Male', '131.134.82.96'),
|
||||
('Philip', 'Hernandez', 'phernandezh@adobe.com', 'Male', '254.196.137.72'),
|
||||
('Julia', 'Gonzalez', 'jgonzalezi@cam.ac.uk', 'Female', '84.240.227.174'),
|
||||
('Andrew', 'Davis', 'adavisj@patch.com', 'Male', '9.255.67.25'),
|
||||
('Kimberly', 'Harper', 'kharperk@foxnews.com', 'Female', '198.208.120.253'),
|
||||
('Mark', 'Martin', 'mmartinl@marketwatch.com', 'Male', '233.138.182.153'),
|
||||
('Cynthia', 'Ruiz', 'cruizm@google.fr', 'Female', '18.178.187.201'),
|
||||
('Samuel', 'Carroll', 'scarrolln@youtu.be', 'Male', '128.113.96.122'),
|
||||
('Jennifer', 'Larson', 'jlarsono@vinaora.com', 'Female', '98.234.85.95'),
|
||||
('Ashley', 'Perry', 'aperryp@rakuten.co.jp', 'Female', '247.173.114.52'),
|
||||
('Howard', 'Rodriguez', 'hrodriguezq@shutterfly.com', 'Male', '231.188.95.26'),
|
||||
('Amy', 'Brooks', 'abrooksr@theatlantic.com', 'Female', '141.199.174.118'),
|
||||
('Louise', 'Warren', 'lwarrens@adobe.com', 'Female', '96.105.158.28'),
|
||||
('Tina', 'Watson', 'twatsont@myspace.com', 'Female', '251.142.118.177'),
|
||||
('Janice', 'Kelley', 'jkelleyu@creativecommons.org', 'Female', '239.167.34.233'),
|
||||
('Terry', 'Mccoy', 'tmccoyv@bravesites.com', 'Male', '117.201.183.203'),
|
||||
('Jeffrey', 'Morgan', 'jmorganw@surveymonkey.com', 'Male', '78.101.78.149'),
|
||||
('Louis', 'Harvey', 'lharveyx@sina.com.cn', 'Male', '51.50.0.167'),
|
||||
('Philip', 'Miller', 'pmillery@samsung.com', 'Male', '103.255.222.110'),
|
||||
('Willie', 'Marshall', 'wmarshallz@ow.ly', 'Male', '149.219.91.68'),
|
||||
('Patrick', 'Lopez', 'plopez10@redcross.org', 'Male', '250.136.229.89'),
|
||||
('Adam', 'Jenkins', 'ajenkins11@harvard.edu', 'Male', '7.36.112.81'),
|
||||
('Benjamin', 'Cruz', 'bcruz12@linkedin.com', 'Male', '32.38.98.15'),
|
||||
('Ruby', 'Hawkins', 'rhawkins13@gmpg.org', 'Female', '135.171.129.255'),
|
||||
('Carlos', 'Barnes', 'cbarnes14@a8.net', 'Male', '240.197.85.140'),
|
||||
('Ruby', 'Griffin', 'rgriffin15@bravesites.com', 'Female', '19.29.135.24'),
|
||||
('Sean', 'Mason', 'smason16@icq.com', 'Male', '159.219.155.249'),
|
||||
('Anthony', 'Payne', 'apayne17@utexas.edu', 'Male', '235.168.199.218'),
|
||||
('Steve', 'Cruz', 'scruz18@pcworld.com', 'Male', '238.201.81.198'),
|
||||
('Anthony', 'Garcia', 'agarcia19@flavors.me', 'Male', '25.85.10.18'),
|
||||
('Doris', 'Lopez', 'dlopez1a@sphinn.com', 'Female', '245.218.51.238'),
|
||||
('Susan', 'Nichols', 'snichols1b@freewebs.com', 'Female', '199.99.9.61'),
|
||||
('Wanda', 'Ferguson', 'wferguson1c@yahoo.co.jp', 'Female', '236.241.135.21'),
|
||||
('Andrea', 'Pierce', 'apierce1d@google.co.uk', 'Female', '132.40.10.209'),
|
||||
('Lawrence', 'Phillips', 'lphillips1e@jugem.jp', 'Male', '72.226.82.87'),
|
||||
('Judy', 'Gilbert', 'jgilbert1f@multiply.com', 'Female', '196.250.15.142'),
|
||||
('Eric', 'Williams', 'ewilliams1g@joomla.org', 'Male', '222.202.73.126'),
|
||||
('Ralph', 'Romero', 'rromero1h@sogou.com', 'Male', '123.184.125.212'),
|
||||
('Jean', 'Wilson', 'jwilson1i@ocn.ne.jp', 'Female', '176.106.32.194'),
|
||||
('Lori', 'Reynolds', 'lreynolds1j@illinois.edu', 'Female', '114.181.203.22'),
|
||||
('Donald', 'Moreno', 'dmoreno1k@bbc.co.uk', 'Male', '233.249.97.60'),
|
||||
('Steven', 'Berry', 'sberry1l@eepurl.com', 'Male', '186.193.50.50'),
|
||||
('Theresa', 'Shaw', 'tshaw1m@people.com.cn', 'Female', '120.37.71.222'),
|
||||
('John', 'Stephens', 'jstephens1n@nationalgeographic.com', 'Male', '191.87.127.115'),
|
||||
('Richard', 'Jacobs', 'rjacobs1o@state.tx.us', 'Male', '66.210.83.155'),
|
||||
('Andrew', 'Lawson', 'alawson1p@over-blog.com', 'Male', '54.98.36.94'),
|
||||
('Peter', 'Morgan', 'pmorgan1q@rambler.ru', 'Male', '14.77.29.106'),
|
||||
('Nicole', 'Garrett', 'ngarrett1r@zimbio.com', 'Female', '21.127.74.68'),
|
||||
('Joshua', 'Kim', 'jkim1s@edublogs.org', 'Male', '57.255.207.41'),
|
||||
('Ralph', 'Roberts', 'rroberts1t@people.com.cn', 'Male', '222.143.131.109'),
|
||||
('George', 'Montgomery', 'gmontgomery1u@smugmug.com', 'Male', '76.75.111.77'),
|
||||
('Gerald', 'Alvarez', 'galvarez1v@flavors.me', 'Male', '58.157.186.194'),
|
||||
('Donald', 'Olson', 'dolson1w@whitehouse.gov', 'Male', '69.65.74.135'),
|
||||
('Carlos', 'Morgan', 'cmorgan1x@pbs.org', 'Male', '96.20.140.87'),
|
||||
('Aaron', 'Stanley', 'astanley1y@webnode.com', 'Male', '163.119.217.44'),
|
||||
('Virginia', 'Long', 'vlong1z@spiegel.de', 'Female', '204.150.194.182'),
|
||||
('Robert', 'Berry', 'rberry20@tripadvisor.com', 'Male', '104.19.48.241'),
|
||||
('Antonio', 'Brooks', 'abrooks21@unesco.org', 'Male', '210.31.7.24'),
|
||||
('Ruby', 'Garcia', 'rgarcia22@ovh.net', 'Female', '233.218.162.214'),
|
||||
('Jack', 'Hanson', 'jhanson23@blogtalkradio.com', 'Male', '31.55.46.199'),
|
||||
('Kathryn', 'Nelson', 'knelson24@walmart.com', 'Female', '14.189.146.41'),
|
||||
('Jason', 'Reed', 'jreed25@printfriendly.com', 'Male', '141.189.89.255'),
|
||||
('George', 'Coleman', 'gcoleman26@people.com.cn', 'Male', '81.189.221.144'),
|
||||
('Rose', 'King', 'rking27@ucoz.com', 'Female', '212.123.168.231'),
|
||||
('Johnny', 'Holmes', 'jholmes28@boston.com', 'Male', '177.3.93.188'),
|
||||
('Katherine', 'Gilbert', 'kgilbert29@altervista.org', 'Female', '199.215.169.61'),
|
||||
('Joshua', 'Thomas', 'jthomas2a@ustream.tv', 'Male', '0.8.205.30'),
|
||||
('Julie', 'Perry', 'jperry2b@opensource.org', 'Female', '60.116.114.192'),
|
||||
('Richard', 'Perry', 'rperry2c@oracle.com', 'Male', '181.125.70.232'),
|
||||
('Kenneth', 'Ruiz', 'kruiz2d@wikimedia.org', 'Male', '189.105.137.109'),
|
||||
('Jose', 'Morgan', 'jmorgan2e@webnode.com', 'Male', '101.134.215.156'),
|
||||
('Donald', 'Campbell', 'dcampbell2f@goo.ne.jp', 'Male', '102.120.215.84'),
|
||||
('Debra', 'Collins', 'dcollins2g@uol.com.br', 'Female', '90.13.153.235'),
|
||||
('Jesse', 'Johnson', 'jjohnson2h@stumbleupon.com', 'Male', '225.178.125.53'),
|
||||
('Elizabeth', 'Stone', 'estone2i@histats.com', 'Female', '123.184.126.221'),
|
||||
('Angela', 'Rogers', 'arogers2j@goodreads.com', 'Female', '98.104.132.187'),
|
||||
('Emily', 'Dixon', 'edixon2k@mlb.com', 'Female', '39.190.75.57'),
|
||||
('Albert', 'Scott', 'ascott2l@tinypic.com', 'Male', '40.209.13.189'),
|
||||
('Barbara', 'Peterson', 'bpeterson2m@ow.ly', 'Female', '75.249.136.180'),
|
||||
('Adam', 'Greene', 'agreene2n@fastcompany.com', 'Male', '184.173.109.144'),
|
||||
('Earl', 'Sanders', 'esanders2o@hc360.com', 'Male', '247.34.90.117'),
|
||||
('Angela', 'Brooks', 'abrooks2p@mtv.com', 'Female', '10.63.249.126'),
|
||||
('Harold', 'Foster', 'hfoster2q@privacy.gov.au', 'Male', '139.214.40.244'),
|
||||
('Carl', 'Meyer', 'cmeyer2r@disqus.com', 'Male', '204.117.7.88');
|
||||
@@ -1,39 +0,0 @@
|
||||
from test.integration.base import DBTIntegrationTest, use_profile
|
||||
|
||||
|
||||
class TestConcurrency(DBTIntegrationTest):
|
||||
@property
|
||||
def schema(self):
|
||||
return "concurrency_021"
|
||||
|
||||
@property
|
||||
def models(self):
|
||||
return "models"
|
||||
|
||||
@use_profile('postgres')
|
||||
def test__postgres__concurrency(self):
|
||||
self.run_sql_file("seed.sql")
|
||||
|
||||
results = self.run_dbt(expect_pass=False)
|
||||
self.assertEqual(len(results), 7)
|
||||
|
||||
self.assertTablesEqual("seed", "view_model")
|
||||
self.assertTablesEqual("seed", "dep")
|
||||
self.assertTablesEqual("seed", "table_a")
|
||||
self.assertTablesEqual("seed", "table_b")
|
||||
self.assertTableDoesNotExist("invalid")
|
||||
self.assertTableDoesNotExist("skip")
|
||||
|
||||
self.run_sql_file("update.sql")
|
||||
|
||||
results, output = self.run_dbt_and_capture(expect_pass=False)
|
||||
self.assertEqual(len(results), 7)
|
||||
|
||||
self.assertTablesEqual("seed", "view_model")
|
||||
self.assertTablesEqual("seed", "dep")
|
||||
self.assertTablesEqual("seed", "table_a")
|
||||
self.assertTablesEqual("seed", "table_b")
|
||||
self.assertTableDoesNotExist("invalid")
|
||||
self.assertTableDoesNotExist("skip")
|
||||
|
||||
self.assertIn('PASS=5 WARN=0 ERROR=1 SKIP=1 TOTAL=7', output)
|
||||
Some files were not shown because too many files have changed in this diff.