mirror of https://github.com/dbt-labs/dbt-core (synced 2025-12-19 12:31:27 +00:00)

Compare commits: jerco/upda... test-codec
8 Commits

| Author | SHA1 | Date |
|---|---|---|
| | 8da2f01c5f | |
| | 0d645c227f | |
| | 1fbab197b2 | |
| | fb6c349677 | |
| | eeb057085c | |
| | 121371f4a4 | |
| | a32713198b | |
| | a1b067c683 | |
@@ -1,6 +0,0 @@
-kind: Docs
-body: Fix for column tests not rendering on quoted columns
-time: 2023-05-31T11:54:19.687363-04:00
-custom:
-  Author: drewbanin
-  Issue: "201"
6  .changes/unreleased/Docs-20230715-200907.yaml  Normal file
@@ -0,0 +1,6 @@
+kind: Docs
+body: Corrected spelling of "Partiton"
+time: 2023-07-15T20:09:07.057361092+02:00
+custom:
+  Author: pgoslatara
+  Issue: "8100"
6  .changes/unreleased/Fixes-20230720-122723.yaml  Normal file
@@ -0,0 +1,6 @@
+kind: Fixes
+body: Add status to Parse Inline Error
+time: 2023-07-20T12:27:23.085084-07:00
+custom:
+  Author: ChenyuLInx
+  Issue: "8173"
6  .changes/unreleased/Under the Hood-20230719-124611.yaml  Normal file
@@ -0,0 +1,6 @@
+kind: Under the Hood
+body: Refactor flaky test pp_versioned_models
+time: 2023-07-19T12:46:11.972481-04:00
+custom:
+  Author: gshank
+  Issue: "7781"
6  .changes/unreleased/Under the Hood-20230719-163334.yaml  Normal file
@@ -0,0 +1,6 @@
+kind: Under the Hood
+body: format exception from dbtPlugin.initialize
+time: 2023-07-19T16:33:34.586377-04:00
+custom:
+  Author: michelleark
+  Issue: "8152"
@@ -13,7 +13,7 @@ from uuid import uuid4
 from dbt.events.format import timestamp_to_datetime_string

 from dbt.events.base_types import BaseEvent, EventLevel, msg_from_base_event, EventMsg

+import dbt.utils

 # A Filter is a function which takes a BaseEvent and returns True if the event
 # should be logged, False otherwise.
@@ -175,7 +175,7 @@ class _JsonLogger(_Logger):
         from dbt.events.functions import msg_to_dict

         msg_dict = msg_to_dict(msg)
-        raw_log_line = json.dumps(msg_dict, sort_keys=True)
+        raw_log_line = json.dumps(msg_dict, sort_keys=True, cls=dbt.utils.ForgivingJSONEncoder)
         line = self.scrubber(raw_log_line)  # type: ignore
         return line
@@ -13,6 +13,7 @@ from typing import Callable, Dict, List, Optional, TextIO
 import uuid
 from google.protobuf.json_format import MessageToDict

+import dbt.utils

 LOG_VERSION = 3
 metadata_vars: Optional[Dict[str, str]] = None
@@ -200,7 +201,7 @@ def stop_capture_stdout_logs():
 # the message may contain secrets which must be scrubbed at the usage site.
 def msg_to_json(msg: EventMsg) -> str:
     msg_dict = msg_to_dict(msg)
-    raw_log_line = json.dumps(msg_dict, sort_keys=True)
+    raw_log_line = json.dumps(msg_dict, sort_keys=True, cls=dbt.utils.ForgivingJSONEncoder)
     return raw_log_line
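The two hunks above route structured log lines through `json.dumps(..., cls=dbt.utils.ForgivingJSONEncoder)`, so one non-serializable value in a message no longer aborts logging. The snippet below is only an illustrative stand-in, not dbt's `ForgivingJSONEncoder`: it shows what supplying a custom `cls=` encoder buys when, for example, an exception object ends up inside the message dict (compare the `isinstance(obj, Exception)` branch added to `JSONEncoder` further down in this diff).

```python
import json


class ForgivingEncoder(json.JSONEncoder):
    """Illustrative stand-in for an encoder like dbt.utils.ForgivingJSONEncoder."""

    def default(self, obj):
        if isinstance(obj, Exception):
            # Keep the error readable in the log line instead of raising TypeError.
            return repr(obj)
        try:
            return super().default(obj)
        except TypeError:
            # Last-resort fallback for any other non-serializable object.
            return str(obj)


msg_dict = {"node_status": "failed", "error": ValueError("bad value")}
# json.dumps(msg_dict) would raise TypeError; the custom encoder keeps logging alive:
print(json.dumps(msg_dict, sort_keys=True, cls=ForgivingEncoder))
```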
@@ -19,7 +19,7 @@
 {% set day_count = (end_date - start_date).days %}
 {% if day_count < 0 %}
     {% set msg -%}
-    Partiton start date is after the end date ({{ start_date }}, {{ end_date }})
+    Partition start date is after the end date ({{ start_date }}, {{ end_date }})
     {%- endset %}

     {{ exceptions.raise_compiler_error(msg, model) }}
@@ -29,8 +29,11 @@ class dbtPlugin:
         self.project_name = project_name
         try:
             self.initialize()
+        except DbtRuntimeError as e:
+            # Remove the first line of DbtRuntimeError to avoid redundant "Runtime Error" line
+            raise DbtRuntimeError("\n".join(str(e).split("\n")[1:]))
         except Exception as e:
-            raise DbtRuntimeError(f"initialize: {e}")
+            raise DbtRuntimeError(str(e))

     @property
     def name(self) -> str:
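One note on the hunk above: `DbtRuntimeError` renders with a leading "Runtime Error" banner line (the new unit test near the end of this diff matches `^Runtime Error\n plugin error message`), so wrapping one `DbtRuntimeError` in another without trimming that first line would print the banner twice. Below is a rough, self-contained sketch of the same strip-the-first-line idea, using a stand-in exception class rather than dbt's:

```python
class BannerError(Exception):
    """Stand-in for an exception whose str() starts with a banner line."""

    def __str__(self) -> str:
        return "Runtime Error\n  " + super().__str__()


def rewrap(exc: Exception) -> BannerError:
    if isinstance(exc, BannerError):
        # Drop the banner line so it is not repeated when the new error is rendered.
        return BannerError("\n".join(str(exc).split("\n")[1:]))
    return BannerError(str(exc))


try:
    raise BannerError("plugin error message")
except Exception as e:
    print(str(rewrap(e)))  # one "Runtime Error" banner, not two
```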
@@ -139,6 +139,7 @@ class CompileTask(GraphRunnableTask):
                     "node_path": "sql/inline_query",
                     "node_name": "inline_query",
                     "unique_id": "sqloperation.test.inline_query",
+                    "node_status": "failed",
                 },
             )
         )
@@ -16,8 +16,6 @@ import time
 from pathlib import PosixPath, WindowsPath

 from contextlib import contextmanager
-from dbt.exceptions import ConnectionError, DuplicateAliasError
-from dbt.events.functions import fire_event
 from dbt.events.types import RetryExternalCall, RecordRetryException
 from dbt import flags
 from enum import Enum
@@ -40,6 +38,7 @@ from typing import (
     Sequence,
 )

+import dbt.events.functions
 import dbt.exceptions

 DECIMALS: Tuple[Type[Any], ...]
@@ -337,14 +336,17 @@ class JSONEncoder(json.JSONEncoder):
     def default(self, obj):
         if isinstance(obj, DECIMALS):
             return float(obj)
-        if isinstance(obj, (datetime.datetime, datetime.date, datetime.time)):
+        elif isinstance(obj, (datetime.datetime, datetime.date, datetime.time)):
             return obj.isoformat()
-        if isinstance(obj, jinja2.Undefined):
+        elif isinstance(obj, jinja2.Undefined):
             return ""
-        if hasattr(obj, "to_dict"):
+        elif isinstance(obj, Exception):
+            return repr(obj)
+        elif hasattr(obj, "to_dict"):
             # if we have a to_dict we should try to serialize the result of
             # that!
             return obj.to_dict(omit_none=True)
         else:
             return super().default(obj)
@@ -369,7 +371,7 @@ class Translator:
         for key, value in kwargs.items():
             canonical_key = self.aliases.get(key, key)
             if canonical_key in result:
-                raise DuplicateAliasError(kwargs, self.aliases, canonical_key)
+                raise dbt.exceptions.DuplicateAliasError(kwargs, self.aliases, canonical_key)
             result[canonical_key] = self.translate_value(value)
         return result
@@ -389,9 +391,7 @@ class Translator:
             return self.translate_mapping(value)
         except RuntimeError as exc:
             if "maximum recursion depth exceeded" in str(exc):
-                raise dbt.exceptions.RecursionError(
-                    "Cycle detected in a value passed to translate!"
-                )
+                raise RecursionError("Cycle detected in a value passed to translate!")
             raise
@@ -603,12 +603,14 @@ def _connection_exception_retry(fn, max_attempts: int, attempt: int = 0):
         ReadError,
     ) as exc:
         if attempt <= max_attempts - 1:
-            fire_event(RecordRetryException(exc=str(exc)))
-            fire_event(RetryExternalCall(attempt=attempt, max=max_attempts))
+            dbt.events.functions.fire_event(RecordRetryException(exc=str(exc)))
+            dbt.events.functions.fire_event(RetryExternalCall(attempt=attempt, max=max_attempts))
             time.sleep(1)
             return _connection_exception_retry(fn, max_attempts, attempt + 1)
         else:
-            raise ConnectionError("External connection exception occurred: " + str(exc))
+            raise dbt.exceptions.ConnectionError(
+                "External connection exception occurred: " + str(exc)
+            )


 # This is used to serialize the args in the run_results and in the logs.
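The final hunk above only changes how `fire_event` and `ConnectionError` are referenced (module-qualified, presumably to sidestep the import cycle created now that `dbt.events.functions` itself imports `dbt.utils`); the retry shape is unchanged: recurse with a fixed one-second pause until `max_attempts` is exhausted, then surface a connection error. A simplified, dependency-free sketch of that pattern follows; the names here are placeholders, not dbt's API:

```python
import time


class ExternalConnectionError(Exception):
    """Placeholder for dbt.exceptions.ConnectionError in this sketch."""


def retry_external_call(fn, max_attempts: int, attempt: int = 0):
    # Retry transient I/O failures with a fixed 1s pause, mirroring the
    # recursive structure of _connection_exception_retry shown above.
    try:
        return fn()
    except OSError as exc:
        if attempt <= max_attempts - 1:
            time.sleep(1)
            return retry_external_call(fn, max_attempts, attempt + 1)
        raise ExternalConnectionError("External connection exception occurred: " + str(exc))


# Example (hypothetical callable): retry_external_call(download_manifest, max_attempts=3)
```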
File diff suppressed because one or more lines are too long (10 files)
@@ -8,9 +8,6 @@ from tests.functional.partial_parsing.fixtures import (
     models_schema1_yml,
     models_schema2_yml,
     models_schema2b_yml,
-    models_versions_schema_yml,
-    models_versions_defined_in_schema_yml,
-    models_versions_updated_schema_yml,
     model_three_sql,
     model_three_modified_sql,
     model_four1_sql,
@@ -71,7 +68,7 @@ from tests.functional.partial_parsing.fixtures import (
     groups_schema_yml_two_groups_private_orders_invalid_access,
 )

-from dbt.exceptions import CompilationError, ParsingError, DuplicateVersionedUnversionedError
+from dbt.exceptions import CompilationError, ParsingError
 from dbt.contracts.files import ParseFileType
 from dbt.contracts.results import TestStatus

@@ -303,72 +300,6 @@ class TestModels:
         assert model_id not in manifest.disabled


-class TestVersionedModels:
-    @pytest.fixture(scope="class")
-    def models(self):
-        return {
-            "model_one_v1.sql": model_one_sql,
-            "model_one.sql": model_one_sql,
-            "model_one_downstream.sql": model_four2_sql,
-            "schema.yml": models_versions_schema_yml,
-        }
-
-    def test_pp_versioned_models(self, project):
-        results = run_dbt(["run"])
-        assert len(results) == 3
-
-        manifest = get_manifest(project.project_root)
-        model_one_node = manifest.nodes["model.test.model_one.v1"]
-        assert not model_one_node.is_latest_version
-        model_two_node = manifest.nodes["model.test.model_one.v2"]
-        assert model_two_node.is_latest_version
-        # assert unpinned ref points to latest version
-        model_one_downstream_node = manifest.nodes["model.test.model_one_downstream"]
-        assert model_one_downstream_node.depends_on.nodes == ["model.test.model_one.v2"]
-
-        # update schema.yml block - model_one is now 'defined_in: model_one_different'
-        rm_file(project.project_root, "models", "model_one.sql")
-        write_file(model_one_sql, project.project_root, "models", "model_one_different.sql")
-        write_file(
-            models_versions_defined_in_schema_yml, project.project_root, "models", "schema.yml"
-        )
-        results = run_dbt(["--partial-parse", "run"])
-        assert len(results) == 3
-
-        # update versions schema.yml block - latest_version from 2 to 1
-        write_file(
-            models_versions_updated_schema_yml, project.project_root, "models", "schema.yml"
-        )
-        results, log_output = run_dbt_and_capture(
-            ["--partial-parse", "--log-format", "json", "run"]
-        )
-        assert len(results) == 3
-
-        manifest = get_manifest(project.project_root)
-        model_one_node = manifest.nodes["model.test.model_one.v1"]
-        assert model_one_node.is_latest_version
-        model_two_node = manifest.nodes["model.test.model_one.v2"]
-        assert not model_two_node.is_latest_version
-        # assert unpinned ref points to latest version
-        model_one_downstream_node = manifest.nodes["model.test.model_one_downstream"]
-        assert model_one_downstream_node.depends_on.nodes == ["model.test.model_one.v1"]
-        # assert unpinned ref to latest-not-max version yields an "FYI" info-level log
-        assert "UnpinnedRefNewVersionAvailable" in log_output
-
-        # update versioned model
-        write_file(model_two_sql, project.project_root, "models", "model_one_different.sql")
-        results = run_dbt(["--partial-parse", "run"])
-        assert len(results) == 3
-        manifest = get_manifest(project.project_root)
-        assert len(manifest.nodes) == 3
-        print(f"--- nodes: {manifest.nodes.keys()}")
-
-        # create a new model_one in model_one.sql and re-parse
-        write_file(model_one_sql, project.project_root, "models", "model_one.sql")
-        with pytest.raises(DuplicateVersionedUnversionedError):
-            run_dbt(["parse"])


 class TestSources:
     @pytest.fixture(scope="class")
     def models(self):
126  tests/functional/partial_parsing/test_versioned_models.py  Normal file
@@ -0,0 +1,126 @@
import pytest
import pathlib
from dbt.tests.util import (
    run_dbt,
    get_manifest,
    write_file,
    rm_file,
    read_file,
)
from dbt.exceptions import DuplicateVersionedUnversionedError

model_one_sql = """
select 1 as fun
"""

model_one_downstream_sql = """
select fun from {{ ref('model_one') }}
"""

models_versions_schema_yml = """

models:
  - name: model_one
    description: "The first model"
    versions:
      - v: 1
      - v: 2
"""

models_versions_defined_in_schema_yml = """
models:
  - name: model_one
    description: "The first model"
    versions:
      - v: 1
      - v: 2
        defined_in: model_one_different
"""

models_versions_updated_schema_yml = """
models:
  - name: model_one
    latest_version: 1
    description: "The first model"
    versions:
      - v: 1
      - v: 2
        defined_in: model_one_different
"""

model_two_sql = """
select 1 as notfun
"""


class TestVersionedModels:
    @pytest.fixture(scope="class")
    def models(self):
        return {
            "model_one_v1.sql": model_one_sql,
            "model_one.sql": model_one_sql,
            "model_one_downstream.sql": model_one_downstream_sql,
            "schema.yml": models_versions_schema_yml,
        }

    def test_pp_versioned_models(self, project):
        results = run_dbt(["run"])
        assert len(results) == 3

        manifest = get_manifest(project.project_root)
        model_one_node = manifest.nodes["model.test.model_one.v1"]
        assert not model_one_node.is_latest_version
        model_two_node = manifest.nodes["model.test.model_one.v2"]
        assert model_two_node.is_latest_version
        # assert unpinned ref points to latest version
        model_one_downstream_node = manifest.nodes["model.test.model_one_downstream"]
        assert model_one_downstream_node.depends_on.nodes == ["model.test.model_one.v2"]

        # update schema.yml block - model_one is now 'defined_in: model_one_different'
        rm_file(project.project_root, "models", "model_one.sql")
        write_file(model_one_sql, project.project_root, "models", "model_one_different.sql")
        write_file(
            models_versions_defined_in_schema_yml, project.project_root, "models", "schema.yml"
        )
        results = run_dbt(["--partial-parse", "run"])
        assert len(results) == 3

        # update versions schema.yml block - latest_version from 2 to 1
        write_file(
            models_versions_updated_schema_yml, project.project_root, "models", "schema.yml"
        )
        # This is where the test was failings in a CI run with:
        # relation \"test..._test_partial_parsing.model_one_downstream\" does not exist
        # because in core/dbt/include/global_project/macros/materializations/models/view/view.sql
        # "existing_relation" didn't actually exist by the time it gets to the rename of the
        # existing relation.
        (pathlib.Path(project.project_root) / "log_output").mkdir(parents=True, exist_ok=True)
        results = run_dbt(
            ["--partial-parse", "--log-format-file", "json", "--log-path", "log_output", "run"]
        )
        assert len(results) == 3

        manifest = get_manifest(project.project_root)
        model_one_node = manifest.nodes["model.test.model_one.v1"]
        assert model_one_node.is_latest_version
        model_two_node = manifest.nodes["model.test.model_one.v2"]
        assert not model_two_node.is_latest_version
        # assert unpinned ref points to latest version
        model_one_downstream_node = manifest.nodes["model.test.model_one_downstream"]
        assert model_one_downstream_node.depends_on.nodes == ["model.test.model_one.v1"]

        # assert unpinned ref to latest-not-max version yields an "FYI" info-level log
        log_output = read_file("log_output", "dbt.log").replace("\n", " ").replace("\\n", " ")
        assert "UnpinnedRefNewVersionAvailable" in log_output

        # update versioned model
        write_file(model_two_sql, project.project_root, "models", "model_one_different.sql")
        results = run_dbt(["--partial-parse", "run"])
        assert len(results) == 3
        manifest = get_manifest(project.project_root)
        assert len(manifest.nodes) == 3

        # create a new model_one in model_one.sql and re-parse
        write_file(model_one_sql, project.project_root, "models", "model_one.sql")
        with pytest.raises(DuplicateVersionedUnversionedError):
            run_dbt(["parse"])
@@ -1,388 +0,0 @@
import pytest

import click
from multiprocessing import get_context
from pathlib import Path
from typing import List, Optional

from dbt.cli.exceptions import DbtUsageException
from dbt.cli.flags import Flags
from dbt.cli.main import cli
from dbt.cli.types import Command
from dbt.contracts.project import UserConfig
from dbt.exceptions import DbtInternalError
from dbt.helper_types import WarnErrorOptions
from dbt.tests.util import rm_file, write_file


class TestFlags:
    def make_dbt_context(
        self, context_name: str, args: List[str], parent: Optional[click.Context] = None
    ) -> click.Context:
        ctx = cli.make_context(context_name, args, parent)
        return ctx

    @pytest.fixture(scope="class")
    def run_context(self) -> click.Context:
        return self.make_dbt_context("run", ["run"])

    @pytest.fixture
    def user_config(self) -> UserConfig:
        return UserConfig()

    def test_which(self, run_context):
        flags = Flags(run_context)
        assert flags.WHICH == "run"

    def test_mp_context(self, run_context):
        flags = Flags(run_context)
        assert flags.MP_CONTEXT == get_context("spawn")

    @pytest.mark.parametrize("param", cli.params)
    def test_cli_group_flags_from_params(self, run_context, param):
        flags = Flags(run_context)

        if "DEPRECATED_" in param.name.upper():
            assert not hasattr(flags, param.name.upper())
            return

        if param.name.upper() in ("VERSION", "LOG_PATH"):
            return

        assert hasattr(flags, param.name.upper())
        assert getattr(flags, param.name.upper()) == run_context.params[param.name.lower()]

    def test_log_path_default(self, run_context):
        flags = Flags(run_context)
        assert hasattr(flags, "LOG_PATH")
        assert getattr(flags, "LOG_PATH") == Path("logs")

    @pytest.mark.parametrize(
        "set_stats_param,do_not_track,expected_anonymous_usage_stats",
        [
            # set_stats_param = default, DNT = True, expected = False
            ("default", "1", False),
            ("default", "t", False),
            ("default", "true", False),
            ("default", "y", False),
            ("default", "yes", False),
            # set_stats_param = default, DNT = false, expected = True
            ("default", "false", True),
            ("default", "anything", True),
            # set_stats_param = True, DNT = True, expected = False
            (True, "1", False),
            (True, "t", False),
            (True, "true", False),
            (True, "y", False),
            (True, "yes", False),
            # set_stats_param = True, DNT = false, expected = True
            (True, "false", True),
            (True, "anything", True),
            (True, "2", True),
            # set_stats_param = False, DNT = True, expected = False
            (False, "1", False),
            (False, "t", False),
            (False, "true", False),
            (False, "y", False),
            (False, "yes", False),
            # set_stats_param = False, DNT = False, expected = False
            (False, "false", False),
            (False, "anything", False),
            (False, "2", False),
        ],
    )
    def test_anonymous_usage_state(
        self,
        monkeypatch,
        run_context,
        set_stats_param,
        do_not_track,
        expected_anonymous_usage_stats,
    ):
        monkeypatch.setenv("DO_NOT_TRACK", do_not_track)
        if set_stats_param != "default":
            run_context.params["send_anonymous_usage_stats"] = set_stats_param
        flags = Flags(run_context)
        assert flags.SEND_ANONYMOUS_USAGE_STATS == expected_anonymous_usage_stats

    def test_empty_user_config_uses_default(self, run_context, user_config):
        flags = Flags(run_context, user_config)
        assert flags.USE_COLORS == run_context.params["use_colors"]

    def test_none_user_config_uses_default(self, run_context):
        flags = Flags(run_context, None)
        assert flags.USE_COLORS == run_context.params["use_colors"]

    def test_prefer_user_config_to_default(self, run_context, user_config):
        user_config.use_colors = False
        # ensure default value is not the same as user config
        assert run_context.params["use_colors"] is not user_config.use_colors

        flags = Flags(run_context, user_config)
        assert flags.USE_COLORS == user_config.use_colors

    def test_prefer_param_value_to_user_config(self):
        user_config = UserConfig(use_colors=False)
        context = self.make_dbt_context("run", ["--use-colors", "True", "run"])

        flags = Flags(context, user_config)
        assert flags.USE_COLORS

    def test_prefer_env_to_user_config(self, monkeypatch, user_config):
        user_config.use_colors = False
        monkeypatch.setenv("DBT_USE_COLORS", "True")
        context = self.make_dbt_context("run", ["run"])

        flags = Flags(context, user_config)
        assert flags.USE_COLORS

    def test_mutually_exclusive_options_passed_separately(self):
        """Assert options that are mutually exclusive can be passed separately without error"""
        warn_error_context = self.make_dbt_context("run", ["--warn-error", "run"])

        flags = Flags(warn_error_context)
        assert flags.WARN_ERROR

        warn_error_options_context = self.make_dbt_context(
            "run", ["--warn-error-options", '{"include": "all"}', "run"]
        )
        flags = Flags(warn_error_options_context)
        assert flags.WARN_ERROR_OPTIONS == WarnErrorOptions(include="all")

    def test_mutually_exclusive_options_from_cli(self):
        context = self.make_dbt_context(
            "run", ["--warn-error", "--warn-error-options", '{"include": "all"}', "run"]
        )

        with pytest.raises(DbtUsageException):
            Flags(context)

    @pytest.mark.parametrize("warn_error", [True, False])
    def test_mutually_exclusive_options_from_user_config(self, warn_error, user_config):
        user_config.warn_error = warn_error
        context = self.make_dbt_context(
            "run", ["--warn-error-options", '{"include": "all"}', "run"]
        )

        with pytest.raises(DbtUsageException):
            Flags(context, user_config)

    @pytest.mark.parametrize("warn_error", ["True", "False"])
    def test_mutually_exclusive_options_from_envvar(self, warn_error, monkeypatch):
        monkeypatch.setenv("DBT_WARN_ERROR", warn_error)
        monkeypatch.setenv("DBT_WARN_ERROR_OPTIONS", '{"include":"all"}')
        context = self.make_dbt_context("run", ["run"])

        with pytest.raises(DbtUsageException):
            Flags(context)

    @pytest.mark.parametrize("warn_error", [True, False])
    def test_mutually_exclusive_options_from_cli_and_user_config(self, warn_error, user_config):
        user_config.warn_error = warn_error
        context = self.make_dbt_context(
            "run", ["--warn-error-options", '{"include": "all"}', "run"]
        )

        with pytest.raises(DbtUsageException):
            Flags(context, user_config)

    @pytest.mark.parametrize("warn_error", ["True", "False"])
    def test_mutually_exclusive_options_from_cli_and_envvar(self, warn_error, monkeypatch):
        monkeypatch.setenv("DBT_WARN_ERROR", warn_error)
        context = self.make_dbt_context(
            "run", ["--warn-error-options", '{"include": "all"}', "run"]
        )

        with pytest.raises(DbtUsageException):
            Flags(context)

    @pytest.mark.parametrize("warn_error", ["True", "False"])
    def test_mutually_exclusive_options_from_user_config_and_envvar(
        self, user_config, warn_error, monkeypatch
    ):
        user_config.warn_error = warn_error
        monkeypatch.setenv("DBT_WARN_ERROR_OPTIONS", '{"include": "all"}')
        context = self.make_dbt_context("run", ["run"])

        with pytest.raises(DbtUsageException):
            Flags(context, user_config)

    @pytest.mark.parametrize(
        "cli_colors,cli_colors_file,flag_colors,flag_colors_file",
        [
            (None, None, True, True),
            (True, None, True, True),
            (None, True, True, True),
            (False, None, False, False),
            (None, False, True, False),
            (True, True, True, True),
            (False, False, False, False),
            (True, False, True, False),
            (False, True, False, True),
        ],
    )
    def test_no_color_interaction(
        self, cli_colors, cli_colors_file, flag_colors, flag_colors_file
    ):
        cli_params = []

        if cli_colors is not None:
            cli_params.append("--use-colors" if cli_colors else "--no-use-colors")

        if cli_colors_file is not None:
            cli_params.append("--use-colors-file" if cli_colors_file else "--no-use-colors-file")

        cli_params.append("run")

        context = self.make_dbt_context("run", cli_params)

        flags = Flags(context, None)

        assert flags.USE_COLORS == flag_colors
        assert flags.USE_COLORS_FILE == flag_colors_file

    @pytest.mark.parametrize(
        "cli_log_level,cli_log_level_file,flag_log_level,flag_log_level_file",
        [
            (None, None, "info", "debug"),
            ("error", None, "error", "error"),  # explicit level overrides file level...
            ("info", None, "info", "info"),  # ...but file level doesn't change console level
            (
                "debug",
                "warn",
                "debug",
                "warn",
            ),  # still, two separate explicit levels are applied independently
        ],
    )
    def test_log_level_interaction(
        self, cli_log_level, cli_log_level_file, flag_log_level, flag_log_level_file
    ):
        cli_params = []

        if cli_log_level is not None:
            cli_params.append("--log-level")
            cli_params.append(cli_log_level)

        if cli_log_level_file is not None:
            cli_params.append("--log-level-file")
            cli_params.append(cli_log_level_file)

        cli_params.append("run")

        context = self.make_dbt_context("run", cli_params)

        flags = Flags(context, None)

        assert flags.LOG_LEVEL == flag_log_level
        assert flags.LOG_LEVEL_FILE == flag_log_level_file

    @pytest.mark.parametrize(
        "cli_log_format,cli_log_format_file,flag_log_format,flag_log_format_file",
        [
            (None, None, "default", "debug"),
            ("json", None, "json", "json"),  # explicit format overrides file format...
            (None, "json", "default", "json"),  # ...but file format doesn't change console format
            (
                "debug",
                "text",
                "debug",
                "text",
            ),  # still, two separate explicit formats are applied independently
        ],
    )
    def test_log_format_interaction(
        self, cli_log_format, cli_log_format_file, flag_log_format, flag_log_format_file
    ):
        cli_params = []

        if cli_log_format is not None:
            cli_params.append("--log-format")
            cli_params.append(cli_log_format)

        if cli_log_format_file is not None:
            cli_params.append("--log-format-file")
            cli_params.append(cli_log_format_file)

        cli_params.append("run")

        context = self.make_dbt_context("run", cli_params)

        flags = Flags(context, None)

        assert flags.LOG_FORMAT == flag_log_format
        assert flags.LOG_FORMAT_FILE == flag_log_format_file

    def test_log_settings_from_config(self):
        """Test that values set in UserConfig for log settings will set flags as expected"""
        context = self.make_dbt_context("run", ["run"])

        config = UserConfig(log_format="json", log_level="warn", use_colors=False)

        flags = Flags(context, config)

        assert flags.LOG_FORMAT == "json"
        assert flags.LOG_FORMAT_FILE == "json"
        assert flags.LOG_LEVEL == "warn"
        assert flags.LOG_LEVEL_FILE == "warn"
        assert flags.USE_COLORS is False
        assert flags.USE_COLORS_FILE is False

    def test_log_file_settings_from_config(self):
        """Test that values set in UserConfig for log *file* settings will set flags as expected, leaving the console
        logging flags with their default values"""
        context = self.make_dbt_context("run", ["run"])

        config = UserConfig(log_format_file="json", log_level_file="warn", use_colors_file=False)

        flags = Flags(context, config)

        assert flags.LOG_FORMAT == "default"
        assert flags.LOG_FORMAT_FILE == "json"
        assert flags.LOG_LEVEL == "info"
        assert flags.LOG_LEVEL_FILE == "warn"
        assert flags.USE_COLORS is True
        assert flags.USE_COLORS_FILE is False

    def test_duplicate_flags_raises_error(self):
        parent_context = self.make_dbt_context("parent", ["--version-check"])
        context = self.make_dbt_context("child", ["--version-check"], parent_context)

        with pytest.raises(DbtUsageException):
            Flags(context)

    def _create_flags_from_dict(self, cmd, d):
        write_file("", "profiles.yml")
        result = Flags.from_dict(cmd, d)
        assert result.which is cmd.value
        rm_file("profiles.yml")
        return result

    def test_from_dict__run(self):
        args_dict = {
            "print": False,
            "select": ["model_one", "model_two"],
        }
        result = self._create_flags_from_dict(Command.RUN, args_dict)
        assert "model_one" in result.select[0]
        assert "model_two" in result.select[0]

    def test_from_dict__build(self):
        args_dict = {
            "print": True,
            "state": "some/path",
        }
        result = self._create_flags_from_dict(Command.BUILD, args_dict)
        assert result.print is True
        assert "some/path" in str(result.state)

    def test_from_dict__seed(self):
        args_dict = {"use_colors": False, "exclude": ["model_three"]}
        result = self._create_flags_from_dict(Command.SEED, args_dict)
        assert result.use_colors is False
        assert "model_three" in result.exclude[0]

    def test_from_dict__which_fails(self):
        args_dict = {"which": "some bad command"}
        with pytest.raises(DbtInternalError, match=r"does not match value of which"):
            self._create_flags_from_dict(Command.RUN, args_dict)
@@ -1,340 +0,0 @@
import os
from unittest import TestCase
from argparse import Namespace
import pytest

from dbt import flags
from dbt.contracts.project import UserConfig
from dbt.graph.selector_spec import IndirectSelection
from dbt.helper_types import WarnErrorOptions

# Skip due to interface for flag updated
pytestmark = pytest.mark.skip


class TestFlags(TestCase):
    def setUp(self):
        self.args = Namespace()
        self.user_config = UserConfig()

    def test__flags(self):

        # use_experimental_parser
        self.user_config.use_experimental_parser = True
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.USE_EXPERIMENTAL_PARSER, True)
        os.environ["DBT_USE_EXPERIMENTAL_PARSER"] = "false"
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.USE_EXPERIMENTAL_PARSER, False)
        setattr(self.args, "use_experimental_parser", True)
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.USE_EXPERIMENTAL_PARSER, True)
        # cleanup
        os.environ.pop("DBT_USE_EXPERIMENTAL_PARSER")
        delattr(self.args, "use_experimental_parser")
        flags.USE_EXPERIMENTAL_PARSER = False
        self.user_config.use_experimental_parser = None

        # static_parser
        self.user_config.static_parser = False
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.STATIC_PARSER, False)
        os.environ["DBT_STATIC_PARSER"] = "true"
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.STATIC_PARSER, True)
        setattr(self.args, "static_parser", False)
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.STATIC_PARSER, False)
        # cleanup
        os.environ.pop("DBT_STATIC_PARSER")
        delattr(self.args, "static_parser")
        flags.STATIC_PARSER = True
        self.user_config.static_parser = None

        # warn_error
        self.user_config.warn_error = False
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.WARN_ERROR, False)
        os.environ["DBT_WARN_ERROR"] = "true"
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.WARN_ERROR, True)
        setattr(self.args, "warn_error", False)
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.WARN_ERROR, False)
        # cleanup
        os.environ.pop("DBT_WARN_ERROR")
        delattr(self.args, "warn_error")
        flags.WARN_ERROR = False
        self.user_config.warn_error = None

        # warn_error_options
        self.user_config.warn_error_options = '{"include": "all"}'
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.WARN_ERROR_OPTIONS, WarnErrorOptions(include="all"))
        os.environ["DBT_WARN_ERROR_OPTIONS"] = '{"include": []}'
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.WARN_ERROR_OPTIONS, WarnErrorOptions(include=[]))
        setattr(self.args, "warn_error_options", '{"include": "all"}')
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.WARN_ERROR_OPTIONS, WarnErrorOptions(include="all"))
        # cleanup
        os.environ.pop("DBT_WARN_ERROR_OPTIONS")
        delattr(self.args, "warn_error_options")
        self.user_config.warn_error_options = None

        # write_json
        self.user_config.write_json = True
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.WRITE_JSON, True)
        os.environ["DBT_WRITE_JSON"] = "false"
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.WRITE_JSON, False)
        setattr(self.args, "write_json", True)
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.WRITE_JSON, True)
        # cleanup
        os.environ.pop("DBT_WRITE_JSON")
        delattr(self.args, "write_json")

        # partial_parse
        self.user_config.partial_parse = True
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.PARTIAL_PARSE, True)
        os.environ["DBT_PARTIAL_PARSE"] = "false"
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.PARTIAL_PARSE, False)
        setattr(self.args, "partial_parse", True)
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.PARTIAL_PARSE, True)
        # cleanup
        os.environ.pop("DBT_PARTIAL_PARSE")
        delattr(self.args, "partial_parse")
        self.user_config.partial_parse = False

        # use_colors
        self.user_config.use_colors = True
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.USE_COLORS, True)
        os.environ["DBT_USE_COLORS"] = "false"
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.USE_COLORS, False)
        setattr(self.args, "use_colors", True)
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.USE_COLORS, True)
        # cleanup
        os.environ.pop("DBT_USE_COLORS")
        delattr(self.args, "use_colors")

        # debug
        self.user_config.debug = True
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.DEBUG, True)
        os.environ["DBT_DEBUG"] = "True"
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.DEBUG, True)
        os.environ["DBT_DEBUG"] = "False"
        setattr(self.args, "debug", True)
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.DEBUG, True)
        # cleanup
        os.environ.pop("DBT_DEBUG")
        delattr(self.args, "debug")
        self.user_config.debug = None

        # log_format -- text, json, default
        self.user_config.log_format = "text"
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.LOG_FORMAT, "text")
        os.environ["DBT_LOG_FORMAT"] = "json"
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.LOG_FORMAT, "json")
        setattr(self.args, "log_format", "text")
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.LOG_FORMAT, "text")
        # cleanup
        os.environ.pop("DBT_LOG_FORMAT")
        delattr(self.args, "log_format")
        self.user_config.log_format = None

        # version_check
        self.user_config.version_check = True
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.VERSION_CHECK, True)
        os.environ["DBT_VERSION_CHECK"] = "false"
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.VERSION_CHECK, False)
        setattr(self.args, "version_check", True)
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.VERSION_CHECK, True)
        # cleanup
        os.environ.pop("DBT_VERSION_CHECK")
        delattr(self.args, "version_check")

        # fail_fast
        self.user_config.fail_fast = True
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.FAIL_FAST, True)
        os.environ["DBT_FAIL_FAST"] = "false"
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.FAIL_FAST, False)
        setattr(self.args, "fail_fast", True)
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.FAIL_FAST, True)
        # cleanup
        os.environ.pop("DBT_FAIL_FAST")
        delattr(self.args, "fail_fast")
        self.user_config.fail_fast = False

        # send_anonymous_usage_stats
        self.user_config.send_anonymous_usage_stats = True
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.SEND_ANONYMOUS_USAGE_STATS, True)
        os.environ["DBT_SEND_ANONYMOUS_USAGE_STATS"] = "false"
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.SEND_ANONYMOUS_USAGE_STATS, False)
        setattr(self.args, "send_anonymous_usage_stats", True)
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.SEND_ANONYMOUS_USAGE_STATS, True)
        os.environ["DO_NOT_TRACK"] = "1"
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.SEND_ANONYMOUS_USAGE_STATS, False)
        # cleanup
        os.environ.pop("DBT_SEND_ANONYMOUS_USAGE_STATS")
        os.environ.pop("DO_NOT_TRACK")
        delattr(self.args, "send_anonymous_usage_stats")

        # printer_width
        self.user_config.printer_width = 100
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.PRINTER_WIDTH, 100)
        os.environ["DBT_PRINTER_WIDTH"] = "80"
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.PRINTER_WIDTH, 80)
        setattr(self.args, "printer_width", "120")
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.PRINTER_WIDTH, 120)
        # cleanup
        os.environ.pop("DBT_PRINTER_WIDTH")
        delattr(self.args, "printer_width")
        self.user_config.printer_width = None

        # indirect_selection
        self.user_config.indirect_selection = "eager"
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.INDIRECT_SELECTION, IndirectSelection.Eager)
        self.user_config.indirect_selection = "cautious"
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.INDIRECT_SELECTION, IndirectSelection.Cautious)
        self.user_config.indirect_selection = "buildable"
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.INDIRECT_SELECTION, IndirectSelection.Buildable)
        self.user_config.indirect_selection = None
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.INDIRECT_SELECTION, IndirectSelection.Eager)
        os.environ["DBT_INDIRECT_SELECTION"] = "cautious"
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.INDIRECT_SELECTION, IndirectSelection.Cautious)
        setattr(self.args, "indirect_selection", "cautious")
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.INDIRECT_SELECTION, IndirectSelection.Cautious)
        # cleanup
        os.environ.pop("DBT_INDIRECT_SELECTION")
        delattr(self.args, "indirect_selection")
        self.user_config.indirect_selection = None

        # quiet
        self.user_config.quiet = True
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.QUIET, True)
        # cleanup
        self.user_config.quiet = None

        # no_print
        self.user_config.no_print = True
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.NO_PRINT, True)
        # cleanup
        self.user_config.no_print = None

        # cache_selected_only
        self.user_config.cache_selected_only = True
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.CACHE_SELECTED_ONLY, True)
        os.environ["DBT_CACHE_SELECTED_ONLY"] = "false"
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.CACHE_SELECTED_ONLY, False)
        setattr(self.args, "cache_selected_only", True)
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.CACHE_SELECTED_ONLY, True)
        # cleanup
        os.environ.pop("DBT_CACHE_SELECTED_ONLY")
        delattr(self.args, "cache_selected_only")
        self.user_config.cache_selected_only = False

        # target_path/log_path
        flags.set_from_args(self.args, self.user_config)
        self.assertIsNone(flags.LOG_PATH)
        os.environ["DBT_LOG_PATH"] = "a/b/c"
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.LOG_PATH, "a/b/c")
        setattr(self.args, "log_path", "d/e/f")
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.LOG_PATH, "d/e/f")
        # cleanup
        os.environ.pop("DBT_LOG_PATH")
        delattr(self.args, "log_path")

    def test__flags_are_mutually_exclusive(self):
        # options from user config
        self.user_config.warn_error = False
        self.user_config.warn_error_options = '{"include":"all"}'
        with pytest.raises(ValueError):
            flags.set_from_args(self.args, self.user_config)
        # cleanup
        self.user_config.warn_error = None
        self.user_config.warn_error_options = None

        # options from args
        setattr(self.args, "warn_error", False)
        setattr(self.args, "warn_error_options", '{"include":"all"}')
        with pytest.raises(ValueError):
            flags.set_from_args(self.args, self.user_config)
        # cleanup
        delattr(self.args, "warn_error")
        delattr(self.args, "warn_error_options")

        # options from environment
        os.environ["DBT_WARN_ERROR"] = "false"
        os.environ["DBT_WARN_ERROR_OPTIONS"] = '{"include": []}'
        with pytest.raises(ValueError):
            flags.set_from_args(self.args, self.user_config)
        # cleanup
        os.environ.pop("DBT_WARN_ERROR")
        os.environ.pop("DBT_WARN_ERROR_OPTIONS")

        # options from user config + args
        self.user_config.warn_error = False
        setattr(self.args, "warn_error_options", '{"include":"all"}')
        with pytest.raises(ValueError):
            flags.set_from_args(self.args, self.user_config)
        # cleanup
        self.user_config.warn_error = None
        delattr(self.args, "warn_error_options")

        # options from user config + environ
        self.user_config.warn_error = False
        os.environ["DBT_WARN_ERROR_OPTIONS"] = '{"include": []}'
        with pytest.raises(ValueError):
            flags.set_from_args(self.args, self.user_config)
        # cleanup
        self.user_config.warn_error = None
        os.environ.pop("DBT_WARN_ERROR_OPTIONS")

        # options from args + environ
        setattr(self.args, "warn_error", False)
        os.environ["DBT_WARN_ERROR_OPTIONS"] = '{"include": []}'
        with pytest.raises(ValueError):
            flags.set_from_args(self.args, self.user_config)
        # cleanup
        delattr(self.args, "warn_error")
        os.environ.pop("DBT_WARN_ERROR_OPTIONS")
@@ -1,7 +1,20 @@
 import pytest

+from dbt.exceptions import DbtRuntimeError
 from dbt.plugins import PluginManager, dbtPlugin, dbt_hook
 from dbt.plugins.manifest import PluginNodes, ModelNodeArgs
 from dbt.plugins.contracts import PluginArtifacts, PluginArtifact
+from dbt.plugins.exceptions import dbtPluginError
+
+
+class ExceptionInitializePlugin(dbtPlugin):
+    def initialize(self) -> None:
+        raise Exception("plugin error message")
+
+
+class dbtRuntimeErrorInitializePlugin(dbtPlugin):
+    def initialize(self) -> None:
+        raise dbtPluginError("plugin error message")


 class GetNodesPlugin(dbtPlugin):
@@ -42,6 +55,14 @@ class TestPluginManager:
     def get_artifacts_plugins(self, get_artifacts_plugin):
         return [get_artifacts_plugin, GetArtifactsPlugin(project_name="test2")]

+    def test_plugin_manager_init_exception(self):
+        with pytest.raises(DbtRuntimeError, match="plugin error message"):
+            PluginManager(plugins=[ExceptionInitializePlugin(project_name="test")])
+
+    def test_plugin_manager_init_plugin_exception(self):
+        with pytest.raises(DbtRuntimeError, match="^Runtime Error\n plugin error message"):
+            PluginManager(plugins=[dbtRuntimeErrorInitializePlugin(project_name="test")])
+
     def test_plugin_manager_init_single_hook(self, get_nodes_plugin):
         pm = PluginManager(plugins=[get_nodes_plugin])
         assert len(pm.hooks) == 1