Compare commits

...

19 Commits

Author SHA1 Message Date
Kyle Wigley
ed7dbcec21 test commit 2021-09-23 13:40:46 -04:00
Kyle Wigley
fb84abd28c run performance action when code changes in performance dir 2021-09-23 13:39:32 -04:00
dave-connors-3
f4f5d31959 Feature/catalog relational objects (#3922)
* filter to relational nodes

* cleanup

* flake formatting

* changelog
2021-09-23 08:54:05 -07:00
Jeremy Cohen
e7e12075b9 Fix batching for Snowflake seeds >10k rows (#3942)
* Call get_batch_size in snowflake__load_csv_rows

* Git ignore big csv. Update changelog
2021-09-23 08:49:52 -07:00
Emily Rockman
74dda5aa19 Merge pull request #3893 from dbt-labs/2798_enact_deprecations
removed deprecation for materialization-return and replaced with exception
2021-09-22 14:35:05 -05:00
Emily Rockman
092e96ce70 Merge branch 'develop' into 2798_enact_deprecations 2021-09-22 14:09:35 -05:00
Kyle Wigley
18102027ba Pull in changes for the 0.21.0rc1 release (#3935)
Co-authored-by: Github Build Bot <buildbot@fishtownanalytics.com>
2021-09-22 13:53:43 -05:00
Emily Rockman
f80825d63e updated changelog 2021-09-22 12:55:49 -05:00
Kyle Wigley
9316e47b77 Pull in changes for the 0.21.0rc1 release (#3935)
Co-authored-by: Github Build Bot <buildbot@fishtownanalytics.com>
2021-09-22 13:25:46 -04:00
Emily Rockman
f99cf1218a fixed conflict 2021-09-22 11:36:22 -05:00
Emily Rockman
5871915ce9 Merge branch '2798_enact_deprecations' of https://github.com/dbt-labs/dbt into 2798_enact_deprecations
# Conflicts:
#	test/integration/012_deprecation_tests/test_deprecations.py
2021-09-22 11:34:51 -05:00
Emily Rockman
5ce290043f more explicit error check 2021-09-22 11:16:59 -05:00
Emily Rockman
080d27321b removed deprecation for materialization-return and replaced it with an exception 2021-09-22 11:16:59 -05:00
Gerda Shank
1d0936bd14 Merge pull request #3889 from dbt-labs/3886_pp_log_levels
[#3886] Tweak partial parsing log messages
2021-09-22 10:48:21 -04:00
Gerda Shank
706b8ca9df Merge pull request #3839 from dbt-labs/2990_global_cli_flags
[#2990] Normalize global CLI args/flags
2021-09-22 10:47:54 -04:00
Gerda Shank
779c789a64 [#2990] Normalize global CLI args/flags 2021-09-22 09:58:07 -04:00
Gerda Shank
409b4ba109 [#3886] Tweak partial parsing log messages 2021-09-22 09:20:24 -04:00
Emily Rockman
237048c7ac more explicit error check 2021-09-17 10:51:53 -05:00
Emily Rockman
30ff395b7b removed deprecation for materialization-return and replaced it with an exception 2021-09-17 10:51:53 -05:00
81 changed files with 941 additions and 643 deletions

View File

@@ -1,5 +1,5 @@
[bumpversion]
current_version = 0.21.0b2
current_version = 0.21.0rc1
parse = (?P<major>\d+)
\.(?P<minor>\d+)
\.(?P<patch>\d+)

View File

@@ -6,6 +6,9 @@ on:
- cron: "5 10,22 * * *"
# Allows you to run this workflow manually from the Actions tab
workflow_dispatch:
push:
paths:
- performance/**
jobs:
# checks fmt of runner code

View File

@@ -1,7 +1,24 @@
## dbt 1.0.0 (Release TBD)
### Features
### Fixes
### Under the hood
- Enact deprecation for `materialization-return` and replace deprecation warning with an exception. ([#3896](https://github.com/dbt-labs/dbt/issues/3896))
- Build catalog for only relational, non-ephemeral nodes in the graph ([#3920](https://github.com/dbt-labs/dbt/issues/3920))
Contributors:
- [@dave-connors-3](https://github.com/dave-connors-3) ([#3920](https://github.com/dbt-labs/dbt/issues/3920))
## dbt 0.21.0 (Release TBD)
## dbt 0.21.0b2 (August 19, 2021)
### Fixes
- Fix batching for large seeds on Snowflake ([#3941](https://github.com/dbt-labs/dbt/issues/3941), [#3942](https://github.com/dbt-labs/dbt/pull/3942))
## dbt 0.21.0rc1 (September 20, 2021)
### Features
@@ -17,6 +34,7 @@
- Added timing and thread information to sources.json artifact ([#3804](https://github.com/dbt-labs/dbt/issues/3804), [#3894](https://github.com/dbt-labs/dbt/pull/3894))
- Update cli and rpc flags for the `build` task to align with other commands (`--resource-type`, `--store-failures`) ([#3596](https://github.com/dbt-labs/dbt/issues/3596), [#3884](https://github.com/dbt-labs/dbt/pull/3884))
- Log tests that are not indirectly selected. Add `--greedy` flag to `test`, `list`, `build` and `greedy` property in yaml selectors ([#3723](https://github.com/dbt-labs/dbt/pull/3723), [#3833](https://github.com/dbt-labs/dbt/pull/3833))
- Normalize global CLI arguments/flags ([#2990](https://github.com/dbt-labs/dbt/issues/2990), [#3839](https://github.com/dbt-labs/dbt/pull/3839))
### Fixes

View File

@@ -238,12 +238,6 @@ class BaseConnectionManager(metaclass=abc.ABCMeta):
@classmethod
def _rollback(cls, connection: Connection) -> None:
"""Roll back the given connection."""
if flags.STRICT_MODE:
if not isinstance(connection, Connection):
raise dbt.exceptions.CompilerException(
f'In _rollback, got {connection} - not a Connection!'
)
if connection.transaction_open is False:
raise dbt.exceptions.InternalException(
f'Tried to rollback transaction on connection '
@@ -257,12 +251,6 @@ class BaseConnectionManager(metaclass=abc.ABCMeta):
@classmethod
def close(cls, connection: Connection) -> Connection:
if flags.STRICT_MODE:
if not isinstance(connection, Connection):
raise dbt.exceptions.CompilerException(
f'In close, got {connection} - not a Connection!'
)
# if the connection is in closed or init, there's nothing to do
if connection.state in {ConnectionState.CLOSED, ConnectionState.INIT}:
return connection

View File

@@ -16,7 +16,6 @@ from dbt.exceptions import (
get_relation_returned_multiple_results,
InternalException, NotImplementedException, RuntimeException,
)
from dbt import flags
from dbt import deprecations
from dbt.adapters.protocol import (
@@ -289,9 +288,7 @@ class BaseAdapter(metaclass=AdapterMeta):
def _schema_is_cached(self, database: Optional[str], schema: str) -> bool:
"""Check if the schema is cached, and by default logs if it is not."""
if flags.USE_CACHE is False:
return False
elif (database, schema) not in self.cache:
if (database, schema) not in self.cache:
logger.debug(
'On "{}": cache miss for schema "{}.{}", this is inefficient'
.format(self.nice_connection_name(), database, schema)
@@ -324,7 +321,9 @@ class BaseAdapter(metaclass=AdapterMeta):
"""
info_schema_name_map = SchemaSearchMap()
nodes: Iterator[CompileResultNode] = chain(
manifest.nodes.values(),
[node for node in manifest.nodes.values() if (
node.is_relational and not node.is_ephemeral_model
)],
manifest.sources.values(),
)
for node in nodes:
@@ -340,9 +339,6 @@ class BaseAdapter(metaclass=AdapterMeta):
"""Populate the relations cache for the given schemas. Returns an
iterable of the schemas populated, as strings.
"""
if not flags.USE_CACHE:
return
cache_schemas = self._get_cache_schemas(manifest)
with executor(self.config) as tpe:
futures: List[Future[List[BaseRelation]]] = []
@@ -375,9 +371,6 @@ class BaseAdapter(metaclass=AdapterMeta):
"""Run a query that gets a populated cache of the relations in the
database and set the cache on this adapter.
"""
if not flags.USE_CACHE:
return
with self.cache.lock:
if clear:
self.cache.clear()
@@ -391,8 +384,7 @@ class BaseAdapter(metaclass=AdapterMeta):
raise_compiler_error(
'Attempted to cache a null relation for {}'.format(name)
)
if flags.USE_CACHE:
self.cache.add(relation)
self.cache.add(relation)
# so jinja doesn't render things
return ''
@@ -406,8 +398,7 @@ class BaseAdapter(metaclass=AdapterMeta):
raise_compiler_error(
'Attempted to drop a null relation for {}'.format(name)
)
if flags.USE_CACHE:
self.cache.drop(relation)
self.cache.drop(relation)
return ''
@available
@@ -428,8 +419,7 @@ class BaseAdapter(metaclass=AdapterMeta):
.format(src_name, dst_name, name)
)
if flags.USE_CACHE:
self.cache.rename(from_relation, to_relation)
self.cache.rename(from_relation, to_relation)
return ''
###

View File

@@ -11,7 +11,6 @@ from dbt.contracts.connection import (
Connection, ConnectionState, AdapterResponse
)
from dbt.logger import GLOBAL_LOGGER as logger
from dbt import flags
class SQLConnectionManager(BaseConnectionManager):
@@ -144,13 +143,6 @@ class SQLConnectionManager(BaseConnectionManager):
def begin(self):
connection = self.get_thread_connection()
if flags.STRICT_MODE:
if not isinstance(connection, Connection):
raise dbt.exceptions.CompilerException(
f'In begin, got {connection} - not a Connection!'
)
if connection.transaction_open is True:
raise dbt.exceptions.InternalException(
'Tried to begin a new transaction on connection "{}", but '
@@ -163,12 +155,6 @@ class SQLConnectionManager(BaseConnectionManager):
def commit(self):
connection = self.get_thread_connection()
if flags.STRICT_MODE:
if not isinstance(connection, Connection):
raise dbt.exceptions.CompilerException(
f'In commit, got {connection} - not a Connection!'
)
if connection.transaction_open is False:
raise dbt.exceptions.InternalException(
'Tried to commit transaction on connection "{}", but '

View File

@@ -1,4 +1,4 @@
# all these are just exports, they need "noqa" so flake8 will not complain.
from .profile import Profile, PROFILES_DIR, read_user_config # noqa
from .profile import Profile, read_user_config # noqa
from .project import Project, IsFQNResource # noqa
from .runtime import RuntimeConfig, UnsetProfileConfig # noqa

View File

@@ -20,10 +20,8 @@ from dbt.utils import coerce_dict_str
from .renderer import ProfileRenderer
DEFAULT_THREADS = 1
DEFAULT_PROFILES_DIR = os.path.join(os.path.expanduser('~'), '.dbt')
PROFILES_DIR = os.path.expanduser(
os.getenv('DBT_PROFILES_DIR', DEFAULT_PROFILES_DIR)
)
INVALID_PROFILE_MESSAGE = """
dbt encountered an error while trying to read your profiles.yml file.
@@ -43,7 +41,7 @@ Here, [profile name] should be replaced with a profile name
defined in your profiles.yml file. You can find profiles.yml here:
{profiles_file}/profiles.yml
""".format(profiles_file=PROFILES_DIR)
""".format(profiles_file=DEFAULT_PROFILES_DIR)
def read_profile(profiles_dir: str) -> Dict[str, Any]:
@@ -73,10 +71,10 @@ def read_user_config(directory: str) -> UserConfig:
try:
profile = read_profile(directory)
if profile:
user_cfg = coerce_dict_str(profile.get('config', {}))
if user_cfg is not None:
UserConfig.validate(user_cfg)
return UserConfig.from_dict(user_cfg)
user_config = coerce_dict_str(profile.get('config', {}))
if user_config is not None:
UserConfig.validate(user_config)
return UserConfig.from_dict(user_config)
except (RuntimeException, ValidationError):
pass
return UserConfig()
@@ -89,7 +87,7 @@ def read_user_config(directory: str) -> UserConfig:
class Profile(HasCredentials):
profile_name: str
target_name: str
config: UserConfig
user_config: UserConfig
threads: int
credentials: Credentials
@@ -97,7 +95,7 @@ class Profile(HasCredentials):
self,
profile_name: str,
target_name: str,
config: UserConfig,
user_config: UserConfig,
threads: int,
credentials: Credentials
):
@@ -106,7 +104,7 @@ class Profile(HasCredentials):
"""
self.profile_name = profile_name
self.target_name = target_name
self.config = config
self.user_config = user_config
self.threads = threads
self.credentials = credentials
@@ -124,12 +122,12 @@ class Profile(HasCredentials):
result = {
'profile_name': self.profile_name,
'target_name': self.target_name,
'config': self.config,
'user_config': self.user_config,
'threads': self.threads,
'credentials': self.credentials,
}
if serialize_credentials:
result['config'] = self.config.to_dict(omit_none=True)
result['user_config'] = self.user_config.to_dict(omit_none=True)
result['credentials'] = self.credentials.to_dict(omit_none=True)
return result
@@ -143,7 +141,7 @@ class Profile(HasCredentials):
'name': self.target_name,
'target_name': self.target_name,
'profile_name': self.profile_name,
'config': self.config.to_dict(omit_none=True),
'config': self.user_config.to_dict(omit_none=True),
})
return target
@@ -238,7 +236,7 @@ class Profile(HasCredentials):
threads: int,
profile_name: str,
target_name: str,
user_cfg: Optional[Dict[str, Any]] = None
user_config: Optional[Dict[str, Any]] = None
) -> 'Profile':
"""Create a profile from an existing set of Credentials and the
remaining information.
@@ -247,20 +245,20 @@ class Profile(HasCredentials):
:param threads: The number of threads to use for connections.
:param profile_name: The profile name used for this profile.
:param target_name: The target name used for this profile.
:param user_cfg: The user-level config block from the
:param user_config: The user-level config block from the
raw profiles, if specified.
:raises DbtProfileError: If the profile is invalid.
:returns: The new Profile object.
"""
if user_cfg is None:
user_cfg = {}
UserConfig.validate(user_cfg)
config = UserConfig.from_dict(user_cfg)
if user_config is None:
user_config = {}
UserConfig.validate(user_config)
user_config_obj: UserConfig = UserConfig.from_dict(user_config)
profile = cls(
profile_name=profile_name,
target_name=target_name,
config=config,
user_config=user_config_obj,
threads=threads,
credentials=credentials
)
@@ -313,7 +311,7 @@ class Profile(HasCredentials):
raw_profile: Dict[str, Any],
profile_name: str,
renderer: ProfileRenderer,
user_cfg: Optional[Dict[str, Any]] = None,
user_config: Optional[Dict[str, Any]] = None,
target_override: Optional[str] = None,
threads_override: Optional[int] = None,
) -> 'Profile':
@@ -325,7 +323,7 @@ class Profile(HasCredentials):
disk as yaml and its values rendered with jinja.
:param profile_name: The profile name used.
:param renderer: The config renderer.
:param user_cfg: The global config for the user, if it
:param user_config: The global config for the user, if it
was present.
:param target_override: The target to use, if provided on
the command line.
@@ -335,9 +333,9 @@ class Profile(HasCredentials):
target could not be found
:returns: The new Profile object.
"""
# user_cfg is not rendered.
if user_cfg is None:
user_cfg = raw_profile.get('config')
# user_config is not rendered.
if user_config is None:
user_config = raw_profile.get('config')
# TODO: should it be, and the values coerced to bool?
target_name, profile_data = cls.render_profile(
raw_profile, profile_name, target_override, renderer
@@ -358,7 +356,7 @@ class Profile(HasCredentials):
profile_name=profile_name,
target_name=target_name,
threads=threads,
user_cfg=user_cfg
user_config=user_config
)
@classmethod
@@ -401,13 +399,13 @@ class Profile(HasCredentials):
error_string=msg
)
)
user_cfg = raw_profiles.get('config')
user_config = raw_profiles.get('config')
return cls.from_raw_profile_info(
raw_profile=raw_profile,
profile_name=profile_name,
renderer=renderer,
user_cfg=user_cfg,
user_config=user_config,
target_override=target_override,
threads_override=threads_override,
)

View File

@@ -12,6 +12,7 @@ from .profile import Profile
from .project import Project
from .renderer import DbtProjectYamlRenderer, ProfileRenderer
from .utils import parse_cli_vars
from dbt import flags
from dbt import tracking
from dbt.adapters.factory import get_relation_class_by_name, get_include_paths
from dbt.helper_types import FQNPath, PathSet
@@ -117,7 +118,7 @@ class RuntimeConfig(Project, Profile, AdapterRequiredConfig):
unrendered=project.unrendered,
profile_name=profile.profile_name,
target_name=profile.target_name,
config=profile.config,
user_config=profile.user_config,
threads=profile.threads,
credentials=profile.credentials,
args=args,
@@ -144,7 +145,7 @@ class RuntimeConfig(Project, Profile, AdapterRequiredConfig):
project = Project.from_project_root(
project_root,
renderer,
verify_version=getattr(self.args, 'version_check', False),
verify_version=bool(flags.VERSION_CHECK),
)
cfg = self.from_parts(
@@ -197,7 +198,7 @@ class RuntimeConfig(Project, Profile, AdapterRequiredConfig):
) -> Tuple[Project, Profile]:
# profile_name from the project
project_root = args.project_dir if args.project_dir else os.getcwd()
version_check = getattr(args, 'version_check', False)
version_check = bool(flags.VERSION_CHECK)
partial = Project.partial_load(
project_root,
verify_version=version_check
@@ -416,7 +417,7 @@ class UnsetConfig(UserConfig):
class UnsetProfile(Profile):
def __init__(self):
self.credentials = UnsetCredentials()
self.config = UnsetConfig()
self.user_config = UnsetConfig()
self.profile_name = ''
self.target_name = ''
self.threads = -1
@@ -513,7 +514,7 @@ class UnsetProfileConfig(RuntimeConfig):
unrendered=project.unrendered,
profile_name='',
target_name='',
config=UnsetConfig(),
user_config=UnsetConfig(),
threads=getattr(args, 'threads', 1),
credentials=UnsetCredentials(),
args=args,

View File

@@ -526,8 +526,6 @@ class BaseContext(metaclass=ContextMeta):
The list of valid flags are:
- `flags.STRICT_MODE`: True if `--strict` (or `-S`) was provided on the
command line
- `flags.FULL_REFRESH`: True if `--full-refresh` was provided on the
command line
- `flags.NON_DESTRUCTIVE`: True if `--non-destructive` was provided on

View File

@@ -186,14 +186,11 @@ class UserConfigContract(Protocol):
partial_parse: Optional[bool] = None
printer_width: Optional[int] = None
def set_values(self, cookie_dir: str) -> None:
...
class HasCredentials(Protocol):
credentials: Credentials
profile_name: str
config: UserConfigContract
user_config: UserConfigContract
target_name: str
threads: int

View File

@@ -223,9 +223,7 @@ class ManifestMetadata(BaseArtifactMetadata):
self.user_id = tracking.active_user.id
if self.send_anonymous_usage_stats is None:
self.send_anonymous_usage_stats = (
not tracking.active_user.do_not_track
)
self.send_anonymous_usage_stats = flags.SEND_ANONYMOUS_USAGE_STATS
@classmethod
def default(cls):

View File

@@ -156,13 +156,6 @@ class ParsedNodeMixins(dbtClassMixin):
self.columns = patch.columns
self.meta = patch.meta
self.docs = patch.docs
if flags.STRICT_MODE:
# It seems odd that an instance can be invalid
# Maybe there should be validation or restrictions
# elsewhere?
assert isinstance(self, dbtClassMixin)
dct = self.to_dict(omit_none=False)
self.validate(dct)
def get_materialization(self):
return self.config.materialized
@@ -509,11 +502,6 @@ class ParsedMacro(UnparsedBaseNode, HasUniqueID):
self.meta = patch.meta
self.docs = patch.docs
self.arguments = patch.arguments
if flags.STRICT_MODE:
# What does this actually validate?
assert isinstance(self, dbtClassMixin)
dct = self.to_dict(omit_none=False)
self.validate(dct)
def same_contents(self, other: Optional['ParsedMacro']) -> bool:
if other is None:

View File

@@ -1,9 +1,7 @@
from dbt.contracts.util import Replaceable, Mergeable, list_str
from dbt.contracts.connection import UserConfigContract, QueryComment
from dbt.contracts.connection import QueryComment, UserConfigContract
from dbt.helper_types import NoValue
from dbt.logger import GLOBAL_LOGGER as logger # noqa
from dbt import tracking
from dbt import ui
from dbt.dataclass_schema import (
dbtClassMixin, ValidationError,
HyphenatedDbtClassMixin,
@@ -230,25 +228,20 @@ class UserConfig(ExtensibleDbtClassMixin, Replaceable, UserConfigContract):
use_colors: Optional[bool] = None
partial_parse: Optional[bool] = None
printer_width: Optional[int] = None
def set_values(self, cookie_dir):
if self.send_anonymous_usage_stats:
tracking.initialize_tracking(cookie_dir)
else:
tracking.do_not_track()
if self.use_colors is not None:
ui.use_colors(self.use_colors)
if self.printer_width:
ui.printer_width(self.printer_width)
write_json: Optional[bool] = None
warn_error: Optional[bool] = None
log_format: Optional[bool] = None
debug: Optional[bool] = None
version_check: Optional[bool] = None
fail_fast: Optional[bool] = None
use_experimental_parser: Optional[bool] = None
@dataclass
class ProfileConfig(HyphenatedDbtClassMixin, Replaceable):
profile_name: str = field(metadata={'preserve_underscore': True})
target_name: str = field(metadata={'preserve_underscore': True})
config: UserConfig
user_config: UserConfig = field(metadata={'preserve_underscore': True})
threads: int
# TODO: make this a dynamic union of some kind?
credentials: Optional[Dict[str, Any]]

View File

@@ -57,22 +57,6 @@ class DispatchPackagesDeprecation(DBTDeprecation):
'''
class MaterializationReturnDeprecation(DBTDeprecation):
_name = 'materialization-return'
_description = '''\
The materialization ("{materialization}") did not explicitly return a list
of relations to add to the cache. By default the target relation will be
added, but this behavior will be removed in a future version of dbt.
For more information, see:
https://docs.getdbt.com/v0.15/docs/creating-new-materializations#section-6-returning-relations
'''
class NotADictionaryDeprecation(DBTDeprecation):
_name = 'not-a-dictionary'
@@ -178,7 +162,6 @@ active_deprecations: Set[str] = set()
deprecations_list: List[DBTDeprecation] = [
DispatchPackagesDeprecation(),
MaterializationReturnDeprecation(),
NotADictionaryDeprecation(),
ColumnQuotingDeprecation(),
ModelsKeyNonModelDeprecation(),

View File

@@ -6,19 +6,47 @@ if os.name != 'nt':
from pathlib import Path
from typing import Optional
# initially all flags are set to None, the on-load call of reset() will set
# them for their first time.
STRICT_MODE = None
FULL_REFRESH = None
USE_CACHE = None
WARN_ERROR = None
TEST_NEW_PARSER = None
# PROFILES_DIR must be set before the other flags
DEFAULT_PROFILES_DIR = os.path.join(os.path.expanduser('~'), '.dbt')
PROFILES_DIR = os.path.expanduser(
os.getenv('DBT_PROFILES_DIR', DEFAULT_PROFILES_DIR)
)
STRICT_MODE = False # Only here for backwards compatibility
FULL_REFRESH = False # subcommand
STORE_FAILURES = False # subcommand
GREEDY = None # subcommand
# Global CLI commands
USE_EXPERIMENTAL_PARSER = None
WARN_ERROR = None
WRITE_JSON = None
PARTIAL_PARSE = None
USE_COLORS = None
STORE_FAILURES = None
GREEDY = None
DEBUG = None
LOG_FORMAT = None
VERSION_CHECK = None
FAIL_FAST = None
SEND_ANONYMOUS_USAGE_STATS = None
PRINTER_WIDTH = 80
# Global CLI defaults. These flags are set from three places:
# CLI args, environment variables, and user_config (profiles.yml).
# Environment variables use the pattern 'DBT_{flag name}', like DBT_PROFILES_DIR
flag_defaults = {
"USE_EXPERIMENTAL_PARSER": False,
"WARN_ERROR": False,
"WRITE_JSON": True,
"PARTIAL_PARSE": False,
"USE_COLORS": True,
"PROFILES_DIR": DEFAULT_PROFILES_DIR,
"DEBUG": False,
"LOG_FORMAT": None,
"VERSION_CHECK": True,
"FAIL_FAST": False,
"SEND_ANONYMOUS_USAGE_STATS": True,
"PRINTER_WIDTH": 80
}
def env_set_truthy(key: str) -> Optional[str]:
@@ -31,6 +59,12 @@ def env_set_truthy(key: str) -> Optional[str]:
return value
def env_set_bool(env_value):
if env_value in ('1', 't', 'true', 'y', 'yes'):
return True
return False
def env_set_path(key: str) -> Optional[Path]:
value = os.getenv(key)
if value is None:
@@ -51,58 +85,72 @@ def _get_context():
return multiprocessing.get_context('spawn')
# This is not a flag, it's a place to store the lock
MP_CONTEXT = _get_context()
def reset():
global STRICT_MODE, FULL_REFRESH, USE_CACHE, WARN_ERROR, TEST_NEW_PARSER, \
USE_EXPERIMENTAL_PARSER, WRITE_JSON, PARTIAL_PARSE, MP_CONTEXT, USE_COLORS, \
STORE_FAILURES, GREEDY
STRICT_MODE = False
FULL_REFRESH = False
USE_CACHE = True
WARN_ERROR = False
TEST_NEW_PARSER = False
USE_EXPERIMENTAL_PARSER = False
WRITE_JSON = True
PARTIAL_PARSE = False
MP_CONTEXT = _get_context()
USE_COLORS = True
STORE_FAILURES = False
GREEDY = False
def set_from_args(args):
global STRICT_MODE, FULL_REFRESH, USE_CACHE, WARN_ERROR, TEST_NEW_PARSER, \
USE_EXPERIMENTAL_PARSER, WRITE_JSON, PARTIAL_PARSE, MP_CONTEXT, USE_COLORS, \
STORE_FAILURES, GREEDY
USE_CACHE = getattr(args, 'use_cache', USE_CACHE)
def set_from_args(args, user_config):
global STRICT_MODE, FULL_REFRESH, WARN_ERROR, \
USE_EXPERIMENTAL_PARSER, WRITE_JSON, PARTIAL_PARSE, USE_COLORS, \
STORE_FAILURES, PROFILES_DIR, DEBUG, LOG_FORMAT, GREEDY, \
VERSION_CHECK, FAIL_FAST, SEND_ANONYMOUS_USAGE_STATS, PRINTER_WIDTH
STRICT_MODE = False # backwards compatibility
# cli args without user_config or env var option
FULL_REFRESH = getattr(args, 'full_refresh', FULL_REFRESH)
STRICT_MODE = getattr(args, 'strict', STRICT_MODE)
WARN_ERROR = (
STRICT_MODE or
getattr(args, 'warn_error', STRICT_MODE or WARN_ERROR)
)
TEST_NEW_PARSER = getattr(args, 'test_new_parser', TEST_NEW_PARSER)
USE_EXPERIMENTAL_PARSER = getattr(args, 'use_experimental_parser', USE_EXPERIMENTAL_PARSER)
WRITE_JSON = getattr(args, 'write_json', WRITE_JSON)
PARTIAL_PARSE = getattr(args, 'partial_parse', None)
MP_CONTEXT = _get_context()
# The use_colors attribute will always have a value because it is assigned
# None by default from the add_mutually_exclusive_group function
use_colors_override = getattr(args, 'use_colors')
if use_colors_override is not None:
USE_COLORS = use_colors_override
STORE_FAILURES = getattr(args, 'store_failures', STORE_FAILURES)
GREEDY = getattr(args, 'greedy', GREEDY)
# global cli flags with env var and user_config alternatives
USE_EXPERIMENTAL_PARSER = get_flag_value('USE_EXPERIMENTAL_PARSER', args, user_config)
WARN_ERROR = get_flag_value('WARN_ERROR', args, user_config)
WRITE_JSON = get_flag_value('WRITE_JSON', args, user_config)
PARTIAL_PARSE = get_flag_value('PARTIAL_PARSE', args, user_config)
USE_COLORS = get_flag_value('USE_COLORS', args, user_config)
DEBUG = get_flag_value('DEBUG', args, user_config)
LOG_FORMAT = get_flag_value('LOG_FORMAT', args, user_config)
VERSION_CHECK = get_flag_value('VERSION_CHECK', args, user_config)
FAIL_FAST = get_flag_value('FAIL_FAST', args, user_config)
SEND_ANONYMOUS_USAGE_STATS = get_flag_value('SEND_ANONYMOUS_USAGE_STATS', args, user_config)
PRINTER_WIDTH = get_flag_value('PRINTER_WIDTH', args, user_config)
# initialize everything to the defaults on module load
reset()
def get_flag_value(flag, args, user_config):
lc_flag = flag.lower()
flag_value = getattr(args, lc_flag, None)
if flag_value is None:
# Environment variables use pattern 'DBT_{flag name}'
env_flag = f"DBT_{flag}"
env_value = os.getenv(env_flag)
if env_value is not None and env_value != '':
env_value = env_value.lower()
# non Boolean values
if flag in ['LOG_FORMAT', 'PRINTER_WIDTH']:
flag_value = env_value
else:
flag_value = env_set_bool(env_value)
elif user_config is not None and getattr(user_config, lc_flag, None) is not None:
flag_value = getattr(user_config, lc_flag)
else:
flag_value = flag_defaults[flag]
if flag == 'PRINTER_WIDTH': # printer_width must be an int or it hangs
flag_value = int(flag_value)
return flag_value
def get_flag_dict():
return {
"use_experimental_parser": USE_EXPERIMENTAL_PARSER,
"warn_error": WARN_ERROR,
"write_json": WRITE_JSON,
"partial_parse": PARTIAL_PARSE,
"use_colors": USE_COLORS,
"profiles_dir": PROFILES_DIR,
"debug": DEBUG,
"log_format": LOG_FORMAT,
"version_check": VERSION_CHECK,
"fail_fast": FAIL_FAST,
"send_anonymous_usage_stats": SEND_ANONYMOUS_USAGE_STATS,
"printer_width": PRINTER_WIDTH,
}

View File

@@ -51,7 +51,7 @@
{% endmacro %}
{% macro get_batch_size() -%}
{{ adapter.dispatch('get_batch_size', 'dbt')() }}
{{ return(adapter.dispatch('get_batch_size', 'dbt')()) }}
{%- endmacro %}
{% macro default__get_batch_size() %}

View File

@@ -33,7 +33,7 @@ from dbt.adapters.factory import reset_adapters, cleanup_connections
import dbt.tracking
from dbt.utils import ExitCodes
from dbt.config import PROFILES_DIR, read_user_config
from dbt.config.profile import DEFAULT_PROFILES_DIR, read_user_config
from dbt.exceptions import RuntimeException, InternalException
@@ -160,17 +160,6 @@ def handle(args):
return res
def initialize_config_values(parsed):
"""Given the parsed args, initialize the dbt tracking code.
It would be nice to re-use this profile later on instead of parsing it
twice, but dbt's initialization is not structured in a way that makes that
easy.
"""
cfg = read_user_config(parsed.profiles_dir)
cfg.set_values(parsed.profiles_dir)
@contextmanager
def adapter_management():
reset_adapters()
@@ -184,8 +173,15 @@ def handle_and_check(args):
with log_manager.applicationbound():
parsed = parse_args(args)
# we've parsed the args - we can now decide if we're debug or not
if parsed.debug:
# Set flags from args, user config, and env vars
user_config = read_user_config(parsed.profiles_dir) # This is read again later
flags.set_from_args(parsed, user_config)
dbt.tracking.initialize_from_flags()
# Set log_format from flags
parsed.cls.set_log_format()
# we've parsed the args and set the flags - we can now decide if we're debug or not
if flags.DEBUG:
log_manager.set_debug()
profiler_enabled = False
@@ -198,8 +194,6 @@ def handle_and_check(args):
outfile=parsed.record_timing_info
):
initialize_config_values(parsed)
with adapter_management():
task, res = run_from_args(parsed)
@@ -233,15 +227,17 @@ def track_run(task):
def run_from_args(parsed):
log_cache_events(getattr(parsed, 'log_cache_events', False))
flags.set_from_args(parsed)
parsed.cls.pre_init_hook(parsed)
# we can now use the logger for stdout
# set log_format in the logger
parsed.cls.pre_init_hook(parsed)
logger.info("Running with dbt{}".format(dbt.version.installed))
# this will convert DbtConfigErrors into RuntimeExceptions
# task could be any one of the task objects
task = parsed.cls.from_args(args=parsed)
logger.debug("running dbt with arguments {parsed}", parsed=str(parsed))
log_path = None
@@ -275,11 +271,12 @@ def _build_base_subparser():
base_subparser.add_argument(
'--profiles-dir',
default=PROFILES_DIR,
default=None,
dest='sub_profiles_dir', # Main cli arg precedes subcommand
type=str,
help='''
Which directory to look in for the profiles.yml file. Default = {}
'''.format(PROFILES_DIR)
'''.format(DEFAULT_PROFILES_DIR)
)
base_subparser.add_argument(
@@ -319,15 +316,6 @@ def _build_base_subparser():
help=argparse.SUPPRESS,
)
base_subparser.add_argument(
'--bypass-cache',
action='store_false',
dest='use_cache',
help='''
If set, bypass the adapter-level cache of database state
''',
)
base_subparser.set_defaults(defer=None, state=None)
return base_subparser
@@ -394,6 +382,7 @@ def _build_build_subparser(subparsers, base_subparser):
sub.add_argument(
'-x',
'--fail-fast',
dest='sub_fail_fast',
action='store_true',
help='''
Stop execution upon a first failure.
@@ -531,6 +520,7 @@ def _build_run_subparser(subparsers, base_subparser):
run_sub.add_argument(
'-x',
'--fail-fast',
dest='sub_fail_fast',
action='store_true',
help='''
Stop execution upon a first failure.
@@ -654,8 +644,9 @@ def _add_table_mutability_arguments(*subparsers):
def _add_version_check(sub):
sub.add_argument(
'--no-version-check',
dest='version_check',
dest='sub_version_check', # main cli arg precedes subcommands
action='store_false',
default=None,
help='''
If set, skip ensuring dbt's version matches the one specified in
the dbt_project.yml file ('require-dbt-version')
@@ -749,6 +740,7 @@ def _build_test_subparser(subparsers, base_subparser):
sub.add_argument(
'-x',
'--fail-fast',
dest='sub_fail_fast',
action='store_true',
help='''
Stop execution upon a first test failure.
@@ -972,6 +964,7 @@ def parse_args(args, cls=DBTArgumentParser):
'-d',
'--debug',
action='store_true',
default=None,
help='''
Display debug logging during dbt execution. Useful for debugging and
making bug reports.
@@ -981,13 +974,14 @@ def parse_args(args, cls=DBTArgumentParser):
p.add_argument(
'--log-format',
choices=['text', 'json', 'default'],
default='default',
default=None,
help='''Specify the log format, overriding the command's default.'''
)
p.add_argument(
'--no-write-json',
action='store_false',
default=None,
dest='write_json',
help='''
If set, skip writing the manifest and run_results.json files to disk
@@ -998,6 +992,7 @@ def parse_args(args, cls=DBTArgumentParser):
'--use-colors',
action='store_const',
const=True,
default=None,
dest='use_colors',
help='''
Colorize the output DBT prints to the terminal. Output is colorized by
@@ -1019,18 +1014,17 @@ def parse_args(args, cls=DBTArgumentParser):
)
p.add_argument(
'-S',
'--strict',
action='store_true',
'--printer-width',
dest='printer_width',
help='''
Run schema validations at runtime. This will surface bugs in dbt, but
may incur a performance penalty.
Sets the width of terminal output
'''
)
p.add_argument(
'--warn-error',
action='store_true',
default=None,
help='''
If dbt would normally warn, instead raise an exception. Examples
include --models that selects nothing, deprecations, configurations
@@ -1039,6 +1033,17 @@ def parse_args(args, cls=DBTArgumentParser):
'''
)
p.add_argument(
'--no-version-check',
dest='version_check',
action='store_false',
default=None,
help='''
If set, skip ensuring dbt's version matches the one specified in
the dbt_project.yml file ('require-dbt-version')
'''
)
p.add_optional_argument_inverse(
'--partial-parse',
enable_help='''
@@ -1061,26 +1066,48 @@ def parse_args(args, cls=DBTArgumentParser):
help=argparse.SUPPRESS,
)
# if set, extract all models and blocks with the jinja block extractor, and
# verify that we don't fail anywhere the actual jinja parser passes. The
# reverse (passing files that ends up failing jinja) is fine.
# TODO remove?
p.add_argument(
'--test-new-parser',
action='store_true',
help=argparse.SUPPRESS
)
# if set, will use the tree-sitter-jinja2 parser and extractor instead of
# jinja rendering when possible.
p.add_argument(
'--use-experimental-parser',
action='store_true',
default=None,
help='''
Uses an experimental parser to extract jinja values.
'''
)
p.add_argument(
'--profiles-dir',
default=None,
dest='profiles_dir',
type=str,
help='''
Which directory to look in for the profiles.yml file. Default = {}
'''.format(DEFAULT_PROFILES_DIR)
)
p.add_argument(
'--no-anonymous-usage-stats',
action='store_false',
default=None,
dest='send_anonymous_usage_stats',
help='''
Do not send anonymous usage stat to dbt Labs
'''
)
p.add_argument(
'-x',
'--fail-fast',
dest='fail_fast',
action='store_true',
default=None,
help='''
Stop execution upon a first failure.
'''
)
subs = p.add_subparsers(title="Available sub-commands")
base_subparser = _build_base_subparser()
@@ -1128,8 +1155,28 @@ def parse_args(args, cls=DBTArgumentParser):
parsed = p.parse_args(args)
if hasattr(parsed, 'profiles_dir'):
# profiles_dir is set before subcommands and after, so normalize
if hasattr(parsed, 'sub_profiles_dir'):
if parsed.sub_profiles_dir is not None:
parsed.profiles_dir = parsed.sub_profiles_dir
delattr(parsed, 'sub_profiles_dir')
if hasattr(parsed, 'profiles_dir') and parsed.profiles_dir is not None:
parsed.profiles_dir = os.path.abspath(parsed.profiles_dir)
# needs to be set before the other flags, because it's needed to
# read the profile that contains them
flags.PROFILES_DIR = parsed.profiles_dir
# version_check is set before subcommands and after, so normalize
if hasattr(parsed, 'sub_version_check'):
if parsed.sub_version_check is False:
parsed.version_check = False
delattr(parsed, 'sub_version_check')
# fail_fast is set before subcommands and after, so normalize
if hasattr(parsed, 'sub_fail_fast'):
if parsed.sub_fail_fast is True:
parsed.fail_fast = True
delattr(parsed, 'sub_fail_fast')
if getattr(parsed, 'project_dir', None) is not None:
expanded_user = os.path.expanduser(parsed.project_dir)

View File

@@ -64,7 +64,6 @@ from dbt.dataclass_schema import StrEnum, dbtClassMixin
PARTIAL_PARSE_FILE_NAME = 'partial_parse.msgpack'
PARSING_STATE = DbtProcessState('parsing')
DEFAULT_PARTIAL_PARSE = False
class ReparseReason(StrEnum):
@@ -265,7 +264,7 @@ class ManifestLoader:
self.manifest._parsing_info = ParsingInfo()
if skip_parsing:
logger.info("Partial parsing enabled, no changes found, skipping parsing")
logger.debug("Partial parsing enabled, no changes found, skipping parsing")
else:
# Load Macros
# We need to parse the macros first, so they're resolvable when
@@ -539,18 +538,8 @@ class ManifestLoader:
reparse_reason = ReparseReason.project_config_changed
return valid, reparse_reason
def _partial_parse_enabled(self):
    """Resolve whether partial parsing is on.

    Precedence: CLI flag first, then the project config's
    ``partial_parse`` setting, then the module default.
    """
    cli_setting = flags.PARTIAL_PARSE
    if cli_setting is not None:
        return cli_setting
    config_setting = self.root_project.config.partial_parse
    if config_setting is not None:
        return config_setting
    return DEFAULT_PARTIAL_PARSE
def read_manifest_for_partial_parse(self) -> Optional[Manifest]:
if not self._partial_parse_enabled():
if not flags.PARTIAL_PARSE:
logger.debug('Partial parsing not enabled')
return None
path = os.path.join(self.root_project.target_path,
@@ -577,7 +566,7 @@ class ManifestLoader:
)
reparse_reason = ReparseReason.load_file_failure
else:
logger.info(f"Unable to do partial parsing because {path} not found")
logger.info("Partial parse save file not found. Starting full parse.")
reparse_reason = ReparseReason.file_not_found
# this event is only fired if a full reparse is needed
@@ -587,7 +576,7 @@ class ManifestLoader:
def build_perf_info(self):
mli = ManifestLoaderInfo(
is_partial_parse_enabled=self._partial_parse_enabled(),
is_partial_parse_enabled=flags.PARTIAL_PARSE,
is_static_analysis_enabled=flags.USE_EXPERIMENTAL_PARSER
)
for project in self.all_projects.values():

View File

@@ -105,10 +105,10 @@ class PartialParsing:
}
if changed_or_deleted_macro_file:
self.macro_child_map = self.saved_manifest.build_macro_child_map()
logger.info(f"Partial parsing enabled: "
f"{len(deleted) + len(deleted_schema_files)} files deleted, "
f"{len(added)} files added, "
f"{len(changed) + len(changed_schema_files)} files changed.")
logger.debug(f"Partial parsing enabled: "
f"{len(deleted) + len(deleted_schema_files)} files deleted, "
f"{len(added)} files added, "
f"{len(changed) + len(changed_schema_files)} files changed.")
self.file_diff = file_diff
# generate the list of files that need parsing

View File

@@ -67,15 +67,16 @@ class BootstrapProcess(dbt.flags.MP_CONTEXT.Process):
keeps everything in memory.
"""
# reset flags
dbt.flags.set_from_args(self.task.args)
user_config = None
if self.task.config is not None:
user_config = self.task.config.user_config
dbt.flags.set_from_args(self.task.args, user_config)
dbt.tracking.initialize_from_flags()
# reload the active plugin
load_plugin(self.task.config.credentials.type)
# register it
register_adapter(self.task.config)
# reset tracking, etc
self.task.config.config.set_values(self.task.args.profiles_dir)
def task_exec(self) -> None:
"""task_exec runs first inside the child process"""
if type(self.task) != RemoteListTask:

View File

@@ -7,6 +7,7 @@ from typing import Type, Union, Dict, Any, Optional
from dbt import tracking
from dbt import ui
from dbt import flags
from dbt.contracts.graph.manifest import Manifest
from dbt.contracts.results import (
NodeStatus, RunResult, collect_timing_info, RunStatus
@@ -21,7 +22,7 @@ from .printer import print_skip_caused_by_error, print_skip_line
from dbt.adapters.factory import register_adapter
from dbt.config import RuntimeConfig, Project
from dbt.config.profile import read_profile, PROFILES_DIR
from dbt.config.profile import read_profile
import dbt.exceptions
@@ -34,7 +35,7 @@ class NoneConfig:
def read_profiles(profiles_dir=None):
"""This is only used for some error handling"""
if profiles_dir is None:
profiles_dir = PROFILES_DIR
profiles_dir = flags.PROFILES_DIR
raw_profiles = read_profile(profiles_dir)
@@ -69,6 +70,13 @@ class BaseTask(metaclass=ABCMeta):
else:
log_manager.format_text()
@classmethod
def set_log_format(cls):
    """Configure the global log manager from the LOG_FORMAT flag."""
    # Pick the formatter once, then invoke it; 'json' is the only
    # value that switches away from plain text output.
    configure = (
        log_manager.format_json
        if flags.LOG_FORMAT == 'json'
        else log_manager.format_text
    )
    configure()
@classmethod
def from_args(cls, args):
try:

View File

@@ -5,10 +5,11 @@ import sys
from typing import Optional, Dict, Any, List
from dbt.logger import GLOBAL_LOGGER as logger
from dbt import flags
import dbt.clients.system
import dbt.exceptions
from dbt.adapters.factory import get_adapter, register_adapter
from dbt.config import Project, Profile, PROFILES_DIR
from dbt.config import Project, Profile
from dbt.config.renderer import DbtProjectYamlRenderer, ProfileRenderer
from dbt.config.utils import parse_cli_vars
from dbt.context.base import generate_base_context
@@ -69,7 +70,7 @@ class QueryCommentedProfile(Profile):
class DebugTask(BaseTask):
def __init__(self, args, config):
super().__init__(args, config)
self.profiles_dir = getattr(self.args, 'profiles_dir', PROFILES_DIR)
self.profiles_dir = flags.PROFILES_DIR
self.profile_path = os.path.join(self.profiles_dir, 'profiles.yml')
try:
self.project_dir = get_nearest_project_dir(self.args)
@@ -156,7 +157,7 @@ class DebugTask(BaseTask):
self.project = Project.from_project_root(
self.project_dir,
renderer,
verify_version=getattr(self.args, 'version_check', False),
verify_version=flags.VERSION_CHECK,
)
except dbt.exceptions.DbtConfigError as exc:
self.project_fail_details = str(exc)
@@ -195,7 +196,7 @@ class DebugTask(BaseTask):
try:
partial = Project.partial_load(
os.path.dirname(self.project_path),
verify_version=getattr(self.args, 'version_check', False),
verify_version=bool(flags.VERSION_CHECK),
)
renderer = DbtProjectYamlRenderer(
generate_base_context(self.cli_vars)

View File

@@ -3,6 +3,7 @@ import shutil
import dbt.config
import dbt.clients.system
from dbt import flags
from dbt.version import _get_adapter_plugin_names
from dbt.adapters.factory import load_plugin, get_include_paths
@@ -93,7 +94,7 @@ class InitTask(BaseTask):
except StopIteration:
logger.debug("No adapters installed, skipping")
profiles_dir = dbt.config.PROFILES_DIR
profiles_dir = flags.PROFILES_DIR
profiles_file = os.path.join(profiles_dir, 'profiles.yml')
self.create_profiles_dir(profiles_dir)

View File

@@ -30,8 +30,8 @@ def print_fancy_output_line(
progress=progress,
message=msg)
truncate_width = ui.PRINTER_WIDTH - 3
justified = prefix.ljust(ui.PRINTER_WIDTH, ".")
truncate_width = ui.printer_width() - 3
justified = prefix.ljust(ui.printer_width(), ".")
if truncate and len(justified) > truncate_width:
justified = justified[:truncate_width] + '...'

View File

@@ -16,7 +16,6 @@ from .printer import (
get_counts,
)
from dbt import deprecations
from dbt import tracking
from dbt import utils
from dbt.adapters.base import BaseRelation
@@ -209,11 +208,12 @@ class ModelRunner(CompileRunner):
self, result: Any, model
) -> List[BaseRelation]:
if isinstance(result, str):
deprecations.warn('materialization-return',
materialization=model.get_materialization())
return [
self.adapter.Relation.create_from(self.config, model)
]
msg = (
'The materialization ("{}") did not explicitly return a '
'list of relations to add to the cache.'
.format(str(model.get_materialization()))
)
raise CompilationException(msg, node=model)
if isinstance(result, dict):
return _validate_materialization_relations_dict(result, model)

View File

@@ -214,7 +214,7 @@ class GraphRunnableTask(ManifestTask):
logger.debug('Finished running node {}'.format(
runner.node.unique_id))
fail_fast = getattr(self.config.args, 'fail_fast', False)
fail_fast = flags.FAIL_FAST
if result.status in (NodeStatus.Error, NodeStatus.Fail) and fail_fast:
self._raise_next_tick = FailFastException(
@@ -281,7 +281,7 @@ class GraphRunnableTask(ManifestTask):
self._submit(pool, args, callback)
# block on completion
if getattr(self.config.args, 'fail_fast', False):
if flags.FAIL_FAST:
# checkout for an errors after task completion in case of
# fast failure
while self.job_queue.wait_until_something_was_done():
@@ -571,7 +571,11 @@ class GraphRunnableTask(ManifestTask):
)
def args_to_dict(self):
var_args = vars(self.args)
var_args = vars(self.args).copy()
# update the args with the flags, which could also come from environment
# variables or user_config
flag_dict = flags.get_flag_dict()
var_args.update(flag_dict)
dict_args = {}
# remove args keys that clutter up the dictionary
for key in var_args:
@@ -579,10 +583,11 @@ class GraphRunnableTask(ManifestTask):
continue
if var_args[key] is None:
continue
# TODO: add more default_false_keys
default_false_keys = (
'debug', 'full_refresh', 'fail_fast', 'warn_error',
'single_threaded', 'test_new_parser', 'log_cache_events',
'strict'
'single_threaded', 'log_cache_events',
'use_experimental_parser',
)
if key in default_false_keys and var_args[key] is False:
continue

View File

@@ -5,6 +5,7 @@ from dbt.clients.yaml_helper import ( # noqa:F401
)
from dbt.logger import GLOBAL_LOGGER as logger
from dbt import version as dbt_version
from dbt import flags
from snowplow_tracker import Subject, Tracker, Emitter, logger as sp_logger
from snowplow_tracker import SelfDescribingJson
from datetime import datetime
@@ -184,7 +185,6 @@ def get_invocation_context(user, config, args):
"command": args.which,
"options": None,
"version": str(dbt_version.installed),
"run_type": get_run_type(args),
"adapter_type": adapter_type,
"adapter_unique_id": adapter_unique_id,
@@ -509,3 +509,11 @@ class InvocationProcessor(logbook.Processor):
"run_started_at": active_user.run_started_at.isoformat(),
"invocation_id": active_user.invocation_id,
})
def initialize_from_flags():
    """Enable or disable anonymous usage tracking from the global flags.

    Setting these used to be in UserConfig, but had to be moved here.
    """
    if not flags.SEND_ANONYMOUS_USAGE_STATS:
        do_not_track()
    else:
        initialize_tracking(flags.PROFILES_DIR)

View File

@@ -17,17 +17,6 @@ COLOR_FG_GREEN = COLORS['green']
COLOR_FG_YELLOW = COLORS['yellow']
COLOR_RESET_ALL = COLORS['reset_all']
PRINTER_WIDTH = 80
def use_colors(use_colors_val=True):
flags.USE_COLORS = use_colors_val
def printer_width(printer_width):
global PRINTER_WIDTH
PRINTER_WIDTH = printer_width
def color(text: str, color_code: str):
if flags.USE_COLORS:
@@ -36,6 +25,12 @@ def color(text: str, color_code: str):
return text
def printer_width():
    """Return the terminal output width: the PRINTER_WIDTH flag if it
    is truthy (set and non-zero), otherwise the default of 80."""
    return flags.PRINTER_WIDTH or 80
def green(text: str):
return color(text, COLOR_FG_GREEN)
@@ -56,7 +51,7 @@ def line_wrap_message(
newlines to newlines and avoid calling textwrap.fill() on them (like
markdown)
'''
width = PRINTER_WIDTH - subtract
width = printer_width() - subtract
if dedent:
msg = textwrap.dedent(msg)

View File

@@ -96,5 +96,5 @@ def _get_dbt_plugins_info():
yield plugin_name, mod.version
__version__ = '0.21.0b2'
__version__ = '0.21.0rc1'
installed = get_installed_version()

View File

@@ -284,12 +284,12 @@ def parse_args(argv=None):
parser.add_argument('adapter')
parser.add_argument('--title-case', '-t', default=None)
parser.add_argument('--dependency', action='append')
parser.add_argument('--dbt-core-version', default='0.21.0b2')
parser.add_argument('--dbt-core-version', default='0.21.0rc1')
parser.add_argument('--email')
parser.add_argument('--author')
parser.add_argument('--url')
parser.add_argument('--sql', action='store_true')
parser.add_argument('--package-version', default='0.21.0b2')
parser.add_argument('--package-version', default='0.21.0rc1')
parser.add_argument('--project-version', default='1.0')
parser.add_argument(
'--no-dependency', action='store_false', dest='set_dependency'

View File

@@ -24,7 +24,7 @@ def read(fname):
package_name = "dbt-core"
package_version = "0.21.0b2"
package_version = "0.21.0rc1"
description = """dbt (data build tool) is a command line tool that helps \
analysts and engineers transform data in their warehouse more effectively"""

View File

@@ -0,0 +1,75 @@
agate==1.6.1
asn1crypto==1.4.0
attrs==21.2.0
azure-common==1.1.27
azure-core==1.18.0
azure-storage-blob==12.8.1
Babel==2.9.1
boto3==1.18.44
botocore==1.21.44
cachetools==4.2.2
certifi==2021.5.30
cffi==1.14.6
chardet==4.0.0
charset-normalizer==2.0.6
colorama==0.4.4
cryptography==3.4.8
google-api-core==1.31.2
google-auth==1.35.0
google-cloud-bigquery==2.26.0
google-cloud-core==1.7.2
google-crc32c==1.1.2
google-resumable-media==2.0.2
googleapis-common-protos==1.53.0
grpcio==1.40.0
hologram==0.0.14
idna==3.2
importlib-metadata==4.8.1
isodate==0.6.0
jeepney==0.7.1
Jinja2==2.11.3
jmespath==0.10.0
json-rpc==1.13.0
jsonschema==3.1.1
keyring==21.8.0
leather==0.3.3
Logbook==1.5.3
MarkupSafe==2.0.1
mashumaro==2.5
minimal-snowplow-tracker==0.0.2
msgpack==1.0.2
msrest==0.6.21
networkx==2.6.3
oauthlib==3.1.1
oscrypto==1.2.1
packaging==20.9
parsedatetime==2.6
proto-plus==1.19.0
protobuf==3.18.0
psycopg2-binary==2.9.1
pyasn1==0.4.8
pyasn1-modules==0.2.8
pycparser==2.20
pycryptodomex==3.10.1
PyJWT==2.1.0
pyOpenSSL==20.0.1
pyparsing==2.4.7
pyrsistent==0.18.0
python-dateutil==2.8.2
python-slugify==5.0.2
pytimeparse==1.1.8
pytz==2021.1
PyYAML==5.4.1
requests==2.26.0
requests-oauthlib==1.3.0
rsa==4.7.2
s3transfer==0.5.0
SecretStorage==3.3.1
six==1.16.0
snowflake-connector-python==2.5.1
sqlparse==0.4.2
text-unidecode==1.3
typing-extensions==3.10.0.2
urllib3==1.26.6
Werkzeug==2.0.1
zipp==3.5.0

View File

@@ -1,15 +1,21 @@
# Performance Regression Testing
This directory includes dbt project setups to test on and a test runner written in Rust which runs specific dbt commands on each of the projects. Orchestration is done via the GitHub Action workflow in `/.github/workflows/performance.yml`. The workflow is scheduled to run every night, but it can also be triggered manually.
The GitHub workflow hardcodes our baseline branch for performance metrics as `0.20.latest`. As future versions become faster, this branch will be updated to hold us to those new standards.
## Adding a new dbt project
Just make a new directory under `performance/projects/`. It will automatically be picked up by the tests.
## Adding a new dbt command
In `runner/src/measure.rs::measure` add a metric to the `metrics` Vec. The Github Action will handle recompilation if you don't have the rust toolchain installed.
## Future work
- add more projects to test different configurations that are known bottlenecks
- add more dbt commands to measure
- possibly use the uploaded JSON artifacts to store these results so they can be graphed over time

View File

@@ -1 +1 @@
version = '0.21.0b2'
version = '0.21.0rc1'

View File

@@ -4,7 +4,6 @@ from dbt.dataclass_schema import dbtClassMixin, ValidationError
import dbt.deprecations
import dbt.exceptions
import dbt.flags as flags
import dbt.clients.gcloud
import dbt.clients.agate_helper
@@ -15,7 +14,6 @@ from dbt.adapters.base import (
from dbt.adapters.bigquery.relation import BigQueryRelation
from dbt.adapters.bigquery import BigQueryColumn
from dbt.adapters.bigquery import BigQueryConnectionManager
from dbt.contracts.connection import Connection
from dbt.contracts.graph.manifest import Manifest
from dbt.logger import GLOBAL_LOGGER as logger, print_timestamped_line
from dbt.utils import filter_null_values
@@ -515,19 +513,6 @@ class BigQueryAdapter(BaseAdapter):
if sql_override is None:
sql_override = model.get('compiled_sql')
if flags.STRICT_MODE:
connection = self.connections.get_thread_connection()
if not isinstance(connection, Connection):
dbt.exceptions.raise_compiler_error(
f'Got {connection} - not a Connection!'
)
model_uid = model.get('unique_id')
if connection.name != model_uid:
raise dbt.exceptions.InternalException(
f'Connection had name "{connection.name}", expected model '
f'unique id of "{model_uid}"'
)
if materialization == 'view':
res = self._materialize_as_view(model)
elif materialization == 'table':

View File

@@ -20,7 +20,7 @@ except ImportError:
package_name = "dbt-bigquery"
package_version = "0.21.0b2"
package_version = "0.21.0rc1"
description = """The bigquery adapter plugin for dbt (data build tool)"""
this_directory = os.path.abspath(os.path.dirname(__file__))

View File

@@ -1 +1 @@
version = '0.21.0b2'
version = '0.21.0rc1'

View File

@@ -41,7 +41,7 @@ def _dbt_psycopg2_name():
package_name = "dbt-postgres"
package_version = "0.21.0b2"
package_version = "0.21.0rc1"
description = """The postgres adpter plugin for dbt (data build tool)"""
this_directory = os.path.abspath(os.path.dirname(__file__))

View File

@@ -1 +1 @@
version = '0.21.0b2'
version = '0.21.0rc1'

View File

@@ -20,7 +20,7 @@ except ImportError:
package_name = "dbt-redshift"
package_version = "0.21.0b2"
package_version = "0.21.0rc1"
description = """The redshift adapter plugin for dbt (data build tool)"""
this_directory = os.path.abspath(os.path.dirname(__file__))

View File

@@ -1 +1 @@
version = '0.21.0b2'
version = '0.21.0rc1'

View File

@@ -1,4 +1,5 @@
{% macro snowflake__load_csv_rows(model, agate_table) %}
{% set batch_size = get_batch_size() %}
{% set cols_sql = get_seed_column_quoted_csv(model, agate_table.column_names) %}
{% set bindings = [] %}

View File

@@ -20,7 +20,7 @@ except ImportError:
package_name = "dbt-snowflake"
package_version = "0.21.0b2"
package_version = "0.21.0rc1"
description = """The snowflake adapter plugin for dbt (data build tool)"""
this_directory = os.path.abspath(os.path.dirname(__file__))

View File

@@ -24,7 +24,7 @@ with open(os.path.join(this_directory, 'README.md')) as f:
package_name = "dbt"
package_version = "0.21.0b2"
package_version = "0.21.0rc1"
description = """With dbt, data analysts and engineers can build analytics \
the way engineers build applications."""

View File

@@ -0,0 +1 @@
*.csv

View File

@@ -1,5 +1,5 @@
import os
import csv
from test.integration.base import DBTIntegrationTest, use_profile
@@ -311,4 +311,43 @@ class TestSimpleSeedWithDots(DBTIntegrationTest):
@use_profile('postgres')
def test_postgres_simple_seed(self):
results = self.run_dbt(["seed"])
self.assertEqual(len(results), 1)
self.assertEqual(len(results), 1)
class TestSimpleBigSeedBatched(DBTIntegrationTest):
    """Integration test: seed a CSV large enough (20k rows) to force the
    adapter's batched-insert path (e.g. Snowflake batches at >10k rows).
    """

    @property
    def schema(self):
        # Schema suffix used by the integration-test harness.
        return "simple_seed_005"

    @property
    def models(self):
        return "models"

    @property
    def project_config(self):
        # Point data-paths at the generated CSV dir; disable column
        # quoting so the seed loads with plain identifiers.
        return {
            'config-version': 2,
            "data-paths": ['data-big'],
            'seeds': {
                'quote_columns': False,
            }
        }

    def test_big_batched_seed(self):
        # Generate the seed file at test time (it is git-ignored) with a
        # single 'id' column and 20k rows — enough to require batching.
        # NOTE(review): assumes the 'data-big' directory already exists
        # in the test project — confirm.
        with open('data-big/my_seed.csv', 'w') as f:
            writer = csv.writer(f)
            writer.writerow(['id'])
            for i in range(0, 20000):
                writer.writerow([i])
        results = self.run_dbt(["seed"])
        # One seed file -> one result.
        self.assertEqual(len(results), 1)

    @use_profile('postgres')
    def test_postgres_big_batched_seed(self):
        self.test_big_batched_seed()

    @use_profile('snowflake')
    def test_snowflake_big_batched_seed(self):
        self.test_big_batched_seed()

View File

@@ -8,6 +8,7 @@ from unittest import mock
import dbt.semver
import dbt.config
import dbt.exceptions
import dbt.flags
class BaseDependencyTest(DBTIntegrationTest):
@@ -45,8 +46,6 @@ class BaseDependencyTest(DBTIntegrationTest):
}
def run_dbt(self, *args, **kwargs):
strict = kwargs.pop('strict', False)
kwargs['strict'] = strict
return super().run_dbt(*args, **kwargs)
@@ -115,12 +114,9 @@ class TestMissingDependency(DBTIntegrationTest):
@use_profile('postgres')
def test_postgres_missing_dependency(self):
# dbt should raise a dbt exception, not raise a parse-time TypeError.
with self.assertRaises(dbt.exceptions.Exception) as exc:
self.run_dbt(['compile'], strict=False)
message = str(exc.exception)
self.assertIn('no_such_dependency', message)
self.assertIn('is undefined', message)
# dbt should raise a runtime exception
with self.assertRaises(dbt.exceptions.RuntimeException) as exc:
self.run_dbt(['compile'])
class TestSimpleDependencyWithSchema(TestSimpleDependency):
@@ -175,6 +171,54 @@ class TestSimpleDependencyWithSchema(TestSimpleDependency):
self.assertEqual(len(results), 5)
class TestSimpleDependencyNoVersionCheckConfig(TestSimpleDependency):
    """Integration test: with ``version_check: False`` in the profile
    config, an out-of-date dbt version must not block deps/seed/run,
    and the global VERSION_CHECK flag must resolve to False.
    """

    def run_dbt(self, cmd, *args, **kwargs):
        # we can't add this to the config because Sources don't respect
        # dbt_project.yml — pass the schema override via --vars instead
        vars_arg = yaml.safe_dump({
            'schema_override': self.base_schema(),
        })
        cmd.extend(['--vars', vars_arg])
        return super().run_dbt(cmd, *args, **kwargs)

    @property
    def project_config(self):
        return {
            'config-version': 2,
            'macro-paths': ['schema_override_macros'],
            'models': {
                'schema': 'dbt_test',
            },
            'seeds': {
                'schema': 'dbt_test',
            }
        }

    @property
    def profile_config(self):
        # version_check: False is the setting under test here.
        return {
            'config': {
                'send_anonymous_usage_stats': False,
                'version_check': False,
            }
        }

    def base_schema(self):
        return 'dbt_test_{}_macro'.format(self.unique_schema())

    def configured_schema(self):
        return 'configured_{}_macro'.format(self.unique_schema())

    @use_profile('postgres')
    @mock.patch('dbt.config.project.get_installed_version')
    def test_postgres_local_dependency_out_of_date_no_check(self, mock_get):
        # Pretend the installed dbt is ancient so the version check
        # would fail if it were enabled.
        mock_get.return_value = dbt.semver.VersionSpecifier.from_version_string('0.0.1')
        self.run_dbt(['deps'])
        # The profile config must have propagated to the global flag.
        self.assertFalse(dbt.flags.VERSION_CHECK)
        self.run_dbt(['seed'])
        results = self.run_dbt(['run'])
        self.assertEqual(len(results), 5)
class TestSimpleDependencyHooks(DBTIntegrationTest):
@property
def schema(self):
@@ -245,11 +289,6 @@ class TestSimpleDependencyDuplicateName(DBTIntegrationTest):
]
}
def run_dbt(self, *args, **kwargs):
strict = kwargs.pop('strict', False)
kwargs['strict'] = strict
return super().run_dbt(*args, **kwargs)
@use_profile('postgres')
def test_postgres_local_dependency_same_name(self):
with self.assertRaises(dbt.exceptions.DependencyException):

View File

@@ -1,7 +1,7 @@
import os
import tempfile
from test.integration.base import DBTIntegrationTest, use_profile
from dbt.exceptions import CompilationException
from dbt.exceptions import CompilationException, DependencyException
from dbt import deprecations
@@ -110,10 +110,7 @@ class TestSimpleDependencyUnpinned(DBTIntegrationTest):
@use_profile('postgres')
def test_postgres_simple_dependency(self):
with self.assertRaises(CompilationException) as exc:
self.run_dbt(["deps"])
assert 'is not pinned' in str(exc.exception)
self.run_dbt(['deps'], strict=False)
self.run_dbt(["deps"])
class TestSimpleDependencyWithDuplicates(DBTIntegrationTest):

View File

@@ -117,12 +117,9 @@ class TestMalformedSchemaTests(DBTIntegrationTest):
return test_task.run()
@use_profile('postgres')
def test_postgres_malformed_schema_strict_will_break_run(self):
def test_postgres_malformed_schema_will_break_run(self):
with self.assertRaises(CompilationException):
self.run_dbt(strict=True)
# even if strict = False!
with self.assertRaises(CompilationException):
self.run_dbt(strict=False)
self.run_dbt()
class TestCustomConfigSchemaTests(DBTIntegrationTest):
@@ -149,16 +146,11 @@ class TestCustomConfigSchemaTests(DBTIntegrationTest):
def test_postgres_config(self):
""" Test that tests use configs properly. All tests for
this project will fail, configs are set to make test pass. """
results = self.run_dbt()
results = self.run_dbt(['test'], strict=False)
results = self.run_dbt(['test'], expect_pass=False)
self.assertEqual(len(results), 7)
for result in results:
self.assertFalse(result.skipped)
self.assertEqual(
result.failures, 0,
'test {} failed'.format(result.node.name)
)
class TestHooksInTests(DBTIntegrationTest):
@@ -393,16 +385,16 @@ class TestSchemaCaseInsensitive(DBTIntegrationTest):
@use_profile('postgres')
def test_postgres_schema_lowercase_sql(self):
results = self.run_dbt(strict=False)
results = self.run_dbt()
self.assertEqual(len(results), 2)
results = self.run_dbt(['test', '-m', 'lowercase'], strict=False)
results = self.run_dbt(['test', '-m', 'lowercase'])
self.assertEqual(len(results), 1)
@use_profile('postgres')
def test_postgres_schema_uppercase_sql(self):
results = self.run_dbt(strict=False)
results = self.run_dbt()
self.assertEqual(len(results), 2)
results = self.run_dbt(['test', '-m', 'uppercase'], strict=False)
results = self.run_dbt(['test', '-m', 'uppercase'])
self.assertEqual(len(results), 1)
@@ -440,7 +432,7 @@ class TestSchemaTestContext(DBTIntegrationTest):
# This test tests the the TestContext and TestMacroNamespace
# are working correctly
self.run_dbt(['deps'])
results = self.run_dbt(strict=False)
results = self.run_dbt()
self.assertEqual(len(results), 3)
run_result = self.run_dbt(['test'], expect_pass=False)
@@ -457,7 +449,7 @@ class TestSchemaTestContext(DBTIntegrationTest):
self.assertEqual(results[3].status, TestStatus.Fail)
self.assertRegex(results[3].node.compiled_sql, r'union all')
# type_two_model_a_
self.assertEqual(results[4].status, TestStatus.Fail)
self.assertEqual(results[4].status, TestStatus.Warn)
self.assertEqual(results[4].node.config.severity, 'WARN')
class TestSchemaTestContextWithMacroNamespace(DBTIntegrationTest):
@@ -500,7 +492,7 @@ class TestSchemaTestContextWithMacroNamespace(DBTIntegrationTest):
# This test tests the the TestContext and TestMacroNamespace
# are working correctly
self.run_dbt(['deps'])
results = self.run_dbt(strict=False)
results = self.run_dbt()
self.assertEqual(len(results), 3)
run_result = self.run_dbt(['test'], expect_pass=False)
@@ -515,7 +507,7 @@ class TestSchemaTestContextWithMacroNamespace(DBTIntegrationTest):
self.assertEqual(results[2].status, TestStatus.Fail)
self.assertRegex(results[2].node.compiled_sql, r'union all')
# type_two_model_a_
self.assertEqual(results[3].status, TestStatus.Fail)
self.assertEqual(results[3].status, TestStatus.Warn)
self.assertEqual(results[3].node.config.severity, 'WARN')
class TestSchemaTestNameCollision(DBTIntegrationTest):

View File

@@ -25,41 +25,16 @@ class TestDeprecations(BaseTestDeprecations):
@use_profile('postgres')
def test_postgres_deprecations_fail(self):
self.run_dbt(strict=True, expect_pass=False)
self.run_dbt(['--warn-error', 'run'], expect_pass=False)
@use_profile('postgres')
def test_postgres_deprecations(self):
self.assertEqual(deprecations.active_deprecations, set())
self.run_dbt(strict=False)
self.run_dbt()
expected = {'adapter:already_exists'}
self.assertEqual(expected, deprecations.active_deprecations)
class TestMaterializationReturnDeprecation(BaseTestDeprecations):
@property
def models(self):
return self.dir('custom-models')
@property
def project_config(self):
return {
'config-version': 2,
'macro-paths': [self.dir('custom-materialization-macros')],
}
@use_profile('postgres')
def test_postgres_deprecations_fail(self):
# this should fail at runtime
self.run_dbt(strict=True, expect_pass=False)
@use_profile('postgres')
def test_postgres_deprecations(self):
self.assertEqual(deprecations.active_deprecations, set())
self.run_dbt(strict=False)
expected = {'materialization-return'}
self.assertEqual(expected, deprecations.active_deprecations)
class TestAdapterMacroDeprecation(BaseTestDeprecations):
@property
def models(self):
@@ -75,7 +50,7 @@ class TestAdapterMacroDeprecation(BaseTestDeprecations):
@use_profile('postgres')
def test_postgres_adapter_macro(self):
self.assertEqual(deprecations.active_deprecations, set())
self.run_dbt(strict=False)
self.run_dbt()
expected = {'adapter-macro'}
self.assertEqual(expected, deprecations.active_deprecations)
@@ -83,7 +58,7 @@ class TestAdapterMacroDeprecation(BaseTestDeprecations):
def test_postgres_adapter_macro_fail(self):
self.assertEqual(deprecations.active_deprecations, set())
with self.assertRaises(dbt.exceptions.CompilationException) as exc:
self.run_dbt(strict=True)
self.run_dbt(['--warn-error', 'run'])
exc_str = ' '.join(str(exc.exception).split()) # flatten all whitespace
assert 'The "adapter_macro" macro has been deprecated' in exc_str
@@ -91,7 +66,7 @@ class TestAdapterMacroDeprecation(BaseTestDeprecations):
def test_redshift_adapter_macro(self):
self.assertEqual(deprecations.active_deprecations, set())
# pick up the postgres macro
self.run_dbt(strict=False)
self.run_dbt()
expected = {'adapter-macro'}
self.assertEqual(expected, deprecations.active_deprecations)
@@ -100,7 +75,7 @@ class TestAdapterMacroDeprecation(BaseTestDeprecations):
self.assertEqual(deprecations.active_deprecations, set())
# picked up the default -> error
with self.assertRaises(dbt.exceptions.CompilationException) as exc:
self.run_dbt(strict=False, expect_pass=False)
self.run_dbt(expect_pass=False)
exc_str = ' '.join(str(exc.exception).split()) # flatten all whitespace
assert 'not allowed' in exc_str # we saw the default macro
@@ -120,7 +95,7 @@ class TestAdapterMacroDeprecationPackages(BaseTestDeprecations):
@use_profile('postgres')
def test_postgres_adapter_macro_pkg(self):
self.assertEqual(deprecations.active_deprecations, set())
self.run_dbt(strict=False)
self.run_dbt()
expected = {'adapter-macro'}
self.assertEqual(expected, deprecations.active_deprecations)
@@ -128,7 +103,7 @@ class TestAdapterMacroDeprecationPackages(BaseTestDeprecations):
def test_postgres_adapter_macro_pkg_fail(self):
self.assertEqual(deprecations.active_deprecations, set())
with self.assertRaises(dbt.exceptions.CompilationException) as exc:
self.run_dbt(strict=True)
self.run_dbt(['--warn-error', 'run'])
exc_str = ' '.join(str(exc.exception).split()) # flatten all whitespace
assert 'The "adapter_macro" macro has been deprecated' in exc_str
@@ -137,7 +112,7 @@ class TestAdapterMacroDeprecationPackages(BaseTestDeprecations):
self.assertEqual(deprecations.active_deprecations, set())
# pick up the postgres macro
self.assertEqual(deprecations.active_deprecations, set())
self.run_dbt(strict=False)
self.run_dbt()
expected = {'adapter-macro'}
self.assertEqual(expected, deprecations.active_deprecations)
@@ -146,7 +121,7 @@ class TestAdapterMacroDeprecationPackages(BaseTestDeprecations):
self.assertEqual(deprecations.active_deprecations, set())
# picked up the default -> error
with self.assertRaises(dbt.exceptions.CompilationException) as exc:
self.run_dbt(strict=False, expect_pass=False)
self.run_dbt(expect_pass=False)
exc_str = ' '.join(str(exc.exception).split()) # flatten all whitespace
assert 'not allowed' in exc_str # we saw the default macro
@@ -176,7 +151,7 @@ class TestDispatchPackagesDeprecation(BaseTestDeprecations):
@use_profile('postgres')
def test_postgres_adapter_macro(self):
self.assertEqual(deprecations.active_deprecations, set())
self.run_dbt(strict=False)
self.run_dbt()
expected = {'dispatch-packages'}
self.assertEqual(expected, deprecations.active_deprecations)
@@ -184,7 +159,7 @@ class TestDispatchPackagesDeprecation(BaseTestDeprecations):
def test_postgres_adapter_macro_fail(self):
self.assertEqual(deprecations.active_deprecations, set())
with self.assertRaises(dbt.exceptions.CompilationException) as exc:
self.run_dbt(strict=True)
self.run_dbt(['--warn-error', 'run'])
exc_str = ' '.join(str(exc.exception).split()) # flatten all whitespace
assert 'Raised during dispatch for: string_literal' in exc_str
@@ -208,7 +183,7 @@ class TestPackageRedirectDeprecation(BaseTestDeprecations):
@use_profile('postgres')
def test_postgres_package_redirect(self):
self.assertEqual(deprecations.active_deprecations, set())
self.run_dbt(['deps'], strict=False)
self.run_dbt(['deps'])
expected = {'package-redirect'}
self.assertEqual(expected, deprecations.active_deprecations)
@@ -216,7 +191,7 @@ class TestPackageRedirectDeprecation(BaseTestDeprecations):
def test_postgres_package_redirect_fail(self):
self.assertEqual(deprecations.active_deprecations, set())
with self.assertRaises(dbt.exceptions.CompilationException) as exc:
self.run_dbt(['deps'], strict=True)
self.run_dbt(['--warn-error', 'deps'])
exc_str = ' '.join(str(exc.exception).split()) # flatten all whitespace
expected = "The `fishtown-analytics/dbt_utils` package is deprecated in favor of `dbt-labs/dbt_utils`"
assert expected in exc_str

View File

@@ -143,7 +143,8 @@ class TestContextVars(DBTIntegrationTest):
@use_profile('postgres')
def test_postgres_env_vars_secrets(self):
_, log_output = self.run_dbt_and_capture(['--debug', 'run', '--target', 'prod'])
os.environ['DBT_DEBUG'] = 'True'
_, log_output = self.run_dbt_and_capture(['run', '--target', 'prod'])
self.assertFalse("secret_variable" in log_output)
self.assertTrue("regular_variable" in log_output)
@@ -159,9 +160,7 @@ class TestEmitWarning(DBTIntegrationTest):
@use_profile('postgres')
def test_postgres_warn(self):
with pytest.raises(dbt.exceptions.CompilationException):
self.run_dbt(['run'], strict=True)
self.run_dbt(['run'], strict=False, expect_pass=True)
self.run_dbt(['run'], expect_pass=True)
class TestVarDependencyInheritance(DBTIntegrationTest):
@@ -199,9 +198,9 @@ class TestVarDependencyInheritance(DBTIntegrationTest):
@use_profile('postgres')
def test_postgres_var_mutual_overrides_v1_conversion(self):
self.run_dbt(['deps'], strict=False)
assert len(self.run_dbt(['seed'], strict=False)) == 2
assert len(self.run_dbt(['run'], strict=False)) == 2
self.run_dbt(['deps'])
assert len(self.run_dbt(['seed'])) == 2
assert len(self.run_dbt(['run'])) == 2
self.assertTablesEqual('root_model_expected', 'model')
self.assertTablesEqual('first_dep_expected', 'first_dep_model')

View File

@@ -2964,7 +2964,6 @@ class TestDocsGenerate(DBTIntegrationTest):
'project_id'] == '098f6bcd4621d373cade4e832627b4f6'
assert 'send_anonymous_usage_stats' in metadata and metadata[
'send_anonymous_usage_stats'] is False
assert 'user_id' in metadata and metadata['user_id'] is None
assert 'adapter_type' in metadata and metadata['adapter_type'] == self.adapter_type
else:
self.assertIn(key, expected_manifest) # sanity check
@@ -3292,7 +3291,7 @@ class TestDocsGenerateOverride(DBTIntegrationTest):
self.assertEqual(len(self.run_dbt(['run'])), 1)
# this should pick up our failure macro and raise a compilation exception
with self.assertRaises(CompilationException) as exc:
self.run_dbt(['docs', 'generate'])
self.run_dbt(['--warn-error', 'docs', 'generate'])
self.assertIn('rejected: no catalogs for you', str(exc.exception))

View File

@@ -180,11 +180,11 @@ class TestDisabledConfigs(DBTIntegrationTest):
@use_profile('postgres')
def test_postgres_conditional_model(self):
# no seeds/models - enabled should eval to False because of the target
results = self.run_dbt(['seed', '--target', 'disabled'], strict=False)
results = self.run_dbt(['seed', '--target', 'disabled'])
self.assertEqual(len(results), 0)
results = self.run_dbt(['run', '--target', 'disabled'], strict=False)
results = self.run_dbt(['run', '--target', 'disabled'])
self.assertEqual(len(results), 0)
results = self.run_dbt(['test', '--target', 'disabled'], strict=False)
results = self.run_dbt(['test', '--target', 'disabled'])
self.assertEqual(len(results), 0)
# has seeds/models - enabled should eval to True because of the target
@@ -192,7 +192,7 @@ class TestDisabledConfigs(DBTIntegrationTest):
self.assertEqual(len(results), 1)
results = self.run_dbt(['run'])
self.assertEqual(len(results), 2)
results = self.run_dbt(['test'], strict=False)
results = self.run_dbt(['test'])
self.assertEqual(len(results), 5)
@@ -234,14 +234,14 @@ class TestUnusedModelConfigs(DBTIntegrationTest):
@use_profile('postgres')
def test_postgres_warn_unused_configuration_paths(self):
with self.assertRaises(CompilationException) as exc:
self.run_dbt(['seed'])
self.run_dbt(['--warn-error', 'seed'])
self.assertIn('Configuration paths exist', str(exc.exception))
self.assertIn('- sources.test', str(exc.exception))
self.assertIn('- models.test', str(exc.exception))
self.assertIn('- models.test', str(exc.exception))
self.run_dbt(['seed'], strict=False)
self.run_dbt(['seed'])
class TestConfigIndivTests(DBTIntegrationTest):
@property
@@ -280,19 +280,19 @@ class TestConfigIndivTests(DBTIntegrationTest):
self.assertEqual(len(self.run_dbt(['run'])), 2)
# all tests on (minus sleeper_agent) + WARN
self.assertEqual(len(self.run_dbt(['test'], strict=False)), 5)
self.assertEqual(len(self.run_dbt(['test'])), 5)
# turn off two of them directly
self.assertEqual(len(self.run_dbt(['test', '--vars', '{"enabled_direct": False}'], strict=False)), 3)
self.assertEqual(len(self.run_dbt(['test', '--vars', '{"enabled_direct": False}'])), 3)
# turn on sleeper_agent data test directly
self.assertEqual(len(self.run_dbt(['test', '--models', 'sleeper_agent',
'--vars', '{"enabled_direct": True}'], strict=False)), 1)
'--vars', '{"enabled_direct": True}'])), 1)
# set three to ERROR directly
results = self.run_dbt(['test', '--models', 'config.severity:error',
'--vars', '{"enabled_direct": True, "severity_direct": "ERROR"}'
], strict=False, expect_pass = False)
], expect_pass = False)
self.assertEqual(len(results), 2)
self.assertEqual(results[0].status, 'fail')
self.assertEqual(results[1].status, 'fail')

View File

@@ -68,9 +68,6 @@ class BaseOverrideDatabase(DBTIntegrationTest):
}
}
def run_dbt_notstrict(self, args):
return self.run_dbt(args, strict=False)
class TestModelOverride(BaseOverrideDatabase):
def run_database_override(self):
@@ -79,9 +76,9 @@ class TestModelOverride(BaseOverrideDatabase):
else:
func = lambda x: x
self.run_dbt_notstrict(['seed'])
self.run_dbt(['seed'])
self.assertEqual(len(self.run_dbt_notstrict(['run'])), 4)
self.assertEqual(len(self.run_dbt(['run'])), 4)
self.assertManyRelationsEqual([
(func('seed'), self.unique_schema(), self.default_database),
(func('view_2'), self.unique_schema(), self.alternative_database),
@@ -115,8 +112,8 @@ class BaseTestProjectModelOverride(BaseOverrideDatabase):
assert False, 'No profile database found!'
def run_database_override(self):
self.run_dbt_notstrict(['seed'])
self.assertEqual(len(self.run_dbt_notstrict(['run'])), 4)
self.run_dbt(['seed'])
self.assertEqual(len(self.run_dbt(['run'])), 4)
self.assertExpectedRelations()
def assertExpectedRelations(self):
@@ -217,9 +214,9 @@ class TestProjectSeedOverride(BaseOverrideDatabase):
'database': self.alternative_database
},
})
self.run_dbt_notstrict(['seed'])
self.run_dbt(['seed'])
self.assertEqual(len(self.run_dbt_notstrict(['run'])), 4)
self.assertEqual(len(self.run_dbt(['run'])), 4)
self.assertManyRelationsEqual([
(func('seed'), self.unique_schema(), self.alternative_database),
(func('view_2'), self.unique_schema(), self.alternative_database),

View File

@@ -51,7 +51,7 @@ class BaseSourcesTest(DBTIntegrationTest):
class SuccessfulSourcesTest(BaseSourcesTest):
def setUp(self):
super().setUp()
self.run_dbt_with_vars(['seed'], strict=False)
self.run_dbt_with_vars(['seed'])
self.maxDiff = None
self._id = 101
# this is the db initial value
@@ -459,14 +459,9 @@ class TestMalformedSources(BaseSourcesTest):
return "malformed_models"
@use_profile('postgres')
def test_postgres_malformed_schema_nonstrict_will_break_run(self):
def test_postgres_malformed_schema_will_break_run(self):
with self.assertRaises(CompilationException):
self.run_dbt_with_vars(['seed'], strict=False)
@use_profile('postgres')
def test_postgres_malformed_schema_strict_will_break_run(self):
with self.assertRaises(CompilationException):
self.run_dbt_with_vars(['seed'], strict=True)
self.run_dbt_with_vars(['seed'])
class TestRenderingInSourceTests(BaseSourcesTest):

View File

@@ -28,10 +28,10 @@ class TestSeverity(DBTIntegrationTest):
@use_profile('postgres')
def test_postgres_severity_warnings(self):
self.run_dbt_with_vars(['seed'], 'false', strict=False)
self.run_dbt_with_vars(['run'], 'false', strict=False)
self.run_dbt_with_vars(['seed'], 'false')
self.run_dbt_with_vars(['run'], 'false')
results = self.run_dbt_with_vars(
['test', '--schema'], 'false', strict=False)
['test', '--schema'], 'false')
self.assertEqual(len(results), 2)
self.assertEqual(results[0].status, 'warn')
self.assertEqual(results[0].failures, 2)
@@ -40,10 +40,10 @@ class TestSeverity(DBTIntegrationTest):
@use_profile('postgres')
def test_postgres_severity_rendered_errors(self):
self.run_dbt_with_vars(['seed'], 'false', strict=False)
self.run_dbt_with_vars(['run'], 'false', strict=False)
self.run_dbt_with_vars(['seed'], 'false')
self.run_dbt_with_vars(['run'], 'false')
results = self.run_dbt_with_vars(
['test', '--schema'], 'true', strict=False, expect_pass=False)
['test', '--schema'], 'true', expect_pass=False)
self.assertEqual(len(results), 2)
self.assertEqual(results[0].status, 'fail')
self.assertEqual(results[0].failures, 2)
@@ -52,42 +52,42 @@ class TestSeverity(DBTIntegrationTest):
@use_profile('postgres')
def test_postgres_severity_warnings_strict(self):
self.run_dbt_with_vars(['seed'], 'false', strict=False)
self.run_dbt_with_vars(['run'], 'false', strict=False)
self.run_dbt_with_vars(['seed'], 'false')
self.run_dbt_with_vars(['run'], 'false')
results = self.run_dbt_with_vars(
['test', '--schema'], 'false', expect_pass=False)
['test', '--schema'], 'false', expect_pass=True)
self.assertEqual(len(results), 2)
self.assertEqual(results[0].status, 'fail')
self.assertEqual(results[0].status, 'warn')
self.assertEqual(results[0].failures, 2)
self.assertEqual(results[1].status, 'fail')
self.assertEqual(results[1].status, 'warn')
self.assertEqual(results[1].failures, 2)
@use_profile('postgres')
def test_postgres_data_severity_warnings(self):
self.run_dbt_with_vars(['seed'], 'false', strict=False)
self.run_dbt_with_vars(['run'], 'false', strict=False)
self.run_dbt_with_vars(['seed'], 'false')
self.run_dbt_with_vars(['run'], 'false')
results = self.run_dbt_with_vars(
['test', '--data'], 'false', strict=False)
['test', '--data'], 'false')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].status, 'warn')
self.assertEqual(results[0].failures, 2)
@use_profile('postgres')
def test_postgres_data_severity_rendered_errors(self):
self.run_dbt_with_vars(['seed'], 'false', strict=False)
self.run_dbt_with_vars(['run'], 'false', strict=False)
self.run_dbt_with_vars(['seed'], 'false')
self.run_dbt_with_vars(['run'], 'false')
results = self.run_dbt_with_vars(
['test', '--data'], 'true', strict=False, expect_pass=False)
['test', '--data'], 'true', expect_pass=False)
self.assertEqual(len(results), 1)
self.assertEqual(results[0].status, 'fail')
self.assertEqual(results[0].failures, 2)
@use_profile('postgres')
def test_postgres_data_severity_warnings_strict(self):
self.run_dbt_with_vars(['seed'], 'false', strict=False)
self.run_dbt_with_vars(['run'], 'false', strict=False)
self.run_dbt_with_vars(['seed'], 'false')
self.run_dbt_with_vars(['run'], 'false')
results = self.run_dbt_with_vars(
['test', '--data'], 'false', expect_pass=False)
['test', '--data'], 'false', expect_pass=True)
self.assertEqual(len(results), 1)
self.assertTrue(results[0].status, 'fail')
self.assertEqual(results[0].failures, 2)

View File

@@ -44,8 +44,7 @@ class TestStrictUndefined(DBTIntegrationTest):
if args is not None:
full_args = full_args + args
result = self.run_dbt(args=full_args, expect_pass=expect_pass,
strict=False, parser=False)
result = self.run_dbt(args=full_args, expect_pass=expect_pass)
log_manager.stdout_console()
return result

View File

@@ -33,7 +33,7 @@ class TestColumnQuotingDefault(BaseColumnQuotingTest):
return self.dir('models-unquoted')
def run_dbt(self, *args, **kwargs):
return super().run_dbt(*args, strict=False, **kwargs)
return super().run_dbt(*args, **kwargs)
@use_profile('postgres')
def test_postgres_column_quotes(self):

View File

@@ -0,0 +1,4 @@
name: view_adapter_override
version: 2
macro-paths: ['macros']
config-version: 2

View File

@@ -0,0 +1,66 @@
{# copy+pasting the default view impl #}
{% materialization view, default %}
{%- set identifier = model['alias'] -%}
{%- set tmp_identifier = model['name'] + '__dbt_tmp' -%}
{%- set backup_identifier = model['name'] + '__dbt_backup' -%}
{%- set old_relation = adapter.get_relation(database=database, schema=schema, identifier=identifier) -%}
{%- set target_relation = api.Relation.create(identifier=identifier, schema=schema, database=database,
type='view') -%}
{%- set intermediate_relation = api.Relation.create(identifier=tmp_identifier,
schema=schema, database=database, type='view') -%}
/*
This relation (probably) doesn't exist yet. If it does exist, it's a leftover from
a previous run, and we're going to try to drop it immediately. At the end of this
materialization, we're going to rename the "old_relation" to this identifier,
and then we're going to drop it. In order to make sure we run the correct one of:
- drop view ...
- drop table ...
We need to set the type of this relation to be the type of the old_relation, if it exists,
or else "view" as a sane default if it does not. Note that if the old_relation does not
exist, then there is nothing to move out of the way and subsequentally drop. In that case,
this relation will be effectively unused.
*/
{%- set backup_relation_type = 'view' if old_relation is none else old_relation.type -%}
{%- set backup_relation = api.Relation.create(identifier=backup_identifier,
schema=schema, database=database,
type=backup_relation_type) -%}
{%- set exists_as_view = (old_relation is not none and old_relation.is_view) -%}
{{ run_hooks(pre_hooks, inside_transaction=False) }}
-- drop the temp relations if they exists for some reason
{{ adapter.drop_relation(intermediate_relation) }}
{{ adapter.drop_relation(backup_relation) }}
-- `BEGIN` happens here:
{{ run_hooks(pre_hooks, inside_transaction=True) }}
-- build model
{% call statement('main') -%}
{{ create_view_as(intermediate_relation, sql) }}
{%- endcall %}
-- cleanup
-- move the existing view out of the way
{% if old_relation is not none %}
{{ adapter.rename_relation(target_relation, backup_relation) }}
{% endif %}
{{ adapter.rename_relation(intermediate_relation, target_relation) }}
{{ run_hooks(post_hooks, inside_transaction=True) }}
{{ adapter.commit() }}
{{ drop_relation_if_exists(backup_relation) }}
{{ run_hooks(post_hooks, inside_transaction=False) }}
{# do not return anything! #}
{# {{ return({'relations': [target_relation]}) }} #}
{%- endmaterialization -%}

View File

@@ -98,3 +98,18 @@ class TestOverrideAdapterLocal(BaseTestCustomMaterialization):
self.run_dbt(['deps'])
# this should error because the override is buggy
self.run_dbt(['run'], expect_pass=False)
class TestOverrideDefaultReturn(BaseTestCustomMaterialization):
@property
def project_config(self):
return {
'config-version': 2,
'macro-paths': ['override-view-return-no-relation']
}
@use_profile('postgres')
def test_postgres_default_dependency(self):
self.run_dbt(['deps'])
results = self.run_dbt(['run'], expect_pass=False)
assert 'did not explicitly return a list of relations' in results[0].message

View File

@@ -43,8 +43,27 @@ class TestFastFailingDuringRun(DBTIntegrationTest):
vals = self.run_sql(query, fetch='all')
self.assertFalse(len(vals) == count, 'Execution was not stopped before run end')
@use_profile('postgres')
def test_postgres_fail_fast_run(self):
with self.assertRaises(FailFastException):
self.run_dbt(['run', '--threads', '1', '--fail-fast'])
self.check_audit_table()
class FailFastFromConfig(TestFastFailingDuringRun):
@property
def profile_config(self):
return {
'config': {
'send_anonymous_usage_stats': False,
'fail_fast': True,
}
}
@use_profile('postgres')
def test_postgres_fail_fast_run_user_config(self):
with self.assertRaises(FailFastException):
self.run_dbt(['run', '--threads', '1'])
self.check_audit_table()

View File

@@ -54,7 +54,7 @@ class TestModifiedState(DBTIntegrationTest):
@use_profile('postgres')
def test_postgres_changed_seed_contents_state(self):
results = self.run_dbt(['ls', '--resource-type', 'seed', '--select', 'state:modified', '--state', './state'], strict=False, expect_pass=True)
results = self.run_dbt(['ls', '--resource-type', 'seed', '--select', 'state:modified', '--state', './state'], expect_pass=True)
assert len(results) == 0
with open('data/seed.csv') as fp:
fp.readline()
@@ -91,12 +91,12 @@ class TestModifiedState(DBTIntegrationTest):
fp.write(f'{idx},{value}{newline}')
# now if we run again, we should get a warning
results = self.run_dbt(['ls', '--resource-type', 'seed', '--select', 'state:modified', '--state', './state'], strict=False)
results = self.run_dbt(['ls', '--resource-type', 'seed', '--select', 'state:modified', '--state', './state'])
assert len(results) == 1
assert results[0] == 'test.seed'
with pytest.raises(CompilationException) as exc:
self.run_dbt(['ls', '--resource-type', 'seed', '--select', 'state:modified', '--state', './state'], strict=True)
self.run_dbt(['--warn-error', 'ls', '--resource-type', 'seed', '--select', 'state:modified', '--state', './state'])
assert '>1MB' in str(exc.value)
shutil.rmtree('./state')
@@ -106,12 +106,12 @@ class TestModifiedState(DBTIntegrationTest):
with open('data/seed.csv', 'a') as fp:
fp.write(f'{random},test{newline}')
results = self.run_dbt(['ls', '--resource-type', 'seed', '--select', 'state:modified', '--state', './state'], strict=False, expect_pass=True)
results = self.run_dbt(['ls', '--resource-type', 'seed', '--select', 'state:modified', '--state', './state'], expect_pass=True)
assert len(results) == 0
@use_profile('postgres')
def test_postgres_changed_seed_config(self):
results = self.run_dbt(['ls', '--resource-type', 'seed', '--select', 'state:modified', '--state', './state'], strict=False, expect_pass=True)
results = self.run_dbt(['ls', '--resource-type', 'seed', '--select', 'state:modified', '--state', './state'], expect_pass=True)
assert len(results) == 0
self.use_default_project({'seeds': {'test': {'quote_columns': False}}})
@@ -123,7 +123,7 @@ class TestModifiedState(DBTIntegrationTest):
@use_profile('postgres')
def test_postgres_unrendered_config_same(self):
results = self.run_dbt(['ls', '--resource-type', 'model', '--select', 'state:modified', '--state', './state'], strict=False, expect_pass=True)
results = self.run_dbt(['ls', '--resource-type', 'model', '--select', 'state:modified', '--state', './state'], expect_pass=True)
assert len(results) == 0
# although this is the default value, dbt will recognize it as a change
@@ -135,7 +135,7 @@ class TestModifiedState(DBTIntegrationTest):
@use_profile('postgres')
def test_postgres_changed_model_contents(self):
results = self.run_dbt(['run', '--models', 'state:modified', '--state', './state'], strict=False)
results = self.run_dbt(['run', '--models', 'state:modified', '--state', './state'])
assert len(results) == 0
with open('models/table_model.sql') as fp:
@@ -164,7 +164,7 @@ class TestModifiedState(DBTIntegrationTest):
with open('macros/second_macro.sql', 'w') as fp:
fp.write(new_macro)
results, stdout = self.run_dbt_and_capture(['run', '--models', 'state:modified', '--state', './state'], strict=False)
results, stdout = self.run_dbt_and_capture(['run', '--models', 'state:modified', '--state', './state'])
assert len(results) == 0
os.remove('macros/second_macro.sql')
@@ -172,7 +172,7 @@ class TestModifiedState(DBTIntegrationTest):
with open('macros/macros.sql', 'a') as fp:
fp.write(new_macro)
results, stdout = self.run_dbt_and_capture(['run', '--models', 'state:modified', '--state', './state'], strict=False)
results, stdout = self.run_dbt_and_capture(['run', '--models', 'state:modified', '--state', './state'])
assert len(results) == 0
@use_profile('postgres')
@@ -191,7 +191,7 @@ class TestModifiedState(DBTIntegrationTest):
fp.write(newline)
# table_model calls this macro
results, stdout = self.run_dbt_and_capture(['run', '--models', 'state:modified', '--state', './state'], strict=False)
results, stdout = self.run_dbt_and_capture(['run', '--models', 'state:modified', '--state', './state'])
assert len(results) == 1
@use_profile('postgres')

View File

@@ -113,7 +113,7 @@ class TestModels(DBTIntegrationTest):
shutil.copyfile('extra-files/models-schema2.yml', 'models-a/schema.yml')
os.remove(normalize('models-a/model_three.sql'))
with self.assertRaises(CompilationException):
results = self.run_dbt(["--partial-parse", "run"])
results = self.run_dbt(["--partial-parse", "--warn-error", "run"])
# Put model back again
shutil.copyfile('extra-files/model_three.sql', 'models-a/model_three.sql')
@@ -161,7 +161,7 @@ class TestModels(DBTIntegrationTest):
# Remove the macro
os.remove(normalize('macros/my_macro.sql'))
with self.assertRaises(CompilationException):
results = self.run_dbt(["--partial-parse", "run"])
results = self.run_dbt(["--partial-parse", "--warn-error", "run"])
# put back macro file, got back to schema file with no macro
# add separate macro patch schema file
@@ -317,7 +317,7 @@ class TestSources(DBTIntegrationTest):
# Change seed name to wrong name
shutil.copyfile('extra-files/schema-sources5.yml', 'models-b/sources.yml')
with self.assertRaises(CompilationException):
results = self.run_dbt(["--partial-parse", "run"])
results = self.run_dbt(["--partial-parse", "--warn-error", "run"])
# Put back seed name to right name
shutil.copyfile('extra-files/schema-sources4.yml', 'models-b/sources.yml')
@@ -441,7 +441,7 @@ class TestMacros(DBTIntegrationTest):
def test_postgres_nested_macros(self):
shutil.copyfile('extra-files/custom_schema_tests1.sql', 'macros-macros/custom_schema_tests.sql')
results = self.run_dbt(strict=False)
results = self.run_dbt()
self.assertEqual(len(results), 2)
manifest = get_manifest()
macro_child_map = manifest.build_macro_child_map()
@@ -454,7 +454,7 @@ class TestMacros(DBTIntegrationTest):
self.assertEqual(results[0].status, TestStatus.Fail)
self.assertRegex(results[0].node.compiled_sql, r'union all')
# type_two_model_a_
self.assertEqual(results[1].status, TestStatus.Fail)
self.assertEqual(results[1].status, TestStatus.Warn)
self.assertEqual(results[1].node.config.severity, 'WARN')
shutil.copyfile('extra-files/custom_schema_tests2.sql', 'macros-macros/custom_schema_tests.sql')

View File

@@ -34,6 +34,18 @@ class TestBasicExperimentalParser(DBTIntegrationTest):
self.assertEqual(node.config._extra, {'x': True})
self.assertEqual(node.config.tags, ['hello', 'world'])
@use_profile('postgres')
def test_postgres_env_experimental_parser(self):
os.environ['DBT_USE_EXPERIMENTAL_PARSER'] = 'true'
results = self.run_dbt(['parse'])
manifest = get_manifest()
node = manifest.nodes['model.test.model_a']
self.assertEqual(node.refs, [['model_a']])
self.assertEqual(node.sources, [['my_src', 'my_tbl']])
self.assertEqual(node.config._extra, {'x': True})
self.assertEqual(node.config.tags, ['hello', 'world'])
class TestRefOverrideExperimentalParser(DBTIntegrationTest):
@property
def schema(self):

View File

@@ -112,7 +112,7 @@ class HasRPCServer(DBTIntegrationTest):
super().setUp()
os.environ['DBT_TEST_SCHEMA_NAME_VARIABLE'] = 'test_run_schema'
if self.should_seed:
self.run_dbt_with_vars(['seed'], strict=False)
self.run_dbt_with_vars(['seed'])
port = random.randint(49152, 61000)
self._server = self.ServerProcess(
cli_vars='{{test_run_schema: {}}}'.format(self.unique_schema()),

View File

@@ -399,7 +399,6 @@ class DBTIntegrationTest(unittest.TestCase):
self._created_schemas = set()
reset_deprecations()
flags.reset()
template_cache.clear()
self.use_profile(self._pick_profile())
@@ -579,8 +578,8 @@ class DBTIntegrationTest(unittest.TestCase):
def profile_config(self):
return {}
def run_dbt(self, args=None, expect_pass=True, strict=True, parser=True, profiles_dir=True):
res, success = self.run_dbt_and_check(args=args, strict=strict, parser=parser, profiles_dir=profiles_dir)
def run_dbt(self, args=None, expect_pass=True, profiles_dir=True):
res, success = self.run_dbt_and_check(args=args, profiles_dir=profiles_dir)
self.assertEqual(
success, expect_pass,
"dbt exit state did not match expected")
@@ -603,17 +602,13 @@ class DBTIntegrationTest(unittest.TestCase):
return res, stdout
def run_dbt_and_check(self, args=None, strict=True, parser=False, profiles_dir=True):
def run_dbt_and_check(self, args=None, profiles_dir=True):
log_manager.reset_handlers()
if args is None:
args = ["run"]
final_args = []
if strict:
final_args.append('--strict')
if parser:
final_args.append('--test-new-parser')
if os.getenv('DBT_TEST_SINGLE_THREADED') in ('y', 'Y', '1'):
final_args.append('--single-threaded')

View File

@@ -40,8 +40,6 @@ def _bq_conn():
class BaseTestBigQueryAdapter(unittest.TestCase):
def setUp(self):
flags.STRICT_MODE = True
self.raw_profile = {
'outputs': {
'oauth': {
@@ -375,7 +373,7 @@ class TestConnectionNamePassthrough(BaseTestBigQueryAdapter):
class TestBigQueryRelation(unittest.TestCase):
def setUp(self):
flags.STRICT_MODE = True
pass
def test_view_temp_relation(self):
kwargs = {
@@ -455,7 +453,7 @@ class TestBigQueryRelation(unittest.TestCase):
class TestBigQueryInformationSchema(unittest.TestCase):
def setUp(self):
flags.STRICT_MODE = True
pass
def test_replace(self):

View File

@@ -22,8 +22,6 @@ class CompilerTest(unittest.TestCase):
"".join(b.split()))
def setUp(self):
dbt.flags.STRICT_MODE = True
self.maxDiff = None
self.model_config = NodeConfig.from_dict({

View File

@@ -252,8 +252,8 @@ class TestProfile(BaseConfigTest):
self.assertEqual(profile.profile_name, 'default')
self.assertEqual(profile.target_name, 'postgres')
self.assertEqual(profile.threads, 7)
self.assertTrue(profile.config.send_anonymous_usage_stats)
self.assertIsNone(profile.config.use_colors)
self.assertTrue(profile.user_config.send_anonymous_usage_stats)
self.assertIsNone(profile.user_config.use_colors)
self.assertTrue(isinstance(profile.credentials, PostgresCredentials))
self.assertEqual(profile.credentials.type, 'postgres')
self.assertEqual(profile.credentials.host, 'postgres-db-hostname')
@@ -271,8 +271,8 @@ class TestProfile(BaseConfigTest):
profile = self.from_raw_profiles()
self.assertEqual(profile.profile_name, 'default')
self.assertEqual(profile.target_name, 'postgres')
self.assertFalse(profile.config.send_anonymous_usage_stats)
self.assertFalse(profile.config.use_colors)
self.assertFalse(profile.user_config.send_anonymous_usage_stats)
self.assertFalse(profile.user_config.use_colors)
def test_partial_config_override(self):
self.default_profile_data['config'] = {
@@ -282,9 +282,9 @@ class TestProfile(BaseConfigTest):
profile = self.from_raw_profiles()
self.assertEqual(profile.profile_name, 'default')
self.assertEqual(profile.target_name, 'postgres')
self.assertFalse(profile.config.send_anonymous_usage_stats)
self.assertIsNone(profile.config.use_colors)
self.assertEqual(profile.config.printer_width, 60)
self.assertFalse(profile.user_config.send_anonymous_usage_stats)
self.assertIsNone(profile.user_config.use_colors)
self.assertEqual(profile.user_config.printer_width, 60)
def test_missing_type(self):
del self.default_profile_data['default']['outputs']['postgres']['type']
@@ -415,8 +415,8 @@ class TestProfileFile(BaseFileTest):
self.assertEqual(profile.profile_name, 'default')
self.assertEqual(profile.target_name, 'postgres')
self.assertEqual(profile.threads, 7)
self.assertTrue(profile.config.send_anonymous_usage_stats)
self.assertIsNone(profile.config.use_colors)
self.assertTrue(profile.user_config.send_anonymous_usage_stats)
self.assertIsNone(profile.user_config.use_colors)
self.assertTrue(isinstance(profile.credentials, PostgresCredentials))
self.assertEqual(profile.credentials.type, 'postgres')
self.assertEqual(profile.credentials.host, 'postgres-db-hostname')
@@ -440,8 +440,8 @@ class TestProfileFile(BaseFileTest):
self.assertEqual(profile.profile_name, 'other')
self.assertEqual(profile.target_name, 'other-postgres')
self.assertEqual(profile.threads, 3)
self.assertTrue(profile.config.send_anonymous_usage_stats)
self.assertIsNone(profile.config.use_colors)
self.assertTrue(profile.user_config.send_anonymous_usage_stats)
self.assertIsNone(profile.user_config.use_colors)
self.assertTrue(isinstance(profile.credentials, PostgresCredentials))
self.assertEqual(profile.credentials.type, 'postgres')
self.assertEqual(profile.credentials.host, 'other-postgres-db-hostname')
@@ -462,8 +462,8 @@ class TestProfileFile(BaseFileTest):
self.assertEqual(profile.profile_name, 'default')
self.assertEqual(profile.target_name, 'redshift')
self.assertEqual(profile.threads, 1)
self.assertTrue(profile.config.send_anonymous_usage_stats)
self.assertIsNone(profile.config.use_colors)
self.assertTrue(profile.user_config.send_anonymous_usage_stats)
self.assertIsNone(profile.user_config.use_colors)
self.assertTrue(isinstance(profile.credentials, RedshiftCredentials))
self.assertEqual(profile.credentials.type, 'redshift')
self.assertEqual(profile.credentials.host, 'redshift-db-hostname')
@@ -485,8 +485,8 @@ class TestProfileFile(BaseFileTest):
self.assertEqual(profile.profile_name, 'default')
self.assertEqual(profile.target_name, 'with-vars')
self.assertEqual(profile.threads, 1)
self.assertTrue(profile.config.send_anonymous_usage_stats)
self.assertIsNone(profile.config.use_colors)
self.assertTrue(profile.user_config.send_anonymous_usage_stats)
self.assertIsNone(profile.user_config.use_colors)
self.assertEqual(profile.credentials.type, 'postgres')
self.assertEqual(profile.credentials.host, 'env-postgres-host')
self.assertEqual(profile.credentials.port, 6543)
@@ -507,8 +507,8 @@ class TestProfileFile(BaseFileTest):
self.assertEqual(profile.profile_name, 'default')
self.assertEqual(profile.target_name, 'with-vars')
self.assertEqual(profile.threads, 1)
self.assertTrue(profile.config.send_anonymous_usage_stats)
self.assertIsNone(profile.config.use_colors)
self.assertTrue(profile.user_config.send_anonymous_usage_stats)
self.assertIsNone(profile.user_config.use_colors)
self.assertEqual(profile.credentials.type, 'postgres')
self.assertEqual(profile.credentials.host, 'env-postgres-host')
self.assertEqual(profile.credentials.port, 6543)
@@ -539,8 +539,8 @@ class TestProfileFile(BaseFileTest):
self.assertEqual(profile.profile_name, 'default')
self.assertEqual(profile.target_name, 'cli-and-env-vars')
self.assertEqual(profile.threads, 1)
self.assertTrue(profile.config.send_anonymous_usage_stats)
self.assertIsNone(profile.config.use_colors)
self.assertTrue(profile.user_config.send_anonymous_usage_stats)
self.assertIsNone(profile.user_config.use_colors)
self.assertEqual(profile.credentials.type, 'postgres')
self.assertEqual(profile.credentials.host, 'cli-postgres-host')
self.assertEqual(profile.credentials.port, 6543)
@@ -1034,7 +1034,7 @@ class TestRuntimeConfig(BaseConfigTest):
project = self.get_project()
profile = self.get_profile()
# invalid - must be boolean
profile.config.use_colors = 100
profile.user_config.use_colors = 100
with self.assertRaises(dbt.exceptions.DbtProjectError):
dbt.config.RuntimeConfig.from_parts(project, profile, {})

View File

@@ -45,13 +45,6 @@ from dbt.dataclass_schema import ValidationError
from .utils import ContractTestCase, assert_symmetric, assert_from_dict, compare_dicts, assert_fails_validation, dict_replace, replace_config
@pytest.fixture(autouse=True)
def strict_mode():
flags.STRICT_MODE = True
yield
flags.STRICT_MODE = False
@pytest.fixture
def populated_node_config_object():
result = NodeConfig(
@@ -728,13 +721,6 @@ def test_patch_parsed_model(basic_parsed_model_object, basic_parsed_model_patch_
assert patched_model_object == pre_patch
def test_patch_parsed_model_invalid(basic_parsed_model_object, basic_parsed_model_patch_object):
pre_patch = basic_parsed_model_object # ParsedModelNode
patch = basic_parsed_model_patch_object.replace(description=None)
with pytest.raises(ValidationError):
pre_patch.patch(patch)
@pytest.fixture
def minimal_parsed_hook_dict():
return {

View File

@@ -9,7 +9,6 @@ from dbt.task import generate
class GenerateTest(unittest.TestCase):
def setUp(self):
dbt.flags.STRICT_MODE = True
self.maxDiff = None
self.manifest = mock.MagicMock()
self.patcher = mock.patch('dbt.task.generate.get_unique_id_mapping')

181
test/unit/test_flags.py Normal file
View File

@@ -0,0 +1,181 @@
import os
from unittest import mock, TestCase
from argparse import Namespace
from .utils import normalize
from dbt import flags
from dbt.contracts.project import UserConfig
from dbt.config.profile import DEFAULT_PROFILES_DIR
class TestFlags(TestCase):
    """Exercise dbt global-flag resolution precedence.

    For every flag the resolution order is (highest wins):
    CLI args > environment variable (``DBT_*``) > user config.
    Each section below checks all three levels in that order, then restores
    env vars, args, and user config so later sections start from a clean slate.
    """

    def setUp(self):
        # Empty CLI args and a default (all-None) user config per test.
        self.args = Namespace()
        self.user_config = UserConfig()

    def test__flags(self):
        # use_experimental_parser
        self.user_config.use_experimental_parser = True
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.USE_EXPERIMENTAL_PARSER, True)
        # env var overrides user config
        os.environ['DBT_USE_EXPERIMENTAL_PARSER'] = 'false'
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.USE_EXPERIMENTAL_PARSER, False)
        # CLI arg overrides env var
        setattr(self.args, 'use_experimental_parser', True)
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.USE_EXPERIMENTAL_PARSER, True)
        # cleanup
        os.environ.pop('DBT_USE_EXPERIMENTAL_PARSER')
        delattr(self.args, 'use_experimental_parser')
        flags.USE_EXPERIMENTAL_PARSER = False
        self.user_config.use_experimental_parser = None

        # warn_error
        self.user_config.warn_error = False
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.WARN_ERROR, False)
        os.environ['DBT_WARN_ERROR'] = 'true'
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.WARN_ERROR, True)
        setattr(self.args, 'warn_error', False)
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.WARN_ERROR, False)
        # cleanup
        os.environ.pop('DBT_WARN_ERROR')
        delattr(self.args, 'warn_error')
        flags.WARN_ERROR = False
        self.user_config.warn_error = None

        # write_json
        self.user_config.write_json = True
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.WRITE_JSON, True)
        os.environ['DBT_WRITE_JSON'] = 'false'
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.WRITE_JSON, False)
        setattr(self.args, 'write_json', True)
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.WRITE_JSON, True)
        # cleanup
        os.environ.pop('DBT_WRITE_JSON')
        delattr(self.args, 'write_json')
        self.user_config.write_json = None

        # partial_parse
        self.user_config.partial_parse = True
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.PARTIAL_PARSE, True)
        os.environ['DBT_PARTIAL_PARSE'] = 'false'
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.PARTIAL_PARSE, False)
        setattr(self.args, 'partial_parse', True)
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.PARTIAL_PARSE, True)
        # cleanup
        os.environ.pop('DBT_PARTIAL_PARSE')
        delattr(self.args, 'partial_parse')
        self.user_config.partial_parse = None

        # use_colors
        self.user_config.use_colors = True
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.USE_COLORS, True)
        os.environ['DBT_USE_COLORS'] = 'false'
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.USE_COLORS, False)
        setattr(self.args, 'use_colors', True)
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.USE_COLORS, True)
        # cleanup
        os.environ.pop('DBT_USE_COLORS')
        delattr(self.args, 'use_colors')
        self.user_config.use_colors = None

        # debug
        self.user_config.debug = True
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.DEBUG, True)
        os.environ['DBT_DEBUG'] = 'True'
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.DEBUG, True)
        # fixed typo: was 'DBT_DEUBG', which never flipped the env var to
        # False (so the args-beat-env check below proved nothing) and leaked
        # a misspelled variable that cleanup never popped.
        os.environ['DBT_DEBUG'] = 'False'
        setattr(self.args, 'debug', True)
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.DEBUG, True)
        # cleanup
        os.environ.pop('DBT_DEBUG')
        delattr(self.args, 'debug')
        self.user_config.debug = None

        # log_format -- text, json, default
        self.user_config.log_format = 'text'
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.LOG_FORMAT, 'text')
        os.environ['DBT_LOG_FORMAT'] = 'json'
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.LOG_FORMAT, 'json')
        setattr(self.args, 'log_format', 'text')
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.LOG_FORMAT, 'text')
        # cleanup
        os.environ.pop('DBT_LOG_FORMAT')
        delattr(self.args, 'log_format')
        self.user_config.log_format = None

        # version_check
        self.user_config.version_check = True
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.VERSION_CHECK, True)
        os.environ['DBT_VERSION_CHECK'] = 'false'
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.VERSION_CHECK, False)
        setattr(self.args, 'version_check', True)
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.VERSION_CHECK, True)
        # cleanup
        os.environ.pop('DBT_VERSION_CHECK')
        delattr(self.args, 'version_check')
        self.user_config.version_check = None

        # fail_fast
        self.user_config.fail_fast = True
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.FAIL_FAST, True)
        os.environ['DBT_FAIL_FAST'] = 'false'
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.FAIL_FAST, False)
        setattr(self.args, 'fail_fast', True)
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.FAIL_FAST, True)
        # cleanup
        os.environ.pop('DBT_FAIL_FAST')
        delattr(self.args, 'fail_fast')
        self.user_config.fail_fast = None

        # send_anonymous_usage_stats
        self.user_config.send_anonymous_usage_stats = True
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.SEND_ANONYMOUS_USAGE_STATS, True)
        os.environ['DBT_SEND_ANONYMOUS_USAGE_STATS'] = 'false'
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.SEND_ANONYMOUS_USAGE_STATS, False)
        setattr(self.args, 'send_anonymous_usage_stats', True)
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.SEND_ANONYMOUS_USAGE_STATS, True)
        # cleanup
        os.environ.pop('DBT_SEND_ANONYMOUS_USAGE_STATS')
        delattr(self.args, 'send_anonymous_usage_stats')
        self.user_config.send_anonymous_usage_stats = None

        # printer_width -- env and arg values arrive as strings and are
        # coerced to int by flags.set_from_args
        self.user_config.printer_width = 100
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.PRINTER_WIDTH, 100)
        os.environ['DBT_PRINTER_WIDTH'] = '80'
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.PRINTER_WIDTH, 80)
        setattr(self.args, 'printer_width', '120')
        flags.set_from_args(self.args, self.user_config)
        self.assertEqual(flags.PRINTER_WIDTH, 120)
        # cleanup
        os.environ.pop('DBT_PRINTER_WIDTH')
        delattr(self.args, 'printer_width')
        self.user_config.printer_width = None

View File

@@ -41,7 +41,6 @@ class GraphTest(unittest.TestCase):
def setUp(self):
# create various attributes
dbt.flags.STRICT_MODE = True
self.graph_result = None
tracking.do_not_track()
self.profile = {

View File

@@ -1,106 +0,0 @@
import os
import shutil
import tempfile
import unittest
from unittest import mock
import yaml
from dbt import main
class FakeArgs:
    """Minimal stand-in for parsed CLI args.

    Carries only the two attributes that ``main.initialize_config_values``
    reads: the profiles directory and a fixed profile name.
    """

    def __init__(self, profiles_dir):
        # The tests only ever vary the directory; the profile name is fixed.
        self.profile = 'test'
        self.profiles_dir = profiles_dir
@mock.patch('dbt.ui.use_colors')
@mock.patch('dbt.tracking.do_not_track')
@mock.patch('dbt.tracking.initialize_tracking')
class TestInitializeConfig(unittest.TestCase):
    """Check how ``main.initialize_config_values`` reacts to the optional
    top-level ``config:`` block in profiles.yml (tracking opt-in/out and
    ``use_colors``). Tracking and color hooks are mocked out class-wide;
    the mocks arrive as reversed decorator order: innermost patch first.
    """

    def setUp(self):
        # Each test gets its own temp dir holding a generated profiles.yml.
        self.base_dir = tempfile.mkdtemp()
        self.profiles_path = os.path.join(self.base_dir, 'profiles.yml')
        self.args = FakeArgs(self.base_dir)

    def _base_config(self):
        # A minimal valid profiles.yml body with no `config:` section.
        return {
            'test': {
                'outputs': {
                    'default': {
                        'type': 'postgres',
                        'host': 'test',
                        'port': 5555,
                        'user': 'db_user',
                        'pass': 'db_pass',
                        'dbname': 'dbname',
                        'schema': 'schema',
                    },
                },
                'target': 'default',
            }
        }

    def set_up_empty_config(self):
        # profiles.yml exists but carries no `config:` options.
        with open(self.profiles_path, 'w') as f:
            f.write(yaml.dump(self._base_config()))

    def set_up_config_options(self, **kwargs):
        # profiles.yml with a top-level `config:` section built from kwargs.
        config = self._base_config()
        config.update(config=kwargs)
        with open(self.profiles_path, 'w') as f:
            f.write(yaml.dump(config))

    def tearDown(self):
        # Was a bare `except: pass`, which also swallowed SystemExit and
        # KeyboardInterrupt; ignore_errors covers the intended
        # "directory already gone" case explicitly.
        shutil.rmtree(self.base_dir, ignore_errors=True)

    def test__implicit_missing(self, initialize_tracking, do_not_track, use_colors):
        # No profiles.yml at all: tracking defaults to on, colors untouched.
        main.initialize_config_values(self.args)
        initialize_tracking.assert_called_once_with(self.base_dir)
        do_not_track.assert_not_called()
        use_colors.assert_not_called()

    def test__implicit_opt_in_colors(self, initialize_tracking, do_not_track, use_colors):
        # profiles.yml present but no `config:`: same defaults as missing file.
        self.set_up_empty_config()
        main.initialize_config_values(self.args)
        initialize_tracking.assert_called_once_with(self.base_dir)
        do_not_track.assert_not_called()
        use_colors.assert_not_called()

    def test__explicit_opt_out(self, initialize_tracking, do_not_track, use_colors):
        # send_anonymous_usage_stats: False must disable tracking.
        self.set_up_config_options(send_anonymous_usage_stats=False)
        main.initialize_config_values(self.args)
        initialize_tracking.assert_not_called()
        do_not_track.assert_called_once_with()
        use_colors.assert_not_called()

    def test__explicit_opt_in(self, initialize_tracking, do_not_track, use_colors):
        # Explicit True behaves the same as the default.
        self.set_up_config_options(send_anonymous_usage_stats=True)
        main.initialize_config_values(self.args)
        initialize_tracking.assert_called_once_with(self.base_dir)
        do_not_track.assert_not_called()
        use_colors.assert_not_called()

    def test__explicit_no_colors(self, initialize_tracking, do_not_track, use_colors):
        # use_colors: False must be forwarded to the UI layer.
        self.set_up_config_options(use_colors=False)
        main.initialize_config_values(self.args)
        initialize_tracking.assert_called_once_with(self.base_dir)
        do_not_track.assert_not_called()
        use_colors.assert_called_once_with(False)

    def test__explicit_yes_colors(self, initialize_tracking, do_not_track, use_colors):
        # use_colors: True is forwarded as well (not just the False case).
        self.set_up_config_options(use_colors=True)
        main.initialize_config_values(self.args)
        initialize_tracking.assert_called_once_with(self.base_dir)
        do_not_track.assert_not_called()
        use_colors.assert_called_once_with(True)

View File

@@ -55,7 +55,6 @@ ENV_KEY_NAME = 'KEY' if os.name == 'nt' else 'key'
class ManifestTest(unittest.TestCase):
def setUp(self):
dbt.flags.STRICT_MODE = True
# TODO: why is this needed for tests in this module to pass?
tracking.active_user = None
@@ -373,7 +372,7 @@ class ManifestTest(unittest.TestCase):
def test_metadata(self, mock_user):
mock_user.id = 'cfc9500f-dc7f-4c83-9ea7-2c581c1b38cf'
mock_user.invocation_id = '01234567-0123-0123-0123-0123456789ab'
mock_user.do_not_track = True
dbt.flags.SEND_ANONYMOUS_USAGE_STATS = False
now = datetime.utcnow()
self.assertEqual(
ManifestMetadata(
@@ -396,7 +395,7 @@ class ManifestTest(unittest.TestCase):
def test_no_nodes_with_metadata(self, mock_user):
mock_user.id = 'cfc9500f-dc7f-4c83-9ea7-2c581c1b38cf'
mock_user.invocation_id = '01234567-0123-0123-0123-0123456789ab'
mock_user.do_not_track = True
dbt.flags.SEND_ANONYMOUS_USAGE_STATS = False
metadata = ManifestMetadata(
project_id='098f6bcd4621d373cade4e832627b4f6',
adapter_type='postgres',
@@ -486,8 +485,6 @@ class ManifestTest(unittest.TestCase):
class MixedManifestTest(unittest.TestCase):
def setUp(self):
dbt.flags.STRICT_MODE = True
self.maxDiff = None
self.model_config = NodeConfig.from_dict({

View File

@@ -68,7 +68,6 @@ class BaseParserTest(unittest.TestCase):
yield pm
def setUp(self):
dbt.flags.STRICT_MODE = True
dbt.flags.WARN_ERROR = True
# HACK: this is needed since tracking events can
# be sent when using the model parser

View File

@@ -23,7 +23,6 @@ from .utils import config_from_parts_or_dicts, inject_adapter, mock_connection,
class TestPostgresAdapter(unittest.TestCase):
def setUp(self):
flags.STRICT_MODE = True
project_cfg = {
'name': 'X',
'version': '0.1',
@@ -331,8 +330,6 @@ class TestPostgresAdapter(unittest.TestCase):
class TestConnectingPostgresAdapter(unittest.TestCase):
def setUp(self):
flags.STRICT_MODE = False
self.target_dict = {
'type': 'postgres',
'dbname': 'postgres',

View File

@@ -30,8 +30,6 @@ def fetch_cluster_credentials(*args, **kwargs):
class TestRedshiftAdapter(unittest.TestCase):
def setUp(self):
flags.STRICT_MODE = True
profile_cfg = {
'outputs': {
'test': {

View File

@@ -21,8 +21,6 @@ from .utils import config_from_parts_or_dicts, inject_adapter, mock_connection,
class TestSnowflakeAdapter(unittest.TestCase):
def setUp(self):
flags.STRICT_MODE = False
profile_cfg = {
'outputs': {
'test': {