Compare commits

...

36 Commits

Author SHA1 Message Date
Jeremy Cohen
1bed1e9fbc Rm plugins/redshift, plugins/snowflake 2021-09-29 00:22:50 +02:00
Kyle Wigley
581d567440 Consolidate dbt-snowflake
Continue pulling apart Snowflake tests

Fixups

Rip out even more
2021-09-29 00:22:47 +02:00
Kyle Wigley
db6402d77d Consolidate dbt-redshift
Start moving unit tests

fix typo; rm breakpoints

rm references to redshift

rm all references to redshift except `integration/100_rpc_test` 🙁

cleanup CI

fix tests!

fix docs test bc of schema version change
2021-09-29 00:22:32 +02:00
Jeremy Cohen
571beb13d9 Avoid infinite recursion in macro state check (#3957)
* Avoid infinite recursion in macro state check

* Add test case for infinite macro recursion
2021-09-27 08:43:29 -07:00
Emily Rockman
2fc8e5e0b6 Merge pull request #3907 from dbt-labs/3900-execute-macro-release
removed the release parameter and associated deprecation
2021-09-27 07:16:37 -05:00
Emily Rockman
5ab07273ba fixed flake error 2021-09-27 06:47:48 -05:00
Emily Rockman
19c9e5bfdf Merge branch 'develop' into 3900-execute-macro-release 2021-09-27 06:42:11 -05:00
Emily Rockman
60794367a5 Merge pull request #3918 from dbt-labs/er/3898-column-quoting-unset
changes default quote_columns to True and removed associated deprecation
2021-09-27 06:39:26 -05:00
leahwicz
ea07729bbf Bumping artifact versions for 0.21 (#3945)
* Bumping artifact versions for 0.21

* Adding sources bump

* Updating some tests

* Updated sources test

* Adding changelog update
2021-09-23 22:58:40 -04:00
Gerda Shank
c4370773f6 Merge pull request #3943 from dbt-labs/fix_flags_profiles_dir
[#3940] Use flags.PROFILES_DIR in a few more places
2021-09-23 13:51:10 -04:00
Gerda Shank
fda17b456e [#3940] Use flags.PROFILES_DIR in a few more places 2021-09-23 13:27:46 -04:00
Emily Rockman
bc3e1a0a71 fixed test and updated changelog 2021-09-23 12:01:53 -05:00
Emily Rockman
a06988706c cleaned up snowflake test 2021-09-23 11:48:06 -05:00
Emily Rockman
ce73124bbf fixed snowflake test 2021-09-23 11:48:06 -05:00
Emily Rockman
352c62f3c3 fixed missing import 2021-09-23 11:48:06 -05:00
Emily Rockman
81a51d3942 override for quote_seed_column method in snowflake adapter 2021-09-23 11:48:06 -05:00
Emily Rockman
64fc3a39a7 changes default quote_columns to True and removed deprecation 2021-09-23 11:48:05 -05:00
Emily Rockman
e5b6f4f293 updated changelog 2021-09-23 11:38:24 -05:00
Emily Rockman
d26e63ed9a removed the release parameter and associated deprecations 2021-09-23 11:37:50 -05:00
dave-connors-3
f4f5d31959 Feature/catalog relational objects (#3922)
* filter to relational nodes

* cleanup

* flake formatting

* changelog
2021-09-23 08:54:05 -07:00
Jeremy Cohen
e7e12075b9 Fix batching for Snowflake seeds >10k rows (#3942)
* Call get_batch_size in snowflake__load_csv_rows

* Git ignore big csv. Update changelog
2021-09-23 08:49:52 -07:00
Emily Rockman
74dda5aa19 Merge pull request #3893 from dbt-labs/2798_enact_deprecations
removed deprecation for materialization-return and replaced with exception
2021-09-22 14:35:05 -05:00
Emily Rockman
092e96ce70 Merge branch 'develop' into 2798_enact_deprecations 2021-09-22 14:09:35 -05:00
Kyle Wigley
18102027ba Pull in changes for the 0.21.0rc1 release (#3935)
Co-authored-by: Github Build Bot <buildbot@fishtownanalytics.com>
2021-09-22 13:53:43 -05:00
Emily Rockman
f80825d63e updated changelog 2021-09-22 12:55:49 -05:00
Kyle Wigley
9316e47b77 Pull in changes for the 0.21.0rc1 release (#3935)
Co-authored-by: Github Build Bot <buildbot@fishtownanalytics.com>
2021-09-22 13:25:46 -04:00
Emily Rockman
f99cf1218a fixed conflict 2021-09-22 11:36:22 -05:00
Emily Rockman
5871915ce9 Merge branch '2798_enact_deprecations' of https://github.com/dbt-labs/dbt into 2798_enact_deprecations
# Conflicts:
#	test/integration/012_deprecation_tests/test_deprecations.py
2021-09-22 11:34:51 -05:00
Emily Rockman
5ce290043f more explicit error check 2021-09-22 11:16:59 -05:00
Emily Rockman
080d27321b removed deprecation for materialization-return and replaced it with an exception 2021-09-22 11:16:59 -05:00
Gerda Shank
1d0936bd14 Merge pull request #3889 from dbt-labs/3886_pp_log_levels
[#3886] Tweak partial parsing log messages
2021-09-22 10:48:21 -04:00
Gerda Shank
706b8ca9df Merge pull request #3839 from dbt-labs/2990_global_cli_flags
[#2990] Normalize global CLI args/flags
2021-09-22 10:47:54 -04:00
Gerda Shank
779c789a64 [#2990] Normalize global CLI args/flags 2021-09-22 09:58:07 -04:00
Gerda Shank
409b4ba109 [#3886] Tweak partial parsing log messages 2021-09-22 09:20:24 -04:00
Emily Rockman
237048c7ac more explicit error check 2021-09-17 10:51:53 -05:00
Emily Rockman
30ff395b7b removed deprecation for materialization-return and replaced it with an exception 2021-09-17 10:51:53 -05:00
188 changed files with 1077 additions and 6498 deletions

View File

@@ -1,5 +1,5 @@
[bumpversion]
current_version = 0.21.0b2
current_version = 0.21.0rc1
parse = (?P<major>\d+)
\.(?P<minor>\d+)
\.(?P<patch>\d+)
@@ -34,17 +34,9 @@ first_value = 1
[bumpversion:file:plugins/postgres/setup.py]
[bumpversion:file:plugins/redshift/setup.py]
[bumpversion:file:plugins/snowflake/setup.py]
[bumpversion:file:plugins/bigquery/setup.py]
[bumpversion:file:plugins/postgres/dbt/adapters/postgres/__version__.py]
[bumpversion:file:plugins/redshift/dbt/adapters/redshift/__version__.py]
[bumpversion:file:plugins/snowflake/dbt/adapters/snowflake/__version__.py]
[bumpversion:file:plugins/bigquery/dbt/adapters/bigquery/__version__.py]

View File

@@ -21,16 +21,6 @@ updates:
schedule:
interval: "daily"
rebase-strategy: "disabled"
- package-ecosystem: "pip"
directory: "/plugins/redshift"
schedule:
interval: "daily"
rebase-strategy: "disabled"
- package-ecosystem: "pip"
directory: "/plugins/snowflake"
schedule:
interval: "daily"
rebase-strategy: "disabled"
# docker dependencies
- package-ecosystem: "docker"

View File

@@ -1,7 +1,7 @@
module.exports = ({ context }) => {
const defaultPythonVersion = "3.8";
const supportedPythonVersions = ["3.6", "3.7", "3.8", "3.9"];
const supportedAdapters = ["snowflake", "postgres", "bigquery", "redshift"];
const supportedAdapters = ["postgres", "bigquery"];
// if PR, generate matrix based on files changed and PR labels
if (context.eventName.includes("pull_request")) {

View File

@@ -91,16 +91,9 @@ jobs:
- 'core/**'
- 'plugins/postgres/**'
- 'dev-requirements.txt'
snowflake:
- 'core/**'
- 'plugins/snowflake/**'
bigquery:
- 'core/**'
- 'plugins/bigquery/**'
redshift:
- 'core/**'
- 'plugins/redshift/**'
- 'plugins/postgres/**'
- name: Generate integration test matrix
id: generate-matrix
@@ -191,33 +184,6 @@ jobs:
if: matrix.adapter == 'postgres'
run: tox
- name: Run tox (redshift)
if: matrix.adapter == 'redshift'
env:
REDSHIFT_TEST_DBNAME: ${{ secrets.REDSHIFT_TEST_DBNAME }}
REDSHIFT_TEST_PASS: ${{ secrets.REDSHIFT_TEST_PASS }}
REDSHIFT_TEST_USER: ${{ secrets.REDSHIFT_TEST_USER }}
REDSHIFT_TEST_PORT: ${{ secrets.REDSHIFT_TEST_PORT }}
REDSHIFT_TEST_HOST: ${{ secrets.REDSHIFT_TEST_HOST }}
run: tox
- name: Run tox (snowflake)
if: matrix.adapter == 'snowflake'
env:
SNOWFLAKE_TEST_ACCOUNT: ${{ secrets.SNOWFLAKE_TEST_ACCOUNT }}
SNOWFLAKE_TEST_PASSWORD: ${{ secrets.SNOWFLAKE_TEST_PASSWORD }}
SNOWFLAKE_TEST_USER: ${{ secrets.SNOWFLAKE_TEST_USER }}
SNOWFLAKE_TEST_WAREHOUSE: ${{ secrets.SNOWFLAKE_TEST_WAREHOUSE }}
SNOWFLAKE_TEST_OAUTH_REFRESH_TOKEN: ${{ secrets.SNOWFLAKE_TEST_OAUTH_REFRESH_TOKEN }}
SNOWFLAKE_TEST_OAUTH_CLIENT_ID: ${{ secrets.SNOWFLAKE_TEST_OAUTH_CLIENT_ID }}
SNOWFLAKE_TEST_OAUTH_CLIENT_SECRET: ${{ secrets.SNOWFLAKE_TEST_OAUTH_CLIENT_SECRET }}
SNOWFLAKE_TEST_ALT_DATABASE: ${{ secrets.SNOWFLAKE_TEST_ALT_DATABASE }}
SNOWFLAKE_TEST_ALT_WAREHOUSE: ${{ secrets.SNOWFLAKE_TEST_ALT_WAREHOUSE }}
SNOWFLAKE_TEST_DATABASE: ${{ secrets.SNOWFLAKE_TEST_DATABASE }}
SNOWFLAKE_TEST_QUOTED_DATABASE: ${{ secrets.SNOWFLAKE_TEST_QUOTED_DATABASE }}
SNOWFLAKE_TEST_ROLE: ${{ secrets.SNOWFLAKE_TEST_ROLE }}
run: tox
- name: Run tox (bigquery)
if: matrix.adapter == 'bigquery'
env:

View File

@@ -26,7 +26,7 @@ This is the docs website code. It comes from the dbt-docs repository, and is gen
## Adapters
dbt uses an adapter-plugin pattern to extend support to different databases, warehouses, query engines, etc. The four core adapters that are in the main repository, contained within the [`plugins`](plugins) subdirectory, are: Postgres, Redshift, Snowflake and BigQuery. Other warehouses use adapter plugins defined in separate repositories (e.g. [dbt-spark](https://github.com/dbt-labs/dbt-spark), [dbt-presto](https://github.com/dbt-labs/dbt-presto)).
dbt uses an adapter-plugin pattern to extend support to different databases, warehouses, query engines, etc. For testing and development purposes, the dbt-postgres plugin lives alongside the dbt-core codebase, in the [`plugins`](plugins) subdirectory. Like other adapter plugins, it is a self-contained codebase and package that builds on top of dbt-core.
Each adapter is a mix of python, Jinja2, and SQL. The adapter code also makes heavy use of Jinja2 to wrap modular chunks of SQL functionality, define default implementations, and allow plugins to override it.
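A minimal, self-contained sketch of the default-plus-override pattern described above (hypothetical Sketch* class names, not dbt's actual adapter classes): this set of commits changes the base adapter's seed column quoting default to True, while the dbt-snowflake plugin overrides the method to keep its previous False default.

from typing import Optional

class SketchBaseAdapter:
    # default implementation in core: quote seed columns unless told otherwise
    def quote_seed_column(self, column: str, quote_config: Optional[bool]) -> str:
        quote_columns = True  # new default (see #3918); was False before
        if isinstance(quote_config, bool):
            quote_columns = quote_config
        return f'"{column}"' if quote_columns else column

class SketchSnowflakeAdapter(SketchBaseAdapter):
    # plugin override: keep the old default of False for Snowflake seeds
    def quote_seed_column(self, column: str, quote_config: Optional[bool]) -> str:
        quote_columns = False
        if isinstance(quote_config, bool):
            quote_columns = quote_config
        return f'"{column}"' if quote_columns else column

print(SketchBaseAdapter().quote_seed_column("id", None))       # -> "id" (quoted)
print(SketchSnowflakeAdapter().quote_seed_column("id", None))  # -> id (unquoted)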

View File

@@ -1,7 +1,32 @@
## dbt 1.0.0 (Release TBD)
### Features
- Normalize global CLI arguments/flags ([#2990](https://github.com/dbt-labs/dbt/issues/2990), [#3839](https://github.com/dbt-labs/dbt/pull/3839))
### Fixes
### Under the hood
- Enact deprecation for `materialization-return` and replace deprecation warning with an exception. ([#3896](https://github.com/dbt-labs/dbt/issues/3896))
- Build catalog for only relational, non-ephemeral nodes in the graph ([#3920](https://github.com/dbt-labs/dbt/issues/3920))
- Enact deprecation to remove the `release` arg from the `execute_macro` method. ([#3900](https://github.com/dbt-labs/dbt/issues/3900))
- Enact deprecation for default quoting to be True. Override for the `dbt-snowflake` adapter so it stays `False`. ([#3898](https://github.com/dbt-labs/dbt/issues/3898))
Contributors:
- [@dave-connors-3](https://github.com/dave-connors-3) ([#3920](https://github.com/dbt-labs/dbt/issues/3920))
## dbt 0.21.0 (Release TBD)
## dbt 0.21.0b2 (August 19, 2021)
### Fixes
- Fix batching for large seeds on Snowflake ([#3941](https://github.com/dbt-labs/dbt/issues/3941), [#3942](https://github.com/dbt-labs/dbt/pull/3942))
- Avoid infinite recursion in `state:modified.macros` check ([#3904](https://github.com/dbt-labs/dbt/issues/3904), [#3957](https://github.com/dbt-labs/dbt/pull/3957))
### Under the hood
- Bump artifact schema versions for 0.21.0 ([#3945](https://github.com/dbt-labs/dbt/pull/3945))
## dbt 0.21.0rc1 (September 20, 2021)
### Features

View File

@@ -44,22 +44,6 @@ integration-postgres: .env ## Runs postgres integration tests with py38.
integration-postgres-fail-fast: .env ## Runs postgres integration tests with py38 in "fail fast" mode.
$(DOCKER_CMD) tox -e py38-postgres -- -x -nauto
.PHONY: integration-redshift
integration-redshift: .env ## Runs redshift integration tests with py38.
$(DOCKER_CMD) tox -e py38-redshift -- -nauto
.PHONY: integration-redshift-fail-fast
integration-redshift-fail-fast: .env ## Runs redshift integration tests with py38 in "fail fast" mode.
$(DOCKER_CMD) tox -e py38-redshift -- -x -nauto
.PHONY: integration-snowflake
integration-snowflake: .env ## Runs snowflake integration tests with py38.
$(DOCKER_CMD) tox -e py38-snowflake -- -nauto
.PHONY: integration-snowflake-fail-fast
integration-snowflake-fail-fast: .env ## Runs snowflake integration tests with py38 in "fail fast" mode.
$(DOCKER_CMD) tox -e py38-snowflake -- -x -nauto
.PHONY: integration-bigquery
integration-bigquery: .env ## Runs bigquery integration tests with py38.
$(DOCKER_CMD) tox -e py38-bigquery -- -nauto

View File

@@ -238,12 +238,6 @@ class BaseConnectionManager(metaclass=abc.ABCMeta):
@classmethod
def _rollback(cls, connection: Connection) -> None:
"""Roll back the given connection."""
if flags.STRICT_MODE:
if not isinstance(connection, Connection):
raise dbt.exceptions.CompilerException(
f'In _rollback, got {connection} - not a Connection!'
)
if connection.transaction_open is False:
raise dbt.exceptions.InternalException(
f'Tried to rollback transaction on connection '
@@ -257,12 +251,6 @@ class BaseConnectionManager(metaclass=abc.ABCMeta):
@classmethod
def close(cls, connection: Connection) -> Connection:
if flags.STRICT_MODE:
if not isinstance(connection, Connection):
raise dbt.exceptions.CompilerException(
f'In close, got {connection} - not a Connection!'
)
# if the connection is in closed or init, there's nothing to do
if connection.state in {ConnectionState.CLOSED, ConnectionState.INIT}:
return connection

View File

@@ -16,9 +16,7 @@ from dbt.exceptions import (
get_relation_returned_multiple_results,
InternalException, NotImplementedException, RuntimeException,
)
from dbt import flags
from dbt import deprecations
from dbt.adapters.protocol import (
AdapterConfig,
ConnectionManagerProtocol,
@@ -289,9 +287,7 @@ class BaseAdapter(metaclass=AdapterMeta):
def _schema_is_cached(self, database: Optional[str], schema: str) -> bool:
"""Check if the schema is cached, and by default logs if it is not."""
if flags.USE_CACHE is False:
return False
elif (database, schema) not in self.cache:
if (database, schema) not in self.cache:
logger.debug(
'On "{}": cache miss for schema "{}.{}", this is inefficient'
.format(self.nice_connection_name(), database, schema)
@@ -324,7 +320,9 @@ class BaseAdapter(metaclass=AdapterMeta):
"""
info_schema_name_map = SchemaSearchMap()
nodes: Iterator[CompileResultNode] = chain(
manifest.nodes.values(),
[node for node in manifest.nodes.values() if (
node.is_relational and not node.is_ephemeral_model
)],
manifest.sources.values(),
)
for node in nodes:
@@ -340,9 +338,6 @@ class BaseAdapter(metaclass=AdapterMeta):
"""Populate the relations cache for the given schemas. Returns an
iterable of the schemas populated, as strings.
"""
if not flags.USE_CACHE:
return
cache_schemas = self._get_cache_schemas(manifest)
with executor(self.config) as tpe:
futures: List[Future[List[BaseRelation]]] = []
@@ -375,9 +370,6 @@ class BaseAdapter(metaclass=AdapterMeta):
"""Run a query that gets a populated cache of the relations in the
database and set the cache on this adapter.
"""
if not flags.USE_CACHE:
return
with self.cache.lock:
if clear:
self.cache.clear()
@@ -391,8 +383,7 @@ class BaseAdapter(metaclass=AdapterMeta):
raise_compiler_error(
'Attempted to cache a null relation for {}'.format(name)
)
if flags.USE_CACHE:
self.cache.add(relation)
self.cache.add(relation)
# so jinja doesn't render things
return ''
@@ -406,8 +397,7 @@ class BaseAdapter(metaclass=AdapterMeta):
raise_compiler_error(
'Attempted to drop a null relation for {}'.format(name)
)
if flags.USE_CACHE:
self.cache.drop(relation)
self.cache.drop(relation)
return ''
@available
@@ -428,8 +418,7 @@ class BaseAdapter(metaclass=AdapterMeta):
.format(src_name, dst_name, name)
)
if flags.USE_CACHE:
self.cache.rename(from_relation, to_relation)
self.cache.rename(from_relation, to_relation)
return ''
###
@@ -807,12 +796,11 @@ class BaseAdapter(metaclass=AdapterMeta):
def quote_seed_column(
self, column: str, quote_config: Optional[bool]
) -> str:
# this is the default for now
quote_columns: bool = False
quote_columns: bool = True
if isinstance(quote_config, bool):
quote_columns = quote_config
elif quote_config is None:
deprecations.warn('column-quoting-unset')
pass
else:
raise_compiler_error(
f'The seed configuration value of "quote_columns" has an '
@@ -944,7 +932,6 @@ class BaseAdapter(metaclass=AdapterMeta):
project: Optional[str] = None,
context_override: Optional[Dict[str, Any]] = None,
kwargs: Dict[str, Any] = None,
release: bool = False,
text_only_columns: Optional[Iterable[str]] = None,
) -> agate.Table:
"""Look macro_name up in the manifest and execute its results.
@@ -958,10 +945,8 @@ class BaseAdapter(metaclass=AdapterMeta):
execution context.
:param kwargs: An optional dict of keyword args used to pass to the
macro.
:param release: Ignored.
"""
if release is not False:
deprecations.warn('execute-macro-release')
if kwargs is None:
kwargs = {}
if context_override is None:

View File

@@ -11,7 +11,6 @@ from dbt.contracts.connection import (
Connection, ConnectionState, AdapterResponse
)
from dbt.logger import GLOBAL_LOGGER as logger
from dbt import flags
class SQLConnectionManager(BaseConnectionManager):
@@ -144,13 +143,6 @@ class SQLConnectionManager(BaseConnectionManager):
def begin(self):
connection = self.get_thread_connection()
if flags.STRICT_MODE:
if not isinstance(connection, Connection):
raise dbt.exceptions.CompilerException(
f'In begin, got {connection} - not a Connection!'
)
if connection.transaction_open is True:
raise dbt.exceptions.InternalException(
'Tried to begin a new transaction on connection "{}", but '
@@ -163,12 +155,6 @@ class SQLConnectionManager(BaseConnectionManager):
def commit(self):
connection = self.get_thread_connection()
if flags.STRICT_MODE:
if not isinstance(connection, Connection):
raise dbt.exceptions.CompilerException(
f'In commit, got {connection} - not a Connection!'
)
if connection.transaction_open is False:
raise dbt.exceptions.InternalException(
'Tried to commit transaction on connection "{}", but '

View File

@@ -1,4 +1,4 @@
# all these are just exports, they need "noqa" so flake8 will not complain.
from .profile import Profile, PROFILES_DIR, read_user_config # noqa
from .profile import Profile, read_user_config # noqa
from .project import Project, IsFQNResource # noqa
from .runtime import RuntimeConfig, UnsetProfileConfig # noqa

View File

@@ -4,6 +4,7 @@ import os
from dbt.dataclass_schema import ValidationError
from dbt import flags
from dbt.clients.system import load_file_contents
from dbt.clients.yaml_helper import load_yaml_text
from dbt.contracts.connection import Credentials, HasCredentials
@@ -20,10 +21,8 @@ from dbt.utils import coerce_dict_str
from .renderer import ProfileRenderer
DEFAULT_THREADS = 1
DEFAULT_PROFILES_DIR = os.path.join(os.path.expanduser('~'), '.dbt')
PROFILES_DIR = os.path.expanduser(
os.getenv('DBT_PROFILES_DIR', DEFAULT_PROFILES_DIR)
)
INVALID_PROFILE_MESSAGE = """
dbt encountered an error while trying to read your profiles.yml file.
@@ -43,7 +42,7 @@ Here, [profile name] should be replaced with a profile name
defined in your profiles.yml file. You can find profiles.yml here:
{profiles_file}/profiles.yml
""".format(profiles_file=PROFILES_DIR)
""".format(profiles_file=DEFAULT_PROFILES_DIR)
def read_profile(profiles_dir: str) -> Dict[str, Any]:
@@ -73,10 +72,10 @@ def read_user_config(directory: str) -> UserConfig:
try:
profile = read_profile(directory)
if profile:
user_cfg = coerce_dict_str(profile.get('config', {}))
if user_cfg is not None:
UserConfig.validate(user_cfg)
return UserConfig.from_dict(user_cfg)
user_config = coerce_dict_str(profile.get('config', {}))
if user_config is not None:
UserConfig.validate(user_config)
return UserConfig.from_dict(user_config)
except (RuntimeException, ValidationError):
pass
return UserConfig()
@@ -89,7 +88,7 @@ def read_user_config(directory: str) -> UserConfig:
class Profile(HasCredentials):
profile_name: str
target_name: str
config: UserConfig
user_config: UserConfig
threads: int
credentials: Credentials
@@ -97,7 +96,7 @@ class Profile(HasCredentials):
self,
profile_name: str,
target_name: str,
config: UserConfig,
user_config: UserConfig,
threads: int,
credentials: Credentials
):
@@ -106,7 +105,7 @@ class Profile(HasCredentials):
"""
self.profile_name = profile_name
self.target_name = target_name
self.config = config
self.user_config = user_config
self.threads = threads
self.credentials = credentials
@@ -124,12 +123,12 @@ class Profile(HasCredentials):
result = {
'profile_name': self.profile_name,
'target_name': self.target_name,
'config': self.config,
'user_config': self.user_config,
'threads': self.threads,
'credentials': self.credentials,
}
if serialize_credentials:
result['config'] = self.config.to_dict(omit_none=True)
result['user_config'] = self.user_config.to_dict(omit_none=True)
result['credentials'] = self.credentials.to_dict(omit_none=True)
return result
@@ -143,7 +142,7 @@ class Profile(HasCredentials):
'name': self.target_name,
'target_name': self.target_name,
'profile_name': self.profile_name,
'config': self.config.to_dict(omit_none=True),
'config': self.user_config.to_dict(omit_none=True),
})
return target
@@ -238,7 +237,7 @@ class Profile(HasCredentials):
threads: int,
profile_name: str,
target_name: str,
user_cfg: Optional[Dict[str, Any]] = None
user_config: Optional[Dict[str, Any]] = None
) -> 'Profile':
"""Create a profile from an existing set of Credentials and the
remaining information.
@@ -247,20 +246,20 @@ class Profile(HasCredentials):
:param threads: The number of threads to use for connections.
:param profile_name: The profile name used for this profile.
:param target_name: The target name used for this profile.
:param user_cfg: The user-level config block from the
:param user_config: The user-level config block from the
raw profiles, if specified.
:raises DbtProfileError: If the profile is invalid.
:returns: The new Profile object.
"""
if user_cfg is None:
user_cfg = {}
UserConfig.validate(user_cfg)
config = UserConfig.from_dict(user_cfg)
if user_config is None:
user_config = {}
UserConfig.validate(user_config)
user_config_obj: UserConfig = UserConfig.from_dict(user_config)
profile = cls(
profile_name=profile_name,
target_name=target_name,
config=config,
user_config=user_config_obj,
threads=threads,
credentials=credentials
)
@@ -313,7 +312,7 @@ class Profile(HasCredentials):
raw_profile: Dict[str, Any],
profile_name: str,
renderer: ProfileRenderer,
user_cfg: Optional[Dict[str, Any]] = None,
user_config: Optional[Dict[str, Any]] = None,
target_override: Optional[str] = None,
threads_override: Optional[int] = None,
) -> 'Profile':
@@ -325,7 +324,7 @@ class Profile(HasCredentials):
disk as yaml and its values rendered with jinja.
:param profile_name: The profile name used.
:param renderer: The config renderer.
:param user_cfg: The global config for the user, if it
:param user_config: The global config for the user, if it
was present.
:param target_override: The target to use, if provided on
the command line.
@@ -335,9 +334,9 @@ class Profile(HasCredentials):
target could not be found
:returns: The new Profile object.
"""
# user_cfg is not rendered.
if user_cfg is None:
user_cfg = raw_profile.get('config')
# user_config is not rendered.
if user_config is None:
user_config = raw_profile.get('config')
# TODO: should it be, and the values coerced to bool?
target_name, profile_data = cls.render_profile(
raw_profile, profile_name, target_override, renderer
@@ -358,7 +357,7 @@ class Profile(HasCredentials):
profile_name=profile_name,
target_name=target_name,
threads=threads,
user_cfg=user_cfg
user_config=user_config
)
@classmethod
@@ -401,13 +400,13 @@ class Profile(HasCredentials):
error_string=msg
)
)
user_cfg = raw_profiles.get('config')
user_config = raw_profiles.get('config')
return cls.from_raw_profile_info(
raw_profile=raw_profile,
profile_name=profile_name,
renderer=renderer,
user_cfg=user_cfg,
user_config=user_config,
target_override=target_override,
threads_override=threads_override,
)
@@ -435,7 +434,7 @@ class Profile(HasCredentials):
"""
threads_override = getattr(args, 'threads', None)
target_override = getattr(args, 'target', None)
raw_profiles = read_profile(args.profiles_dir)
raw_profiles = read_profile(flags.PROFILES_DIR)
profile_name = cls.pick_profile_name(getattr(args, 'profile', None),
project_profile_name)
return cls.from_raw_profiles(

View File

@@ -12,6 +12,7 @@ from .profile import Profile
from .project import Project
from .renderer import DbtProjectYamlRenderer, ProfileRenderer
from .utils import parse_cli_vars
from dbt import flags
from dbt import tracking
from dbt.adapters.factory import get_relation_class_by_name, get_include_paths
from dbt.helper_types import FQNPath, PathSet
@@ -117,7 +118,7 @@ class RuntimeConfig(Project, Profile, AdapterRequiredConfig):
unrendered=project.unrendered,
profile_name=profile.profile_name,
target_name=profile.target_name,
config=profile.config,
user_config=profile.user_config,
threads=profile.threads,
credentials=profile.credentials,
args=args,
@@ -144,7 +145,7 @@ class RuntimeConfig(Project, Profile, AdapterRequiredConfig):
project = Project.from_project_root(
project_root,
renderer,
verify_version=getattr(self.args, 'version_check', False),
verify_version=bool(flags.VERSION_CHECK),
)
cfg = self.from_parts(
@@ -197,7 +198,7 @@ class RuntimeConfig(Project, Profile, AdapterRequiredConfig):
) -> Tuple[Project, Profile]:
# profile_name from the project
project_root = args.project_dir if args.project_dir else os.getcwd()
version_check = getattr(args, 'version_check', False)
version_check = bool(flags.VERSION_CHECK)
partial = Project.partial_load(
project_root,
verify_version=version_check
@@ -416,7 +417,7 @@ class UnsetConfig(UserConfig):
class UnsetProfile(Profile):
def __init__(self):
self.credentials = UnsetCredentials()
self.config = UnsetConfig()
self.user_config = UnsetConfig()
self.profile_name = ''
self.target_name = ''
self.threads = -1
@@ -513,7 +514,7 @@ class UnsetProfileConfig(RuntimeConfig):
unrendered=project.unrendered,
profile_name='',
target_name='',
config=UnsetConfig(),
user_config=UnsetConfig(),
threads=getattr(args, 'threads', 1),
credentials=UnsetCredentials(),
args=args,

View File

@@ -526,8 +526,6 @@ class BaseContext(metaclass=ContextMeta):
The list of valid flags are:
- `flags.STRICT_MODE`: True if `--strict` (or `-S`) was provided on the
command line
- `flags.FULL_REFRESH`: True if `--full-refresh` was provided on the
command line
- `flags.NON_DESTRUCTIVE`: True if `--non-destructive` was provided on

View File

@@ -186,14 +186,11 @@ class UserConfigContract(Protocol):
partial_parse: Optional[bool] = None
printer_width: Optional[int] = None
def set_values(self, cookie_dir: str) -> None:
...
class HasCredentials(Protocol):
credentials: Credentials
profile_name: str
config: UserConfigContract
user_config: UserConfigContract
target_name: str
threads: int

View File

@@ -223,9 +223,7 @@ class ManifestMetadata(BaseArtifactMetadata):
self.user_id = tracking.active_user.id
if self.send_anonymous_usage_stats is None:
self.send_anonymous_usage_stats = (
not tracking.active_user.do_not_track
)
self.send_anonymous_usage_stats = flags.SEND_ANONYMOUS_USAGE_STATS
@classmethod
def default(cls):
@@ -1071,7 +1069,7 @@ AnyManifest = Union[Manifest, MacroManifest]
@dataclass
@schema_version('manifest', 2)
@schema_version('manifest', 3)
class WritableManifest(ArtifactMixin):
nodes: Mapping[UniqueID, ManifestNode] = field(
metadata=dict(description=(

View File

@@ -156,13 +156,6 @@ class ParsedNodeMixins(dbtClassMixin):
self.columns = patch.columns
self.meta = patch.meta
self.docs = patch.docs
if flags.STRICT_MODE:
# It seems odd that an instance can be invalid
# Maybe there should be validation or restrictions
# elsewhere?
assert isinstance(self, dbtClassMixin)
dct = self.to_dict(omit_none=False)
self.validate(dct)
def get_materialization(self):
return self.config.materialized
@@ -509,11 +502,6 @@ class ParsedMacro(UnparsedBaseNode, HasUniqueID):
self.meta = patch.meta
self.docs = patch.docs
self.arguments = patch.arguments
if flags.STRICT_MODE:
# What does this actually validate?
assert isinstance(self, dbtClassMixin)
dct = self.to_dict(omit_none=False)
self.validate(dct)
def same_contents(self, other: Optional['ParsedMacro']) -> bool:
if other is None:

View File

@@ -1,9 +1,7 @@
from dbt.contracts.util import Replaceable, Mergeable, list_str
from dbt.contracts.connection import UserConfigContract, QueryComment
from dbt.contracts.connection import QueryComment, UserConfigContract
from dbt.helper_types import NoValue
from dbt.logger import GLOBAL_LOGGER as logger # noqa
from dbt import tracking
from dbt import ui
from dbt.dataclass_schema import (
dbtClassMixin, ValidationError,
HyphenatedDbtClassMixin,
@@ -230,25 +228,20 @@ class UserConfig(ExtensibleDbtClassMixin, Replaceable, UserConfigContract):
use_colors: Optional[bool] = None
partial_parse: Optional[bool] = None
printer_width: Optional[int] = None
def set_values(self, cookie_dir):
if self.send_anonymous_usage_stats:
tracking.initialize_tracking(cookie_dir)
else:
tracking.do_not_track()
if self.use_colors is not None:
ui.use_colors(self.use_colors)
if self.printer_width:
ui.printer_width(self.printer_width)
write_json: Optional[bool] = None
warn_error: Optional[bool] = None
log_format: Optional[bool] = None
debug: Optional[bool] = None
version_check: Optional[bool] = None
fail_fast: Optional[bool] = None
use_experimental_parser: Optional[bool] = None
@dataclass
class ProfileConfig(HyphenatedDbtClassMixin, Replaceable):
profile_name: str = field(metadata={'preserve_underscore': True})
target_name: str = field(metadata={'preserve_underscore': True})
config: UserConfig
user_config: UserConfig = field(metadata={'preserve_underscore': True})
threads: int
# TODO: make this a dynamic union of some kind?
credentials: Optional[Dict[str, Any]]

View File

@@ -185,7 +185,7 @@ class RunExecutionResult(
@dataclass
@schema_version('run-results', 2)
@schema_version('run-results', 3)
class RunResultsArtifact(ExecutionResult, ArtifactMixin):
results: Sequence[RunResultOutput]
args: Dict[str, Any] = field(default_factory=dict)
@@ -369,7 +369,7 @@ class FreshnessResult(ExecutionResult):
@dataclass
@schema_version('sources', 1)
@schema_version('sources', 2)
class FreshnessExecutionResultArtifact(
ArtifactMixin,
VersionedSchema,

View File

@@ -57,22 +57,6 @@ class DispatchPackagesDeprecation(DBTDeprecation):
'''
class MaterializationReturnDeprecation(DBTDeprecation):
_name = 'materialization-return'
_description = '''\
The materialization ("{materialization}") did not explicitly return a list
of relations to add to the cache. By default the target relation will be
added, but this behavior will be removed in a future version of dbt.
For more information, see:
https://docs.getdbt.com/v0.15/docs/creating-new-materializations#section-6-returning-relations
'''
class NotADictionaryDeprecation(DBTDeprecation):
_name = 'not-a-dictionary'
@@ -82,21 +66,6 @@ class NotADictionaryDeprecation(DBTDeprecation):
'''
class ColumnQuotingDeprecation(DBTDeprecation):
_name = 'column-quoting-unset'
_description = '''\
The quote_columns parameter was not set for seeds, so the default value of
False was chosen. The default will change to True in a future release.
For more information, see:
https://docs.getdbt.com/v0.15/docs/seeds#section-specify-column-quoting
'''
class ModelsKeyNonModelDeprecation(DBTDeprecation):
_name = 'models-key-mismatch'
@@ -113,15 +82,6 @@ class ModelsKeyNonModelDeprecation(DBTDeprecation):
'''
class ExecuteMacrosReleaseDeprecation(DBTDeprecation):
_name = 'execute-macro-release'
_description = '''\
The "release" argument to execute_macro is now ignored, and will be removed
in a future release of dbt. At that time, providing a `release` argument
will result in an error.
'''
class AdapterMacroDeprecation(DBTDeprecation):
_name = 'adapter-macro'
_description = '''\
@@ -178,11 +138,8 @@ active_deprecations: Set[str] = set()
deprecations_list: List[DBTDeprecation] = [
DispatchPackagesDeprecation(),
MaterializationReturnDeprecation(),
NotADictionaryDeprecation(),
ColumnQuotingDeprecation(),
ModelsKeyNonModelDeprecation(),
ExecuteMacrosReleaseDeprecation(),
AdapterMacroDeprecation(),
PackageRedirectDeprecation()
]

View File

@@ -6,19 +6,49 @@ if os.name != 'nt':
from pathlib import Path
from typing import Optional
# initially all flags are set to None, the on-load call of reset() will set
# them for their first time.
STRICT_MODE = None
FULL_REFRESH = None
USE_CACHE = None
WARN_ERROR = None
TEST_NEW_PARSER = None
# PROFILES_DIR must be set before the other flags
# It also gets set in main.py and in set_from_args because the rpc server
# doesn't go through exactly the same main arg processing.
DEFAULT_PROFILES_DIR = os.path.join(os.path.expanduser('~'), '.dbt')
PROFILES_DIR = os.path.expanduser(
os.getenv('DBT_PROFILES_DIR', DEFAULT_PROFILES_DIR)
)
STRICT_MODE = False # Only here for backwards compatibility
FULL_REFRESH = False # subcommand
STORE_FAILURES = False # subcommand
GREEDY = None # subcommand
# Global CLI commands
USE_EXPERIMENTAL_PARSER = None
WARN_ERROR = None
WRITE_JSON = None
PARTIAL_PARSE = None
USE_COLORS = None
STORE_FAILURES = None
GREEDY = None
DEBUG = None
LOG_FORMAT = None
VERSION_CHECK = None
FAIL_FAST = None
SEND_ANONYMOUS_USAGE_STATS = None
PRINTER_WIDTH = 80
# Global CLI defaults. These flags are set from three places:
# CLI args, environment variables, and user_config (profiles.yml).
# Environment variables use the pattern 'DBT_{flag name}', like DBT_PROFILES_DIR
flag_defaults = {
"USE_EXPERIMENTAL_PARSER": False,
"WARN_ERROR": False,
"WRITE_JSON": True,
"PARTIAL_PARSE": False,
"USE_COLORS": True,
"PROFILES_DIR": DEFAULT_PROFILES_DIR,
"DEBUG": False,
"LOG_FORMAT": None,
"VERSION_CHECK": True,
"FAIL_FAST": False,
"SEND_ANONYMOUS_USAGE_STATS": True,
"PRINTER_WIDTH": 80
}
def env_set_truthy(key: str) -> Optional[str]:
@@ -31,6 +61,12 @@ def env_set_truthy(key: str) -> Optional[str]:
return value
def env_set_bool(env_value):
if env_value in ('1', 't', 'true', 'y', 'yes'):
return True
return False
def env_set_path(key: str) -> Optional[Path]:
value = os.getenv(key)
if value is None:
@@ -51,58 +87,75 @@ def _get_context():
return multiprocessing.get_context('spawn')
# This is not a flag, it's a place to store the lock
MP_CONTEXT = _get_context()
def reset():
global STRICT_MODE, FULL_REFRESH, USE_CACHE, WARN_ERROR, TEST_NEW_PARSER, \
USE_EXPERIMENTAL_PARSER, WRITE_JSON, PARTIAL_PARSE, MP_CONTEXT, USE_COLORS, \
STORE_FAILURES, GREEDY
STRICT_MODE = False
FULL_REFRESH = False
USE_CACHE = True
WARN_ERROR = False
TEST_NEW_PARSER = False
USE_EXPERIMENTAL_PARSER = False
WRITE_JSON = True
PARTIAL_PARSE = False
MP_CONTEXT = _get_context()
USE_COLORS = True
STORE_FAILURES = False
GREEDY = False
def set_from_args(args):
global STRICT_MODE, FULL_REFRESH, USE_CACHE, WARN_ERROR, TEST_NEW_PARSER, \
USE_EXPERIMENTAL_PARSER, WRITE_JSON, PARTIAL_PARSE, MP_CONTEXT, USE_COLORS, \
STORE_FAILURES, GREEDY
USE_CACHE = getattr(args, 'use_cache', USE_CACHE)
def set_from_args(args, user_config):
global STRICT_MODE, FULL_REFRESH, WARN_ERROR, \
USE_EXPERIMENTAL_PARSER, WRITE_JSON, PARTIAL_PARSE, USE_COLORS, \
STORE_FAILURES, PROFILES_DIR, DEBUG, LOG_FORMAT, GREEDY, \
VERSION_CHECK, FAIL_FAST, SEND_ANONYMOUS_USAGE_STATS, PRINTER_WIDTH
STRICT_MODE = False # backwards compatibility
# cli args without user_config or env var option
FULL_REFRESH = getattr(args, 'full_refresh', FULL_REFRESH)
STRICT_MODE = getattr(args, 'strict', STRICT_MODE)
WARN_ERROR = (
STRICT_MODE or
getattr(args, 'warn_error', STRICT_MODE or WARN_ERROR)
)
TEST_NEW_PARSER = getattr(args, 'test_new_parser', TEST_NEW_PARSER)
USE_EXPERIMENTAL_PARSER = getattr(args, 'use_experimental_parser', USE_EXPERIMENTAL_PARSER)
WRITE_JSON = getattr(args, 'write_json', WRITE_JSON)
PARTIAL_PARSE = getattr(args, 'partial_parse', None)
MP_CONTEXT = _get_context()
# The use_colors attribute will always have a value because it is assigned
# None by default from the add_mutually_exclusive_group function
use_colors_override = getattr(args, 'use_colors')
if use_colors_override is not None:
USE_COLORS = use_colors_override
STORE_FAILURES = getattr(args, 'store_failures', STORE_FAILURES)
GREEDY = getattr(args, 'greedy', GREEDY)
# global cli flags with env var and user_config alternatives
USE_EXPERIMENTAL_PARSER = get_flag_value('USE_EXPERIMENTAL_PARSER', args, user_config)
WARN_ERROR = get_flag_value('WARN_ERROR', args, user_config)
WRITE_JSON = get_flag_value('WRITE_JSON', args, user_config)
PARTIAL_PARSE = get_flag_value('PARTIAL_PARSE', args, user_config)
USE_COLORS = get_flag_value('USE_COLORS', args, user_config)
PROFILES_DIR = get_flag_value('PROFILES_DIR', args, user_config)
DEBUG = get_flag_value('DEBUG', args, user_config)
LOG_FORMAT = get_flag_value('LOG_FORMAT', args, user_config)
VERSION_CHECK = get_flag_value('VERSION_CHECK', args, user_config)
FAIL_FAST = get_flag_value('FAIL_FAST', args, user_config)
SEND_ANONYMOUS_USAGE_STATS = get_flag_value('SEND_ANONYMOUS_USAGE_STATS', args, user_config)
PRINTER_WIDTH = get_flag_value('PRINTER_WIDTH', args, user_config)
# initialize everything to the defaults on module load
reset()
def get_flag_value(flag, args, user_config):
lc_flag = flag.lower()
flag_value = getattr(args, lc_flag, None)
if flag_value is None:
# Environment variables use pattern 'DBT_{flag name}'
env_flag = f"DBT_{flag}"
env_value = os.getenv(env_flag)
if env_value is not None and env_value != '':
env_value = env_value.lower()
# non Boolean values
if flag in ['LOG_FORMAT', 'PRINTER_WIDTH', 'PROFILES_DIR']:
flag_value = env_value
else:
flag_value = env_set_bool(env_value)
elif user_config is not None and getattr(user_config, lc_flag, None) is not None:
flag_value = getattr(user_config, lc_flag)
else:
flag_value = flag_defaults[flag]
if flag == 'PRINTER_WIDTH': # printer_width must be an int or it hangs
flag_value = int(flag_value)
if flag == 'PROFILES_DIR':
flag_value = os.path.abspath(flag_value)
return flag_value
def get_flag_dict():
return {
"use_experimental_parser": USE_EXPERIMENTAL_PARSER,
"warn_error": WARN_ERROR,
"write_json": WRITE_JSON,
"partial_parse": PARTIAL_PARSE,
"use_colors": USE_COLORS,
"profiles_dir": PROFILES_DIR,
"debug": DEBUG,
"log_format": LOG_FORMAT,
"version_check": VERSION_CHECK,
"fail_fast": FAIL_FAST,
"send_anonymous_usage_stats": SEND_ANONYMOUS_USAGE_STATS,
"printer_width": PRINTER_WIDTH,
}
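As a standalone illustration of the precedence the new get_flag_value follows (a sketch with hypothetical names, covering only boolean flags; the real code also special-cases LOG_FORMAT, PRINTER_WIDTH, and PROFILES_DIR): a flag is taken from the CLI arg if given, else from a non-empty DBT_<FLAG> environment variable, else from the user config block in profiles.yml, else from the built-in default.

import os

SKETCH_DEFAULTS = {"FAIL_FAST": False, "WARN_ERROR": False}

def resolve_bool_flag(flag: str, cli_args: dict, user_config: dict):
    # 1) CLI arg wins if it was set
    value = cli_args.get(flag.lower())
    if value is not None:
        return value
    # 2) then a non-empty DBT_<FLAG> environment variable
    env_value = os.getenv(f"DBT_{flag}")
    if env_value:
        return env_value.lower() in ("1", "t", "true", "y", "yes")
    # 3) then the user config block from profiles.yml
    if user_config.get(flag.lower()) is not None:
        return user_config[flag.lower()]
    # 4) finally the built-in default
    return SKETCH_DEFAULTS[flag]

# With nothing set, FAIL_FAST falls through to its default (False); exporting
# DBT_FAIL_FAST=1 or passing --fail-fast would flip it to True.
print(resolve_bool_flag("FAIL_FAST", cli_args={}, user_config={}))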

View File

@@ -405,27 +405,38 @@ class StateSelectorMethod(SelectorMethod):
return modified
def recursively_check_macros_modified(self, node):
# check if there are any changes in macros the first time
if self.modified_macros is None:
self.modified_macros = self._macros_modified()
def recursively_check_macros_modified(self, node, previous_macros):
# loop through all macros that this node depends on
for macro_uid in node.depends_on.macros:
# avoid infinite recursion if we've already seen this macro
if macro_uid in previous_macros:
continue
previous_macros.append(macro_uid)
# is this macro one of the modified macros?
if macro_uid in self.modified_macros:
return True
# if not, and this macro depends on other macros, keep looping
macro = self.manifest.macros[macro_uid]
if len(macro.depends_on.macros) > 0:
return self.recursively_check_macros_modified(macro)
macro_node = self.manifest.macros[macro_uid]
if len(macro_node.depends_on.macros) > 0:
return self.recursively_check_macros_modified(macro_node, previous_macros)
else:
return False
return False
def check_macros_modified(self, node):
# check if there are any changes in macros the first time
if self.modified_macros is None:
self.modified_macros = self._macros_modified()
# no macros have been modified, skip looping entirely
if not self.modified_macros:
return False
# recursively loop through upstream macros to see if any is modified
else:
previous_macros = []
return self.recursively_check_macros_modified(node, previous_macros)
def check_modified(self, old: Optional[SelectorTarget], new: SelectorTarget) -> bool:
different_contents = not new.same_contents(old) # type: ignore
upstream_macro_change = self.recursively_check_macros_modified(new)
upstream_macro_change = self.check_macros_modified(new)
return different_contents or upstream_macro_change
def check_modified_body(self, old: Optional[SelectorTarget], new: SelectorTarget) -> bool:
@@ -457,7 +468,7 @@ class StateSelectorMethod(SelectorMethod):
return False
def check_modified_macros(self, _, new: SelectorTarget) -> bool:
return self.recursively_check_macros_modified(new)
return self.check_macros_modified(new)
def check_new(self, old: Optional[SelectorTarget], new: SelectorTarget) -> bool:
return old is None

View File

@@ -51,7 +51,7 @@
{% endmacro %}
{% macro get_batch_size() -%}
{{ adapter.dispatch('get_batch_size', 'dbt')() }}
{{ return(adapter.dispatch('get_batch_size', 'dbt')()) }}
{%- endmacro %}
{% macro default__get_batch_size() %}

View File

@@ -33,7 +33,7 @@ from dbt.adapters.factory import reset_adapters, cleanup_connections
import dbt.tracking
from dbt.utils import ExitCodes
from dbt.config import PROFILES_DIR, read_user_config
from dbt.config.profile import DEFAULT_PROFILES_DIR, read_user_config
from dbt.exceptions import RuntimeException, InternalException
@@ -160,17 +160,6 @@ def handle(args):
return res
def initialize_config_values(parsed):
"""Given the parsed args, initialize the dbt tracking code.
It would be nice to re-use this profile later on instead of parsing it
twice, but dbt's initialization is not structured in a way that makes that
easy.
"""
cfg = read_user_config(parsed.profiles_dir)
cfg.set_values(parsed.profiles_dir)
@contextmanager
def adapter_management():
reset_adapters()
@@ -184,8 +173,15 @@ def handle_and_check(args):
with log_manager.applicationbound():
parsed = parse_args(args)
# we've parsed the args - we can now decide if we're debug or not
if parsed.debug:
# Set flags from args, user config, and env vars
user_config = read_user_config(flags.PROFILES_DIR) # This is read again later
flags.set_from_args(parsed, user_config)
dbt.tracking.initialize_from_flags()
# Set log_format from flags
parsed.cls.set_log_format()
# we've parsed the args and set the flags - we can now decide if we're debug or not
if flags.DEBUG:
log_manager.set_debug()
profiler_enabled = False
@@ -198,8 +194,6 @@ def handle_and_check(args):
outfile=parsed.record_timing_info
):
initialize_config_values(parsed)
with adapter_management():
task, res = run_from_args(parsed)
@@ -233,15 +227,17 @@ def track_run(task):
def run_from_args(parsed):
log_cache_events(getattr(parsed, 'log_cache_events', False))
flags.set_from_args(parsed)
parsed.cls.pre_init_hook(parsed)
# we can now use the logger for stdout
# set log_format in the logger
parsed.cls.pre_init_hook(parsed)
logger.info("Running with dbt{}".format(dbt.version.installed))
# this will convert DbtConfigErrors into RuntimeExceptions
# task could be any one of the task objects
task = parsed.cls.from_args(args=parsed)
logger.debug("running dbt with arguments {parsed}", parsed=str(parsed))
log_path = None
@@ -275,11 +271,12 @@ def _build_base_subparser():
base_subparser.add_argument(
'--profiles-dir',
default=PROFILES_DIR,
default=None,
dest='sub_profiles_dir', # Main cli arg precedes subcommand
type=str,
help='''
Which directory to look in for the profiles.yml file. Default = {}
'''.format(PROFILES_DIR)
'''.format(DEFAULT_PROFILES_DIR)
)
base_subparser.add_argument(
@@ -319,15 +316,6 @@ def _build_base_subparser():
help=argparse.SUPPRESS,
)
base_subparser.add_argument(
'--bypass-cache',
action='store_false',
dest='use_cache',
help='''
If set, bypass the adapter-level cache of database state
''',
)
base_subparser.set_defaults(defer=None, state=None)
return base_subparser
@@ -394,6 +382,7 @@ def _build_build_subparser(subparsers, base_subparser):
sub.add_argument(
'-x',
'--fail-fast',
dest='sub_fail_fast',
action='store_true',
help='''
Stop execution upon a first failure.
@@ -531,6 +520,7 @@ def _build_run_subparser(subparsers, base_subparser):
run_sub.add_argument(
'-x',
'--fail-fast',
dest='sub_fail_fast',
action='store_true',
help='''
Stop execution upon a first failure.
@@ -654,8 +644,9 @@ def _add_table_mutability_arguments(*subparsers):
def _add_version_check(sub):
sub.add_argument(
'--no-version-check',
dest='version_check',
dest='sub_version_check', # main cli arg precedes subcommands
action='store_false',
default=None,
help='''
If set, skip ensuring dbt's version matches the one specified in
the dbt_project.yml file ('require-dbt-version')
@@ -749,6 +740,7 @@ def _build_test_subparser(subparsers, base_subparser):
sub.add_argument(
'-x',
'--fail-fast',
dest='sub_fail_fast',
action='store_true',
help='''
Stop execution upon a first test failure.
@@ -972,6 +964,7 @@ def parse_args(args, cls=DBTArgumentParser):
'-d',
'--debug',
action='store_true',
default=None,
help='''
Display debug logging during dbt execution. Useful for debugging and
making bug reports.
@@ -981,13 +974,14 @@ def parse_args(args, cls=DBTArgumentParser):
p.add_argument(
'--log-format',
choices=['text', 'json', 'default'],
default='default',
default=None,
help='''Specify the log format, overriding the command's default.'''
)
p.add_argument(
'--no-write-json',
action='store_false',
default=None,
dest='write_json',
help='''
If set, skip writing the manifest and run_results.json files to disk
@@ -998,6 +992,7 @@ def parse_args(args, cls=DBTArgumentParser):
'--use-colors',
action='store_const',
const=True,
default=None,
dest='use_colors',
help='''
Colorize the output DBT prints to the terminal. Output is colorized by
@@ -1019,18 +1014,17 @@ def parse_args(args, cls=DBTArgumentParser):
)
p.add_argument(
'-S',
'--strict',
action='store_true',
'--printer-width',
dest='printer_width',
help='''
Run schema validations at runtime. This will surface bugs in dbt, but
may incur a performance penalty.
Sets the width of terminal output
'''
)
p.add_argument(
'--warn-error',
action='store_true',
default=None,
help='''
If dbt would normally warn, instead raise an exception. Examples
include --models that selects nothing, deprecations, configurations
@@ -1039,6 +1033,17 @@ def parse_args(args, cls=DBTArgumentParser):
'''
)
p.add_argument(
'--no-version-check',
dest='version_check',
action='store_false',
default=None,
help='''
If set, skip ensuring dbt's version matches the one specified in
the dbt_project.yml file ('require-dbt-version')
'''
)
p.add_optional_argument_inverse(
'--partial-parse',
enable_help='''
@@ -1061,26 +1066,48 @@ def parse_args(args, cls=DBTArgumentParser):
help=argparse.SUPPRESS,
)
# if set, extract all models and blocks with the jinja block extractor, and
# verify that we don't fail anywhere the actual jinja parser passes. The
# reverse (passing files that ends up failing jinja) is fine.
# TODO remove?
p.add_argument(
'--test-new-parser',
action='store_true',
help=argparse.SUPPRESS
)
# if set, will use the tree-sitter-jinja2 parser and extractor instead of
# jinja rendering when possible.
p.add_argument(
'--use-experimental-parser',
action='store_true',
default=None,
help='''
Uses an experimental parser to extract jinja values.
'''
)
p.add_argument(
'--profiles-dir',
default=None,
dest='profiles_dir',
type=str,
help='''
Which directory to look in for the profiles.yml file. Default = {}
'''.format(DEFAULT_PROFILES_DIR)
)
p.add_argument(
'--no-anonymous-usage-stats',
action='store_false',
default=None,
dest='send_anonymous_usage_stats',
help='''
Do not send anonymous usage stat to dbt Labs
'''
)
p.add_argument(
'-x',
'--fail-fast',
dest='fail_fast',
action='store_true',
default=None,
help='''
Stop execution upon a first failure.
'''
)
subs = p.add_subparsers(title="Available sub-commands")
base_subparser = _build_base_subparser()
@@ -1128,8 +1155,31 @@ def parse_args(args, cls=DBTArgumentParser):
parsed = p.parse_args(args)
# profiles_dir is set before subcommands and after, so normalize
if hasattr(parsed, 'sub_profiles_dir'):
if parsed.sub_profiles_dir is not None:
parsed.profiles_dir = parsed.sub_profiles_dir
delattr(parsed, 'sub_profiles_dir')
if hasattr(parsed, 'profiles_dir'):
parsed.profiles_dir = os.path.abspath(parsed.profiles_dir)
if parsed.profiles_dir is None:
parsed.profiles_dir = flags.PROFILES_DIR
else:
parsed.profiles_dir = os.path.abspath(parsed.profiles_dir)
# needs to be set before the other flags, because it's needed to
# read the profile that contains them
flags.PROFILES_DIR = parsed.profiles_dir
# version_check is set before subcommands and after, so normalize
if hasattr(parsed, 'sub_version_check'):
if parsed.sub_version_check is False:
parsed.version_check = False
delattr(parsed, 'sub_version_check')
# fail_fast is set before subcommands and after, so normalize
if hasattr(parsed, 'sub_fail_fast'):
if parsed.sub_fail_fast is True:
parsed.fail_fast = True
delattr(parsed, 'sub_fail_fast')
if getattr(parsed, 'project_dir', None) is not None:
expanded_user = os.path.expanduser(parsed.project_dir)

View File

@@ -64,7 +64,6 @@ from dbt.dataclass_schema import StrEnum, dbtClassMixin
PARTIAL_PARSE_FILE_NAME = 'partial_parse.msgpack'
PARSING_STATE = DbtProcessState('parsing')
DEFAULT_PARTIAL_PARSE = False
class ReparseReason(StrEnum):
@@ -265,7 +264,7 @@ class ManifestLoader:
self.manifest._parsing_info = ParsingInfo()
if skip_parsing:
logger.info("Partial parsing enabled, no changes found, skipping parsing")
logger.debug("Partial parsing enabled, no changes found, skipping parsing")
else:
# Load Macros
# We need to parse the macros first, so they're resolvable when
@@ -539,18 +538,8 @@ class ManifestLoader:
reparse_reason = ReparseReason.project_config_changed
return valid, reparse_reason
def _partial_parse_enabled(self):
# if the CLI is set, follow that
if flags.PARTIAL_PARSE is not None:
return flags.PARTIAL_PARSE
# if the config is set, follow that
elif self.root_project.config.partial_parse is not None:
return self.root_project.config.partial_parse
else:
return DEFAULT_PARTIAL_PARSE
def read_manifest_for_partial_parse(self) -> Optional[Manifest]:
if not self._partial_parse_enabled():
if not flags.PARTIAL_PARSE:
logger.debug('Partial parsing not enabled')
return None
path = os.path.join(self.root_project.target_path,
@@ -577,7 +566,7 @@ class ManifestLoader:
)
reparse_reason = ReparseReason.load_file_failure
else:
logger.info(f"Unable to do partial parsing because {path} not found")
logger.info("Partial parse save file not found. Starting full parse.")
reparse_reason = ReparseReason.file_not_found
# this event is only fired if a full reparse is needed
@@ -587,7 +576,7 @@ class ManifestLoader:
def build_perf_info(self):
mli = ManifestLoaderInfo(
is_partial_parse_enabled=self._partial_parse_enabled(),
is_partial_parse_enabled=flags.PARTIAL_PARSE,
is_static_analysis_enabled=flags.USE_EXPERIMENTAL_PARSER
)
for project in self.all_projects.values():
@@ -618,7 +607,7 @@ class ManifestLoader:
])
)
profile_path = os.path.join(config.args.profiles_dir, 'profiles.yml')
profile_path = os.path.join(flags.PROFILES_DIR, 'profiles.yml')
with open(profile_path) as fp:
profile_hash = FileHash.from_contents(fp.read())

View File

@@ -105,10 +105,10 @@ class PartialParsing:
}
if changed_or_deleted_macro_file:
self.macro_child_map = self.saved_manifest.build_macro_child_map()
logger.info(f"Partial parsing enabled: "
f"{len(deleted) + len(deleted_schema_files)} files deleted, "
f"{len(added)} files added, "
f"{len(changed) + len(changed_schema_files)} files changed.")
logger.debug(f"Partial parsing enabled: "
f"{len(deleted) + len(deleted_schema_files)} files deleted, "
f"{len(added)} files added, "
f"{len(changed) + len(changed_schema_files)} files changed.")
self.file_diff = file_diff
# generate the list of files that need parsing

View File

@@ -67,15 +67,16 @@ class BootstrapProcess(dbt.flags.MP_CONTEXT.Process):
keeps everything in memory.
"""
# reset flags
dbt.flags.set_from_args(self.task.args)
user_config = None
if self.task.config is not None:
user_config = self.task.config.user_config
dbt.flags.set_from_args(self.task.args, user_config)
dbt.tracking.initialize_from_flags()
# reload the active plugin
load_plugin(self.task.config.credentials.type)
# register it
register_adapter(self.task.config)
# reset tracking, etc
self.task.config.config.set_values(self.task.args.profiles_dir)
def task_exec(self) -> None:
"""task_exec runs first inside the child process"""
if type(self.task) != RemoteListTask:

View File

@@ -7,6 +7,7 @@ from typing import Type, Union, Dict, Any, Optional
from dbt import tracking
from dbt import ui
from dbt import flags
from dbt.contracts.graph.manifest import Manifest
from dbt.contracts.results import (
NodeStatus, RunResult, collect_timing_info, RunStatus
@@ -21,7 +22,7 @@ from .printer import print_skip_caused_by_error, print_skip_line
from dbt.adapters.factory import register_adapter
from dbt.config import RuntimeConfig, Project
from dbt.config.profile import read_profile, PROFILES_DIR
from dbt.config.profile import read_profile
import dbt.exceptions
@@ -34,7 +35,7 @@ class NoneConfig:
def read_profiles(profiles_dir=None):
"""This is only used for some error handling"""
if profiles_dir is None:
profiles_dir = PROFILES_DIR
profiles_dir = flags.PROFILES_DIR
raw_profiles = read_profile(profiles_dir)
@@ -69,6 +70,13 @@ class BaseTask(metaclass=ABCMeta):
else:
log_manager.format_text()
@classmethod
def set_log_format(cls):
if flags.LOG_FORMAT == 'json':
log_manager.format_json()
else:
log_manager.format_text()
@classmethod
def from_args(cls, args):
try:
@@ -85,7 +93,7 @@ class BaseTask(metaclass=ABCMeta):
logger.error("Encountered an error while reading profiles:")
logger.error(" ERROR {}".format(str(exc)))
all_profiles = read_profiles(args.profiles_dir).keys()
all_profiles = read_profiles(flags.PROFILES_DIR).keys()
if len(all_profiles) > 0:
logger.info("Defined profiles:")

View File

@@ -5,10 +5,11 @@ import sys
from typing import Optional, Dict, Any, List
from dbt.logger import GLOBAL_LOGGER as logger
from dbt import flags
import dbt.clients.system
import dbt.exceptions
from dbt.adapters.factory import get_adapter, register_adapter
from dbt.config import Project, Profile, PROFILES_DIR
from dbt.config import Project, Profile
from dbt.config.renderer import DbtProjectYamlRenderer, ProfileRenderer
from dbt.config.utils import parse_cli_vars
from dbt.context.base import generate_base_context
@@ -69,7 +70,7 @@ class QueryCommentedProfile(Profile):
class DebugTask(BaseTask):
def __init__(self, args, config):
super().__init__(args, config)
self.profiles_dir = getattr(self.args, 'profiles_dir', PROFILES_DIR)
self.profiles_dir = flags.PROFILES_DIR
self.profile_path = os.path.join(self.profiles_dir, 'profiles.yml')
try:
self.project_dir = get_nearest_project_dir(self.args)
@@ -156,7 +157,7 @@ class DebugTask(BaseTask):
self.project = Project.from_project_root(
self.project_dir,
renderer,
verify_version=getattr(self.args, 'version_check', False),
verify_version=flags.VERSION_CHECK,
)
except dbt.exceptions.DbtConfigError as exc:
self.project_fail_details = str(exc)
@@ -195,7 +196,7 @@ class DebugTask(BaseTask):
try:
partial = Project.partial_load(
os.path.dirname(self.project_path),
verify_version=getattr(self.args, 'version_check', False),
verify_version=bool(flags.VERSION_CHECK),
)
renderer = DbtProjectYamlRenderer(
generate_base_context(self.cli_vars)

View File

@@ -3,6 +3,7 @@ import shutil
import dbt.config
import dbt.clients.system
from dbt import flags
from dbt.version import _get_adapter_plugin_names
from dbt.adapters.factory import load_plugin, get_include_paths
@@ -93,7 +94,7 @@ class InitTask(BaseTask):
except StopIteration:
logger.debug("No adapters installed, skipping")
profiles_dir = dbt.config.PROFILES_DIR
profiles_dir = flags.PROFILES_DIR
profiles_file = os.path.join(profiles_dir, 'profiles.yml')
self.create_profiles_dir(profiles_dir)

View File

@@ -30,8 +30,8 @@ def print_fancy_output_line(
progress=progress,
message=msg)
truncate_width = ui.PRINTER_WIDTH - 3
justified = prefix.ljust(ui.PRINTER_WIDTH, ".")
truncate_width = ui.printer_width() - 3
justified = prefix.ljust(ui.printer_width(), ".")
if truncate and len(justified) > truncate_width:
justified = justified[:truncate_width] + '...'

View File

@@ -16,7 +16,6 @@ from .printer import (
get_counts,
)
from dbt import deprecations
from dbt import tracking
from dbt import utils
from dbt.adapters.base import BaseRelation
@@ -209,11 +208,12 @@ class ModelRunner(CompileRunner):
self, result: Any, model
) -> List[BaseRelation]:
if isinstance(result, str):
deprecations.warn('materialization-return',
materialization=model.get_materialization())
return [
self.adapter.Relation.create_from(self.config, model)
]
msg = (
'The materialization ("{}") did not explicitly return a '
'list of relations to add to the cache.'
.format(str(model.get_materialization()))
)
raise CompilationException(msg, node=model)
if isinstance(result, dict):
return _validate_materialization_relations_dict(result, model)

View File

@@ -214,7 +214,7 @@ class GraphRunnableTask(ManifestTask):
logger.debug('Finished running node {}'.format(
runner.node.unique_id))
fail_fast = getattr(self.config.args, 'fail_fast', False)
fail_fast = flags.FAIL_FAST
if result.status in (NodeStatus.Error, NodeStatus.Fail) and fail_fast:
self._raise_next_tick = FailFastException(
@@ -281,7 +281,7 @@ class GraphRunnableTask(ManifestTask):
self._submit(pool, args, callback)
# block on completion
if getattr(self.config.args, 'fail_fast', False):
if flags.FAIL_FAST:
# checkout for an errors after task completion in case of
# fast failure
while self.job_queue.wait_until_something_was_done():
@@ -571,7 +571,11 @@ class GraphRunnableTask(ManifestTask):
)
def args_to_dict(self):
var_args = vars(self.args)
var_args = vars(self.args).copy()
# update the args with the flags, which could also come from environment
# variables or user_config
flag_dict = flags.get_flag_dict()
var_args.update(flag_dict)
dict_args = {}
# remove args keys that clutter up the dictionary
for key in var_args:
@@ -579,10 +583,11 @@ class GraphRunnableTask(ManifestTask):
continue
if var_args[key] is None:
continue
# TODO: add more default_false_keys
default_false_keys = (
'debug', 'full_refresh', 'fail_fast', 'warn_error',
'single_threaded', 'test_new_parser', 'log_cache_events',
'strict'
'single_threaded', 'log_cache_events',
'use_experimental_parser',
)
if key in default_false_keys and var_args[key] is False:
continue
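A small, self-contained sketch of the merge performed in args_to_dict above, assuming get_flag_dict() yields the resolved flag values (the names here are illustrative):

import argparse

DEFAULT_FALSE_KEYS = (
    'debug', 'full_refresh', 'fail_fast', 'warn_error',
    'single_threaded', 'log_cache_events', 'use_experimental_parser',
)

def args_to_dict(args: argparse.Namespace, flag_dict: dict) -> dict:
    # Copy so the live argparse namespace is not mutated, then let resolved
    # flags (CLI, env vars, user_config) take precedence over the raw args.
    var_args = vars(args).copy()
    var_args.update(flag_dict)
    return {
        key: value for key, value in var_args.items()
        if value is not None
        and not (key in DEFAULT_FALSE_KEYS and value is False)
    }

print(args_to_dict(argparse.Namespace(debug=False, threads=4), {'fail_fast': True}))
# {'threads': 4, 'fail_fast': True}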

View File

@@ -5,6 +5,7 @@ from dbt.clients.yaml_helper import ( # noqa:F401
)
from dbt.logger import GLOBAL_LOGGER as logger
from dbt import version as dbt_version
from dbt import flags
from snowplow_tracker import Subject, Tracker, Emitter, logger as sp_logger
from snowplow_tracker import SelfDescribingJson
from datetime import datetime
@@ -184,7 +185,6 @@ def get_invocation_context(user, config, args):
"command": args.which,
"options": None,
"version": str(dbt_version.installed),
"run_type": get_run_type(args),
"adapter_type": adapter_type,
"adapter_unique_id": adapter_unique_id,
@@ -509,3 +509,11 @@ class InvocationProcessor(logbook.Processor):
"run_started_at": active_user.run_started_at.isoformat(),
"invocation_id": active_user.invocation_id,
})
def initialize_from_flags():
# Setting these used to be in UserConfig, but had to be moved here
if flags.SEND_ANONYMOUS_USAGE_STATS:
initialize_tracking(flags.PROFILES_DIR)
else:
do_not_track()
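A self-contained sketch of the startup ordering this enables: flags are resolved first, then tracking is switched on or off from them rather than from UserConfig fields (everything below is a stand-in, not dbt's actual module wiring):

from types import SimpleNamespace

# Stand-ins for dbt.flags and the tracking state.
flags = SimpleNamespace(SEND_ANONYMOUS_USAGE_STATS=False, PROFILES_DIR='~/.dbt')
tracking_enabled = None

def initialize_tracking(profiles_dir):
    global tracking_enabled
    tracking_enabled = True

def do_not_track():
    global tracking_enabled
    tracking_enabled = False

def initialize_from_flags():
    # Mirrors the new helper: the opt-out decision now lives on flags.
    if flags.SEND_ANONYMOUS_USAGE_STATS:
        initialize_tracking(flags.PROFILES_DIR)
    else:
        do_not_track()

initialize_from_flags()
print(tracking_enabled)  # False with the defaults chosen above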

View File

@@ -17,17 +17,6 @@ COLOR_FG_GREEN = COLORS['green']
COLOR_FG_YELLOW = COLORS['yellow']
COLOR_RESET_ALL = COLORS['reset_all']
PRINTER_WIDTH = 80
def use_colors(use_colors_val=True):
flags.USE_COLORS = use_colors_val
def printer_width(printer_width):
global PRINTER_WIDTH
PRINTER_WIDTH = printer_width
def color(text: str, color_code: str):
if flags.USE_COLORS:
@@ -36,6 +25,12 @@ def color(text: str, color_code: str):
return text
def printer_width():
if flags.PRINTER_WIDTH:
return flags.PRINTER_WIDTH
return 80
def green(text: str):
return color(text, COLOR_FG_GREEN)
@@ -56,7 +51,7 @@ def line_wrap_message(
newlines to newlines and avoid calling textwrap.fill() on them (like
markdown)
'''
width = PRINTER_WIDTH - subtract
width = printer_width() - subtract
if dedent:
msg = textwrap.dedent(msg)

View File

@@ -96,5 +96,5 @@ def _get_dbt_plugins_info():
yield plugin_name, mod.version
__version__ = '0.21.0b2'
__version__ = '0.21.0rc1'
installed = get_installed_version()

View File

@@ -284,12 +284,12 @@ def parse_args(argv=None):
parser.add_argument('adapter')
parser.add_argument('--title-case', '-t', default=None)
parser.add_argument('--dependency', action='append')
parser.add_argument('--dbt-core-version', default='0.21.0b2')
parser.add_argument('--dbt-core-version', default='0.21.0rc1')
parser.add_argument('--email')
parser.add_argument('--author')
parser.add_argument('--url')
parser.add_argument('--sql', action='store_true')
parser.add_argument('--package-version', default='0.21.0b2')
parser.add_argument('--package-version', default='0.21.0rc1')
parser.add_argument('--project-version', default='1.0')
parser.add_argument(
'--no-dependency', action='store_false', dest='set_dependency'

View File

@@ -24,7 +24,7 @@ def read(fname):
package_name = "dbt-core"
package_version = "0.21.0b2"
package_version = "0.21.0rc1"
description = """dbt (data build tool) is a command line tool that helps \
analysts and engineers transform data in their warehouse more effectively"""

View File

@@ -0,0 +1,75 @@
agate==1.6.1
asn1crypto==1.4.0
attrs==21.2.0
azure-common==1.1.27
azure-core==1.18.0
azure-storage-blob==12.8.1
Babel==2.9.1
boto3==1.18.44
botocore==1.21.44
cachetools==4.2.2
certifi==2021.5.30
cffi==1.14.6
chardet==4.0.0
charset-normalizer==2.0.6
colorama==0.4.4
cryptography==3.4.8
google-api-core==1.31.2
google-auth==1.35.0
google-cloud-bigquery==2.26.0
google-cloud-core==1.7.2
google-crc32c==1.1.2
google-resumable-media==2.0.2
googleapis-common-protos==1.53.0
grpcio==1.40.0
hologram==0.0.14
idna==3.2
importlib-metadata==4.8.1
isodate==0.6.0
jeepney==0.7.1
Jinja2==2.11.3
jmespath==0.10.0
json-rpc==1.13.0
jsonschema==3.1.1
keyring==21.8.0
leather==0.3.3
Logbook==1.5.3
MarkupSafe==2.0.1
mashumaro==2.5
minimal-snowplow-tracker==0.0.2
msgpack==1.0.2
msrest==0.6.21
networkx==2.6.3
oauthlib==3.1.1
oscrypto==1.2.1
packaging==20.9
parsedatetime==2.6
proto-plus==1.19.0
protobuf==3.18.0
psycopg2-binary==2.9.1
pyasn1==0.4.8
pyasn1-modules==0.2.8
pycparser==2.20
pycryptodomex==3.10.1
PyJWT==2.1.0
pyOpenSSL==20.0.1
pyparsing==2.4.7
pyrsistent==0.18.0
python-dateutil==2.8.2
python-slugify==5.0.2
pytimeparse==1.1.8
pytz==2021.1
PyYAML==5.4.1
requests==2.26.0
requests-oauthlib==1.3.0
rsa==4.7.2
s3transfer==0.5.0
SecretStorage==3.3.1
six==1.16.0
snowflake-connector-python==2.5.1
sqlparse==0.4.2
text-unidecode==1.3
typing-extensions==3.10.0.2
urllib3==1.26.6
Werkzeug==2.0.1
zipp==3.5.0

View File

@@ -1,5 +1,3 @@
-e ./core
-e ./plugins/postgres
-e ./plugins/redshift
-e ./plugins/snowflake
-e ./plugins/bigquery

View File

@@ -1 +1 @@
version = '0.21.0b2'
version = '0.21.0rc1'

View File

@@ -4,7 +4,6 @@ from dbt.dataclass_schema import dbtClassMixin, ValidationError
import dbt.deprecations
import dbt.exceptions
import dbt.flags as flags
import dbt.clients.gcloud
import dbt.clients.agate_helper
@@ -15,7 +14,6 @@ from dbt.adapters.base import (
from dbt.adapters.bigquery.relation import BigQueryRelation
from dbt.adapters.bigquery import BigQueryColumn
from dbt.adapters.bigquery import BigQueryConnectionManager
from dbt.contracts.connection import Connection
from dbt.contracts.graph.manifest import Manifest
from dbt.logger import GLOBAL_LOGGER as logger, print_timestamped_line
from dbt.utils import filter_null_values
@@ -515,19 +513,6 @@ class BigQueryAdapter(BaseAdapter):
if sql_override is None:
sql_override = model.get('compiled_sql')
if flags.STRICT_MODE:
connection = self.connections.get_thread_connection()
if not isinstance(connection, Connection):
dbt.exceptions.raise_compiler_error(
f'Got {connection} - not a Connection!'
)
model_uid = model.get('unique_id')
if connection.name != model_uid:
raise dbt.exceptions.InternalException(
f'Connection had name "{connection.name}", expected model '
f'unique id of "{model_uid}"'
)
if materialization == 'view':
res = self._materialize_as_view(model)
elif materialization == 'table':

View File

@@ -20,7 +20,7 @@ except ImportError:
package_name = "dbt-bigquery"
package_version = "0.21.0b2"
package_version = "0.21.0rc1"
description = """The bigquery adapter plugin for dbt (data build tool)"""
this_directory = os.path.abspath(os.path.dirname(__file__))

View File

@@ -1 +1 @@
version = '0.21.0b2'
version = '0.21.0rc1'

View File

@@ -41,7 +41,7 @@ def _dbt_psycopg2_name():
package_name = "dbt-postgres"
package_version = "0.21.0b2"
package_version = "0.21.0rc1"
description = """The postgres adapter plugin for dbt (data build tool)"""
this_directory = os.path.abspath(os.path.dirname(__file__))

View File

@@ -1,32 +0,0 @@
<p align="center">
<img src="https://raw.githubusercontent.com/dbt-labs/dbt/6c6649f9129d5d108aa3b0526f634cd8f3a9d1ed/etc/dbt-logo-full.svg" alt="dbt logo" width="500"/>
</p>
**[dbt](https://www.getdbt.com/)** (data build tool) enables data analysts and engineers to transform their data using the same practices that software engineers use to build applications.
dbt is the T in ELT. Organize, cleanse, denormalize, filter, rename, and pre-aggregate the raw data in your warehouse so that it's ready for analysis.
## dbt-redshift
The `dbt-redshift` package contains all of the code required to make dbt operate on a Redshift database. For
more information on using dbt with Redshift, consult [the docs](https://docs.getdbt.com/docs/profile-redshift).
## Find out more
- Check out the [Introduction to dbt](https://docs.getdbt.com/docs/introduction/).
- Read the [dbt Viewpoint](https://docs.getdbt.com/docs/about/viewpoint/).
## Join thousands of analysts in the dbt community
- Join the [chat](http://community.getdbt.com/) on Slack.
- Find community posts on [dbt Discourse](https://discourse.getdbt.com).
## Reporting bugs and contributing code
- Want to report a bug or request a feature? Let us know on [Slack](http://community.getdbt.com/), or open [an issue](https://github.com/dbt-labs/dbt/issues/new).
- Want to help us build dbt? Check out the [Contributing Getting Started Guide](https://github.com/dbt-labs/dbt/blob/HEAD/CONTRIBUTING.md)
## Code of Conduct
Everyone interacting in the dbt project's codebases, issue trackers, chat rooms, and mailing lists is expected to follow the [dbt Code of Conduct](https://community.getdbt.com/code-of-conduct).

View File

@@ -1,15 +0,0 @@
from dbt.adapters.redshift.connections import RedshiftConnectionManager # noqa
from dbt.adapters.redshift.connections import RedshiftCredentials
from dbt.adapters.redshift.relation import RedshiftColumn # noqa
from dbt.adapters.redshift.relation import RedshiftRelation # noqa: F401
from dbt.adapters.redshift.impl import RedshiftAdapter
from dbt.adapters.base import AdapterPlugin
from dbt.include import redshift
Plugin = AdapterPlugin(
adapter=RedshiftAdapter,
credentials=RedshiftCredentials,
include_path=redshift.PACKAGE_PATH,
dependencies=['postgres'])

View File

@@ -1 +0,0 @@
version = '0.21.0b2'

View File

@@ -1,174 +0,0 @@
from multiprocessing import Lock
from contextlib import contextmanager
from typing import NewType
from dbt.adapters.postgres import PostgresConnectionManager
from dbt.adapters.postgres import PostgresCredentials
from dbt.logger import GLOBAL_LOGGER as logger # noqa
import dbt.exceptions
import dbt.flags
import boto3
from dbt.dataclass_schema import FieldEncoder, dbtClassMixin, StrEnum
from dataclasses import dataclass, field
from typing import Optional, List
drop_lock: Lock = dbt.flags.MP_CONTEXT.Lock()
IAMDuration = NewType('IAMDuration', int)
class IAMDurationEncoder(FieldEncoder):
@property
def json_schema(self):
return {'type': 'integer', 'minimum': 0, 'maximum': 65535}
dbtClassMixin.register_field_encoders({IAMDuration: IAMDurationEncoder()})
class RedshiftConnectionMethod(StrEnum):
DATABASE = 'database'
IAM = 'iam'
@dataclass
class RedshiftCredentials(PostgresCredentials):
method: RedshiftConnectionMethod = RedshiftConnectionMethod.DATABASE
password: Optional[str] = None
cluster_id: Optional[str] = field(
default=None,
metadata={'description': 'If using IAM auth, the name of the cluster'},
)
iam_profile: Optional[str] = None
iam_duration_seconds: int = 900
search_path: Optional[str] = None
keepalives_idle: int = 240
autocreate: bool = False
db_groups: List[str] = field(default_factory=list)
ra3_node: Optional[bool] = False
@property
def type(self):
return 'redshift'
def _connection_keys(self):
keys = super()._connection_keys()
return keys + (
'method',
'cluster_id',
'iam_profile',
'iam_duration_seconds'
)
class RedshiftConnectionManager(PostgresConnectionManager):
TYPE = 'redshift'
@contextmanager
def fresh_transaction(self, name=None):
"""On entrance to this context manager, hold an exclusive lock and
create a fresh transaction for redshift, then commit and begin a new
one before releasing the lock on exit.
See drop_relation in RedshiftAdapter for more information.
:param Optional[str] name: The name of the connection to use, or None
to use the default.
"""
with drop_lock:
connection = self.get_thread_connection()
if connection.transaction_open:
self.commit()
self.begin()
yield
self.commit()
self.begin()
@classmethod
def fetch_cluster_credentials(cls, db_user, db_name, cluster_id,
iam_profile, duration_s, autocreate,
db_groups):
"""Fetches temporary login credentials from AWS. The specified user
must already exist in the database, or else an error will occur"""
if iam_profile is None:
session = boto3.Session()
boto_client = session.client("redshift")
else:
logger.debug("Connecting to Redshift using 'IAM' " +
f"with profile {iam_profile}")
boto_session = boto3.Session(
profile_name=iam_profile
)
boto_client = boto_session.client('redshift')
try:
return boto_client.get_cluster_credentials(
DbUser=db_user,
DbName=db_name,
ClusterIdentifier=cluster_id,
DurationSeconds=duration_s,
AutoCreate=autocreate,
DbGroups=db_groups,)
except boto_client.exceptions.ClientError as e:
raise dbt.exceptions.FailedToConnectException(
"Unable to get temporary Redshift cluster credentials: {}"
.format(e))
@classmethod
def get_tmp_iam_cluster_credentials(cls, credentials):
cluster_id = credentials.cluster_id
# default via:
# boto3.readthedocs.io/en/latest/reference/services/redshift.html
iam_duration_s = credentials.iam_duration_seconds
if not cluster_id:
raise dbt.exceptions.FailedToConnectException(
"'cluster_id' must be provided in profile if IAM "
"authentication method selected")
cluster_creds = cls.fetch_cluster_credentials(
credentials.user,
credentials.database,
credentials.cluster_id,
credentials.iam_profile,
iam_duration_s,
credentials.autocreate,
credentials.db_groups,
)
# replace username and password with temporary redshift credentials
return credentials.replace(user=cluster_creds.get('DbUser'),
password=cluster_creds.get('DbPassword'))
@classmethod
def get_credentials(cls, credentials):
method = credentials.method
# Support missing 'method' for backwards compatibility
if method == 'database' or method is None:
logger.debug("Connecting to Redshift using 'database' credentials")
# this requirement is really annoying to encode into json schema,
# so validate it here
if credentials.password is None:
raise dbt.exceptions.FailedToConnectException(
"'password' field is required for 'database' credentials"
)
return credentials
elif method == 'iam':
logger.debug("Connecting to Redshift using 'IAM' credentials")
return cls.get_tmp_iam_cluster_credentials(credentials)
else:
raise dbt.exceptions.FailedToConnectException(
"Invalid 'method' in profile: '{}'".format(method))

View File
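For illustration, a standalone sketch of the IAM branch in the connection manager above, calling boto3 directly; it assumes AWS credentials with redshift:GetClusterCredentials permission and is not part of the adapter itself:

import boto3

def temporary_redshift_login(cluster_id, db_user, db_name,
                             duration_s=900, iam_profile=None):
    # Same API call the adapter wraps: exchange IAM credentials for a
    # short-lived database user/password pair.
    session = (boto3.Session(profile_name=iam_profile)
               if iam_profile else boto3.Session())
    client = session.client('redshift')
    creds = client.get_cluster_credentials(
        DbUser=db_user,
        DbName=db_name,
        ClusterIdentifier=cluster_id,
        DurationSeconds=duration_s,
        AutoCreate=False,
        DbGroups=[],
    )
    return creds['DbUser'], creds['DbPassword']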

@@ -1,88 +0,0 @@
from dataclasses import dataclass
from typing import Optional
from dbt.adapters.base.impl import AdapterConfig
from dbt.adapters.sql import SQLAdapter
from dbt.adapters.base.meta import available
from dbt.adapters.postgres import PostgresAdapter
from dbt.adapters.redshift import RedshiftConnectionManager
from dbt.adapters.redshift import RedshiftColumn
from dbt.adapters.redshift import RedshiftRelation
from dbt.logger import GLOBAL_LOGGER as logger # noqa
import dbt.exceptions
@dataclass
class RedshiftConfig(AdapterConfig):
sort_type: Optional[str] = None
dist: Optional[str] = None
sort: Optional[str] = None
bind: Optional[bool] = None
class RedshiftAdapter(PostgresAdapter, SQLAdapter):
Relation = RedshiftRelation
ConnectionManager = RedshiftConnectionManager
Column = RedshiftColumn
AdapterSpecificConfigs = RedshiftConfig
@classmethod
def date_function(cls):
return 'getdate()'
def drop_relation(self, relation):
"""
In Redshift, DROP TABLE ... CASCADE should not be used
inside a transaction. Redshift doesn't prevent the CASCADE
part from conflicting with concurrent transactions. If we do
attempt to drop two tables with CASCADE at once, we'll often
get the dreaded:
table was dropped by a concurrent transaction
So, we need to lock around calls to the underlying
drop_relation() function.
https://docs.aws.amazon.com/redshift/latest/dg/r_DROP_TABLE.html
"""
with self.connections.fresh_transaction():
return super().drop_relation(relation)
@classmethod
def convert_text_type(cls, agate_table, col_idx):
column = agate_table.columns[col_idx]
# `lens` must be a list, so this can't be a generator expression,
# because max() raises an exception if its argument has no members.
lens = [len(d.encode("utf-8")) for d in column.values_without_nulls()]
max_len = max(lens) if lens else 64
return "varchar({})".format(max_len)
@classmethod
def convert_time_type(cls, agate_table, col_idx):
return "varchar(24)"
@available
def verify_database(self, database):
if database.startswith('"'):
database = database.strip('"')
expected = self.config.credentials.database
ra3_node = self.config.credentials.ra3_node
if database.lower() != expected.lower() and not ra3_node:
raise dbt.exceptions.NotImplementedException(
'Cross-db references allowed only in RA3.* node. ({} vs {})'
.format(database, expected)
)
# return an empty string on success so macros can call this
return ''
def _get_catalog_schemas(self, manifest):
# redshift (besides ra3) only allows one database (the main one)
schemas = super(SQLAdapter, self)._get_catalog_schemas(manifest)
try:
return schemas.flatten(allow_multiple_databases=self.config.credentials.ra3_node)
except dbt.exceptions.RuntimeException as exc:
dbt.exceptions.raise_compiler_error(
'Cross-db references allowed only in {} RA3.* node. Got {}'
.format(self.type(), exc.msg)
)

View File

@@ -1,15 +0,0 @@
from dbt.adapters.base import Column
from dataclasses import dataclass
from dbt.adapters.postgres.relation import PostgresRelation
@dataclass(frozen=True, eq=False, repr=False)
class RedshiftRelation(PostgresRelation):
# Override the method in the Postgres Relation
# because Redshift allows longer names
def relation_max_name_length(self):
return 127
class RedshiftColumn(Column):
pass # redshift does not inherit from postgres here

View File

@@ -1,3 +0,0 @@
import os
PACKAGE_PATH = os.path.dirname(__file__)

View File

@@ -1,5 +0,0 @@
config-version: 2
name: dbt_redshift
version: 1.0
macro-paths: ["macros"]

View File

@@ -1,283 +0,0 @@
{% macro dist(dist) %}
{%- if dist is not none -%}
{%- set dist = dist.strip().lower() -%}
{%- if dist in ['all', 'even'] -%}
diststyle {{ dist }}
{%- elif dist == "auto" -%}
{%- else -%}
diststyle key distkey ({{ dist }})
{%- endif -%}
{%- endif -%}
{%- endmacro -%}
{% macro sort(sort_type, sort) %}
{%- if sort is not none %}
{{ sort_type | default('compound', boolean=true) }} sortkey(
{%- if sort is string -%}
{%- set sort = [sort] -%}
{%- endif -%}
{%- for item in sort -%}
{{ item }}
{%- if not loop.last -%},{%- endif -%}
{%- endfor -%}
)
{%- endif %}
{%- endmacro -%}
{% macro redshift__create_table_as(temporary, relation, sql) -%}
{%- set _dist = config.get('dist') -%}
{%- set _sort_type = config.get(
'sort_type',
validator=validation.any['compound', 'interleaved']) -%}
{%- set _sort = config.get(
'sort',
validator=validation.any[list, basestring]) -%}
{%- set sql_header = config.get('sql_header', none) -%}
{{ sql_header if sql_header is not none }}
create {% if temporary -%}temporary{%- endif %} table
{{ relation.include(database=(not temporary), schema=(not temporary)) }}
{{ dist(_dist) }}
{{ sort(_sort_type, _sort) }}
as (
{{ sql }}
);
{%- endmacro %}
{% macro redshift__create_view_as(relation, sql) -%}
{%- set binding = config.get('bind', default=True) -%}
{% set bind_qualifier = '' if binding else 'with no schema binding' %}
{%- set sql_header = config.get('sql_header', none) -%}
{{ sql_header if sql_header is not none }}
create view {{ relation }} as (
{{ sql }}
) {{ bind_qualifier }};
{% endmacro %}
{% macro redshift__create_schema(relation) -%}
{{ postgres__create_schema(relation) }}
{% endmacro %}
{% macro redshift__drop_schema(relation) -%}
{{ postgres__drop_schema(relation) }}
{% endmacro %}
{% macro redshift__get_columns_in_relation(relation) -%}
{% call statement('get_columns_in_relation', fetch_result=True) %}
with bound_views as (
select
ordinal_position,
table_schema,
column_name,
data_type,
character_maximum_length,
numeric_precision,
numeric_scale
from information_schema."columns"
where table_name = '{{ relation.identifier }}'
),
unbound_views as (
select
ordinal_position,
view_schema,
col_name,
case
when col_type ilike 'character varying%' then
'character varying'
when col_type ilike 'numeric%' then 'numeric'
else col_type
end as col_type,
case
when col_type like 'character%'
then nullif(REGEXP_SUBSTR(col_type, '[0-9]+'), '')::int
else null
end as character_maximum_length,
case
when col_type like 'numeric%'
then nullif(
SPLIT_PART(REGEXP_SUBSTR(col_type, '[0-9,]+'), ',', 1),
'')::int
else null
end as numeric_precision,
case
when col_type like 'numeric%'
then nullif(
SPLIT_PART(REGEXP_SUBSTR(col_type, '[0-9,]+'), ',', 2),
'')::int
else null
end as numeric_scale
from pg_get_late_binding_view_cols()
cols(view_schema name, view_name name, col_name name,
col_type varchar, ordinal_position int)
where view_name = '{{ relation.identifier }}'
),
external_views as (
select
columnnum,
schemaname,
columnname,
case
when external_type ilike 'character varying%' or external_type ilike 'varchar%'
then 'character varying'
when external_type ilike 'numeric%' then 'numeric'
else external_type
end as external_type,
case
when external_type like 'character%' or external_type like 'varchar%'
then nullif(
REGEXP_SUBSTR(external_type, '[0-9]+'),
'')::int
else null
end as character_maximum_length,
case
when external_type like 'numeric%'
then nullif(
SPLIT_PART(REGEXP_SUBSTR(external_type, '[0-9,]+'), ',', 1),
'')::int
else null
end as numeric_precision,
case
when external_type like 'numeric%'
then nullif(
SPLIT_PART(REGEXP_SUBSTR(external_type, '[0-9,]+'), ',', 2),
'')::int
else null
end as numeric_scale
from
pg_catalog.svv_external_columns
where
schemaname = '{{ relation.schema }}'
and tablename = '{{ relation.identifier }}'
),
unioned as (
select * from bound_views
union all
select * from unbound_views
union all
select * from external_views
)
select
column_name,
data_type,
character_maximum_length,
numeric_precision,
numeric_scale
from unioned
{% if relation.schema %}
where table_schema = '{{ relation.schema }}'
{% endif %}
order by ordinal_position
{% endcall %}
{% set table = load_result('get_columns_in_relation').table %}
{{ return(sql_convert_columns_in_relation(table)) }}
{% endmacro %}
{% macro redshift__list_relations_without_caching(schema_relation) %}
{{ return(postgres__list_relations_without_caching(schema_relation)) }}
{% endmacro %}
{% macro redshift__information_schema_name(database) -%}
{{ return(postgres__information_schema_name(database)) }}
{%- endmacro %}
{% macro redshift__list_schemas(database) -%}
{{ return(postgres__list_schemas(database)) }}
{%- endmacro %}
{% macro redshift__check_schema_exists(information_schema, schema) -%}
{{ return(postgres__check_schema_exists(information_schema, schema)) }}
{%- endmacro %}
{% macro redshift__current_timestamp() -%}
getdate()
{%- endmacro %}
{% macro redshift__snapshot_get_time() -%}
{{ current_timestamp() }}::timestamp
{%- endmacro %}
{% macro redshift__snapshot_string_as_time(timestamp) -%}
{%- set result = "'" ~ timestamp ~ "'::timestamp" -%}
{{ return(result) }}
{%- endmacro %}
{% macro redshift__make_temp_relation(base_relation, suffix) %}
{% do return(postgres__make_temp_relation(base_relation, suffix)) %}
{% endmacro %}
{% macro redshift__persist_docs(relation, model, for_relation, for_columns) -%}
{% if for_relation and config.persist_relation_docs() and model.description %}
{% do run_query(alter_relation_comment(relation, model.description)) %}
{% endif %}
{# Override: do not set column comments for LBVs #}
{% set is_lbv = config.get('materialized') == 'view' and config.get('bind') == false %}
{% if for_columns and config.persist_column_docs() and model.columns and not is_lbv %}
{% do run_query(alter_column_comment(relation, model.columns)) %}
{% endif %}
{% endmacro %}
{% macro redshift__alter_relation_comment(relation, comment) %}
{% do return(postgres__alter_relation_comment(relation, comment)) %}
{% endmacro %}
{% macro redshift__alter_column_comment(relation, column_dict) %}
{% do return(postgres__alter_column_comment(relation, column_dict)) %}
{% endmacro %}
{% macro redshift__alter_relation_add_remove_columns(relation, add_columns, remove_columns) %}
{% if add_columns %}
{% for column in add_columns %}
{% set sql -%}
alter {{ relation.type }} {{ relation }} add column {{ column.name }} {{ column.data_type }}
{% endset %}
{% do run_query(sql) %}
{% endfor %}
{% endif %}
{% if remove_columns %}
{% for column in remove_columns %}
{% set sql -%}
alter {{ relation.type }} {{ relation }} drop column {{ column.name }}
{% endset %}
{% do run_query(sql) %}
{% endfor %}
{% endif %}
{% endmacro %}

View File

@@ -1,242 +0,0 @@
{% macro redshift__get_base_catalog(information_schema, schemas) -%}
{%- call statement('base_catalog', fetch_result=True) -%}
{% set database = information_schema.database %}
{{ adapter.verify_database(database) }}
with late_binding as (
select
'{{ database }}'::varchar as table_database,
table_schema,
table_name,
'LATE BINDING VIEW'::varchar as table_type,
null::text as table_comment,
column_name,
column_index,
column_type,
null::text as column_comment
from pg_get_late_binding_view_cols()
cols(table_schema name, table_name name, column_name name,
column_type varchar,
column_index int)
order by "column_index"
),
early_binding as (
select
'{{ database }}'::varchar as table_database,
sch.nspname as table_schema,
tbl.relname as table_name,
case tbl.relkind
when 'v' then 'VIEW'
else 'BASE TABLE'
end as table_type,
tbl_desc.description as table_comment,
col.attname as column_name,
col.attnum as column_index,
pg_catalog.format_type(col.atttypid, col.atttypmod) as column_type,
col_desc.description as column_comment
from pg_catalog.pg_namespace sch
join pg_catalog.pg_class tbl on tbl.relnamespace = sch.oid
join pg_catalog.pg_attribute col on col.attrelid = tbl.oid
left outer join pg_catalog.pg_description tbl_desc on (tbl_desc.objoid = tbl.oid and tbl_desc.objsubid = 0)
left outer join pg_catalog.pg_description col_desc on (col_desc.objoid = tbl.oid and col_desc.objsubid = col.attnum)
where (
{%- for schema in schemas -%}
upper(sch.nspname) = upper('{{ schema }}'){%- if not loop.last %} or {% endif -%}
{%- endfor -%}
)
and tbl.relkind in ('r', 'v', 'f', 'p')
and col.attnum > 0
and not col.attisdropped
),
table_owners as (
select
'{{ database }}'::varchar as table_database,
schemaname as table_schema,
tablename as table_name,
tableowner as table_owner
from pg_tables
union all
select
'{{ database }}'::varchar as table_database,
schemaname as table_schema,
viewname as table_name,
viewowner as table_owner
from pg_views
),
unioned as (
select *
from early_binding
union all
select *
from late_binding
)
select *,
table_database || '.' || table_schema || '.' || table_name as table_id
from unioned
join table_owners using (table_database, table_schema, table_name)
where (
{%- for schema in schemas -%}
upper(table_schema) = upper('{{ schema }}'){%- if not loop.last %} or {% endif -%}
{%- endfor -%}
)
order by "column_index"
{%- endcall -%}
{{ return(load_result('base_catalog').table) }}
{%- endmacro %}
{% macro redshift__get_extended_catalog(schemas) %}
{%- call statement('extended_catalog', fetch_result=True) -%}
select
"database" || '.' || "schema" || '.' || "table" as table_id,
'Encoded'::text as "stats:encoded:label",
encoded as "stats:encoded:value",
'Indicates whether any column in the table has compression encoding defined.'::text as "stats:encoded:description",
true as "stats:encoded:include",
'Dist Style' as "stats:diststyle:label",
diststyle as "stats:diststyle:value",
'Distribution style or distribution key column, if key distribution is defined.'::text as "stats:diststyle:description",
true as "stats:diststyle:include",
'Sort Key 1' as "stats:sortkey1:label",
-- handle 0xFF byte in response for interleaved sort styles
case
when sortkey1 like 'INTERLEAVED%' then 'INTERLEAVED'::text
else sortkey1
end as "stats:sortkey1:value",
'First column in the sort key.'::text as "stats:sortkey1:description",
(sortkey1 is not null) as "stats:sortkey1:include",
'Max Varchar' as "stats:max_varchar:label",
max_varchar as "stats:max_varchar:value",
'Size of the largest column that uses a VARCHAR data type.'::text as "stats:max_varchar:description",
true as "stats:max_varchar:include",
-- exclude this, as the data is strangely returned with null-byte characters
'Sort Key 1 Encoding' as "stats:sortkey1_enc:label",
sortkey1_enc as "stats:sortkey1_enc:value",
'Compression encoding of the first column in the sort key.' as "stats:sortkey1_enc:description",
false as "stats:sortkey1_enc:include",
'# Sort Keys' as "stats:sortkey_num:label",
sortkey_num as "stats:sortkey_num:value",
'Number of columns defined as sort keys.' as "stats:sortkey_num:description",
(sortkey_num > 0) as "stats:sortkey_num:include",
'Approximate Size' as "stats:size:label",
size * 1000000 as "stats:size:value",
'Approximate size of the table, calculated from a count of 1MB blocks'::text as "stats:size:description",
true as "stats:size:include",
'Disk Utilization' as "stats:pct_used:label",
pct_used / 100.0 as "stats:pct_used:value",
'Percent of available space that is used by the table.'::text as "stats:pct_used:description",
true as "stats:pct_used:include",
'Unsorted %' as "stats:unsorted:label",
unsorted / 100.0 as "stats:unsorted:value",
'Percent of unsorted rows in the table.'::text as "stats:unsorted:description",
(unsorted is not null) as "stats:unsorted:include",
'Stats Off' as "stats:stats_off:label",
stats_off as "stats:stats_off:value",
'Number that indicates how stale the table statistics are; 0 is current, 100 is out of date.'::text as "stats:stats_off:description",
true as "stats:stats_off:include",
'Approximate Row Count' as "stats:rows:label",
tbl_rows as "stats:rows:value",
'Approximate number of rows in the table. This value includes rows marked for deletion, but not yet vacuumed.'::text as "stats:rows:description",
true as "stats:rows:include",
'Sort Key Skew' as "stats:skew_sortkey1:label",
skew_sortkey1 as "stats:skew_sortkey1:value",
'Ratio of the size of the largest non-sort key column to the size of the first column of the sort key.'::text as "stats:skew_sortkey1:description",
(skew_sortkey1 is not null) as "stats:skew_sortkey1:include",
'Skew Rows' as "stats:skew_rows:label",
skew_rows as "stats:skew_rows:value",
'Ratio of the number of rows in the slice with the most rows to the number of rows in the slice with the fewest rows.'::text as "stats:skew_rows:description",
(skew_rows is not null) as "stats:skew_rows:include"
from svv_table_info
where (
{%- for schema in schemas -%}
upper(schema) = upper('{{ schema }}'){%- if not loop.last %} or {% endif -%}
{%- endfor -%}
)
{%- endcall -%}
{{ return(load_result('extended_catalog').table) }}
{% endmacro %}
{% macro redshift__can_select_from(table_name) %}
{%- call statement('has_table_privilege', fetch_result=True) -%}
select has_table_privilege(current_user, '{{ table_name }}', 'SELECT') as can_select
{%- endcall -%}
{% set can_select = load_result('has_table_privilege').table[0]['can_select'] %}
{{ return(can_select) }}
{% endmacro %}
{% macro redshift__no_svv_table_info_warning() %}
{% set msg %}
Warning: The database user "{{ target.user }}" has insufficient permissions to
query the "svv_table_info" table. Please grant SELECT permissions on this table
to the "{{ target.user }}" user to fetch extended table details from Redshift.
{% endset %}
{{ log(msg, info=True) }}
{% endmacro %}
{% macro redshift__get_catalog(information_schema, schemas) %}
{#-- Compute a left-outer join in memory. Some Redshift queries are
-- leader-only, and cannot be joined to other compute-based queries #}
{% set catalog = redshift__get_base_catalog(information_schema, schemas) %}
{% set select_extended = redshift__can_select_from('svv_table_info') %}
{% if select_extended %}
{% set extended_catalog = redshift__get_extended_catalog(schemas) %}
{% set catalog = catalog.join(extended_catalog, 'table_id') %}
{% else %}
{{ redshift__no_svv_table_info_warning() }}
{% endif %}
{{ return(catalog.exclude(['table_id'])) }}
{% endmacro %}

View File

@@ -1,4 +0,0 @@
{% macro redshift__snapshot_merge_sql(target, source, insert_cols) -%}
{{ postgres__snapshot_merge_sql(target, source, insert_cols) }}
{% endmacro %}

View File

@@ -1,3 +0,0 @@
{% macro redshift__get_relations () -%}
{{ return(dbt.postgres__get_relations()) }}
{% endmacro %}

View File

@@ -1,25 +0,0 @@
default:
outputs:
dev:
type: redshift
threads: [1 or more]
host: [host]
port: [port]
user: [dev_username]
pass: [dev_password]
dbname: [dbname]
schema: [dev_schema]
prod:
type: redshift
method: iam
cluster_id: [cluster_id]
threads: [1 or more]
host: [host]
port: [port]
user: [prod_user]
dbname: [dbname]
schema: [prod_schema]
target: dev

View File

@@ -1,70 +0,0 @@
#!/usr/bin/env python
import os
import sys
if sys.version_info < (3, 6):
print('Error: dbt does not support this version of Python.')
print('Please upgrade to Python 3.6 or higher.')
sys.exit(1)
from setuptools import setup
try:
from setuptools import find_namespace_packages
except ImportError:
# the user has a downlevel version of setuptools.
print('Error: dbt requires setuptools v40.1.0 or higher.')
print('Please upgrade setuptools with "pip install --upgrade setuptools" '
'and try again')
sys.exit(1)
package_name = "dbt-redshift"
package_version = "0.21.0b2"
description = """The redshift adapter plugin for dbt (data build tool)"""
this_directory = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(this_directory, 'README.md')) as f:
long_description = f.read()
setup(
name=package_name,
version=package_version,
description=description,
long_description=description,
long_description_content_type='text/markdown',
author="dbt Labs",
author_email="info@dbtlabs.com",
url="https://github.com/dbt-labs/dbt",
packages=find_namespace_packages(include=['dbt', 'dbt.*']),
package_data={
'dbt': [
'include/redshift/dbt_project.yml',
'include/redshift/sample_profiles.yml',
'include/redshift/macros/*.sql',
'include/redshift/macros/**/*.sql',
]
},
install_requires=[
'dbt-core=={}'.format(package_version),
'dbt-postgres=={}'.format(package_version),
# the following are all to match snowflake-connector-python
'boto3>=1.4.4,<2.0.0',
],
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Operating System :: Microsoft :: Windows',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
python_requires=">=3.6.2",
)

View File

@@ -1,32 +0,0 @@
<p align="center">
<img src="https://raw.githubusercontent.com/dbt-labs/dbt/6c6649f9129d5d108aa3b0526f634cd8f3a9d1ed/etc/dbt-logo-full.svg" alt="dbt logo" width="500"/>
</p>
**[dbt](https://www.getdbt.com/)** (data build tool) enables data analysts and engineers to transform their data using the same practices that software engineers use to build applications.
dbt is the T in ELT. Organize, cleanse, denormalize, filter, rename, and pre-aggregate the raw data in your warehouse so that it's ready for analysis.
## dbt-snowflake
The `dbt-snowflake` package contains all of the code required to make dbt operate on a Snowflake database. For
more information on using dbt with Snowflake, consult [the docs](https://docs.getdbt.com/docs/profile-snowflake).
## Find out more
- Check out the [Introduction to dbt](https://docs.getdbt.com/docs/introduction/).
- Read the [dbt Viewpoint](https://docs.getdbt.com/docs/about/viewpoint/).
## Join thousands of analysts in the dbt community
- Join the [chat](http://community.getdbt.com/) on Slack.
- Find community posts on [dbt Discourse](https://discourse.getdbt.com).
## Reporting bugs and contributing code
- Want to report a bug or request a feature? Let us know on [Slack](http://community.getdbt.com/), or open [an issue](https://github.com/dbt-labs/dbt/issues/new).
- Want to help us build dbt? Check out the [Contributing Getting Started Guide](https://github.com/dbt-labs/dbt/blob/HEAD/CONTRIBUTING.md)
## Code of Conduct
Everyone interacting in the dbt project's codebases, issue trackers, chat rooms, and mailing lists is expected to follow the [dbt Code of Conduct](https://community.getdbt.com/code-of-conduct).

View File

@@ -1,13 +0,0 @@
from dbt.adapters.snowflake.column import SnowflakeColumn # noqa
from dbt.adapters.snowflake.connections import SnowflakeConnectionManager # noqa
from dbt.adapters.snowflake.connections import SnowflakeCredentials
from dbt.adapters.snowflake.relation import SnowflakeRelation # noqa
from dbt.adapters.snowflake.impl import SnowflakeAdapter
from dbt.adapters.base import AdapterPlugin
from dbt.include import snowflake
Plugin = AdapterPlugin(
adapter=SnowflakeAdapter,
credentials=SnowflakeCredentials,
include_path=snowflake.PACKAGE_PATH)

View File

@@ -1 +0,0 @@
version = '0.21.0b2'

View File

@@ -1,31 +0,0 @@
from dataclasses import dataclass
from dbt.adapters.base.column import Column
from dbt.exceptions import RuntimeException
@dataclass
class SnowflakeColumn(Column):
def is_integer(self) -> bool:
# everything that smells like an int is actually a NUMBER(38, 0)
return False
def is_numeric(self) -> bool:
return self.dtype.lower() in [
'int', 'integer', 'bigint', 'smallint', 'tinyint', 'byteint',
'numeric', 'decimal', 'number'
]
def is_float(self):
return self.dtype.lower() in [
'float', 'float4', 'float8', 'double', 'double precision', 'real',
]
def string_size(self) -> int:
if not self.is_string():
raise RuntimeException("Called string_size() on non-string field!")
if self.dtype == 'text' or self.char_size is None:
return 16777216
else:
return int(self.char_size)

View File

@@ -1,375 +0,0 @@
import base64
import datetime
import pytz
import re
from contextlib import contextmanager
from dataclasses import dataclass
from io import StringIO
from time import sleep
from typing import Optional
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
import requests
import snowflake.connector
import snowflake.connector.errors
from dbt.exceptions import (
InternalException, RuntimeException, FailedToConnectException,
DatabaseException, warn_or_error
)
from dbt.adapters.base import Credentials
from dbt.contracts.connection import AdapterResponse
from dbt.adapters.sql import SQLConnectionManager
from dbt.logger import GLOBAL_LOGGER as logger
_TOKEN_REQUEST_URL = 'https://{}.snowflakecomputing.com/oauth/token-request'
@dataclass
class SnowflakeCredentials(Credentials):
account: str
user: str
warehouse: Optional[str] = None
role: Optional[str] = None
password: Optional[str] = None
authenticator: Optional[str] = None
private_key_path: Optional[str] = None
private_key_passphrase: Optional[str] = None
token: Optional[str] = None
oauth_client_id: Optional[str] = None
oauth_client_secret: Optional[str] = None
query_tag: Optional[str] = None
client_session_keep_alive: bool = False
def __post_init__(self):
if (
self.authenticator != 'oauth' and
(self.oauth_client_secret or self.oauth_client_id or self.token)
):
# the user probably forgot to set 'authenticator' like I keep doing
warn_or_error(
'Authenticator is not set to oauth, but an oauth-only '
'parameter is set! Did you mean to set authenticator: oauth?'
)
@property
def type(self):
return 'snowflake'
@property
def unique_field(self):
return self.account
def _connection_keys(self):
return (
'account', 'user', 'database', 'schema', 'warehouse', 'role',
'client_session_keep_alive'
)
def auth_args(self):
# Pull all of the optional authentication args for the connector,
# let connector handle the actual arg validation
result = {}
if self.password:
result['password'] = self.password
if self.authenticator:
result['authenticator'] = self.authenticator
if self.authenticator == 'oauth':
token = self.token
# if we have a client ID/client secret, the token is a refresh
# token, not an access token
if self.oauth_client_id and self.oauth_client_secret:
token = self._get_access_token()
elif self.oauth_client_id:
warn_or_error(
'Invalid profile: got an oauth_client_id, but not an '
'oauth_client_secret!'
)
elif self.oauth_client_secret:
warn_or_error(
'Invalid profile: got an oauth_client_secret, but not '
'an oauth_client_id!'
)
result['token'] = token
# enable the token cache
result['client_store_temporary_credential'] = True
result['private_key'] = self._get_private_key()
return result
def _get_access_token(self) -> str:
if self.authenticator != 'oauth':
raise InternalException('Can only get access tokens for oauth')
missing = any(
x is None for x in
(self.oauth_client_id, self.oauth_client_secret, self.token)
)
if missing:
raise InternalException(
'need a client ID a client secret, and a refresh token to get '
'an access token'
)
# should the full url be a config item?
token_url = _TOKEN_REQUEST_URL.format(self.account)
# I think this is only used to redirect on success, which we ignore
# (it does not have to match the integration's settings in snowflake)
redirect_uri = 'http://localhost:9999'
data = {
'grant_type': 'refresh_token',
'refresh_token': self.token,
'redirect_uri': redirect_uri
}
auth = base64.b64encode(
f'{self.oauth_client_id}:{self.oauth_client_secret}'
.encode('ascii')
).decode('ascii')
headers = {
'Authorization': f'Basic {auth}',
'Content-type': 'application/x-www-form-urlencoded;charset=utf-8'
}
result_json = None
max_iter = 20
# Attempt to obtain JSON for 1 second before throwing an error
for i in range(max_iter):
result = requests.post(token_url, headers=headers, data=data)
try:
result_json = result.json()
break
except ValueError as e:
message = result.text
logger.debug(f"Got a non-json response ({result.status_code}): \
{e}, message: {message}")
sleep(0.05)
if result_json is None:
raise DatabaseException(f"""Did not receive valid json with access_token.
Showing json response: {result_json}""")
return result_json['access_token']
def _get_private_key(self):
"""Get Snowflake private key by path or None."""
if not self.private_key_path:
return None
if self.private_key_passphrase:
encoded_passphrase = self.private_key_passphrase.encode()
else:
encoded_passphrase = None
with open(self.private_key_path, 'rb') as key:
p_key = serialization.load_pem_private_key(
key.read(),
password=encoded_passphrase,
backend=default_backend())
return p_key.private_bytes(
encoding=serialization.Encoding.DER,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.NoEncryption())
class SnowflakeConnectionManager(SQLConnectionManager):
TYPE = 'snowflake'
@contextmanager
def exception_handler(self, sql):
try:
yield
except snowflake.connector.errors.ProgrammingError as e:
msg = str(e)
logger.debug('Snowflake query id: {}'.format(e.sfqid))
logger.debug('Snowflake error: {}'.format(msg))
if 'Empty SQL statement' in msg:
logger.debug("got empty sql statement, moving on")
elif 'This session does not have a current database' in msg:
raise FailedToConnectException(
('{}\n\nThis error sometimes occurs when invalid '
'credentials are provided, or when your default role '
'does not have access to use the specified database. '
'Please double check your profile and try again.')
.format(msg))
else:
raise DatabaseException(msg)
except Exception as e:
if isinstance(e, snowflake.connector.errors.Error):
logger.debug('Snowflake query id: {}'.format(e.sfqid))
logger.debug("Error running SQL: {}", sql)
logger.debug("Rolling back transaction.")
self.rollback_if_open()
if isinstance(e, RuntimeException):
# during a sql query, an internal to dbt exception was raised.
# this sounds a lot like a signal handler and probably has
# useful information, so raise it without modification.
raise
raise RuntimeException(str(e)) from e
@classmethod
def open(cls, connection):
if connection.state == 'open':
logger.debug('Connection is already open, skipping open.')
return connection
try:
creds = connection.credentials
handle = snowflake.connector.connect(
account=creds.account,
user=creds.user,
database=creds.database,
schema=creds.schema,
warehouse=creds.warehouse,
role=creds.role,
autocommit=True,
client_session_keep_alive=creds.client_session_keep_alive,
application='dbt',
**creds.auth_args()
)
if creds.query_tag:
handle.cursor().execute(
("alter session set query_tag = '{}'")
.format(creds.query_tag))
connection.handle = handle
connection.state = 'open'
except snowflake.connector.errors.Error as e:
logger.debug("Got an error when attempting to open a snowflake "
"connection: '{}'"
.format(e))
connection.handle = None
connection.state = 'fail'
raise FailedToConnectException(str(e))
def cancel(self, connection):
handle = connection.handle
sid = handle.session_id
connection_name = connection.name
sql = 'select system$abort_session({})'.format(sid)
logger.debug("Cancelling query '{}' ({})".format(connection_name, sid))
_, cursor = self.add_query(sql)
res = cursor.fetchone()
logger.debug("Cancel query '{}': {}".format(connection_name, res))
@classmethod
def get_response(cls, cursor) -> AdapterResponse:
code = cursor.sqlstate
if code is None:
code = 'SUCCESS'
return AdapterResponse(
_message="{} {}".format(code, cursor.rowcount),
rows_affected=cursor.rowcount,
code=code
)
# disable transactional logic by default on Snowflake
# except for DML statements where explicitly defined
def add_begin_query(self, *args, **kwargs):
pass
def add_commit_query(self, *args, **kwargs):
pass
def begin(self):
pass
def commit(self):
pass
def clear_transaction(self):
pass
@classmethod
def _split_queries(cls, sql):
"Splits sql statements at semicolons into discrete queries"
sql_s = str(sql)
sql_buf = StringIO(sql_s)
split_query = snowflake.connector.util_text.split_statements(sql_buf)
return [part[0] for part in split_query]
@classmethod
def process_results(cls, column_names, rows):
# Override for Snowflake. The datetime objects returned by
# snowflake-connector-python are not pickleable, so we need
# to replace them with sane timezones
fixed = []
for row in rows:
fixed_row = []
for col in row:
if isinstance(col, datetime.datetime) and col.tzinfo:
offset = col.utcoffset()
offset_seconds = offset.total_seconds()
new_timezone = pytz.FixedOffset(offset_seconds // 60)
col = col.astimezone(tz=new_timezone)
fixed_row.append(col)
fixed.append(fixed_row)
return super().process_results(column_names, fixed)
def add_query(self, sql, auto_begin=True,
bindings=None, abridge_sql_log=False):
connection = None
cursor = None
if bindings:
# The snowflake connector is more strict than, e.g., psycopg2 -
# which allows any iterable thing to be passed as a binding.
bindings = tuple(bindings)
queries = self._split_queries(sql)
for individual_query in queries:
# hack -- after the last ';', remove comments and don't run
# empty queries. this avoids using exceptions as flow control,
# and also allows us to return the status of the last cursor
without_comments = re.sub(
re.compile(
r'(\".*?\"|\'.*?\')|(/\*.*?\*/|--[^\r\n]*$)', re.MULTILINE
),
'', individual_query).strip()
if without_comments == "":
continue
connection, cursor = super().add_query(
individual_query, auto_begin,
bindings=bindings,
abridge_sql_log=abridge_sql_log
)
if cursor is None:
conn = self.get_thread_connection()
if conn is None or conn.name is None:
conn_name = '<None>'
else:
conn_name = conn.name
raise RuntimeException(
"Tried to run an empty query on model '{}'. If you are "
"conditionally running\nsql, eg. in a model hook, make "
"sure your `else` clause contains valid sql!\n\n"
"Provided SQL:\n{}"
.format(conn_name, sql)
)
return connection, cursor
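A quick standalone demo of the comment-stripping check used in add_query above; it reuses the same regex and is only meant to show how a split statement is judged empty before execution:

import re

COMMENT_PATTERN = re.compile(
    r'(\".*?\"|\'.*?\')|(/\*.*?\*/|--[^\r\n]*$)', re.MULTILINE
)

def is_effectively_empty(query: str) -> bool:
    # Strip comments (string literals are also dropped here, which is fine
    # because the result is only used as an emptiness check).
    return re.sub(COMMENT_PATTERN, '', query).strip() == ''

print(is_effectively_empty('-- a trailing comment\n'))   # True
print(is_effectively_empty('select 1 -- trailing'))      # False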

View File

@@ -1,169 +0,0 @@
from dataclasses import dataclass
from typing import Mapping, Any, Optional, List, Union
import agate
from dbt.adapters.base.impl import AdapterConfig
from dbt.adapters.sql import SQLAdapter
from dbt.adapters.sql.impl import (
LIST_SCHEMAS_MACRO_NAME,
LIST_RELATIONS_MACRO_NAME,
)
from dbt.adapters.snowflake import SnowflakeConnectionManager
from dbt.adapters.snowflake import SnowflakeRelation
from dbt.adapters.snowflake import SnowflakeColumn
from dbt.contracts.graph.manifest import Manifest
from dbt.exceptions import RuntimeException, DatabaseException
from dbt.utils import filter_null_values
@dataclass
class SnowflakeConfig(AdapterConfig):
transient: Optional[bool] = None
cluster_by: Optional[Union[str, List[str]]] = None
automatic_clustering: Optional[bool] = None
secure: Optional[bool] = None
copy_grants: Optional[bool] = None
snowflake_warehouse: Optional[str] = None
query_tag: Optional[str] = None
merge_update_columns: Optional[str] = None
class SnowflakeAdapter(SQLAdapter):
Relation = SnowflakeRelation
Column = SnowflakeColumn
ConnectionManager = SnowflakeConnectionManager
AdapterSpecificConfigs = SnowflakeConfig
@classmethod
def date_function(cls):
return "CURRENT_TIMESTAMP()"
@classmethod
def _catalog_filter_table(
cls, table: agate.Table, manifest: Manifest
) -> agate.Table:
# On snowflake, users can set QUOTED_IDENTIFIERS_IGNORE_CASE, so force
# the column names to their lowercased forms.
lowered = table.rename(
column_names=[c.lower() for c in table.column_names]
)
return super()._catalog_filter_table(lowered, manifest)
def _make_match_kwargs(self, database, schema, identifier):
quoting = self.config.quoting
if identifier is not None and quoting["identifier"] is False:
identifier = identifier.upper()
if schema is not None and quoting["schema"] is False:
schema = schema.upper()
if database is not None and quoting["database"] is False:
database = database.upper()
return filter_null_values(
{"identifier": identifier, "schema": schema, "database": database}
)
def _get_warehouse(self) -> str:
_, table = self.execute(
'select current_warehouse() as warehouse',
fetch=True
)
if len(table) == 0 or len(table[0]) == 0:
# can this happen?
raise RuntimeException(
'Could not get current warehouse: no results'
)
return str(table[0][0])
def _use_warehouse(self, warehouse: str):
"""Use the given warehouse. Quotes are never applied."""
self.execute('use warehouse {}'.format(warehouse))
def pre_model_hook(self, config: Mapping[str, Any]) -> Optional[str]:
default_warehouse = self.config.credentials.warehouse
warehouse = config.get('snowflake_warehouse', default_warehouse)
if warehouse == default_warehouse or warehouse is None:
return None
previous = self._get_warehouse()
self._use_warehouse(warehouse)
return previous
def post_model_hook(
self, config: Mapping[str, Any], context: Optional[str]
) -> None:
if context is not None:
self._use_warehouse(context)
def list_schemas(self, database: str) -> List[str]:
try:
results = self.execute_macro(
LIST_SCHEMAS_MACRO_NAME,
kwargs={'database': database}
)
except DatabaseException as exc:
msg = (
f'Database error while listing schemas in database '
f'"{database}"\n{exc}'
)
raise RuntimeException(msg)
# this uses 'show terse schemas in database', and the column name we
# want is 'name'
return [row['name'] for row in results]
def get_columns_in_relation(self, relation):
try:
return super().get_columns_in_relation(relation)
except DatabaseException as exc:
if 'does not exist or not authorized' in str(exc):
return []
else:
raise
def list_relations_without_caching(
self, schema_relation: SnowflakeRelation
) -> List[SnowflakeRelation]:
kwargs = {'schema_relation': schema_relation}
try:
results = self.execute_macro(
LIST_RELATIONS_MACRO_NAME,
kwargs=kwargs
)
except DatabaseException as exc:
# if the schema doesn't exist, we just want to return.
# Alternatively, we could query the list of schemas before we start
# and skip listing the missing ones, which sounds expensive.
if 'Object does not exist' in str(exc):
return []
raise
relations = []
quote_policy = {
'database': True,
'schema': True,
'identifier': True
}
columns = ['database_name', 'schema_name', 'name', 'kind']
for _database, _schema, _identifier, _type in results.select(columns):
try:
_type = self.Relation.get_relation_type(_type.lower())
except ValueError:
_type = self.Relation.External
relations.append(self.Relation.create(
database=_database,
schema=_schema,
identifier=_identifier,
quote_policy=quote_policy,
type=_type
))
return relations
def timestamp_add_sql(
self, add_to: str, number: int = 1, interval: str = 'hour'
) -> str:
return f'DATEADD({interval}, {number}, {add_to})'

View File

@@ -1,14 +0,0 @@
from dataclasses import dataclass
from dbt.adapters.base.relation import BaseRelation, Policy
@dataclass
class SnowflakeQuotePolicy(Policy):
database: bool = False
schema: bool = False
identifier: bool = False
@dataclass(frozen=True, eq=False, repr=False)
class SnowflakeRelation(BaseRelation):
quote_policy: SnowflakeQuotePolicy = SnowflakeQuotePolicy()

View File

@@ -1,2 +0,0 @@
import os
PACKAGE_PATH = os.path.dirname(__file__)

View File

@@ -1,5 +0,0 @@
config-version: 2
name: dbt_snowflake
version: 1.0
macro-paths: ["macros"]

View File

@@ -1,251 +0,0 @@
{% macro snowflake__create_table_as(temporary, relation, sql) -%}
{%- set transient = config.get('transient', default=true) -%}
{%- set cluster_by_keys = config.get('cluster_by', default=none) -%}
{%- set enable_automatic_clustering = config.get('automatic_clustering', default=false) -%}
{%- set copy_grants = config.get('copy_grants', default=false) -%}
{%- if cluster_by_keys is not none and cluster_by_keys is string -%}
{%- set cluster_by_keys = [cluster_by_keys] -%}
{%- endif -%}
{%- if cluster_by_keys is not none -%}
{%- set cluster_by_string = cluster_by_keys|join(", ")-%}
{% else %}
{%- set cluster_by_string = none -%}
{%- endif -%}
{%- set sql_header = config.get('sql_header', none) -%}
{{ sql_header if sql_header is not none }}
create or replace {% if temporary -%}
temporary
{%- elif transient -%}
transient
{%- endif %} table {{ relation }} {% if copy_grants and not temporary -%} copy grants {%- endif %} as
(
{%- if cluster_by_string is not none -%}
select * from(
{{ sql }}
) order by ({{ cluster_by_string }})
{%- else -%}
{{ sql }}
{%- endif %}
);
{% if cluster_by_string is not none and not temporary -%}
alter table {{relation}} cluster by ({{cluster_by_string}});
{%- endif -%}
{% if enable_automatic_clustering and cluster_by_string is not none and not temporary -%}
alter table {{relation}} resume recluster;
{%- endif -%}
{% endmacro %}
{% macro snowflake__create_view_as(relation, sql) -%}
{%- set secure = config.get('secure', default=false) -%}
{%- set copy_grants = config.get('copy_grants', default=false) -%}
{%- set sql_header = config.get('sql_header', none) -%}
{{ sql_header if sql_header is not none }}
create or replace {% if secure -%}
secure
{%- endif %} view {{ relation }} {% if copy_grants -%} copy grants {%- endif %} as (
{{ sql }}
);
{% endmacro %}
{% macro snowflake__get_columns_in_relation(relation) -%}
{%- set sql -%}
describe table {{ relation }}
{%- endset -%}
{%- set result = run_query(sql) -%}
{% set maximum = 10000 %}
{% if (result | length) >= maximum %}
{% set msg %}
Too many columns in relation {{ relation }}! dbt can only get
information about relations with fewer than {{ maximum }} columns.
{% endset %}
{% do exceptions.raise_compiler_error(msg) %}
{% endif %}
{% set columns = [] %}
{% for row in result %}
{% do columns.append(api.Column.from_description(row['name'], row['type'])) %}
{% endfor %}
{% do return(columns) %}
{% endmacro %}
{% macro snowflake__list_schemas(database) -%}
{# 10k limit from here: https://docs.snowflake.net/manuals/sql-reference/sql/show-schemas.html#usage-notes #}
{% set maximum = 10000 %}
{% set sql -%}
show terse schemas in database {{ database }}
limit {{ maximum }}
{%- endset %}
{% set result = run_query(sql) %}
{% if (result | length) >= maximum %}
{% set msg %}
Too many schemas in database {{ database }}! dbt can only get
information about databases with fewer than {{ maximum }} schemas.
{% endset %}
{% do exceptions.raise_compiler_error(msg) %}
{% endif %}
{{ return(result) }}
{% endmacro %}
{% macro snowflake__list_relations_without_caching(schema_relation) %}
{%- set sql -%}
show terse objects in {{ schema_relation }}
{%- endset -%}
{%- set result = run_query(sql) -%}
{% set maximum = 10000 %}
{% if (result | length) >= maximum %}
{% set msg %}
Too many objects in schema {{ schema_relation }}! dbt can only get
information about schemas with fewer than {{ maximum }} objects.
{% endset %}
{% do exceptions.raise_compiler_error(msg) %}
{% endif %}
{%- do return(result) -%}
{% endmacro %}
{% macro snowflake__check_schema_exists(information_schema, schema) -%}
{% call statement('check_schema_exists', fetch_result=True) -%}
select count(*)
from {{ information_schema }}.schemata
where upper(schema_name) = upper('{{ schema }}')
and upper(catalog_name) = upper('{{ information_schema.database }}')
{%- endcall %}
{{ return(load_result('check_schema_exists').table) }}
{%- endmacro %}
{% macro snowflake__current_timestamp() -%}
convert_timezone('UTC', current_timestamp())
{%- endmacro %}
{% macro snowflake__snapshot_string_as_time(timestamp) -%}
{%- set result = "to_timestamp_ntz('" ~ timestamp ~ "')" -%}
{{ return(result) }}
{%- endmacro %}
{% macro snowflake__snapshot_get_time() -%}
to_timestamp_ntz({{ current_timestamp() }})
{%- endmacro %}
{% macro snowflake__rename_relation(from_relation, to_relation) -%}
{% call statement('rename_relation') -%}
alter table {{ from_relation }} rename to {{ to_relation }}
{%- endcall %}
{% endmacro %}
{% macro snowflake__alter_column_type(relation, column_name, new_column_type) -%}
{% call statement('alter_column_type') %}
alter table {{ relation }} alter {{ adapter.quote(column_name) }} set data type {{ new_column_type }};
{% endcall %}
{% endmacro %}
{% macro snowflake__alter_relation_comment(relation, relation_comment) -%}
comment on {{ relation.type }} {{ relation }} IS $${{ relation_comment | replace('$', '[$]') }}$$;
{% endmacro %}
{% macro snowflake__alter_column_comment(relation, column_dict) -%}
{% set existing_columns = adapter.get_columns_in_relation(relation) | map(attribute="name") | list %}
alter {{ relation.type }} {{ relation }} alter
{% for column_name in column_dict if (column_name in existing_columns) or (column_name|upper in existing_columns) %}
{{ adapter.quote(column_name) if column_dict[column_name]['quote'] else column_name }} COMMENT $${{ column_dict[column_name]['description'] | replace('$', '[$]') }}$$ {{ ',' if not loop.last else ';' }}
{% endfor %}
{% endmacro %}
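Rendered against a table with two documented columns, the statement above comes out roughly as follows (relation, column names, and descriptions are illustrative):
alter table analytics.my_model alter
    id COMMENT $$Surrogate key$$ ,
    customer_name COMMENT $$Name of the customer$$ ;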
{% macro get_current_query_tag() -%}
{{ return(run_query("show parameters like 'query_tag' in session").rows[0]['value']) }}
{% endmacro %}
{% macro set_query_tag() -%}
{% set new_query_tag = config.get('query_tag') %}
{% if new_query_tag %}
{% set original_query_tag = get_current_query_tag() %}
{{ log("Setting query_tag to '" ~ new_query_tag ~ "'. Will reset to '" ~ original_query_tag ~ "' after materialization.") }}
{% do run_query("alter session set query_tag = '{}'".format(new_query_tag)) %}
{{ return(original_query_tag)}}
{% endif %}
{{ return(none)}}
{% endmacro %}
{% macro unset_query_tag(original_query_tag) -%}
{% set new_query_tag = config.get('query_tag') %}
{% if new_query_tag %}
{% if original_query_tag %}
{{ log("Resetting query_tag to '" ~ original_query_tag ~ "'.") }}
{% do run_query("alter session set query_tag = '{}'".format(original_query_tag)) %}
{% else %}
{{ log("No original query_tag, unsetting parameter.") }}
{% do run_query("alter session unset query_tag") %}
{% endif %}
{% endif %}
{% endmacro %}
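set_query_tag and unset_query_tag are meant to bracket a materialization: the first returns the session's original query_tag so the second can restore it afterwards. A minimal sketch of that pattern:
{% set original_query_tag = set_query_tag() %}
{# ... run the materialization's statements here ... #}
{% do unset_query_tag(original_query_tag) %}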
{% macro snowflake__alter_relation_add_remove_columns(relation, add_columns, remove_columns) %}
{% if add_columns %}
{% set sql -%}
alter {{ relation.type }} {{ relation }} add column
{% for column in add_columns %}
{{ column.name }} {{ column.data_type }}{{ ',' if not loop.last }}
{% endfor %}
{%- endset -%}
{% do run_query(sql) %}
{% endif %}
{% if remove_columns %}
{% set sql -%}
alter {{ relation.type }} {{ relation }} drop column
{% for column in remove_columns %}
{{ column.name }}{{ ',' if not loop.last }}
{% endfor %}
{%- endset -%}
{% do run_query(sql) %}
{% endif %}
{% endmacro %}
{% macro snowflake_dml_explicit_transaction(dml) %}
{#
Use this macro to wrap all INSERT, MERGE, UPDATE, DELETE, and TRUNCATE
statements before passing them into run_query(), or calling in the 'main' statement
of a materialization
#}
{% set dml_transaction -%}
begin;
{{ dml }};
commit;
{%- endset %}
{% do return(dml_transaction) %}
{% endmacro %}
{% macro snowflake__truncate_relation(relation) -%}
{% set truncate_dml %}
truncate table {{ relation }}
{% endset %}
{% call statement('truncate_relation') -%}
{{ snowflake_dml_explicit_transaction(truncate_dml) }}
{%- endcall %}
{% endmacro %}
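The truncate macro above is the simplest consumer of snowflake_dml_explicit_transaction; a hypothetical macro issuing a delete would follow the same shape (sketch; the macro name and predicate are illustrative):
{% macro delete_flagged_rows(relation) -%}
    {% set delete_dml %}
        delete from {{ relation }} where is_deleted = true
    {% endset %}
    {% call statement('delete_flagged_rows') -%}
        {{ snowflake_dml_explicit_transaction(delete_dml) }}
    {%- endcall %}
{% endmacro %}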


@@ -1,67 +0,0 @@
{% macro snowflake__get_catalog(information_schema, schemas) -%}
{% set query %}
with tables as (
select
table_catalog as "table_database",
table_schema as "table_schema",
table_name as "table_name",
table_type as "table_type",
comment as "table_comment",
-- note: this is the _role_ that owns the table
table_owner as "table_owner",
'Clustering Key' as "stats:clustering_key:label",
clustering_key as "stats:clustering_key:value",
'The key used to cluster this table' as "stats:clustering_key:description",
(clustering_key is not null) as "stats:clustering_key:include",
'Row Count' as "stats:row_count:label",
row_count as "stats:row_count:value",
'An approximate count of rows in this table' as "stats:row_count:description",
(row_count is not null) as "stats:row_count:include",
'Approximate Size' as "stats:bytes:label",
bytes as "stats:bytes:value",
'Approximate size of the table as reported by Snowflake' as "stats:bytes:description",
(bytes is not null) as "stats:bytes:include",
'Last Modified' as "stats:last_modified:label",
to_varchar(convert_timezone('UTC', last_altered), 'yyyy-mm-dd HH24:MI'||'UTC') as "stats:last_modified:value",
'The timestamp for last update/change' as "stats:last_modified:description",
(last_altered is not null and table_type='BASE TABLE') as "stats:last_modified:include"
from {{ information_schema }}.tables
),
columns as (
select
table_catalog as "table_database",
table_schema as "table_schema",
table_name as "table_name",
column_name as "column_name",
ordinal_position as "column_index",
data_type as "column_type",
comment as "column_comment"
from {{ information_schema }}.columns
)
select *
from tables
join columns using ("table_database", "table_schema", "table_name")
where (
{%- for schema in schemas -%}
upper("table_schema") = upper('{{ schema }}'){%- if not loop.last %} or {% endif -%}
{%- endfor -%}
)
order by "column_index"
{%- endset -%}
{{ return(run_query(query)) }}
{%- endmacro %}


@@ -1,80 +0,0 @@
{% macro dbt_snowflake_validate_get_incremental_strategy(config) %}
{#-- Find and validate the incremental strategy #}
{%- set strategy = config.get("incremental_strategy", default="merge") -%}
{% set invalid_strategy_msg -%}
Invalid incremental strategy provided: {{ strategy }}
Expected one of: 'merge', 'delete+insert'
{%- endset %}
{% if strategy not in ['merge', 'delete+insert'] %}
{% do exceptions.raise_compiler_error(invalid_strategy_msg) %}
{% endif %}
{% do return(strategy) %}
{% endmacro %}
{% macro dbt_snowflake_get_incremental_sql(strategy, tmp_relation, target_relation, unique_key, dest_columns) %}
{% if strategy == 'merge' %}
{% do return(get_merge_sql(target_relation, tmp_relation, unique_key, dest_columns)) %}
{% elif strategy == 'delete+insert' %}
{% do return(get_delete_insert_merge_sql(target_relation, tmp_relation, unique_key, dest_columns)) %}
{% else %}
{% do exceptions.raise_compiler_error('invalid strategy: ' ~ strategy) %}
{% endif %}
{% endmacro %}
{% materialization incremental, adapter='snowflake' -%}
{% set original_query_tag = set_query_tag() %}
{%- set unique_key = config.get('unique_key') -%}
{%- set full_refresh_mode = (should_full_refresh()) -%}
{% set target_relation = this %}
{% set existing_relation = load_relation(this) %}
{% set tmp_relation = make_temp_relation(this) %}
{#-- Validate early so we don't run SQL if the strategy is invalid --#}
{% set strategy = dbt_snowflake_validate_get_incremental_strategy(config) -%}
{% set on_schema_change = incremental_validate_on_schema_change(config.get('on_schema_change'), default='ignore') %}
{{ run_hooks(pre_hooks) }}
{% if existing_relation is none %}
{% set build_sql = create_table_as(False, target_relation, sql) %}
{% elif existing_relation.is_view %}
{#-- Can't overwrite a view with a table - we must drop --#}
{{ log("Dropping relation " ~ target_relation ~ " because it is a view and this model is a table.") }}
{% do adapter.drop_relation(existing_relation) %}
{% set build_sql = create_table_as(False, target_relation, sql) %}
{% elif full_refresh_mode %}
{% set build_sql = create_table_as(False, target_relation, sql) %}
{% else %}
{% do run_query(create_table_as(True, tmp_relation, sql)) %}
{% do adapter.expand_target_column_types(
from_relation=tmp_relation,
to_relation=target_relation) %}
{% do process_schema_changes(on_schema_change, tmp_relation, existing_relation) %}
{% set dest_columns = adapter.get_columns_in_relation(existing_relation) %}
{% set build_sql = dbt_snowflake_get_incremental_sql(strategy, tmp_relation, target_relation, unique_key, dest_columns) %}
{% endif %}
{%- call statement('main') -%}
{{ build_sql }}
{%- endcall -%}
{{ run_hooks(post_hooks) }}
{% set target_relation = target_relation.incorporate(type='table') %}
{% do persist_docs(target_relation, model) %}
{% do unset_query_tag(original_query_tag) %}
{{ return({'relations': [target_relation]}) }}
{%- endmaterialization %}
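From the model side, the strategy and unique key consumed by this materialization are plain configs; a hypothetical incremental model opting into the delete+insert strategy would look roughly like this (the ref name is illustrative):
{{ config(materialized='incremental', unique_key='id', incremental_strategy='delete+insert') }}
select * from {{ ref('events') }}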


@@ -1,44 +0,0 @@
{% macro snowflake__get_merge_sql(target, source_sql, unique_key, dest_columns, predicates) -%}
{#
Workaround for Snowflake not being happy with a merge on a constant-false predicate.
When no unique_key is provided, this macro will do a regular insert. If a unique_key
is provided, then this macro will do a proper merge instead.
#}
{%- set dest_cols_csv = get_quoted_csv(dest_columns | map(attribute='name')) -%}
{%- set sql_header = config.get('sql_header', none) -%}
{%- set dml -%}
{%- if unique_key is none -%}
{{ sql_header if sql_header is not none }}
insert into {{ target }} ({{ dest_cols_csv }})
(
select {{ dest_cols_csv }}
from {{ source_sql }}
)
{%- else -%}
{{ default__get_merge_sql(target, source_sql, unique_key, dest_columns, predicates) }}
{%- endif -%}
{%- endset -%}
{% do return(snowflake_dml_explicit_transaction(dml)) %}
{% endmacro %}
{% macro snowflake__get_delete_insert_merge_sql(target, source, unique_key, dest_columns) %}
{% set dml = default__get_delete_insert_merge_sql(target, source, unique_key, dest_columns) %}
{% do return(snowflake_dml_explicit_transaction(dml)) %}
{% endmacro %}
{% macro snowflake__snapshot_merge_sql(target, source, insert_cols) %}
{% set dml = default__snapshot_merge_sql(target, source, insert_cols) %}
{% do return(snowflake_dml_explicit_transaction(dml)) %}
{% endmacro %}
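Concretely, when no unique_key is set the merge macro above degenerates to a plain insert wrapped in an explicit transaction, roughly as below; with a unique_key, the default merge statement is wrapped the same way (target, temp relation, and columns are illustrative):
begin;
insert into analytics.my_model ("ID", "NAME")
(
    select "ID", "NAME"
    from analytics.my_model__dbt_tmp
);
commit;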


@@ -1,36 +0,0 @@
{% macro snowflake__load_csv_rows(model, agate_table) %}
{% set batch_size = get_batch_size() %}
{% set cols_sql = get_seed_column_quoted_csv(model, agate_table.column_names) %}
{% set bindings = [] %}
{% set statements = [] %}
{% for chunk in agate_table.rows | batch(batch_size) %}
{% set bindings = [] %}
{% for row in chunk %}
{% do bindings.extend(row) %}
{% endfor %}
{% set sql %}
insert into {{ this.render() }} ({{ cols_sql }}) values
{% for row in chunk -%}
({%- for column in agate_table.column_names -%}
%s
{%- if not loop.last%},{%- endif %}
{%- endfor -%})
{%- if not loop.last%},{%- endif %}
{%- endfor %}
{% endset %}
{% do adapter.add_query('BEGIN', auto_begin=False) %}
{% do adapter.add_query(sql, bindings=bindings, abridge_sql_log=True) %}
{% do adapter.add_query('COMMIT', auto_begin=False) %}
{% if loop.index0 == 0 %}
{% do statements.append(sql) %}
{% endif %}
{% endfor %}
{# Return SQL so we can render it out into the compiled files #}
{{ return(statements[0]) }}
{% endmacro %}
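For a three-column seed and a chunk of two rows, the SQL assembled in the loop above renders roughly as follows, with the bindings list supplying values row by row through the %s placeholders (table and column names are illustrative):
insert into analytics.my_seed (id, name, created_at) values
(%s, %s, %s),
(%s, %s, %s)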


@@ -1,34 +0,0 @@
{% materialization table, adapter='snowflake' %}
{% set original_query_tag = set_query_tag() %}
{%- set identifier = model['alias'] -%}
{%- set old_relation = adapter.get_relation(database=database, schema=schema, identifier=identifier) -%}
{%- set target_relation = api.Relation.create(identifier=identifier,
schema=schema,
database=database, type='table') -%}
{{ run_hooks(pre_hooks) }}
{#-- Drop the relation if it was a view, to "convert" it into a table. This may lead to
-- downtime, but it should be a relatively infrequent occurrence #}
{% if old_relation is not none and not old_relation.is_table %}
{{ log("Dropping relation " ~ old_relation ~ " because it is of type " ~ old_relation.type) }}
{{ drop_relation_if_exists(old_relation) }}
{% endif %}
--build model
{% call statement('main') -%}
{{ create_table_as(false, target_relation, sql) }}
{%- endcall %}
{{ run_hooks(post_hooks) }}
{% do persist_docs(target_relation, model) %}
{% do unset_query_tag(original_query_tag) %}
{{ return({'relations': [target_relation]}) }}
{% endmaterialization %}


@@ -1,13 +0,0 @@
{% materialization view, adapter='snowflake' -%}
{% set original_query_tag = set_query_tag() %}
{% set to_return = create_or_replace_view() %}
{% set target_relation = this.incorporate(type='view') %}
{% do persist_docs(target_relation, model, for_columns=false) %}
{% do unset_query_tag(original_query_tag) %}
{% do return(to_return) %}
{%- endmaterialization %}


@@ -1,29 +0,0 @@
default:
outputs:
dev: # User-Password config
type: snowflake
account: [account id + region (if applicable)]
user: [username]
password: [password]
role: [user role]
database: [database name]
warehouse: [warehouse name]
schema: [dbt schema]
threads: [1 or more]
client_session_keep_alive: False
prod: # Keypair config
type: snowflake
account: [account id + region (if applicable)]
user: [username]
role: [user role]
private_key_path: [path/to/private.key]
private_key_passphrase: [passphrase for the private key, if key is encrypted]
database: [database name]
warehouse: [warehouse name]
schema: [dbt schema]
threads: [1 or more]
client_session_keep_alive: False
target: dev


@@ -1,70 +0,0 @@
#!/usr/bin/env python
import os
import sys
if sys.version_info < (3, 6):
print('Error: dbt does not support this version of Python.')
print('Please upgrade to Python 3.6 or higher.')
sys.exit(1)
from setuptools import setup
try:
from setuptools import find_namespace_packages
except ImportError:
# the user has a downlevel version of setuptools.
print('Error: dbt requires setuptools v40.1.0 or higher.')
print('Please upgrade setuptools with "pip install --upgrade setuptools" '
'and try again')
sys.exit(1)
package_name = "dbt-snowflake"
package_version = "0.21.0b2"
description = """The snowflake adapter plugin for dbt (data build tool)"""
this_directory = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(this_directory, 'README.md')) as f:
long_description = f.read()
setup(
name=package_name,
version=package_version,
description=description,
long_description=long_description,
long_description_content_type='text/markdown',
author="dbt Labs",
author_email="info@dbtlabs.com",
url="https://github.com/dbt-labs/dbt",
packages=find_namespace_packages(include=['dbt', 'dbt.*']),
package_data={
'dbt': [
'include/snowflake/dbt_project.yml',
'include/snowflake/sample_profiles.yml',
'include/snowflake/macros/*.sql',
'include/snowflake/macros/**/*.sql',
]
},
install_requires=[
'dbt-core=={}'.format(package_version),
'snowflake-connector-python[secure-local-storage]>=2.4.1,<2.6.0',
'requests<3.0.0',
'cryptography>=3.2,<4',
],
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Operating System :: Microsoft :: Windows',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
python_requires=">=3.6.2",
)


@@ -1,5 +1,3 @@
./core
./plugins/postgres
./plugins/redshift
./plugins/snowflake
./plugins/bigquery


@@ -14,7 +14,7 @@ rm -rf "$DBT_PATH"/dist
rm -rf "$DBT_PATH"/build
mkdir -p "$DBT_PATH"/dist
for SUBPATH in core plugins/postgres plugins/redshift plugins/bigquery plugins/snowflake
for SUBPATH in core plugins/postgres plugins/bigquery
do
rm -rf "$DBT_PATH"/"$SUBPATH"/dist
rm -rf "$DBT_PATH"/"$SUBPATH"/build


@@ -24,7 +24,7 @@ with open(os.path.join(this_directory, 'README.md')) as f:
package_name = "dbt"
package_version = "0.21.0b2"
package_version = "0.21.0rc1"
description = """With dbt, data analysts and engineers can build analytics \
the way engineers build applications."""
@@ -44,8 +44,6 @@ setup(
install_requires=[
'dbt-core=={}'.format(package_version),
'dbt-postgres=={}'.format(package_version),
'dbt-redshift=={}'.format(package_version),
'dbt-snowflake=={}'.format(package_version),
'dbt-bigquery=={}'.format(package_version),
],
zip_safe=False,


@@ -14,9 +14,3 @@ SNOWFLAKE_TEST_OAUTH_CLIENT_SECRET=
BIGQUERY_TEST_SERVICE_ACCOUNT_JSON=
BIGQUERY_TEST_ALT_DATABASE=
REDSHIFT_TEST_HOST=
REDSHIFT_TEST_USER=
REDSHIFT_TEST_PASS=
REDSHIFT_TEST_PORT=
REDSHIFT_TEST_DBNAME=


@@ -1,9 +1,6 @@
{{ config(materialized='incremental', unique_key='id') }}
-- this will fail on snowflake with "merge" due
-- to the nondeterministic join on id
select 1 as id
union all
select 1 as id


@@ -105,89 +105,6 @@ class TestSimpleCopy(BaseTestSimpleCopy):
self.assertManyTablesEqual(["seed", "view_model", "materialized"])
@use_profile("snowflake")
def test__snowflake__simple_copy(self):
self.use_default_project({
"data-paths": [self.dir("seed-initial")],
"seeds": {
'quote_columns': False,
}
})
results = self.run_dbt(["seed"])
self.assertEqual(len(results), 1)
results = self.run_dbt()
self.assertEqual(len(results), 7)
self.assertManyTablesEqual(["SEED", "VIEW_MODEL", "INCREMENTAL", "MATERIALIZED", "GET_AND_REF"])
self.use_default_project({"data-paths": [self.dir("seed-update")]})
results = self.run_dbt(["seed"])
self.assertEqual(len(results), 1)
results = self.run_dbt()
self.assertEqual(len(results), 7)
self.assertManyTablesEqual(["SEED", "VIEW_MODEL", "INCREMENTAL", "MATERIALIZED", "GET_AND_REF"])
self.use_default_project({
"test-paths": [self.dir("tests")],
"data-paths": [self.dir("seed-update")],
})
self.run_dbt(['test'])
@use_profile("snowflake")
def test__snowflake__simple_copy__quoting_off(self):
self.use_default_project({
"quoting": {"identifier": False},
"data-paths": [self.dir("snowflake-seed-initial")],
})
results = self.run_dbt(["seed"])
self.assertEqual(len(results), 1)
results = self.run_dbt()
self.assertEqual(len(results), 7)
self.assertManyTablesEqual(["SEED", "VIEW_MODEL", "INCREMENTAL", "MATERIALIZED", "GET_AND_REF"])
self.use_default_project({
"data-paths": [self.dir("snowflake-seed-update")],
"quoting": {"identifier": False},
})
results = self.run_dbt(["seed"])
self.assertEqual(len(results), 1)
results = self.run_dbt()
self.assertEqual(len(results), 7)
self.assertManyTablesEqual(["SEED", "VIEW_MODEL", "INCREMENTAL", "MATERIALIZED", "GET_AND_REF"])
self.use_default_project({
"test-paths": [self.dir("tests")],
"data-paths": [self.dir("snowflake-seed-update")],
"quoting": {"identifier": False},
})
self.run_dbt(['test'])
@use_profile("snowflake")
def test__snowflake__seed__quoting_switch(self):
self.use_default_project({
"quoting": {"identifier": False},
"data-paths": [self.dir("snowflake-seed-initial")],
})
results = self.run_dbt(["seed"])
self.assertEqual(len(results), 1)
self.use_default_project({
"data-paths": [self.dir("snowflake-seed-update")],
"quoting": {"identifier": True},
})
results = self.run_dbt(["seed"], expect_pass=False)
self.use_default_project({
"test-paths": [self.dir("tests")],
"data-paths": [self.dir("snowflake-seed-initial")],
})
self.run_dbt(['test'])
@use_profile("bigquery")
def test__bigquery__simple_copy(self):
results = self.run_dbt(["seed"])
@@ -213,155 +130,6 @@ class TestSimpleCopy(BaseTestSimpleCopy):
self.assertTablesEqual("seed", "get_and_ref")
class TestSimpleCopyQuotingIdentifierOn(BaseTestSimpleCopy):
@property
def project_config(self):
return self.seed_quote_cfg_with({
'quoting': {
'identifier': True,
},
})
@use_profile("snowflake")
def test__snowflake__simple_copy__quoting_on(self):
self.use_default_project({
"data-paths": [self.dir("snowflake-seed-initial")],
})
results = self.run_dbt(["seed"])
self.assertEqual(len(results), 1)
results = self.run_dbt()
self.assertEqual(len(results), 7)
self.assertManyTablesEqual(["seed", "view_model", "incremental", "materialized", "get_and_ref"])
self.use_default_project({
"data-paths": [self.dir("snowflake-seed-update")],
})
results = self.run_dbt(["seed"])
self.assertEqual(len(results), 1)
results = self.run_dbt()
self.assertEqual(len(results), 7)
self.assertManyTablesEqual(["seed", "view_model", "incremental", "materialized", "get_and_ref"])
# can't run the test as this one's identifiers will be the wrong case
class BaseLowercasedSchemaTest(BaseTestSimpleCopy):
def unique_schema(self):
# bypass the forced uppercasing that unique_schema() does on snowflake
return super().unique_schema().lower()
class TestSnowflakeSimpleLowercasedSchemaCopy(BaseLowercasedSchemaTest):
@use_profile('snowflake')
def test__snowflake__simple_copy(self):
self.use_default_project({"data-paths": [self.dir("snowflake-seed-initial")]})
results = self.run_dbt(["seed"])
self.assertEqual(len(results), 1)
results = self.run_dbt()
self.assertEqual(len(results), 7)
self.assertManyTablesEqual(["SEED", "VIEW_MODEL", "INCREMENTAL", "MATERIALIZED", "GET_AND_REF"])
self.use_default_project({"data-paths": [self.dir("snowflake-seed-update")]})
results = self.run_dbt(["seed"])
self.assertEqual(len(results), 1)
results = self.run_dbt()
self.assertEqual(len(results), 7)
self.assertManyTablesEqual(["SEED", "VIEW_MODEL", "INCREMENTAL", "MATERIALIZED", "GET_AND_REF"])
self.use_default_project({
"test-paths": [self.dir("tests")],
"data-paths": [self.dir("snowflake-seed-update")],
})
self.run_dbt(['test'])
class TestSnowflakeSimpleLowercasedSchemaQuoted(BaseLowercasedSchemaTest):
@property
def project_config(self):
return self.seed_quote_cfg_with({
'quoting': {'identifier': False, 'schema': True}
})
@use_profile("snowflake")
def test__snowflake__seed__quoting_switch_schema_lower(self):
self.use_default_project({
"data-paths": [self.dir("snowflake-seed-initial")],
})
results = self.run_dbt(["seed"])
self.assertEqual(len(results), 1)
# this is intentional - should not error!
results = self.run_dbt(["seed"])
self.assertEqual(len(results), 1)
self.use_default_project({
"data-paths": [self.dir("snowflake-seed-update")],
"quoting": {"identifier": False, "schema": False},
})
results = self.run_dbt(["seed"], expect_pass=False)
class TestSnowflakeSimpleUppercasedSchemaQuoted(BaseTestSimpleCopy):
@property
def project_config(self):
return self.seed_quote_cfg_with({
'quoting': {'identifier': False, 'schema': True}
})
@use_profile("snowflake")
def test__snowflake__seed__quoting_switch_schema_upper(self):
self.use_default_project({
"data-paths": [self.dir("snowflake-seed-initial")],
})
results = self.run_dbt(["seed"])
self.assertEqual(len(results), 1)
# this is intentional - should not error!
results = self.run_dbt(["seed"])
self.assertEqual(len(results), 1)
self.use_default_project({
"data-paths": [self.dir("snowflake-seed-update")],
"quoting": {"identifier": False, "schema": False},
})
results = self.run_dbt(["seed"])
class TestSnowflakeIncrementalOverwrite(BaseTestSimpleCopy):
@property
def models(self):
return self.dir("models-snowflake")
@use_profile("snowflake")
def test__snowflake__incremental_overwrite(self):
self.use_default_project({
"data-paths": [self.dir("snowflake-seed-initial")],
})
results = self.run_dbt(["run"])
self.assertEqual(len(results), 1)
results = self.run_dbt(["run"], expect_pass=False)
self.assertEqual(len(results), 1)
# Setting the incremental_strategy should make this succeed
self.use_default_project({
"models": {
"incremental_strategy": "delete+insert"
},
"data-paths": [self.dir("snowflake-seed-update")],
})
results = self.run_dbt(["run"])
self.assertEqual(len(results), 1)
class TestShouting(BaseTestSimpleCopy):
@property
def models(self):
@@ -498,21 +266,3 @@ class TestIncrementalMergeColumns(BaseTestSimpleCopy):
})
self.seed_and_run()
self.assertTablesEqual("incremental_update_cols", "expected_result")
@use_profile("snowflake")
def test__snowflake__incremental_merge_columns(self):
self.use_default_project({
"data-paths": ["seeds-merge-cols-initial"],
"seeds": {
"quote_columns": False
}
})
self.seed_and_run()
self.use_default_project({
"data-paths": ["seeds-merge-cols-update"],
"seeds": {
"quote_columns": False
}
})
self.seed_and_run()
self.assertTablesEqual("incremental_update_cols", "expected_result")


@@ -27,19 +27,3 @@ class TestVarcharWidening(DBTIntegrationTest):
self.assertTablesEqual("seed","incremental")
self.assertTablesEqual("seed","materialized")
@use_profile('snowflake')
def test__snowflake__varchar_widening(self):
self.run_sql_file("seed.sql")
results = self.run_dbt()
self.assertEqual(len(results), 2)
self.assertManyTablesEqual(["SEED", "INCREMENTAL", "MATERIALIZED"])
self.run_sql_file("update.sql")
results = self.run_dbt()
self.assertEqual(len(results), 2)
self.assertManyTablesEqual(["SEED", "INCREMENTAL", "MATERIALIZED"])


@@ -66,28 +66,6 @@ class TestSimpleReference(DBTIntegrationTest):
self.assertTablesEqual("summary_expected","ephemeral_summary")
self.assertTablesEqual("summary_expected","view_using_ref")
@use_profile('snowflake')
def test__snowflake__simple_reference(self):
results = self.run_dbt()
self.assertEqual(len(results), 8)
# Copies should match
self.assertManyTablesEqual(
["SEED", "INCREMENTAL_COPY", "MATERIALIZED_COPY", "VIEW_COPY"],
["SUMMARY_EXPECTED", "INCREMENTAL_SUMMARY", "MATERIALIZED_SUMMARY", "VIEW_SUMMARY", "EPHEMERAL_SUMMARY"]
)
self.run_sql_file("update.sql")
results = self.run_dbt()
self.assertEqual(len(results), 8)
self.assertManyTablesEqual(
["SEED", "INCREMENTAL_COPY", "MATERIALIZED_COPY", "VIEW_COPY"],
["SUMMARY_EXPECTED", "INCREMENTAL_SUMMARY", "MATERIALIZED_SUMMARY", "VIEW_SUMMARY", "EPHEMERAL_SUMMARY"]
)
@use_profile('postgres')
def test__postgres__simple_reference_with_models(self):
@@ -140,57 +118,6 @@ class TestSimpleReference(DBTIntegrationTest):
self.assertTrue('ephemeral_summary' in created_models)
self.assertEqual(created_models['ephemeral_summary'], 'table')
@use_profile('snowflake')
def test__snowflake__simple_reference_with_models(self):
# Run materialized_copy & ephemeral_copy
# ephemeral_copy should not actually be materialized b/c it is ephemeral
results = self.run_dbt(
['run', '--models', 'materialized_copy', 'ephemeral_copy']
)
self.assertEqual(len(results), 1)
# Copies should match
self.assertTablesEqual("SEED", "MATERIALIZED_COPY")
created_models = self.get_models_in_schema()
self.assertTrue('MATERIALIZED_COPY' in created_models)
@use_profile('snowflake')
def test__snowflake__simple_reference_with_models_and_children(self):
# Run materialized_copy, ephemeral_copy, and their dependents
# ephemeral_copy should not actually be materialized b/c it is ephemeral
# the dependent ephemeral_summary, however, should be materialized as a table
results = self.run_dbt(
['run', '--models', 'materialized_copy+', 'ephemeral_copy+']
)
self.assertEqual(len(results), 3)
# Copies should match
self.assertManyTablesEqual(
["SEED", "MATERIALIZED_COPY"],
["SUMMARY_EXPECTED", "MATERIALIZED_SUMMARY", "EPHEMERAL_SUMMARY"]
)
created_models = self.get_models_in_schema()
self.assertFalse('INCREMENTAL_COPY' in created_models)
self.assertFalse('INCREMENTAL_SUMMARY' in created_models)
self.assertFalse('VIEW_COPY' in created_models)
self.assertFalse('VIEW_SUMMARY' in created_models)
# make sure this wasn't errantly materialized
self.assertFalse('EPHEMERAL_COPY' in created_models)
self.assertTrue('MATERIALIZED_COPY' in created_models)
self.assertTrue('MATERIALIZED_SUMMARY' in created_models)
self.assertEqual(created_models['MATERIALIZED_COPY'], 'table')
self.assertEqual(created_models['MATERIALIZED_SUMMARY'], 'table')
self.assertTrue('EPHEMERAL_SUMMARY' in created_models)
self.assertEqual(created_models['EPHEMERAL_SUMMARY'], 'table')
class TestErrorReference(DBTIntegrationTest):
@property


@@ -28,10 +28,7 @@ class BaseSimpleSnapshotTest(DBTIntegrationTest):
self.assertEqual(len(results), self.NUM_SNAPSHOT_MODELS)
def assert_case_tables_equal(self, actual, expected):
if self.adapter_type == 'snowflake':
actual = actual.upper()
expected = expected.upper()
# this does something different on snowflake, but here it's just assertTablesEqual
self.assertTablesEqual(actual, expected)
def assert_expected(self):
@@ -69,34 +66,6 @@ class TestSimpleSnapshotFiles(BaseSimpleSnapshotTest):
self.assert_expected()
@use_profile('snowflake')
def test__snowflake__simple_snapshot(self):
self.dbt_run_seed_snapshot()
self.assert_expected()
self.run_sql_file("invalidate_snowflake.sql")
self.run_sql_file("update.sql")
results = self.run_snapshot()
self.assertEqual(len(results), self.NUM_SNAPSHOT_MODELS)
self.assert_expected()
@use_profile('redshift')
def test__redshift__simple_snapshot(self):
self.dbt_run_seed_snapshot()
self.assert_expected()
self.run_sql_file("invalidate_postgres.sql")
self.run_sql_file("update.sql")
results = self.run_snapshot()
self.assertEqual(len(results), self.NUM_SNAPSHOT_MODELS)
self.assert_expected()
class TestSimpleColumnSnapshotFiles(DBTIntegrationTest):
@@ -159,14 +128,6 @@ class TestSimpleColumnSnapshotFiles(DBTIntegrationTest):
def test_postgres_renamed_source(self):
self._run_snapshot_test()
@use_profile('snowflake')
def test_snowflake_renamed_source(self):
self._run_snapshot_test()
@use_profile('redshift')
def test_redshift_renamed_source(self):
self._run_snapshot_test()
@use_profile('bigquery')
def test_bigquery_renamed_source(self):
self._run_snapshot_test()
@@ -419,10 +380,7 @@ class TestCrossDBSnapshotFiles(DBTIntegrationTest):
@property
def project_config(self):
if self.adapter_type == 'snowflake':
paths = ['test-snapshots-pg']
else:
paths = ['test-snapshots-bq']
paths = ['test-snapshots-bq']
return {
'config-version': 2,
'snapshot-paths': paths,
@@ -432,23 +390,6 @@ class TestCrossDBSnapshotFiles(DBTIntegrationTest):
def run_snapshot(self):
return self.run_dbt(['snapshot', '--vars', '{{"target_database": {}}}'.format(self.alternative_database)])
@use_profile('snowflake')
def test__snowflake__cross_snapshot(self):
self.run_sql_file("seed.sql")
results = self.run_snapshot()
self.assertEqual(len(results), 1)
self.assertTablesEqual("SNAPSHOT_EXPECTED", "SNAPSHOT_ACTUAL", table_b_db=self.alternative_database)
self.run_sql_file("invalidate_snowflake.sql")
self.run_sql_file("update.sql")
results = self.run_snapshot()
self.assertEqual(len(results), 1)
self.assertTablesEqual("SNAPSHOT_EXPECTED", "SNAPSHOT_ACTUAL", table_b_db=self.alternative_database)
@use_profile('bigquery')
def test__bigquery__cross_snapshot(self):
self.run_sql_file("seed_bq.sql")
@@ -842,23 +783,10 @@ class TestSnapshotHardDelete(DBTIntegrationTest):
self.run_sql_file('seed_bq.sql')
self._test_snapshot_hard_delete()
@use_profile('snowflake')
def test__snowflake__snapshot_hard_delete(self):
self.run_sql_file('seed.sql')
self._test_snapshot_hard_delete()
@use_profile('redshift')
def test__redshift__snapshot_hard_delete(self):
self.run_sql_file('seed.sql')
self._test_snapshot_hard_delete()
def _test_snapshot_hard_delete(self):
self._snapshot()
if self.adapter_type == 'snowflake':
self.assertTablesEqual("SNAPSHOT_EXPECTED", "SNAPSHOT_ACTUAL")
else:
self.assertTablesEqual("snapshot_expected", "snapshot_actual")
self.assertTablesEqual("snapshot_expected", "snapshot_actual")
self._invalidated_snapshot_datetime = None
self._revived_snapshot_datetime = None


@@ -1,5 +1,4 @@
from test.integration.base import DBTIntegrationTest, use_profile
import dbt.exceptions
class TestSimpleSnapshotFiles(DBTIntegrationTest):
@@ -35,11 +34,6 @@ class TestSimpleSnapshotFiles(DBTIntegrationTest):
def assert_expected(self):
self.run_dbt(['test', '--data', '--vars', 'version: 3'])
@use_profile('snowflake')
def test__snowflake__simple_snapshot(self):
self.test_snapshot_check_cols_cycle()
self.assert_expected()
@use_profile('postgres')
def test__postgres__simple_snapshot(self):
self.test_snapshot_check_cols_cycle()
@@ -49,8 +43,3 @@ class TestSimpleSnapshotFiles(DBTIntegrationTest):
def test__bigquery__simple_snapshot(self):
self.test_snapshot_check_cols_cycle()
self.assert_expected()
@use_profile('redshift')
def test__redshift__simple_snapshot(self):
self.test_snapshot_check_cols_cycle()
self.assert_expected()


@@ -0,0 +1 @@
*.csv


@@ -60,66 +60,6 @@ class TestSimpleSeedColumnOverridePostgres(TestSimpleSeedColumnOverride):
self.assertEqual(len(results), 10)
class TestSimpleSeedColumnOverrideRedshift(TestSimpleSeedColumnOverride):
@property
def models(self):
return "models-rs"
@property
def profile_config(self):
return self.redshift_profile()
def seed_enabled_types(self):
return {
"id": "text",
"birthday": "date",
}
def seed_tricky_types(self):
return {
'id_str': 'text',
'looks_like_a_bool': 'text',
'looks_like_a_date': 'text',
}
@use_profile('redshift')
def test_redshift_simple_seed_with_column_override_redshift(self):
results = self.run_dbt(["seed", "--show"])
self.assertEqual(len(results), 2)
results = self.run_dbt(["test"])
self.assertEqual(len(results), 10)
class TestSimpleSeedColumnOverrideSnowflake(TestSimpleSeedColumnOverride):
@property
def models(self):
return "models-snowflake"
def seed_enabled_types(self):
return {
"id": "FLOAT",
"birthday": "TEXT",
}
def seed_tricky_types(self):
return {
'id_str': 'TEXT',
'looks_like_a_bool': 'TEXT',
'looks_like_a_date': 'TEXT',
}
@property
def profile_config(self):
return self.snowflake_profile()
@use_profile('snowflake')
def test_snowflake_simple_seed_with_column_override_snowflake(self):
results = self.run_dbt(["seed", "--show"])
self.assertEqual(len(results), 2)
results = self.run_dbt(["test"])
self.assertEqual(len(results), 10)
class TestSimpleSeedColumnOverrideBQ(TestSimpleSeedColumnOverride):
@property
def models(self):


@@ -1,5 +1,5 @@
import os
import csv
from test.integration.base import DBTIntegrationTest, use_profile
@@ -311,4 +311,39 @@ class TestSimpleSeedWithDots(DBTIntegrationTest):
@use_profile('postgres')
def test_postgres_simple_seed(self):
results = self.run_dbt(["seed"])
self.assertEqual(len(results), 1)
self.assertEqual(len(results), 1)
class TestSimpleBigSeedBatched(DBTIntegrationTest):
@property
def schema(self):
return "simple_seed_005"
@property
def models(self):
return "models"
@property
def project_config(self):
return {
'config-version': 2,
"data-paths": ['data-big'],
'seeds': {
'quote_columns': False,
}
}
def test_big_batched_seed(self):
with open('data-big/my_seed.csv', 'w') as f:
writer = csv.writer(f)
writer.writerow(['id'])
for i in range(0, 20000):
writer.writerow([i])
results = self.run_dbt(["seed"])
self.assertEqual(len(results), 1)
@use_profile('postgres')
def test_postgres_big_batched_seed(self):
self.test_big_batched_seed()


@@ -8,6 +8,7 @@ from unittest import mock
import dbt.semver
import dbt.config
import dbt.exceptions
import dbt.flags
class BaseDependencyTest(DBTIntegrationTest):
@@ -45,8 +46,6 @@ class BaseDependencyTest(DBTIntegrationTest):
}
def run_dbt(self, *args, **kwargs):
strict = kwargs.pop('strict', False)
kwargs['strict'] = strict
return super().run_dbt(*args, **kwargs)
@@ -115,12 +114,9 @@ class TestMissingDependency(DBTIntegrationTest):
@use_profile('postgres')
def test_postgres_missing_dependency(self):
# dbt should raise a dbt exception, not raise a parse-time TypeError.
with self.assertRaises(dbt.exceptions.Exception) as exc:
self.run_dbt(['compile'], strict=False)
message = str(exc.exception)
self.assertIn('no_such_dependency', message)
self.assertIn('is undefined', message)
# dbt should raise a runtime exception
with self.assertRaises(dbt.exceptions.RuntimeException) as exc:
self.run_dbt(['compile'])
class TestSimpleDependencyWithSchema(TestSimpleDependency):
@@ -175,6 +171,54 @@ class TestSimpleDependencyWithSchema(TestSimpleDependency):
self.assertEqual(len(results), 5)
class TestSimpleDependencyNoVersionCheckConfig(TestSimpleDependency):
def run_dbt(self, cmd, *args, **kwargs):
# we can't add this to the config because Sources don't respect dbt_project.yml
vars_arg = yaml.safe_dump({
'schema_override': self.base_schema(),
})
cmd.extend(['--vars', vars_arg])
return super().run_dbt(cmd, *args, **kwargs)
@property
def project_config(self):
return {
'config-version': 2,
'macro-paths': ['schema_override_macros'],
'models': {
'schema': 'dbt_test',
},
'seeds': {
'schema': 'dbt_test',
}
}
@property
def profile_config(self):
return {
'config': {
'send_anonymous_usage_stats': False,
'version_check': False,
}
}
def base_schema(self):
return 'dbt_test_{}_macro'.format(self.unique_schema())
def configured_schema(self):
return 'configured_{}_macro'.format(self.unique_schema())
@use_profile('postgres')
@mock.patch('dbt.config.project.get_installed_version')
def test_postgres_local_dependency_out_of_date_no_check(self, mock_get):
mock_get.return_value = dbt.semver.VersionSpecifier.from_version_string('0.0.1')
self.run_dbt(['deps'])
self.assertFalse(dbt.flags.VERSION_CHECK)
self.run_dbt(['seed'])
results = self.run_dbt(['run'])
self.assertEqual(len(results), 5)
class TestSimpleDependencyHooks(DBTIntegrationTest):
@property
def schema(self):
@@ -245,11 +289,6 @@ class TestSimpleDependencyDuplicateName(DBTIntegrationTest):
]
}
def run_dbt(self, *args, **kwargs):
strict = kwargs.pop('strict', False)
kwargs['strict'] = strict
return super().run_dbt(*args, **kwargs)
@use_profile('postgres')
def test_postgres_local_dependency_same_name(self):
with self.assertRaises(dbt.exceptions.DependencyException):


@@ -1,7 +1,7 @@
import os
import tempfile
from test.integration.base import DBTIntegrationTest, use_profile
from dbt.exceptions import CompilationException
from dbt.exceptions import CompilationException, DependencyException
from dbt import deprecations
@@ -110,10 +110,7 @@ class TestSimpleDependencyUnpinned(DBTIntegrationTest):
@use_profile('postgres')
def test_postgres_simple_dependency(self):
with self.assertRaises(CompilationException) as exc:
self.run_dbt(["deps"])
assert 'is not pinned' in str(exc.exception)
self.run_dbt(['deps'], strict=False)
self.run_dbt(["deps"])
class TestSimpleDependencyWithDuplicates(DBTIntegrationTest):


@@ -109,20 +109,6 @@ class TestGraphSelection(DBTIntegrationTest):
self.assertNotIn('users_rollup_dependency', created_models)
self.assert_correct_schemas()
@use_profile('snowflake')
def test__snowflake__specific_model(self):
self.run_sql_file("seed.sql")
results = self.run_dbt(['run', '--select', 'users'])
self.assertEqual(len(results), 1)
self.assertTablesEqual("SEED", "USERS")
created_models = self.get_models_in_schema()
self.assertFalse('USERS_ROLLUP' in created_models)
self.assertFalse('BASE_USERS' in created_models)
self.assertFalse('EMAILS' in created_models)
self.assert_correct_schemas()
@use_profile('postgres')
def test__postgres__specific_model_and_children(self):
self.run_sql_file("seed.sql")
@@ -139,21 +125,6 @@ class TestGraphSelection(DBTIntegrationTest):
self.assertNotIn('emails', created_models)
self.assert_correct_schemas()
@use_profile('snowflake')
def test__snowflake__specific_model_and_children(self):
self.run_sql_file("seed.sql")
results = self.run_dbt(['run', '--select', 'users+'])
self.assertEqual(len(results), 4)
self.assertManyTablesEqual(
["SEED", "USERS"],
["SUMMARY_EXPECTED", "USERS_ROLLUP"]
)
created_models = self.get_models_in_schema()
self.assertFalse('BASE_USERS' in created_models)
self.assertFalse('EMAILS' in created_models)
@use_profile('postgres')
def test__postgres__specific_model_and_children_limited(self):
self.run_sql_file("seed.sql")
@@ -184,22 +155,6 @@ class TestGraphSelection(DBTIntegrationTest):
self.assertFalse('emails' in created_models)
self.assert_correct_schemas()
@use_profile('snowflake')
def test__snowflake__specific_model_and_parents(self):
self.run_sql_file("seed.sql")
results = self.run_dbt(['run', '--select', '+users_rollup'])
self.assertEqual(len(results), 2)
self.assertManyTablesEqual(
["SEED", "USERS"],
["SUMMARY_EXPECTED", "USERS_ROLLUP"]
)
created_models = self.get_models_in_schema()
self.assertFalse('BASE_USERS' in created_models)
self.assertFalse('EMAILS' in created_models)
@use_profile('postgres')
def test__postgres__specific_model_and_parents_limited(self):
self.run_sql_file("seed.sql")
@@ -230,21 +185,6 @@ class TestGraphSelection(DBTIntegrationTest):
self.assertFalse('emails' in created_models)
self.assert_correct_schemas()
@use_profile('snowflake')
def test__snowflake__specific_model_with_exclusion(self):
self.run_sql_file("seed.sql")
results = self.run_dbt(
['run', '--select', '+users_rollup', '--exclude', 'users_rollup']
)
self.assertEqual(len(results), 1)
self.assertManyTablesEqual(["SEED", "USERS"])
created_models = self.get_models_in_schema()
self.assertFalse('BASE_USERS' in created_models)
self.assertFalse('USERS_ROLLUP' in created_models)
self.assertFalse('EMAILS' in created_models)
@use_profile('postgres')
def test__postgres__locally_qualified_name(self):
results = self.run_dbt(['run', '--select', 'test.subdir'])
@@ -326,28 +266,6 @@ class TestGraphSelection(DBTIntegrationTest):
self.assertEqual(len(results), 2)
assert sorted([r.node.name for r in results]) == ['unique_users_id', 'unique_users_rollup_gender']
@use_profile('snowflake')
def test__snowflake__skip_intermediate(self):
self.run_sql_file("seed.sql")
results = self.run_dbt(['run', '--select', '@models/users.sql'])
# base_users, emails, users_rollup, users_rollup_dependency
self.assertEqual(len(results), 4)
# now re-run, skipping users_rollup
results = self.run_dbt(['run', '--select', '@users', '--exclude', 'users_rollup'])
self.assertEqual(len(results), 3)
# make sure that users_rollup_dependency and users don't interleave
users = [r for r in results if r.node.name == 'users'][0]
dep = [r for r in results if r.node.name == 'users_rollup_dependency'][0]
user_last_end = users.timing[1].completed_at
dep_first_start = dep.timing[0].started_at
self.assertTrue(
user_last_end <= dep_first_start,
'dependency started before its transitive parent ({} > {})'.format(user_last_end, dep_first_start)
)
@use_profile('postgres')
def test__postgres__concat(self):
self.run_sql_file("seed.sql")


@@ -117,12 +117,9 @@ class TestMalformedSchemaTests(DBTIntegrationTest):
return test_task.run()
@use_profile('postgres')
def test_postgres_malformed_schema_strict_will_break_run(self):
def test_postgres_malformed_schema_will_break_run(self):
with self.assertRaises(CompilationException):
self.run_dbt(strict=True)
# even if strict = False!
with self.assertRaises(CompilationException):
self.run_dbt(strict=False)
self.run_dbt()
class TestCustomConfigSchemaTests(DBTIntegrationTest):
@@ -149,16 +146,11 @@ class TestCustomConfigSchemaTests(DBTIntegrationTest):
def test_postgres_config(self):
""" Test that tests use configs properly. All tests for
this project will fail, configs are set to make test pass. """
results = self.run_dbt()
results = self.run_dbt(['test'], strict=False)
results = self.run_dbt(['test'], expect_pass=False)
self.assertEqual(len(results), 7)
for result in results:
self.assertFalse(result.skipped)
self.assertEqual(
result.failures, 0,
'test {} failed'.format(result.node.name)
)
class TestHooksInTests(DBTIntegrationTest):
@@ -393,16 +385,16 @@ class TestSchemaCaseInsensitive(DBTIntegrationTest):
@use_profile('postgres')
def test_postgres_schema_lowercase_sql(self):
results = self.run_dbt(strict=False)
results = self.run_dbt()
self.assertEqual(len(results), 2)
results = self.run_dbt(['test', '-m', 'lowercase'], strict=False)
results = self.run_dbt(['test', '-m', 'lowercase'])
self.assertEqual(len(results), 1)
@use_profile('postgres')
def test_postgres_schema_uppercase_sql(self):
results = self.run_dbt(strict=False)
results = self.run_dbt()
self.assertEqual(len(results), 2)
results = self.run_dbt(['test', '-m', 'uppercase'], strict=False)
results = self.run_dbt(['test', '-m', 'uppercase'])
self.assertEqual(len(results), 1)
@@ -440,7 +432,7 @@ class TestSchemaTestContext(DBTIntegrationTest):
# This test tests that the TestContext and TestMacroNamespace
# are working correctly
self.run_dbt(['deps'])
results = self.run_dbt(strict=False)
results = self.run_dbt()
self.assertEqual(len(results), 3)
run_result = self.run_dbt(['test'], expect_pass=False)
@@ -457,7 +449,7 @@ class TestSchemaTestContext(DBTIntegrationTest):
self.assertEqual(results[3].status, TestStatus.Fail)
self.assertRegex(results[3].node.compiled_sql, r'union all')
# type_two_model_a_
self.assertEqual(results[4].status, TestStatus.Fail)
self.assertEqual(results[4].status, TestStatus.Warn)
self.assertEqual(results[4].node.config.severity, 'WARN')
class TestSchemaTestContextWithMacroNamespace(DBTIntegrationTest):
@@ -500,7 +492,7 @@ class TestSchemaTestContextWithMacroNamespace(DBTIntegrationTest):
# This test tests that the TestContext and TestMacroNamespace
# are working correctly
self.run_dbt(['deps'])
results = self.run_dbt(strict=False)
results = self.run_dbt()
self.assertEqual(len(results), 3)
run_result = self.run_dbt(['test'], expect_pass=False)
@@ -515,7 +507,7 @@ class TestSchemaTestContextWithMacroNamespace(DBTIntegrationTest):
self.assertEqual(results[2].status, TestStatus.Fail)
self.assertRegex(results[2].node.compiled_sql, r'union all')
# type_two_model_a_
self.assertEqual(results[3].status, TestStatus.Fail)
self.assertEqual(results[3].status, TestStatus.Warn)
self.assertEqual(results[3].node.config.severity, 'WARN')
class TestSchemaTestNameCollision(DBTIntegrationTest):


@@ -56,26 +56,3 @@ class TestDataTests(DBTIntegrationTest):
defined_tests = os.listdir(self.test_path)
self.assertNotEqual(len(test_results), 0)
self.assertEqual(len(test_results), len(defined_tests))
@use_profile('snowflake')
def test_snowflake_data_tests(self):
self.use_profile('snowflake')
self.run_sql_file("seed.sql")
results = self.run_dbt()
self.assertEqual(len(results), 1)
test_results = self.run_data_validations()
for result in test_results:
# assert that all deliberately failing tests actually fail
if 'fail' in result.node.name:
self.assertEqual(result.status, 'fail')
self.assertFalse(result.skipped)
self.assertTrue(result.failures > 0)
# assert that actual tests pass
else:
self.assertEqual(result.status, 'pass')
self.assertFalse(result.skipped)
self.assertEqual(result.failures, 0)


@@ -25,41 +25,16 @@ class TestDeprecations(BaseTestDeprecations):
@use_profile('postgres')
def test_postgres_deprecations_fail(self):
self.run_dbt(strict=True, expect_pass=False)
self.run_dbt(['--warn-error', 'run'], expect_pass=False)
@use_profile('postgres')
def test_postgres_deprecations(self):
self.assertEqual(deprecations.active_deprecations, set())
self.run_dbt(strict=False)
self.run_dbt()
expected = {'adapter:already_exists'}
self.assertEqual(expected, deprecations.active_deprecations)
class TestMaterializationReturnDeprecation(BaseTestDeprecations):
@property
def models(self):
return self.dir('custom-models')
@property
def project_config(self):
return {
'config-version': 2,
'macro-paths': [self.dir('custom-materialization-macros')],
}
@use_profile('postgres')
def test_postgres_deprecations_fail(self):
# this should fail at runtime
self.run_dbt(strict=True, expect_pass=False)
@use_profile('postgres')
def test_postgres_deprecations(self):
self.assertEqual(deprecations.active_deprecations, set())
self.run_dbt(strict=False)
expected = {'materialization-return'}
self.assertEqual(expected, deprecations.active_deprecations)
class TestAdapterMacroDeprecation(BaseTestDeprecations):
@property
def models(self):
@@ -75,7 +50,7 @@ class TestAdapterMacroDeprecation(BaseTestDeprecations):
@use_profile('postgres')
def test_postgres_adapter_macro(self):
self.assertEqual(deprecations.active_deprecations, set())
self.run_dbt(strict=False)
self.run_dbt()
expected = {'adapter-macro'}
self.assertEqual(expected, deprecations.active_deprecations)
@@ -83,24 +58,16 @@ class TestAdapterMacroDeprecation(BaseTestDeprecations):
def test_postgres_adapter_macro_fail(self):
self.assertEqual(deprecations.active_deprecations, set())
with self.assertRaises(dbt.exceptions.CompilationException) as exc:
self.run_dbt(strict=True)
self.run_dbt(['--warn-error', 'run'])
exc_str = ' '.join(str(exc.exception).split()) # flatten all whitespace
assert 'The "adapter_macro" macro has been deprecated' in exc_str
@use_profile('redshift')
def test_redshift_adapter_macro(self):
self.assertEqual(deprecations.active_deprecations, set())
# pick up the postgres macro
self.run_dbt(strict=False)
expected = {'adapter-macro'}
self.assertEqual(expected, deprecations.active_deprecations)
@use_profile('bigquery')
def test_bigquery_adapter_macro(self):
self.assertEqual(deprecations.active_deprecations, set())
# picked up the default -> error
with self.assertRaises(dbt.exceptions.CompilationException) as exc:
self.run_dbt(strict=False, expect_pass=False)
self.run_dbt(expect_pass=False)
exc_str = ' '.join(str(exc.exception).split()) # flatten all whitespace
assert 'not allowed' in exc_str # we saw the default macro
@@ -120,7 +87,7 @@ class TestAdapterMacroDeprecationPackages(BaseTestDeprecations):
@use_profile('postgres')
def test_postgres_adapter_macro_pkg(self):
self.assertEqual(deprecations.active_deprecations, set())
self.run_dbt(strict=False)
self.run_dbt()
expected = {'adapter-macro'}
self.assertEqual(expected, deprecations.active_deprecations)
@@ -128,25 +95,16 @@ class TestAdapterMacroDeprecationPackages(BaseTestDeprecations):
def test_postgres_adapter_macro_pkg_fail(self):
self.assertEqual(deprecations.active_deprecations, set())
with self.assertRaises(dbt.exceptions.CompilationException) as exc:
self.run_dbt(strict=True)
self.run_dbt(['--warn-error', 'run'])
exc_str = ' '.join(str(exc.exception).split()) # flatten all whitespace
assert 'The "adapter_macro" macro has been deprecated' in exc_str
@use_profile('redshift')
def test_redshift_adapter_macro_pkg(self):
self.assertEqual(deprecations.active_deprecations, set())
# pick up the postgres macro
self.assertEqual(deprecations.active_deprecations, set())
self.run_dbt(strict=False)
expected = {'adapter-macro'}
self.assertEqual(expected, deprecations.active_deprecations)
@use_profile('bigquery')
def test_bigquery_adapter_macro_pkg(self):
self.assertEqual(deprecations.active_deprecations, set())
# picked up the default -> error
with self.assertRaises(dbt.exceptions.CompilationException) as exc:
self.run_dbt(strict=False, expect_pass=False)
self.run_dbt(expect_pass=False)
exc_str = ' '.join(str(exc.exception).split()) # flatten all whitespace
assert 'not allowed' in exc_str # we saw the default macro
@@ -176,7 +134,7 @@ class TestDispatchPackagesDeprecation(BaseTestDeprecations):
@use_profile('postgres')
def test_postgres_adapter_macro(self):
self.assertEqual(deprecations.active_deprecations, set())
self.run_dbt(strict=False)
self.run_dbt()
expected = {'dispatch-packages'}
self.assertEqual(expected, deprecations.active_deprecations)
@@ -184,7 +142,7 @@ class TestDispatchPackagesDeprecation(BaseTestDeprecations):
def test_postgres_adapter_macro_fail(self):
self.assertEqual(deprecations.active_deprecations, set())
with self.assertRaises(dbt.exceptions.CompilationException) as exc:
self.run_dbt(strict=True)
self.run_dbt(['--warn-error', 'run'])
exc_str = ' '.join(str(exc.exception).split()) # flatten all whitespace
assert 'Raised during dispatch for: string_literal' in exc_str
@@ -208,7 +166,7 @@ class TestPackageRedirectDeprecation(BaseTestDeprecations):
@use_profile('postgres')
def test_postgres_package_redirect(self):
self.assertEqual(deprecations.active_deprecations, set())
self.run_dbt(['deps'], strict=False)
self.run_dbt(['deps'])
expected = {'package-redirect'}
self.assertEqual(expected, deprecations.active_deprecations)
@@ -216,7 +174,7 @@ class TestPackageRedirectDeprecation(BaseTestDeprecations):
def test_postgres_package_redirect_fail(self):
self.assertEqual(deprecations.active_deprecations, set())
with self.assertRaises(dbt.exceptions.CompilationException) as exc:
self.run_dbt(['deps'], strict=True)
self.run_dbt(['--warn-error', 'deps'])
exc_str = ' '.join(str(exc.exception).split()) # flatten all whitespace
expected = "The `fishtown-analytics/dbt_utils` package is deprecated in favor of `dbt-labs/dbt_utils`"
assert expected in exc_str


@@ -24,4 +24,4 @@ vars:
seeds:
quote_columns: False
quote_columns: True


@@ -143,7 +143,8 @@ class TestContextVars(DBTIntegrationTest):
@use_profile('postgres')
def test_postgres_env_vars_secrets(self):
_, log_output = self.run_dbt_and_capture(['--debug', 'run', '--target', 'prod'])
os.environ['DBT_DEBUG'] = 'True'
_, log_output = self.run_dbt_and_capture(['run', '--target', 'prod'])
self.assertFalse("secret_variable" in log_output)
self.assertTrue("regular_variable" in log_output)
@@ -159,9 +160,7 @@ class TestEmitWarning(DBTIntegrationTest):
@use_profile('postgres')
def test_postgres_warn(self):
with pytest.raises(dbt.exceptions.CompilationException):
self.run_dbt(['run'], strict=True)
self.run_dbt(['run'], strict=False, expect_pass=True)
self.run_dbt(['run'], expect_pass=True)
class TestVarDependencyInheritance(DBTIntegrationTest):
@@ -199,9 +198,9 @@ class TestVarDependencyInheritance(DBTIntegrationTest):
@use_profile('postgres')
def test_postgres_var_mutual_overrides_v1_conversion(self):
self.run_dbt(['deps'], strict=False)
assert len(self.run_dbt(['seed'], strict=False)) == 2
assert len(self.run_dbt(['run'], strict=False)) == 2
self.run_dbt(['deps'])
assert len(self.run_dbt(['seed'])) == 2
assert len(self.run_dbt(['run'])) == 2
self.assertTablesEqual('root_model_expected', 'model')
self.assertTablesEqual('first_dep_expected', 'first_dep_model')


@@ -95,20 +95,6 @@ class TestAdapterMacroNoDestination(DBTIntegrationTest):
assert "In dispatch: No macro named 'dispatch_to_nowhere' found" in str(exc.value)
class TestDispatchMacroUseParent(DBTIntegrationTest):
@property
def schema(self):
return "test_macros_016"
@property
def models(self):
return "dispatch-inheritance-models"
@use_profile('redshift')
def test_redshift_inherited_macro(self):
self.run_dbt(['run'])
class TestMacroOverrideBuiltin(DBTIntegrationTest):
@property
def schema(self):
@@ -125,7 +111,6 @@ class TestMacroOverrideBuiltin(DBTIntegrationTest):
'macro-paths': ['override-get-columns-macros'],
}
@use_profile('postgres')
def test_postgres_overrides(self):
# the first time, the model doesn't exist


@@ -43,17 +43,6 @@ class TestEphemeralMulti(DBTIntegrationTest):
expected_sql = "".join(expected_sql.split())
self.assertEqual(sql_file, expected_sql)
@use_profile('snowflake')
def test__snowflake(self):
self.run_sql_file("seed.sql")
results = self.run_dbt()
self.assertEqual(len(results), 3)
self.assertManyTablesEqual(
["SEED", "DEPENDENT", "DOUBLE_DEPENDENT", "SUPER_DEPENDENT"]
)
class TestEphemeralNested(DBTIntegrationTest):
@property

Some files were not shown because too many files have changed in this diff Show More