Compare commits


1 Commit

Author          SHA1        Message                                  Date
Kshitij Aranke  301f0eb454  set default branch as main for codecov   2023-07-19 11:42:31 -05:00
56 changed files with 134 additions and 533 deletions

View File

@@ -1,6 +0,0 @@
kind: "Dependencies"
body: "Bump mypy from 1.3.0 to 1.4.0"
time: 2023-06-21T00:57:52.00000Z
custom:
Author: dependabot[bot]
PR: 7912

View File

@@ -1,6 +0,0 @@
kind: Docs
body: Corrected spelling of "Partiton"
time: 2023-07-15T20:09:07.057361092+02:00
custom:
Author: pgoslatara
Issue: "8100"

View File

@@ -1,6 +0,0 @@
kind: Fixes
body: Fixed double-underline
time: 2023-06-25T14:27:31.231253719+08:00
custom:
Author: lllong33
Issue: "5301"

View File

@@ -1,6 +0,0 @@
kind: Fixes
body: Add status to Parse Inline Error
time: 2023-07-20T12:27:23.085084-07:00
custom:
Author: ChenyuLInx
Issue: "8173"

View File

@@ -1,6 +0,0 @@
kind: Fixes
body: Ensure `warn_error_options` get serialized in `invocation_args_dict`
time: 2023-07-20T16:15:13.761813-07:00
custom:
Author: QMalcolm
Issue: "7694"

View File

@@ -1,6 +0,0 @@
kind: Fixes
body: Stop detecting materialization macros based on macro name
time: 2023-07-20T17:01:12.496238-07:00
custom:
Author: QMalcolm
Issue: "6231"

View File

@@ -1,6 +0,0 @@
kind: Fixes
body: Update `dbt deps` download retry logic to handle `EOFError` exceptions
time: 2023-07-20T17:24:22.969951-07:00
custom:
Author: QMalcolm
Issue: "6653"

View File

@@ -1,6 +0,0 @@
kind: Fixes
body: Improve handling of CTE injection with ephemeral models
time: 2023-07-26T10:44:48.888451-04:00
custom:
Author: gshank
Issue: "8213"

View File

@@ -1,6 +0,0 @@
kind: Under the Hood
body: Refactor flaky test pp_versioned_models
time: 2023-07-19T12:46:11.972481-04:00
custom:
Author: gshank
Issue: "7781"

View File

@@ -1,6 +0,0 @@
kind: Under the Hood
body: format exception from dbtPlugin.initialize
time: 2023-07-19T16:33:34.586377-04:00
custom:
Author: michelleark
Issue: "8152"

View File

@@ -1,6 +0,0 @@
kind: Under the Hood
body: A way to control maxBytes for a single dbt.log file
time: 2023-07-24T15:06:54.263822-07:00
custom:
Author: ChenyuLInx
Issue: "8199"

View File

@@ -1,7 +0,0 @@
kind: Under the Hood
body: Ref expressions with version can now be processed by the latest version of the
high-performance dbt-extractor library.
time: 2023-07-25T10:26:09.902878-04:00
custom:
Author: peterallenwebb
Issue: "7688"

View File

@@ -1,40 +0,0 @@
name: 🛠️ Implementation
description: This is an implementation ticket intended for use by the maintainers of dbt-core
title: "[<project>] <title>"
labels: ["user_docs"]
body:
- type: markdown
attributes:
value: This is an implementation ticket intended for use by the maintainers of dbt-core
- type: checkboxes
attributes:
label: Housekeeping
description: >
A couple friendly reminders:
1. Remove the `user_docs` label if the scope of this work does not require changes to https://docs.getdbt.com/docs: no end-user interface (e.g. yml spec, CLI, error messages, etc) or functional changes
2. Link any blocking issues in the "Blocked on" field under the "Core devs & maintainers" project.
options:
- label: I am a maintainer of dbt-core
required: true
- type: textarea
attributes:
label: Short description
description: |
Describe the scope of the ticket, a high-level implementation approach and any tradeoffs to consider
validations:
required: true
- type: textarea
attributes:
label: Acceptance critera
description: |
What is the definition of done for this ticket? Include any relevant edge cases and/or test cases
validations:
required: true
- type: textarea
attributes:
label: Context
description: |
Provide the "why", motivation, and alternative approaches considered -- linking to previous refinement issues, spikes, Notion docs as appropriate
validations:
validations:
required: false

View File

@@ -33,11 +33,6 @@ defaults:
run:
shell: bash
# top-level adjustments can be made here
env:
# number of parallel processes to spawn for python integration testing
PYTHON_INTEGRATION_TEST_WORKERS: ${{ vars.PYTHON_INTEGRATION_TEST_WORKERS }}
jobs:
code-quality:
name: code-quality
@@ -111,55 +106,23 @@ jobs:
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
integration-metadata:
name: integration test metadata generation
runs-on: ubuntu-latest
outputs:
split-groups: ${{ steps.generate-split-groups.outputs.split-groups }}
include: ${{ steps.generate-include.outputs.include }}
steps:
- name: generate split-groups
id: generate-split-groups
run: |
MATRIX_JSON="["
for B in $(seq 1 ${{ env.PYTHON_INTEGRATION_TEST_WORKERS }}); do
MATRIX_JSON+=$(sed 's/^/"/;s/$/"/' <<< "${B}")
done
MATRIX_JSON="${MATRIX_JSON//\"\"/\", \"}"
MATRIX_JSON+="]"
echo "split-groups=${MATRIX_JSON}"
echo "split-groups=${MATRIX_JSON}" >> $GITHUB_OUTPUT
- name: generate include
id: generate-include
run: |
INCLUDE=('"python-version":"3.8","os":"windows-latest"' '"python-version":"3.8","os":"macos-latest"' )
INCLUDE_GROUPS="["
for include in ${INCLUDE[@]}; do
for group in $(seq 1 ${{ env.PYTHON_INTEGRATION_TEST_WORKERS }}); do
INCLUDE_GROUPS+=$(sed 's/$/, /' <<< "{\"split-group\":\"${group}\",${include}}")
done
done
INCLUDE_GROUPS=$(echo $INCLUDE_GROUPS | sed 's/,*$//g')
INCLUDE_GROUPS+="]"
echo "include=${INCLUDE_GROUPS}"
echo "include=${INCLUDE_GROUPS}" >> $GITHUB_OUTPUT
integration:
name: (${{ matrix.split-group }}) integration test / python ${{ matrix.python-version }} / ${{ matrix.os }}
name: integration test / python ${{ matrix.python-version }} / ${{ matrix.os }}
runs-on: ${{ matrix.os }}
timeout-minutes: 30
needs:
- integration-metadata
timeout-minutes: 60
strategy:
fail-fast: false
matrix:
python-version: ["3.8", "3.9", "3.10", "3.11"]
os: [ubuntu-20.04]
split-group: ${{ fromJson(needs.integration-metadata.outputs.split-groups) }}
include: ${{ fromJson(needs.integration-metadata.outputs.include) }}
include:
- python-version: 3.8
os: windows-latest
- python-version: 3.8
os: macos-latest
env:
TOXENV: integration
DBT_INVOCATION_ENV: github-actions
@@ -202,8 +165,6 @@ jobs:
- name: Run tests
run: tox -- --ddtrace
env:
PYTEST_ADDOPTS: ${{ format('--splits {0} --group {1}', env.PYTHON_INTEGRATION_TEST_WORKERS, matrix.split-group) }}
- name: Get current date
if: always()
@@ -224,15 +185,6 @@ jobs:
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
integration-report:
name: integration test suite
runs-on: ubuntu-latest
needs: integration
steps:
- name: "[Notification] Integration test suite passes"
run: |
echo "::notice title="Integration test suite passes""
build:
name: build packages

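A note on the split-group steps shown in this hunk: the bash loops build JSON strings that the integration job consumes via fromJson() to fan tests out across parallel workers. A minimal Python sketch of the equivalent output, assuming an illustrative worker count of 10 (the real value comes from the vars.PYTHON_INTEGRATION_TEST_WORKERS repository variable):

    import json

    workers = 10  # illustrative; real value comes from vars.PYTHON_INTEGRATION_TEST_WORKERS

    # Equivalent of the "generate split-groups" step: a JSON array of group
    # indices as strings, e.g. ["1", "2", ..., "10"].
    split_groups = json.dumps([str(i) for i in range(1, workers + 1)])
    print(f"split-groups={split_groups}")

    # Equivalent of the "generate include" step: one matrix entry per
    # (extra OS/python combination, split group).
    extra_combos = [
        {"python-version": "3.8", "os": "windows-latest"},
        {"python-version": "3.8", "os": "macos-latest"},
    ]
    include = json.dumps(
        [{"split-group": str(g), **combo} for combo in extra_combos for g in range(1, workers + 1)]
    )
    print(f"include={include}")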
View File

@@ -18,41 +18,11 @@ on:
permissions: read-all
# top-level adjustments can be made here
env:
# number of parallel processes to spawn for python testing
PYTHON_INTEGRATION_TEST_WORKERS: ${{ vars.PYTHON_INTEGRATION_TEST_WORKERS }}
jobs:
integration-metadata:
name: integration test metadata generation
runs-on: ubuntu-latest
outputs:
split-groups: ${{ steps.generate-split-groups.outputs.split-groups }}
steps:
- name: generate split-groups
id: generate-split-groups
run: |
MATRIX_JSON="["
for B in $(seq 1 ${{ env.PYTHON_INTEGRATION_TEST_WORKERS }}); do
MATRIX_JSON+=$(sed 's/^/"/;s/$/"/' <<< "${B}")
done
MATRIX_JSON="${MATRIX_JSON//\"\"/\", \"}"
MATRIX_JSON+="]"
echo "split-groups=${MATRIX_JSON}" >> $GITHUB_OUTPUT
# run the performance measurements on the current or default branch
test-schema:
name: Test Log Schema
runs-on: ubuntu-20.04
timeout-minutes: 30
needs:
- integration-metadata
strategy:
fail-fast: false
matrix:
split-group: ${{ fromJson(needs.integration-metadata.outputs.split-groups) }}
env:
# turns warnings into errors
RUSTFLAGS: "-D warnings"
@@ -95,14 +65,3 @@ jobs:
# we actually care if these pass, because the normal test run doesn't usually include many json log outputs
- name: Run integration tests
run: tox -e integration -- -nauto
env:
PYTEST_ADDOPTS: ${{ format('--splits {0} --group {1}', env.PYTHON_INTEGRATION_TEST_WORKERS, matrix.split-group) }}
test-schema-report:
name: Log Schema Test Suite
runs-on: ubuntu-latest
needs: test-schema
steps:
- name: "[Notification] Log test suite passes"
run: |
echo "::notice title="Log test suite passes""

View File

@@ -37,7 +37,7 @@ repos:
alias: flake8-check
stages: [manual]
- repo: https://github.com/pre-commit/mirrors-mypy
rev: v1.4.0
rev: v1.3.0
hooks:
- id: mypy
# N.B.: Mypy is... a bit fragile.

View File

@@ -0,0 +1,2 @@
codecov:
branch: main # set new Default branch

View File

@@ -132,7 +132,6 @@ class dbtRunner:
@p.enable_legacy_logger
@p.fail_fast
@p.log_cache_events
@p.log_file_max_bytes
@p.log_format
@p.log_format_file
@p.log_level

View File

@@ -171,15 +171,6 @@ use_colors_file = click.option(
default=True,
)
log_file_max_bytes = click.option(
"--log-file-max-bytes",
envvar="DBT_LOG_FILE_MAX_BYTES",
help="Configure the max file size in bytes for a single dbt.log file, before rolling over. 0 means no limit.",
default=10 * 1024 * 1024, # 10mb
type=click.INT,
hidden=True,
)
log_path = click.option(
"--log-path",
envvar="DBT_LOG_PATH",

View File

@@ -4,6 +4,7 @@ import json
import networkx as nx # type: ignore
import os
import pickle
import sqlparse
from collections import defaultdict
from typing import List, Dict, Any, Tuple, Optional
@@ -35,7 +36,6 @@ from dbt.node_types import NodeType, ModelLanguage
from dbt.events.format import pluralize
import dbt.tracking
import dbt.task.list as list_task
import sqlparse
graph_file_name = "graph.gpickle"
@@ -378,16 +378,16 @@ class Compiler:
_add_prepended_cte(prepended_ctes, InjectedCTE(id=cte.id, sql=sql))
injected_sql = inject_ctes_into_sql(
model.compiled_code,
prepended_ctes,
)
# Check again before updating for multi-threading
if not model.extra_ctes_injected:
injected_sql = inject_ctes_into_sql(
model.compiled_code,
prepended_ctes,
)
model.extra_ctes_injected = True
model._pre_injected_sql = model.compiled_code
model.compiled_code = injected_sql
model.extra_ctes = prepended_ctes
model.extra_ctes_injected = True
# if model.extra_ctes is not set to prepended ctes, something went wrong
return model, model.extra_ctes
@@ -523,12 +523,6 @@ class Compiler:
the node's raw_code into compiled_code, and then calls the
recursive method to "prepend" the ctes.
"""
# Make sure Lexer for sqlparse 0.4.4 is initialized
from sqlparse.lexer import Lexer # type: ignore
if hasattr(Lexer, "get_default_instance"):
Lexer.get_default_instance()
node = self._compile_code(node, manifest, extra_context)
node, _ = self._recursively_prepend_ctes(node, manifest, extra_context)

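For context on the multi-threading comment in this hunk: several threads can compile the same ephemeral model, so compiled state is only written back after re-checking that no other thread has already injected the CTEs. An illustrative sketch of that guarded update (simplified, not dbt's exact code):

    def maybe_inject_ctes(model, prepended_ctes, inject_ctes_into_sql):
        injected_sql = inject_ctes_into_sql(model.compiled_code, prepended_ctes)
        # Check again before updating: another thread may have injected the CTEs
        # for this ephemeral model while the SQL above was being built.
        if not model.extra_ctes_injected:
            model._pre_injected_sql = model.compiled_code
            model.compiled_code = injected_sql
            model.extra_ctes = prepended_ctes
            model.extra_ctes_injected = True
        return model, model.extra_ctes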
View File

@@ -13,7 +13,7 @@ from uuid import uuid4
from dbt.events.format import timestamp_to_datetime_string
from dbt.events.base_types import BaseEvent, EventLevel, msg_from_base_event, EventMsg
import dbt.utils
# A Filter is a function which takes a BaseEvent and returns True if the event
# should be logged, False otherwise.
@@ -80,7 +80,6 @@ class LoggerConfig:
use_colors: bool = False
output_stream: Optional[TextIO] = None
output_file_name: Optional[str] = None
output_file_max_bytes: Optional[int] = 10 * 1024 * 1024 # 10 mb
logger: Optional[Any] = None
@@ -101,7 +100,7 @@ class _Logger:
file_handler = RotatingFileHandler(
filename=str(config.output_file_name),
encoding="utf8",
maxBytes=config.output_file_max_bytes, # type: ignore
maxBytes=10 * 1024 * 1024, # 10 mb
backupCount=5,
)
self._python_logger = self._get_python_log_for_handler(file_handler)
@@ -176,7 +175,7 @@ class _JsonLogger(_Logger):
from dbt.events.functions import msg_to_dict
msg_dict = msg_to_dict(msg)
raw_log_line = json.dumps(msg_dict, sort_keys=True, cls=dbt.utils.ForgivingJSONEncoder)
raw_log_line = json.dumps(msg_dict, sort_keys=True)
line = self.scrubber(raw_log_line) # type: ignore
return line

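For the maxBytes lines in this hunk: both variants hand the size limit to Python's stdlib RotatingFileHandler, which rolls dbt.log over once it reaches that size and keeps backupCount older files. A small self-contained sketch (the file name and logger name below are illustrative):

    import logging
    from logging.handlers import RotatingFileHandler

    handler = RotatingFileHandler(
        filename="dbt.log",            # illustrative path
        encoding="utf8",
        maxBytes=10 * 1024 * 1024,     # 10 MB before rolling over to dbt.log.1, dbt.log.2, ...
        backupCount=5,                 # keep at most 5 rolled-over files
    )
    logger = logging.getLogger("example_file_log")  # illustrative logger name
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)
    logger.debug("sample line written to the rotating log file")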
View File

@@ -13,7 +13,6 @@ from typing import Callable, Dict, List, Optional, TextIO
import uuid
from google.protobuf.json_format import MessageToDict
import dbt.utils
LOG_VERSION = 3
metadata_vars: Optional[Dict[str, str]] = None
@@ -68,11 +67,7 @@ def setup_event_logger(flags, callbacks: List[Callable[[EventMsg], None]] = [])
log_level_file = EventLevel.DEBUG if flags.DEBUG else EventLevel(flags.LOG_LEVEL_FILE)
EVENT_MANAGER.add_logger(
_get_logfile_config(
log_file,
flags.USE_COLORS_FILE,
log_file_format,
log_level_file,
flags.LOG_FILE_MAX_BYTES,
log_file, flags.USE_COLORS_FILE, log_file_format, log_level_file
)
)
@@ -121,11 +116,7 @@ def _stdout_filter(
def _get_logfile_config(
log_path: str,
use_colors: bool,
line_format: LineFormat,
level: EventLevel,
log_file_max_bytes: int,
log_path: str, use_colors: bool, line_format: LineFormat, level: EventLevel
) -> LoggerConfig:
return LoggerConfig(
name="file_log",
@@ -135,7 +126,6 @@ def _get_logfile_config(
scrubber=env_scrubber,
filter=partial(_logfile_filter, bool(get_flags().LOG_CACHE_EVENTS), line_format),
output_file_name=log_path,
output_file_max_bytes=log_file_max_bytes,
)
@@ -210,7 +200,7 @@ def stop_capture_stdout_logs():
# the message may contain secrets which must be scrubbed at the usage site.
def msg_to_json(msg: EventMsg) -> str:
msg_dict = msg_to_dict(msg)
raw_log_line = json.dumps(msg_dict, sort_keys=True, cls=dbt.utils.ForgivingJSONEncoder)
raw_log_line = json.dumps(msg_dict, sort_keys=True)
return raw_log_line

View File

@@ -63,12 +63,3 @@
{{ exceptions.raise_not_implemented(
'list_relations_without_caching macro not implemented for adapter '+adapter.type()) }}
{% endmacro %}
{% macro get_relations() %}
{{ return(adapter.dispatch('get_relations', 'dbt')()) }}
{% endmacro %}
{% macro default__get_relations() %}
{{ exceptions.raise_not_implemented(
'get_relations macro not implemented for adapter '+adapter.type()) }}
{% endmacro %}

View File

@@ -19,7 +19,7 @@
{% set day_count = (end_date - start_date).days %}
{% if day_count < 0 %}
{% set msg -%}
Partition start date is after the end date ({{ start_date }}, {{ end_date }})
Partiton start date is after the end date ({{ start_date }}, {{ end_date }})
{%- endset %}
{{ exceptions.raise_compiler_error(msg, model) }}

View File

@@ -33,12 +33,7 @@
-- cleanup
{% if existing_relation is not none %}
/* Do the equivalent of rename_if_exists. 'existing_relation' could have been dropped
since the variable was first set. */
{% set existing_relation = load_cached_relation(existing_relation) %}
{% if existing_relation is not none %}
{{ adapter.rename_relation(existing_relation, backup_relation) }}
{% endif %}
{{ adapter.rename_relation(existing_relation, backup_relation) }}
{% endif %}
{{ adapter.rename_relation(intermediate_relation, target_relation) }}

View File

@@ -45,12 +45,7 @@
-- cleanup
-- move the existing view out of the way
{% if existing_relation is not none %}
/* Do the equivalent of rename_if_exists. 'existing_relation' could have been dropped
since the variable was first set. */
{% set existing_relation = load_cached_relation(existing_relation) %}
{% if existing_relation is not none %}
{{ adapter.rename_relation(existing_relation, backup_relation) }}
{% endif %}
{{ adapter.rename_relation(existing_relation, backup_relation) }}
{% endif %}
{{ adapter.rename_relation(intermediate_relation, target_relation) }}

View File

@@ -81,7 +81,7 @@ class MacroParser(BaseParser[Macro]):
name: str = macro.name.replace(MACRO_PREFIX, "")
node = self.parse_macro(block, base_node, name)
# get supported_languages for materialization macro
if block.block_type_name == "materialization":
if "materialization" in name:
node.supported_languages = jinja.get_supported_languages(macro)
yield node

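On the two conditions in this hunk: one keys off the Jinja block type, the other off the macro's name. A tiny illustrative comparison of why the name-based check can over-match (the macro names below are made up):

    def looks_like_materialization_by_name(name: str) -> bool:
        # Name-based check: any macro whose name contains "materialization" matches,
        # including ordinary {% macro %} definitions.
        return "materialization" in name

    def looks_like_materialization_by_block(block_type_name: str) -> bool:
        # Block-type check: only blocks declared with {% materialization ... %} match.
        return block_type_name == "materialization"

    assert looks_like_materialization_by_name("my_materialization_helper")  # false positive
    assert not looks_like_materialization_by_block("macro")                 # plain {% macro %} block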
View File

@@ -497,10 +497,12 @@ class ModelParser(SimpleSQLParser[ModelNode]):
# set refs and sources on the node object
refs: List[RefArgs] = []
for ref in statically_parsed["refs"]:
name = ref.get("name")
package = ref.get("package")
version = ref.get("version")
refs.append(RefArgs(name, package, version))
if len(ref) == 1:
package, name = None, ref[0]
else:
package, name = ref
refs.append(RefArgs(package=package, name=name))
node.refs += refs
node.sources += statically_parsed["sources"]

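The two branches in this hunk handle two shapes of statically parsed refs: dict-like entries with name/package/version keys, and 1- or 2-element lists. A simplified sketch of both shapes (RefArgs below is a stand-in for dbt's class):

    from typing import NamedTuple, Optional

    class RefArgs(NamedTuple):          # simplified stand-in for dbt's RefArgs
        name: Optional[str] = None
        package: Optional[str] = None
        version: Optional[int] = None

    def ref_from_dict(ref: dict) -> RefArgs:
        # e.g. {"name": "my_model", "version": 2}
        return RefArgs(ref.get("name"), ref.get("package"), ref.get("version"))

    def ref_from_list(ref: list) -> RefArgs:
        # e.g. ["my_model"] or ["my_package", "my_model"]
        if len(ref) == 1:
            return RefArgs(name=ref[0])
        package, name = ref
        return RefArgs(name=name, package=package)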
View File

@@ -29,11 +29,8 @@ class dbtPlugin:
self.project_name = project_name
try:
self.initialize()
except DbtRuntimeError as e:
# Remove the first line of DbtRuntimeError to avoid redundant "Runtime Error" line
raise DbtRuntimeError("\n".join(str(e).split("\n")[1:]))
except Exception as e:
raise DbtRuntimeError(str(e))
raise DbtRuntimeError(f"initialize: {e}")
@property
def name(self) -> str:

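On the comment about the redundant "Runtime Error" line in this hunk: DbtRuntimeError prefixes its message with a "Runtime Error" header, so re-wrapping a nested error verbatim would print that header twice. An illustrative sketch with a stand-in exception class:

    class DemoRuntimeError(Exception):   # stand-in for dbt's DbtRuntimeError
        def __str__(self) -> str:
            return "Runtime Error\n  " + super().__str__()

    inner = DemoRuntimeError("plugin error message")
    # Dropping the first line of the nested message before re-wrapping avoids
    # "Runtime Error" appearing twice in the final output.
    cleaned = "\n".join(str(inner).split("\n")[1:])
    print(str(DemoRuntimeError(cleaned)))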
View File

@@ -139,7 +139,6 @@ class CompileTask(GraphRunnableTask):
"node_path": "sql/inline_query",
"node_name": "inline_query",
"unique_id": "sqloperation.test.inline_query",
"node_status": "failed",
},
)
)

View File

@@ -502,7 +502,6 @@ def project(
DEBUG=False,
LOG_CACHE_EVENTS=False,
QUIET=False,
LOG_FILE_MAX_BYTES=1000000,
)
setup_event_logger(log_flags)
orig_cwd = os.getcwd()

View File

@@ -16,8 +16,9 @@ import time
from pathlib import PosixPath, WindowsPath
from contextlib import contextmanager
from dbt.exceptions import ConnectionError, DuplicateAliasError
from dbt.events.functions import fire_event
from dbt.events.types import RetryExternalCall, RecordRetryException
from dbt.helper_types import WarnErrorOptions
from dbt import flags
from enum import Enum
from typing_extensions import Protocol
@@ -39,7 +40,6 @@ from typing import (
Sequence,
)
import dbt.events.functions
import dbt.exceptions
DECIMALS: Tuple[Type[Any], ...]
@@ -337,18 +337,15 @@ class JSONEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, DECIMALS):
return float(obj)
elif isinstance(obj, (datetime.datetime, datetime.date, datetime.time)):
if isinstance(obj, (datetime.datetime, datetime.date, datetime.time)):
return obj.isoformat()
elif isinstance(obj, jinja2.Undefined):
if isinstance(obj, jinja2.Undefined):
return ""
elif isinstance(obj, Exception):
return repr(obj)
elif hasattr(obj, "to_dict"):
if hasattr(obj, "to_dict"):
# if we have a to_dict we should try to serialize the result of
# that!
return obj.to_dict(omit_none=True)
else:
return super().default(obj)
return super().default(obj)
class ForgivingJSONEncoder(JSONEncoder):
@@ -372,7 +369,7 @@ class Translator:
for key, value in kwargs.items():
canonical_key = self.aliases.get(key, key)
if canonical_key in result:
raise dbt.exceptions.DuplicateAliasError(kwargs, self.aliases, canonical_key)
raise DuplicateAliasError(kwargs, self.aliases, canonical_key)
result[canonical_key] = self.translate_value(value)
return result
@@ -392,7 +389,9 @@ class Translator:
return self.translate_mapping(value)
except RuntimeError as exc:
if "maximum recursion depth exceeded" in str(exc):
raise RecursionError("Cycle detected in a value passed to translate!")
raise dbt.exceptions.RecursionError(
"Cycle detected in a value passed to translate!"
)
raise
@@ -602,17 +601,14 @@ def _connection_exception_retry(fn, max_attempts: int, attempt: int = 0):
except (
requests.exceptions.RequestException,
ReadError,
EOFError,
) as exc:
if attempt <= max_attempts - 1:
dbt.events.functions.fire_event(RecordRetryException(exc=str(exc)))
dbt.events.functions.fire_event(RetryExternalCall(attempt=attempt, max=max_attempts))
fire_event(RecordRetryException(exc=str(exc)))
fire_event(RetryExternalCall(attempt=attempt, max=max_attempts))
time.sleep(1)
return _connection_exception_retry(fn, max_attempts, attempt + 1)
else:
raise dbt.exceptions.ConnectionError(
"External connection exception occurred: " + str(exc)
)
raise ConnectionError("External connection exception occurred: " + str(exc))
# This is used to serialize the args in the run_results and in the logs.
@@ -656,9 +652,6 @@ def args_to_dict(args):
# this was required for a test case
if isinstance(var_args[key], PosixPath) or isinstance(var_args[key], WindowsPath):
var_args[key] = str(var_args[key])
if isinstance(var_args[key], WarnErrorOptions):
var_args[key] = var_args[key].to_dict()
dict_args[key] = var_args[key]
return dict_args

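On the retry block in this hunk: failed downloads are retried recursively with a short sleep until max_attempts is reached, then a ConnectionError is raised. A minimal standalone sketch of the pattern (the exception tuple is an illustrative subset, and the event firing is omitted):

    import time

    RETRYABLE = (ConnectionResetError, EOFError)   # illustrative subset of the real tuple

    def connection_exception_retry(fn, max_attempts: int, attempt: int = 0):
        try:
            return fn()
        except RETRYABLE as exc:
            if attempt <= max_attempts - 1:
                time.sleep(1)                      # brief pause, then retry the same callable
                return connection_exception_retry(fn, max_attempts, attempt + 1)
            raise ConnectionError("External connection exception occurred: " + str(exc))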
View File

@@ -73,7 +73,7 @@ setup(
"sqlparse>=0.2.3",
# ----
# These are major-version-0 packages also maintained by dbt-labs. Accept patches.
"dbt-extractor~=0.5.0",
"dbt-extractor~=0.4.1",
"hologram~=0.0.16", # includes transitive dependencies on python-dateutil and jsonschema
"minimal-snowplow-tracker~=0.0.2",
# DSI is under active development, so we're pinning to specific dev versions for now.

View File

@@ -6,7 +6,7 @@ flake8
flaky
freezegun==0.3.12
ipdb
mypy==1.4.0
mypy==1.3.0
pip-tools
pre-commit
protobuf>=4.0.0
@@ -16,9 +16,7 @@ pytest-csv
pytest-dotenv
pytest-logbook
pytest-mock
pytest-split
pytest-xdist
python-dev-tools
sphinx
tox>=3.13
twine

View File

@@ -20,7 +20,8 @@ from dbt.exceptions import (
import dbt.utils
GET_RELATIONS_MACRO_NAME = "postgres__get_relations"
# note that this isn't an adapter macro, so just a single underscore
GET_RELATIONS_MACRO_NAME = "postgres_get_relations"
@dataclass

View File

@@ -1,4 +1,4 @@
{% macro postgres__get_relations() -%}
{% macro postgres_get_relations () -%}
{#
-- in pg_depend, objid is the dependent, refobjid is the referenced object
@@ -74,7 +74,3 @@
{{ return(load_result('relations').table) }}
{% endmacro %}
{% macro postgres_get_relations() %}
{{ return(postgres__get_relations()) }}
{% endmacro %}

File diffs suppressed for 10 files because one or more lines are too long

View File

@@ -4,12 +4,6 @@ models__dep_macro = """
}}
"""
models__materialization_macro = """
{{
materialization_macro()
}}
"""
models__with_undefined_macro = """
{{ dispatch_to_nowhere() }}
select 1 as id
@@ -81,12 +75,6 @@ macros__my_macros = """
{% endmacro %}
"""
macros__named_materialization = """
{% macro materialization_macro() %}
select 1 as foo
{% endmacro %}
"""
macros__no_default_macros = """
{% macro do_something2(foo2, bar2) %}

View File

@@ -20,14 +20,12 @@ from tests.functional.macros.fixtures import (
models__override_get_columns_macros,
models__deprecated_adapter_macro_model,
models__incorrect_dispatch,
models__materialization_macro,
macros__my_macros,
macros__no_default_macros,
macros__override_get_columns_macros,
macros__package_override_get_columns_macros,
macros__deprecated_adapter_macro,
macros__incorrect_dispatch,
macros__named_materialization,
)
@@ -80,21 +78,6 @@ class TestMacros:
check_relations_equal(project.adapter, ["expected_local_macro", "local_macro"])
class TestMacrosNamedMaterialization:
@pytest.fixture(scope="class")
def models(self):
return {
"models_materialization_macro.sql": models__materialization_macro,
}
@pytest.fixture(scope="class")
def macros(self):
return {"macros_named_materialization.sql": macros__named_materialization}
def test_macro_with_materialization_in_name_works(self, project):
run_dbt(expect_pass=True)
class TestInvalidMacros:
@pytest.fixture(scope="class")
def models(self):

View File

@@ -8,6 +8,9 @@ from tests.functional.partial_parsing.fixtures import (
models_schema1_yml,
models_schema2_yml,
models_schema2b_yml,
models_versions_schema_yml,
models_versions_defined_in_schema_yml,
models_versions_updated_schema_yml,
model_three_sql,
model_three_modified_sql,
model_four1_sql,
@@ -68,7 +71,7 @@ from tests.functional.partial_parsing.fixtures import (
groups_schema_yml_two_groups_private_orders_invalid_access,
)
from dbt.exceptions import CompilationError, ParsingError
from dbt.exceptions import CompilationError, ParsingError, DuplicateVersionedUnversionedError
from dbt.contracts.files import ParseFileType
from dbt.contracts.results import TestStatus
@@ -300,6 +303,72 @@ class TestModels:
assert model_id not in manifest.disabled
class TestVersionedModels:
@pytest.fixture(scope="class")
def models(self):
return {
"model_one_v1.sql": model_one_sql,
"model_one.sql": model_one_sql,
"model_one_downstream.sql": model_four2_sql,
"schema.yml": models_versions_schema_yml,
}
def test_pp_versioned_models(self, project):
results = run_dbt(["run"])
assert len(results) == 3
manifest = get_manifest(project.project_root)
model_one_node = manifest.nodes["model.test.model_one.v1"]
assert not model_one_node.is_latest_version
model_two_node = manifest.nodes["model.test.model_one.v2"]
assert model_two_node.is_latest_version
# assert unpinned ref points to latest version
model_one_downstream_node = manifest.nodes["model.test.model_one_downstream"]
assert model_one_downstream_node.depends_on.nodes == ["model.test.model_one.v2"]
# update schema.yml block - model_one is now 'defined_in: model_one_different'
rm_file(project.project_root, "models", "model_one.sql")
write_file(model_one_sql, project.project_root, "models", "model_one_different.sql")
write_file(
models_versions_defined_in_schema_yml, project.project_root, "models", "schema.yml"
)
results = run_dbt(["--partial-parse", "run"])
assert len(results) == 3
# update versions schema.yml block - latest_version from 2 to 1
write_file(
models_versions_updated_schema_yml, project.project_root, "models", "schema.yml"
)
results, log_output = run_dbt_and_capture(
["--partial-parse", "--log-format", "json", "run"]
)
assert len(results) == 3
manifest = get_manifest(project.project_root)
model_one_node = manifest.nodes["model.test.model_one.v1"]
assert model_one_node.is_latest_version
model_two_node = manifest.nodes["model.test.model_one.v2"]
assert not model_two_node.is_latest_version
# assert unpinned ref points to latest version
model_one_downstream_node = manifest.nodes["model.test.model_one_downstream"]
assert model_one_downstream_node.depends_on.nodes == ["model.test.model_one.v1"]
# assert unpinned ref to latest-not-max version yields an "FYI" info-level log
assert "UnpinnedRefNewVersionAvailable" in log_output
# update versioned model
write_file(model_two_sql, project.project_root, "models", "model_one_different.sql")
results = run_dbt(["--partial-parse", "run"])
assert len(results) == 3
manifest = get_manifest(project.project_root)
assert len(manifest.nodes) == 3
print(f"--- nodes: {manifest.nodes.keys()}")
# create a new model_one in model_one.sql and re-parse
write_file(model_one_sql, project.project_root, "models", "model_one.sql")
with pytest.raises(DuplicateVersionedUnversionedError):
run_dbt(["parse"])
class TestSources:
@pytest.fixture(scope="class")
def models(self):

View File

@@ -1,126 +0,0 @@
import pytest
import pathlib
from dbt.tests.util import (
run_dbt,
get_manifest,
write_file,
rm_file,
read_file,
)
from dbt.exceptions import DuplicateVersionedUnversionedError
model_one_sql = """
select 1 as fun
"""
model_one_downstream_sql = """
select fun from {{ ref('model_one') }}
"""
models_versions_schema_yml = """
models:
- name: model_one
description: "The first model"
versions:
- v: 1
- v: 2
"""
models_versions_defined_in_schema_yml = """
models:
- name: model_one
description: "The first model"
versions:
- v: 1
- v: 2
defined_in: model_one_different
"""
models_versions_updated_schema_yml = """
models:
- name: model_one
latest_version: 1
description: "The first model"
versions:
- v: 1
- v: 2
defined_in: model_one_different
"""
model_two_sql = """
select 1 as notfun
"""
class TestVersionedModels:
@pytest.fixture(scope="class")
def models(self):
return {
"model_one_v1.sql": model_one_sql,
"model_one.sql": model_one_sql,
"model_one_downstream.sql": model_one_downstream_sql,
"schema.yml": models_versions_schema_yml,
}
def test_pp_versioned_models(self, project):
results = run_dbt(["run"])
assert len(results) == 3
manifest = get_manifest(project.project_root)
model_one_node = manifest.nodes["model.test.model_one.v1"]
assert not model_one_node.is_latest_version
model_two_node = manifest.nodes["model.test.model_one.v2"]
assert model_two_node.is_latest_version
# assert unpinned ref points to latest version
model_one_downstream_node = manifest.nodes["model.test.model_one_downstream"]
assert model_one_downstream_node.depends_on.nodes == ["model.test.model_one.v2"]
# update schema.yml block - model_one is now 'defined_in: model_one_different'
rm_file(project.project_root, "models", "model_one.sql")
write_file(model_one_sql, project.project_root, "models", "model_one_different.sql")
write_file(
models_versions_defined_in_schema_yml, project.project_root, "models", "schema.yml"
)
results = run_dbt(["--partial-parse", "run"])
assert len(results) == 3
# update versions schema.yml block - latest_version from 2 to 1
write_file(
models_versions_updated_schema_yml, project.project_root, "models", "schema.yml"
)
# This is where the test was failings in a CI run with:
# relation \"test..._test_partial_parsing.model_one_downstream\" does not exist
# because in core/dbt/include/global_project/macros/materializations/models/view/view.sql
# "existing_relation" didn't actually exist by the time it gets to the rename of the
# existing relation.
(pathlib.Path(project.project_root) / "log_output").mkdir(parents=True, exist_ok=True)
results = run_dbt(
["--partial-parse", "--log-format-file", "json", "--log-path", "log_output", "run"]
)
assert len(results) == 3
manifest = get_manifest(project.project_root)
model_one_node = manifest.nodes["model.test.model_one.v1"]
assert model_one_node.is_latest_version
model_two_node = manifest.nodes["model.test.model_one.v2"]
assert not model_two_node.is_latest_version
# assert unpinned ref points to latest version
model_one_downstream_node = manifest.nodes["model.test.model_one_downstream"]
assert model_one_downstream_node.depends_on.nodes == ["model.test.model_one.v1"]
# assert unpinned ref to latest-not-max version yields an "FYI" info-level log
log_output = read_file("log_output", "dbt.log").replace("\n", " ").replace("\\n", " ")
assert "UnpinnedRefNewVersionAvailable" in log_output
# update versioned model
write_file(model_two_sql, project.project_root, "models", "model_one_different.sql")
results = run_dbt(["--partial-parse", "run"])
assert len(results) == 3
manifest = get_manifest(project.project_root)
assert len(manifest.nodes) == 3
# create a new model_one in model_one.sql and re-parse
write_file(model_one_sql, project.project_root, "models", "model_one.sql")
with pytest.raises(DuplicateVersionedUnversionedError):
run_dbt(["parse"])

View File

@@ -57,11 +57,6 @@ class TestFlags:
assert hasattr(flags, "LOG_PATH")
assert getattr(flags, "LOG_PATH") == Path("logs")
def test_log_file_max_size_default(self, run_context):
flags = Flags(run_context)
assert hasattr(flags, "LOG_FILE_MAX_BYTES")
assert getattr(flags, "LOG_FILE_MAX_BYTES") == 10 * 1024 * 1024
@pytest.mark.parametrize(
"set_stats_param,do_not_track,expected_anonymous_usage_stats",
[

View File

@@ -424,9 +424,6 @@ def test_invocation_args_to_dict_in_macro_runtime_context(
# Comes from unit/utils.py config_from_parts_or_dicts method
assert ctx["invocation_args_dict"]["profile_dir"] == "/dev/null"
assert isinstance(ctx["invocation_args_dict"]["warn_error_options"], Dict)
assert ctx["invocation_args_dict"]["warn_error_options"] == {"include": [], "exclude": []}
def test_model_parse_context(config_postgres, manifest_fx, get_adapter, get_include_paths):
ctx = providers.generate_parser_model_context(

View File

@@ -28,11 +28,6 @@ class TestCoreDbtUtils(unittest.TestCase):
connection_exception_retry(lambda: Counter._add_with_untar_exception(), 5)
self.assertEqual(2, counter) # 2 = original attempt returned ReadError, plus 1 retry
def test_connection_exception_retry_success_failed_eofexception(self):
Counter._reset()
connection_exception_retry(lambda: Counter._add_with_eof_exception(), 5)
self.assertEqual(2, counter) # 2 = original attempt returned EOFError, plus 1 retry
counter: int = 0
@@ -62,12 +57,6 @@ class Counter:
if counter < 2:
raise tarfile.ReadError
def _add_with_eof_exception():
global counter
counter += 1
if counter < 2:
raise EOFError
def _reset():
global counter
counter = 0

View File

@@ -2,7 +2,7 @@ from argparse import Namespace
import pytest
import dbt.flags as flags
from dbt.events.functions import msg_to_dict, warn_or_error, setup_event_logger
from dbt.events.functions import msg_to_dict, warn_or_error
from dbt.events.types import InfoLevel, NoNodesForSelectionCriteria
from dbt.exceptions import EventCompilationError
@@ -59,13 +59,3 @@ def test_msg_to_dict_handles_exceptions_gracefully():
assert (
False
), f"We expect `msg_to_dict` to gracefully handle exceptions, but it raised {exc}"
def test_setup_event_logger_specify_max_bytes(mocker):
patched_file_handler = mocker.patch("dbt.events.eventmgr.RotatingFileHandler")
args = Namespace(log_file_max_bytes=1234567)
flags.set_from_args(args, {})
setup_event_logger(flags.get_flags())
patched_file_handler.assert_called_once_with(
filename="logs/dbt.log", encoding="utf8", maxBytes=1234567, backupCount=5
)

View File

@@ -18,7 +18,6 @@ from dbt import tracking
from dbt.contracts.files import SourceFile, FileHash, FilePath
from dbt.contracts.graph.manifest import MacroManifest, ManifestStateCheck
from dbt.graph import NodeSelector, parse_difference
from dbt.events.functions import setup_event_logger
try:
from queue import Empty
@@ -141,7 +140,6 @@ class GraphTest(unittest.TestCase):
config = config_from_parts_or_dicts(project=cfg, profile=self.profile)
dbt.flags.set_from_args(Namespace(), config)
setup_event_logger(dbt.flags.get_flags())
object.__setattr__(dbt.flags.get_flags(), "PARTIAL_PARSE", False)
return config

View File

@@ -1,20 +1,7 @@
import pytest
from dbt.exceptions import DbtRuntimeError
from dbt.plugins import PluginManager, dbtPlugin, dbt_hook
from dbt.plugins.manifest import PluginNodes, ModelNodeArgs
from dbt.plugins.contracts import PluginArtifacts, PluginArtifact
from dbt.plugins.exceptions import dbtPluginError
class ExceptionInitializePlugin(dbtPlugin):
def initialize(self) -> None:
raise Exception("plugin error message")
class dbtRuntimeErrorInitializePlugin(dbtPlugin):
def initialize(self) -> None:
raise dbtPluginError("plugin error message")
class GetNodesPlugin(dbtPlugin):
@@ -55,14 +42,6 @@ class TestPluginManager:
def get_artifacts_plugins(self, get_artifacts_plugin):
return [get_artifacts_plugin, GetArtifactsPlugin(project_name="test2")]
def test_plugin_manager_init_exception(self):
with pytest.raises(DbtRuntimeError, match="plugin error message"):
PluginManager(plugins=[ExceptionInitializePlugin(project_name="test")])
def test_plugin_manager_init_plugin_exception(self):
with pytest.raises(DbtRuntimeError, match="^Runtime Error\n plugin error message"):
PluginManager(plugins=[dbtRuntimeErrorInitializePlugin(project_name="test")])
def test_plugin_manager_init_single_hook(self, get_nodes_plugin):
pm = PluginManager(plugins=[get_nodes_plugin])
assert len(pm.hooks) == 1