Compare commits

...

46 Commits

Author SHA1 Message Date
Gerda Shank
a0e32c8722 Merge branch 'main' into mashumaro_fixes 2025-01-09 13:54:41 -05:00
Gerda Shank
5f94de572c Put back Union order changes, use dbtClassMixin in test_serialization.py 2024-12-19 13:41:29 -05:00
Gerda Shank
d1274b1655 Merge branch 'main' into mashumaro_fixes 2024-12-19 13:26:32 -05:00
Gerda Shank
715ad2ff1a dev-requirements 2024-12-19 13:19:15 -05:00
Gerda Shank
4585bd032b formatting 2024-12-16 15:55:28 -05:00
Gerda Shank
ebc2359f1e Remove requirements for 3.15 2024-12-12 10:16:22 -05:00
Gerda Shank
e91914490a Merge branch 'main' into mashumaro_fixes 2024-12-12 10:00:49 -05:00
Gerda Shank
0fb57ee7f0 Update return type of generate_node_config 2024-12-11 18:02:00 -05:00
Gerda Shank
f0117892b9 Remove base parameter from get_config_for 2024-12-11 17:23:42 -05:00
Gerda Shank
c1370412d0 Merge branch 'main' into mashumaro_fixes 2024-12-10 12:41:35 -05:00
Gerda Shank
1a30f648bd Update test_serialization to simplify for mashumaro, add test_graph_serialization.py 2024-12-10 11:05:28 -05:00
Gerda Shank
923b8b6341 add test for serialization of ExternalPartitions in ExternalTable 2024-12-09 12:15:52 -05:00
Gerda Shank
31ce086116 More comments 2024-12-04 11:38:40 -05:00
Gerda Shank
e6fc9b0c90 Remove unnecessary mangle_hooks, other comments and cleanup 2024-12-03 17:58:05 -05:00
Gerda Shank
049418e436 More cleanup 2024-12-03 17:33:11 -05:00
Gerda Shank
c0fd389341 rename model_configs resource_configs 2024-12-03 17:10:34 -05:00
Gerda Shank
c16cbe05e7 Rename ContextConfig ConfigBuilder 2024-12-03 15:47:38 -05:00
Gerda Shank
3f7ee0ed1e Remove unnecessary ConfigSource 2024-12-03 14:53:09 -05:00
Gerda Shank
13105cd930 fix expected_manifest version: "2" 2024-12-03 14:12:08 -05:00
Gerda Shank
1c709d2c19 Remove skips from tests 2024-12-03 14:04:42 -05:00
Gerda Shank
7d78b475ef Bump mashumaro again 2024-12-03 13:45:20 -05:00
Gerda Shank
1afb2bb04f fix hooks before validation 2024-12-03 12:35:32 -05:00
Gerda Shank
bbfc6e6887 rename config to context_config 2024-12-03 11:26:19 -05:00
Gerda Shank
54adabe2c7 Fix a few tests, remove unnecessary finalize_and_validate calls 2024-12-02 22:11:36 -05:00
Gerda Shank
b4f72e403b Use dbt-common branch 2024-12-02 20:58:43 -05:00
Gerda Shank
4554eb36a6 comment 2024-12-02 17:41:05 -05:00
Gerda Shank
d5616de14b passes unit tests 2024-12-02 17:14:35 -05:00
Gerda Shank
86791e0ff5 Remove unnecessary "base" parameter 2024-12-02 12:51:38 -05:00
Gerda Shank
5b4af1741e Changie 2024-12-02 11:54:50 -05:00
Gerda Shank
25899ca695 Change order of external.partitions schema elements 2024-12-02 11:47:16 -05:00
Gerda Shank
9c6883b707 Change order of Union in source definitions partitions 2024-12-02 11:45:38 -05:00
Gerda Shank
42a1afb0d9 Make specialized 'calculate_node_config' methods 2024-11-30 14:34:02 -05:00
Gerda Shank
df23c7d982 Skip a couple of tests until mash 3.15 2024-11-30 13:49:51 -05:00
Gerda Shank
524ce54d82 Fix NodeVersion in artifacts test 2024-11-26 22:25:46 -05:00
Gerda Shank
9df05a87b5 Put back default for validate in _update_from_config 2024-11-26 21:52:36 -05:00
Gerda Shank
baf42c4a0a Again 2024-11-26 21:29:07 -05:00
Gerda Shank
9d6daa1884 Use older mashumaro 2024-11-26 21:04:14 -05:00
Gerda Shank
e47bed34fa Put back test_disabled_model.py 2024-11-26 21:03:20 -05:00
Gerda Shank
b81632e0ae Comments 2024-11-26 21:01:29 -05:00
Gerda Shank
dfca4e572a error classes 2024-11-26 13:41:39 -05:00
Gerda Shank
c0ea26e143 validate in "update_from" 2024-11-26 13:34:24 -05:00
Gerda Shank
7876edb7dc Update v12.json schema 2024-11-25 21:57:49 -05:00
Gerda Shank
354af6e7ff specify dbt-adapters branch 2024-11-25 21:52:30 -05:00
Gerda Shank
52fa7c0956 Try pinning to >=3.15,<4.0 2024-11-25 21:41:41 -05:00
Gerda Shank
37be156f42 Bump up mashumaro version again 2024-11-25 18:03:14 -05:00
Gerda Shank
261e1245a8 Fix NodeVersion definition, make_semantic_model utility 2024-11-25 18:01:24 -05:00
27 changed files with 616 additions and 436 deletions

View File

@@ -0,0 +1,6 @@
kind: Under the Hood
body: Support upgrading mashumaro to 3.15
time: 2024-12-02T11:54:45.103325-05:00
custom:
Author: gshank
Issue: "11044"

View File

@@ -12,7 +12,7 @@ from dbt_common.contracts.util import Mergeable
from dbt_common.dataclass_schema import ExtensibleDbtClassMixin, dbtClassMixin from dbt_common.dataclass_schema import ExtensibleDbtClassMixin, dbtClassMixin
from dbt_semantic_interfaces.type_enums import TimeGranularity from dbt_semantic_interfaces.type_enums import TimeGranularity
NodeVersion = Union[str, float] NodeVersion = Union[int, float, str]
@dataclass @dataclass

View File

@@ -71,12 +71,6 @@ class SnapshotConfig(NodeConfig):
if self.materialized and self.materialized != "snapshot": if self.materialized and self.materialized != "snapshot":
raise ValidationError("A snapshot must have a materialized value of 'snapshot'") raise ValidationError("A snapshot must have a materialized value of 'snapshot'")
# Called by "calculate_node_config_dict" in ContextConfigGenerator
def finalize_and_validate(self):
data = self.to_dict(omit_none=True)
self.validate(data)
return self.from_dict(data)
@dataclass @dataclass
class Snapshot(CompiledResource): class Snapshot(CompiledResource):

View File

@@ -40,7 +40,7 @@ class ExternalTable(AdditionalPropertiesAllowed, Mergeable):
file_format: Optional[str] = None file_format: Optional[str] = None
row_format: Optional[str] = None row_format: Optional[str] = None
tbl_properties: Optional[str] = None tbl_properties: Optional[str] = None
partitions: Optional[Union[List[str], List[ExternalPartition]]] = None partitions: Optional[Union[List[ExternalPartition], List[str]]] = None
def __bool__(self): def __bool__(self):
return self.location is not None return self.location is not None

View File

@@ -1,8 +1,9 @@
from abc import abstractmethod from abc import abstractmethod
from copy import deepcopy from copy import deepcopy
from dataclasses import dataclass from dataclasses import dataclass
from typing import Any, Dict, Generic, Iterator, List, Optional, TypeVar from typing import Any, Dict, Generic, Iterator, List, Optional, Type, TypeVar
from dbt import hooks
from dbt.adapters.factory import get_config_class_by_name from dbt.adapters.factory import get_config_class_by_name
from dbt.config import IsFQNResource, Project, RuntimeConfig from dbt.config import IsFQNResource, Project, RuntimeConfig
from dbt.contracts.graph.model_config import get_config_for from dbt.contracts.graph.model_config import get_config_for
@@ -26,81 +27,21 @@ T = TypeVar("T") # any old type
C = TypeVar("C", bound=BaseConfig) C = TypeVar("C", bound=BaseConfig)
class ConfigSource: def fix_hooks(config_dict: Dict[str, Any]):
def __init__(self, project): """Given a config dict that may have `pre-hook`/`post-hook` keys,
self.project = project convert it from the yucky maybe-a-string, maybe-a-dict to a dict.
"""
def get_config_dict(self, resource_type: NodeType): ... # Like most of parsing, this is a horrible hack :(
for key in hooks.ModelHookType:
if key in config_dict:
config_dict[key] = [hooks.get_hook_dict(h) for h in config_dict[key]]
class UnrenderedConfig(ConfigSource): class BaseConfigGenerator(Generic[T]):
def __init__(self, project: Project):
self.project = project
def get_config_dict(self, resource_type: NodeType) -> Dict[str, Any]:
unrendered = self.project.unrendered.project_dict
if resource_type == NodeType.Seed:
model_configs = unrendered.get("seeds")
elif resource_type == NodeType.Snapshot:
model_configs = unrendered.get("snapshots")
elif resource_type == NodeType.Source:
model_configs = unrendered.get("sources")
elif resource_type == NodeType.Test:
model_configs = unrendered.get("data_tests")
elif resource_type == NodeType.Metric:
model_configs = unrendered.get("metrics")
elif resource_type == NodeType.SemanticModel:
model_configs = unrendered.get("semantic_models")
elif resource_type == NodeType.SavedQuery:
model_configs = unrendered.get("saved_queries")
elif resource_type == NodeType.Exposure:
model_configs = unrendered.get("exposures")
elif resource_type == NodeType.Unit:
model_configs = unrendered.get("unit_tests")
else:
model_configs = unrendered.get("models")
if model_configs is None:
return {}
else:
return model_configs
class RenderedConfig(ConfigSource):
def __init__(self, project: Project):
self.project = project
def get_config_dict(self, resource_type: NodeType) -> Dict[str, Any]:
if resource_type == NodeType.Seed:
model_configs = self.project.seeds
elif resource_type == NodeType.Snapshot:
model_configs = self.project.snapshots
elif resource_type == NodeType.Source:
model_configs = self.project.sources
elif resource_type == NodeType.Test:
model_configs = self.project.data_tests
elif resource_type == NodeType.Metric:
model_configs = self.project.metrics
elif resource_type == NodeType.SemanticModel:
model_configs = self.project.semantic_models
elif resource_type == NodeType.SavedQuery:
model_configs = self.project.saved_queries
elif resource_type == NodeType.Exposure:
model_configs = self.project.exposures
elif resource_type == NodeType.Unit:
model_configs = self.project.unit_tests
else:
model_configs = self.project.models
return model_configs
class BaseContextConfigGenerator(Generic[T]):
def __init__(self, active_project: RuntimeConfig): def __init__(self, active_project: RuntimeConfig):
self._active_project = active_project self._active_project = active_project
def get_config_source(self, project: Project) -> ConfigSource: def get_node_project_config(self, project_name: str):
return RenderedConfig(project)
def get_node_project(self, project_name: str):
if project_name == self._active_project.project_name: if project_name == self._active_project.project_name:
return self._active_project return self._active_project
dependencies = self._active_project.load_dependencies() dependencies = self._active_project.load_dependencies()
@@ -114,9 +55,8 @@ class BaseContextConfigGenerator(Generic[T]):
def _project_configs( def _project_configs(
self, project: Project, fqn: List[str], resource_type: NodeType self, project: Project, fqn: List[str], resource_type: NodeType
) -> Iterator[Dict[str, Any]]: ) -> Iterator[Dict[str, Any]]:
src = self.get_config_source(project) resource_configs = self.get_resource_configs(project, resource_type)
model_configs = src.get_config_dict(resource_type) for level_config in fqn_search(resource_configs, fqn):
for level_config in fqn_search(model_configs, fqn):
result = {} result = {}
for key, value in level_config.items(): for key, value in level_config.items():
if key.startswith("+"): if key.startswith("+"):
@@ -131,85 +71,112 @@ class BaseContextConfigGenerator(Generic[T]):
) -> Iterator[Dict[str, Any]]: ) -> Iterator[Dict[str, Any]]:
return self._project_configs(self._active_project, fqn, resource_type) return self._project_configs(self._active_project, fqn, resource_type)
@abstractmethod def combine_config_dicts(
def _update_from_config(
self, result: T, partial: Dict[str, Any], validate: bool = False
) -> T: ...
@abstractmethod
def initial_result(self, resource_type: NodeType, base: bool) -> T: ...
def calculate_node_config(
self, self,
config_call_dict: Dict[str, Any], config_call_dict: Dict[str, Any],
fqn: List[str], fqn: List[str],
resource_type: NodeType, resource_type: NodeType,
project_name: str, project_name: str,
base: bool,
patch_config_dict: Optional[Dict[str, Any]] = None, patch_config_dict: Optional[Dict[str, Any]] = None,
) -> BaseConfig: ) -> Dict[str, Any]:
own_config = self.get_node_project(project_name) """This method takes resource configs from the project, the model (if applicable),
and the patch, and combines them into one config dictionary."""
result = self.initial_result(resource_type=resource_type, base=base) project_config = self.get_node_project_config(project_name)
config_cls = get_config_for(resource_type)
project_configs = self._project_configs(own_config, fqn, resource_type) # creates "default" config object. Unrendered config starts with
# empty dictionary, rendered config starts with to_dict() from empty config object.
config_dict = self.initial_result(config_cls)
# Update with project configs
project_configs = self._project_configs(project_config, fqn, resource_type)
for fqn_config in project_configs: for fqn_config in project_configs:
result = self._update_from_config(result, fqn_config) config_dict = self._update_from_config(config_cls, config_dict, fqn_config)
# When schema files patch config, it has lower precedence than # Update with schema file configs (patches)
# config in the models (config_call_dict), so we add the patch_config_dict
# before the config_call_dict
if patch_config_dict: if patch_config_dict:
result = self._update_from_config(result, patch_config_dict) config_dict = self._update_from_config(config_cls, config_dict, patch_config_dict)
# config_calls are created in the 'experimental' model parser and # Update with config dictionary from sql files (config_call_dict)
# the ParseConfigObject (via add_config_call) config_dict = self._update_from_config(config_cls, config_dict, config_call_dict)
result = self._update_from_config(result, config_call_dict)
if own_config.project_name != self._active_project.project_name: # If this is not the root project, update with configs from root project
if project_config.project_name != self._active_project.project_name:
for fqn_config in self._active_project_configs(fqn, resource_type): for fqn_config in self._active_project_configs(fqn, resource_type):
result = self._update_from_config(result, fqn_config) config_dict = self._update_from_config(config_cls, config_dict, fqn_config)
# this is mostly impactful in the snapshot config case return config_dict
# TODO CT-211
return result # type: ignore[return-value]
@abstractmethod @abstractmethod
def calculate_node_config_dict( def get_resource_configs(
self, self, project: Project, resource_type: NodeType
config_call_dict: Dict[str, Any],
fqn: List[str],
resource_type: NodeType,
project_name: str,
base: bool,
patch_config_dict: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]: ... ) -> Dict[str, Any]: ...
@abstractmethod
def _update_from_config(
self, config_cls: Type[BaseConfig], result_dict: Dict[str, Any], partial: Dict[str, Any]
) -> Dict[str, Any]: ...
@abstractmethod
def initial_result(self, config_cls: Type[BaseConfig]) -> Dict[str, Any]: ...
@abstractmethod
def generate_node_config(
self,
config_call_dict: Dict[str, Any],
fqn: List[str],
resource_type: NodeType,
project_name: str,
patch_config_dict: Optional[Dict[str, Any]] = None,
): ...
class RenderedConfigGenerator(BaseConfigGenerator[C]):
"""This class produces the config dictionary used to create the resource config."""
class ContextConfigGenerator(BaseContextConfigGenerator[C]):
def __init__(self, active_project: RuntimeConfig): def __init__(self, active_project: RuntimeConfig):
self._active_project = active_project self._active_project = active_project
def get_config_source(self, project: Project) -> ConfigSource: def get_resource_configs(self, project: Project, resource_type: NodeType) -> Dict[str, Any]:
return RenderedConfig(project) if resource_type == NodeType.Seed:
resource_configs = project.seeds
elif resource_type == NodeType.Snapshot:
resource_configs = project.snapshots
elif resource_type == NodeType.Source:
resource_configs = project.sources
elif resource_type == NodeType.Test:
resource_configs = project.data_tests
elif resource_type == NodeType.Metric:
resource_configs = project.metrics
elif resource_type == NodeType.SemanticModel:
resource_configs = project.semantic_models
elif resource_type == NodeType.SavedQuery:
resource_configs = project.saved_queries
elif resource_type == NodeType.Exposure:
resource_configs = project.exposures
elif resource_type == NodeType.Unit:
resource_configs = project.unit_tests
else:
resource_configs = project.models
return resource_configs
def initial_result(self, resource_type: NodeType, base: bool) -> C: def initial_result(self, config_cls: Type[BaseConfig]) -> Dict[str, Any]:
# defaults, own_config, config calls, active_config (if != own_config) # Produce a dictionary with config defaults.
config_cls = get_config_for(resource_type, base=base) result = config_cls.from_dict({}).to_dict()
# Calculate the defaults. We don't want to validate the defaults,
# because it might be invalid in the case of required config members
# (such as on snapshots!)
result = config_cls.from_dict({})
return result return result
def _update_from_config(self, result: C, partial: Dict[str, Any], validate: bool = False) -> C: def _update_from_config(
self, config_cls: Type[BaseConfig], result_dict: Dict[str, Any], partial: Dict[str, Any]
) -> Dict[str, Any]:
translated = self._active_project.credentials.translate_aliases(partial) translated = self._active_project.credentials.translate_aliases(partial)
translated = self.translate_hook_names(translated) translated = self.translate_hook_names(translated)
adapter_type = self._active_project.credentials.type adapter_type = self._active_project.credentials.type
adapter_config_cls = get_config_class_by_name(adapter_type) adapter_config_cls = get_config_class_by_name(adapter_type)
updated = result.update_from(translated, adapter_config_cls, validate=validate) # The "update_from" method in BaseConfig merges dictionaries using MergeBehavior
updated = config_cls.update_from(result_dict, translated, adapter_config_cls)
return updated return updated
def translate_hook_names(self, project_dict): def translate_hook_names(self, project_dict):
@@ -222,69 +189,107 @@ class ContextConfigGenerator(BaseContextConfigGenerator[C]):
project_dict["post-hook"] = project_dict.pop("post_hook") project_dict["post-hook"] = project_dict.pop("post_hook")
return project_dict return project_dict
def calculate_node_config_dict( # RenderedConfigGenerator. Validation is performed, and a config object is returned.
def generate_node_config(
self, self,
config_call_dict: Dict[str, Any], config_call_dict: Dict[str, Any],
fqn: List[str], fqn: List[str],
resource_type: NodeType, resource_type: NodeType,
project_name: str, project_name: str,
base: bool,
patch_config_dict: Optional[dict] = None, patch_config_dict: Optional[dict] = None,
) -> Dict[str, Any]: ) -> BaseConfig:
config = self.calculate_node_config(
config_cls = get_config_for(resource_type)
config_dict = self.combine_config_dicts(
config_call_dict=config_call_dict, config_call_dict=config_call_dict,
fqn=fqn, fqn=fqn,
resource_type=resource_type, resource_type=resource_type,
project_name=project_name, project_name=project_name,
base=base,
patch_config_dict=patch_config_dict, patch_config_dict=patch_config_dict,
) )
fix_hooks(config_dict)
try: try:
finalized = config.finalize_and_validate() config_cls.validate(config_dict)
return finalized.to_dict(omit_none=True) config_obj = config_cls.from_dict(config_dict)
return config_obj
except ValidationError as exc: except ValidationError as exc:
# we got a ValidationError - probably bad types in config() # we got a ValidationError - probably bad types in config()
raise SchemaConfigError(exc, node=config) from exc config_obj = config_cls.from_dict(config_dict)
raise SchemaConfigError(exc, node=config_obj) from exc
class UnrenderedConfigGenerator(BaseContextConfigGenerator[Dict[str, Any]]): class UnrenderedConfigGenerator(BaseConfigGenerator[Dict[str, Any]]):
def get_config_source(self, project: Project) -> ConfigSource: """This class produces the unrendered_config dictionary in the resource."""
return UnrenderedConfig(project)
def calculate_node_config_dict( def get_resource_configs(self, project: Project, resource_type: NodeType) -> Dict[str, Any]:
"""Get configs for this resource_type from the project's unrendered config"""
unrendered = project.unrendered.project_dict
if resource_type == NodeType.Seed:
resource_configs = unrendered.get("seeds")
elif resource_type == NodeType.Snapshot:
resource_configs = unrendered.get("snapshots")
elif resource_type == NodeType.Source:
resource_configs = unrendered.get("sources")
elif resource_type == NodeType.Test:
resource_configs = unrendered.get("data_tests")
elif resource_type == NodeType.Metric:
resource_configs = unrendered.get("metrics")
elif resource_type == NodeType.SemanticModel:
resource_configs = unrendered.get("semantic_models")
elif resource_type == NodeType.SavedQuery:
resource_configs = unrendered.get("saved_queries")
elif resource_type == NodeType.Exposure:
resource_configs = unrendered.get("exposures")
elif resource_type == NodeType.Unit:
resource_configs = unrendered.get("unit_tests")
else:
resource_configs = unrendered.get("models")
if resource_configs is None:
return {}
else:
return resource_configs
# UnrenderedConfigGenerator. No validation is performed and a dictionary is returned.
def generate_node_config(
self, self,
config_call_dict: Dict[str, Any], config_call_dict: Dict[str, Any],
fqn: List[str], fqn: List[str],
resource_type: NodeType, resource_type: NodeType,
project_name: str, project_name: str,
base: bool,
patch_config_dict: Optional[dict] = None, patch_config_dict: Optional[dict] = None,
) -> Dict[str, Any]: ) -> Dict[str, Any]:
# TODO CT-211
return self.calculate_node_config( result = self.combine_config_dicts(
config_call_dict=config_call_dict, config_call_dict=config_call_dict,
fqn=fqn, fqn=fqn,
resource_type=resource_type, resource_type=resource_type,
project_name=project_name, project_name=project_name,
base=base,
patch_config_dict=patch_config_dict, patch_config_dict=patch_config_dict,
) # type: ignore[return-value] )
return result
def initial_result(self, resource_type: NodeType, base: bool) -> Dict[str, Any]: def initial_result(self, config_cls: Type[BaseConfig]) -> Dict[str, Any]:
# We don't want the config defaults here, just the configs which have
# actually been set.
return {} return {}
def _update_from_config( def _update_from_config(
self, self,
result: Dict[str, Any], config_cls: Type[BaseConfig],
result_dict: Dict[str, Any],
partial: Dict[str, Any], partial: Dict[str, Any],
validate: bool = False,
) -> Dict[str, Any]: ) -> Dict[str, Any]:
translated = self._active_project.credentials.translate_aliases(partial) translated = self._active_project.credentials.translate_aliases(partial)
result.update(translated) result_dict.update(translated)
return result return result_dict
class ContextConfig: class ConfigBuilder:
"""This object is included in various jinja contexts in order to collect the _config_call_dicts
and the _unrendered_config_call dicts from the config calls in sql files.
It is then used to run "build_config_dict" which calls the rendered or unrendered
config generators and returns a config dictionary."""
def __init__( def __init__(
self, self,
active_project: RuntimeConfig, active_project: RuntimeConfig,
@@ -309,18 +314,14 @@ class ContextConfig:
def build_config_dict( def build_config_dict(
self, self,
base: bool = False,
*,
rendered: bool = True, rendered: bool = True,
patch_config_dict: Optional[dict] = None, patch_config_dict: Optional[dict] = None,
) -> Dict[str, Any]: ) -> Dict[str, Any]:
if rendered: if rendered:
# TODO CT-211 config_generator = RenderedConfigGenerator(self._active_project) # type: ignore[var-annotated]
src = ContextConfigGenerator(self._active_project) # type: ignore[var-annotated]
config_call_dict = self._config_call_dict config_call_dict = self._config_call_dict
else: else: # unrendered
# TODO CT-211 config_generator = UnrenderedConfigGenerator(self._active_project) # type: ignore[assignment]
src = UnrenderedConfigGenerator(self._active_project) # type: ignore[assignment]
# preserve legacy behaviour - using unreliable (potentially rendered) _config_call_dict # preserve legacy behaviour - using unreliable (potentially rendered) _config_call_dict
if get_flags().state_modified_compare_more_unrendered_values is False: if get_flags().state_modified_compare_more_unrendered_values is False:
@@ -333,11 +334,14 @@ class ContextConfig:
else: else:
config_call_dict = self._unrendered_config_call_dict config_call_dict = self._unrendered_config_call_dict
return src.calculate_node_config_dict( config = config_generator.generate_node_config(
config_call_dict=config_call_dict, config_call_dict=config_call_dict,
fqn=self._fqn, fqn=self._fqn,
resource_type=self._resource_type, resource_type=self._resource_type,
project_name=self._project_name, project_name=self._project_name,
base=base,
patch_config_dict=patch_config_dict, patch_config_dict=patch_config_dict,
) )
if isinstance(config, BaseConfig):
return config.to_dict(omit_none=True)
else:
return config

View File

@@ -40,7 +40,7 @@ from dbt.config import IsFQNResource, Project, RuntimeConfig
from dbt.constants import DEFAULT_ENV_PLACEHOLDER from dbt.constants import DEFAULT_ENV_PLACEHOLDER
from dbt.context.base import Var, contextmember, contextproperty from dbt.context.base import Var, contextmember, contextproperty
from dbt.context.configured import FQNLookup from dbt.context.configured import FQNLookup
from dbt.context.context_config import ContextConfig from dbt.context.context_config import ConfigBuilder
from dbt.context.exceptions_jinja import wrapped_exports from dbt.context.exceptions_jinja import wrapped_exports
from dbt.context.macro_resolver import MacroResolver, TestMacroNamespace from dbt.context.macro_resolver import MacroResolver, TestMacroNamespace
from dbt.context.macros import MacroNamespace, MacroNamespaceBuilder from dbt.context.macros import MacroNamespace, MacroNamespaceBuilder
@@ -367,14 +367,14 @@ class BaseMetricResolver(BaseResolver):
class Config(Protocol): class Config(Protocol):
def __init__(self, model, context_config: Optional[ContextConfig]): ... def __init__(self, model, config_builder: Optional[ConfigBuilder]): ...
# Implementation of "config(..)" calls in models # Implementation of "config(..)" calls in models
class ParseConfigObject(Config): class ParseConfigObject(Config):
def __init__(self, model, context_config: Optional[ContextConfig]): def __init__(self, model, config_builder: Optional[ConfigBuilder]):
self.model = model self.model = model
self.context_config = context_config self.config_builder = config_builder
def _transform_config(self, config): def _transform_config(self, config):
for oldkey in ("pre_hook", "post_hook"): for oldkey in ("pre_hook", "post_hook"):
@@ -395,19 +395,19 @@ class ParseConfigObject(Config):
opts = self._transform_config(opts) opts = self._transform_config(opts)
# it's ok to have a parse context with no context config, but you must # it's ok to have a parse context with no config builder, but you must
# not call it! # not call it!
if self.context_config is None: if self.config_builder is None:
raise DbtRuntimeError("At parse time, did not receive a context config") raise DbtRuntimeError("At parse time, did not receive a config builder")
# Track unrendered opts to build parsed node unrendered_config later on # Track unrendered opts to build parsed node unrendered_config later on
if get_flags().state_modified_compare_more_unrendered_values: if get_flags().state_modified_compare_more_unrendered_values:
unrendered_config = statically_parse_unrendered_config(self.model.raw_code) unrendered_config = statically_parse_unrendered_config(self.model.raw_code)
if unrendered_config: if unrendered_config:
self.context_config.add_unrendered_config_call(unrendered_config) self.config_builder.add_unrendered_config_call(unrendered_config)
# Use rendered opts to populate context_config # Use rendered opts to populate config builder
self.context_config.add_config_call(opts) self.config_builder.add_config_call(opts)
return "" return ""
def set(self, name, value): def set(self, name, value):
@@ -427,7 +427,7 @@ class ParseConfigObject(Config):
class RuntimeConfigObject(Config): class RuntimeConfigObject(Config):
def __init__(self, model, context_config: Optional[ContextConfig] = None): def __init__(self, model, config_builder: Optional[ConfigBuilder] = None):
self.model = model self.model = model
# we never use or get a config, only the parser cares # we never use or get a config, only the parser cares
@@ -887,7 +887,7 @@ class ProviderContext(ManifestContext):
config: RuntimeConfig, config: RuntimeConfig,
manifest: Manifest, manifest: Manifest,
provider: Provider, provider: Provider,
context_config: Optional[ContextConfig], config_builder: Optional[ConfigBuilder],
) -> None: ) -> None:
if provider is None: if provider is None:
raise DbtInternalError(f"Invalid provider given to context: {provider}") raise DbtInternalError(f"Invalid provider given to context: {provider}")
@@ -896,7 +896,7 @@ class ProviderContext(ManifestContext):
self.model: Union[Macro, ManifestNode, SourceDefinition] = model self.model: Union[Macro, ManifestNode, SourceDefinition] = model
super().__init__(config, manifest, model.package_name) super().__init__(config, manifest, model.package_name)
self.sql_results: Dict[str, Optional[AttrDict]] = {} self.sql_results: Dict[str, Optional[AttrDict]] = {}
self.context_config: Optional[ContextConfig] = context_config self.config_builder: Optional[ConfigBuilder] = config_builder
self.provider: Provider = provider self.provider: Provider = provider
self.adapter = get_adapter(self.config) self.adapter = get_adapter(self.config)
# The macro namespace is used in creating the DatabaseWrapper # The macro namespace is used in creating the DatabaseWrapper
@@ -1165,7 +1165,7 @@ class ProviderContext(ManifestContext):
{%- set unique_key = config.require('unique_key') -%} {%- set unique_key = config.require('unique_key') -%}
... ...
""" # noqa """ # noqa
return self.provider.Config(self.model, self.context_config) return self.provider.Config(self.model, self.config_builder)
@contextproperty() @contextproperty()
def execute(self) -> bool: def execute(self) -> bool:
@@ -1703,12 +1703,12 @@ def generate_parser_model_context(
model: ManifestNode, model: ManifestNode,
config: RuntimeConfig, config: RuntimeConfig,
manifest: Manifest, manifest: Manifest,
context_config: ContextConfig, config_builder: ConfigBuilder,
) -> Dict[str, Any]: ) -> Dict[str, Any]:
# The __init__ method of ModelContext also initializes # The __init__ method of ModelContext also initializes
# a ManifestContext object which creates a MacroNamespaceBuilder # a ManifestContext object which creates a MacroNamespaceBuilder
# which adds every macro in the Manifest. # which adds every macro in the Manifest.
ctx = ModelContext(model, config, manifest, ParseProvider(), context_config) ctx = ModelContext(model, config, manifest, ParseProvider(), config_builder)
# The 'to_dict' method in ManifestContext moves all of the macro names # The 'to_dict' method in ManifestContext moves all of the macro names
# in the macro 'namespace' up to top level keys # in the macro 'namespace' up to top level keys
return ctx.to_dict() return ctx.to_dict()
@@ -1910,14 +1910,14 @@ class TestContext(ProviderContext):
config: RuntimeConfig, config: RuntimeConfig,
manifest: Manifest, manifest: Manifest,
provider: Provider, provider: Provider,
context_config: Optional[ContextConfig], config_builder: Optional[ConfigBuilder],
macro_resolver: MacroResolver, macro_resolver: MacroResolver,
) -> None: ) -> None:
# this must be before super init so that macro_resolver exists for # this must be before super init so that macro_resolver exists for
# build_namespace # build_namespace
self.macro_resolver = macro_resolver self.macro_resolver = macro_resolver
self.thread_ctx = MacroStack() self.thread_ctx = MacroStack()
super().__init__(model, config, manifest, provider, context_config) super().__init__(model, config, manifest, provider, config_builder)
self._build_test_namespace() self._build_test_namespace()
# We need to rebuild this because it's already been built by # We need to rebuild this because it's already been built by
# the ProviderContext with the wrong namespace. # the ProviderContext with the wrong namespace.
@@ -1989,10 +1989,10 @@ def generate_test_context(
model: ManifestNode, model: ManifestNode,
config: RuntimeConfig, config: RuntimeConfig,
manifest: Manifest, manifest: Manifest,
context_config: ContextConfig, config_builder: ConfigBuilder,
macro_resolver: MacroResolver, macro_resolver: MacroResolver,
) -> Dict[str, Any]: ) -> Dict[str, Any]:
ctx = TestContext(model, config, manifest, ParseProvider(), context_config, macro_resolver) ctx = TestContext(model, config, manifest, ParseProvider(), config_builder, macro_resolver)
# The 'to_dict' method in ManifestContext moves all of the macro names # The 'to_dict' method in ManifestContext moves all of the macro names
# in the macro 'namespace' up to top level keys # in the macro 'namespace' up to top level keys
return ctx.to_dict() return ctx.to_dict()

View File

@@ -53,14 +53,5 @@ RESOURCE_TYPES: Dict[NodeType, Type[BaseConfig]] = {
} }
# base resource types are like resource types, except nothing has mandatory def get_config_for(resource_type: NodeType) -> Type[BaseConfig]:
# configs. return RESOURCE_TYPES.get(resource_type, NodeConfig)
BASE_RESOURCE_TYPES: Dict[NodeType, Type[BaseConfig]] = RESOURCE_TYPES.copy()
def get_config_for(resource_type: NodeType, base=False) -> Type[BaseConfig]:
if base:
lookup = BASE_RESOURCE_TYPES
else:
lookup = RESOURCE_TYPES
return lookup.get(resource_type, NodeConfig)

View File

@@ -3,12 +3,12 @@ import itertools
import os import os
from typing import Any, Dict, Generic, List, Optional, TypeVar from typing import Any, Dict, Generic, List, Optional, TypeVar
from dbt import hooks, utils from dbt import utils
from dbt.adapters.factory import get_adapter # noqa: F401 from dbt.adapters.factory import get_adapter # noqa: F401
from dbt.artifacts.resources import Contract from dbt.artifacts.resources import Contract
from dbt.clients.jinja import MacroGenerator, get_rendered from dbt.clients.jinja import MacroGenerator, get_rendered
from dbt.config import RuntimeConfig from dbt.config import RuntimeConfig
from dbt.context.context_config import ContextConfig from dbt.context.context_config import ConfigBuilder
from dbt.context.providers import ( from dbt.context.providers import (
generate_generate_name_macro_context, generate_generate_name_macro_context,
generate_parser_model_context, generate_parser_model_context,
@@ -178,15 +178,6 @@ class ConfiguredParser(
fqn.append(name) fqn.append(name)
return fqn return fqn
def _mangle_hooks(self, config):
"""Given a config dict that may have `pre-hook`/`post-hook` keys,
convert it from the yucky maybe-a-string, maybe-a-dict to a dict.
"""
# Like most of parsing, this is a horrible hack :(
for key in hooks.ModelHookType:
if key in config:
config[key] = [hooks.get_hook_dict(h) for h in config[key]]
def _create_error_node( def _create_error_node(
self, name: str, path: str, original_file_path: str, raw_code: str, language: str = "sql" self, name: str, path: str, original_file_path: str, raw_code: str, language: str = "sql"
) -> UnparsedNode: ) -> UnparsedNode:
@@ -209,7 +200,7 @@ class ConfiguredParser(
self, self,
block: ConfiguredBlockType, block: ConfiguredBlockType,
path: str, path: str,
config: ContextConfig, config_builder: ConfigBuilder,
fqn: List[str], fqn: List[str],
name=None, name=None,
**kwargs, **kwargs,
@@ -239,7 +230,7 @@ class ConfiguredParser(
"raw_code": block.contents, "raw_code": block.contents,
"language": language, "language": language,
"unique_id": self.generate_unique_id(name), "unique_id": self.generate_unique_id(name),
"config": self.config_dict(config), "config": self.config_dict(config_builder),
"checksum": block.file.checksum.to_dict(omit_none=True), "checksum": block.file.checksum.to_dict(omit_none=True),
} }
dct.update(kwargs) dct.update(kwargs)
@@ -257,14 +248,18 @@ class ConfiguredParser(
) )
raise DictParseError(exc, node=node) raise DictParseError(exc, node=node)
def _context_for(self, parsed_node: FinalNode, config: ContextConfig) -> Dict[str, Any]: def _context_for(
return generate_parser_model_context(parsed_node, self.root_project, self.manifest, config) self, parsed_node: FinalNode, config_builder: ConfigBuilder
) -> Dict[str, Any]:
return generate_parser_model_context(
parsed_node, self.root_project, self.manifest, config_builder
)
def render_with_context(self, parsed_node: FinalNode, config: ContextConfig): def render_with_context(self, parsed_node: FinalNode, config_builder: ConfigBuilder):
# Given the parsed node and a ContextConfig to use during parsing, # Given the parsed node and a ConfigBuilder to use during parsing,
# render the node's sql with macro capture enabled. # render the node's sql with macro capture enabled.
# Note: this mutates the config object when config calls are rendered. # Note: this mutates the config object when config calls are rendered.
context = self._context_for(parsed_node, config) context = self._context_for(parsed_node, config_builder)
# this goes through the process of rendering, but just throws away # this goes through the process of rendering, but just throws away
# the rendered result. The "macro capture" is the point? # the rendered result. The "macro capture" is the point?
@@ -274,14 +269,12 @@ class ConfiguredParser(
# This is taking the original config for the node, converting it to a dict, # This is taking the original config for the node, converting it to a dict,
# updating the config with new config passed in, then re-creating the # updating the config with new config passed in, then re-creating the
# config from the dict in the node. # config from the dict in the node.
def update_parsed_node_config_dict( def clean_and_fix_config_dict(
self, parsed_node: FinalNode, config_dict: Dict[str, Any] self, parsed_node: FinalNode, config_dict: Dict[str, Any]
) -> None: ) -> None:
# Overwrite node config # Overwrite node config
final_config_dict = parsed_node.config.to_dict(omit_none=True) final_config_dict = parsed_node.config.to_dict(omit_none=True)
final_config_dict.update({k.strip(): v for (k, v) in config_dict.items()}) final_config_dict.update({k.strip(): v for (k, v) in config_dict.items()})
# re-mangle hooks, in case we got new ones
self._mangle_hooks(final_config_dict)
parsed_node.config = parsed_node.config.from_dict(final_config_dict) parsed_node.config = parsed_node.config.from_dict(final_config_dict)
def update_parsed_node_relation_names( def update_parsed_node_relation_names(
@@ -308,29 +301,33 @@ class ConfiguredParser(
def update_parsed_node_config( def update_parsed_node_config(
self, self,
parsed_node: FinalNode, parsed_node: FinalNode,
config: ContextConfig, config_builder: ConfigBuilder,
context=None, context=None,
patch_config_dict=None, patch_config_dict=None,
patch_file_id=None, patch_file_id=None,
) -> None: ) -> None:
"""Given the ContextConfig used for parsing and the parsed node, """Given the ConfigBuilder used for parsing and the parsed node,
generate and set the true values to use, overriding the temporary parse generate the final resource config and the unrendered_config
values set in _build_intermediate_parsed_node.
""" """
# build_config_dict takes the config_call_dict in the ContextConfig object
# and calls calculate_node_config to combine dbt_project configs and
# config calls from SQL files, plus patch configs (from schema files)
# This normalize the config for a model node due #8520; should be improved latter
if not patch_config_dict: if not patch_config_dict:
patch_config_dict = {} patch_config_dict = {}
if ( if (
parsed_node.resource_type == NodeType.Model parsed_node.resource_type == NodeType.Model
and parsed_node.language == ModelLanguage.python and parsed_node.language == ModelLanguage.python
): ):
# This normalize the config for a python model node due #8520; should be improved latter
if "materialized" not in patch_config_dict: if "materialized" not in patch_config_dict:
patch_config_dict["materialized"] = "table" patch_config_dict["materialized"] = "table"
config_dict = config.build_config_dict(patch_config_dict=patch_config_dict)
# build_config_dict takes the config_call_dict in the ConfigBuilder object
# and calls generate_node_config to combine dbt_project configs and
# config calls from SQL files, plus patch configs (from schema files).
# Validation is performed when building the rendered config_dict and
# hooks are converted into hook objects for later rendering.
config_dict = config_builder.build_config_dict(
rendered=True, patch_config_dict=patch_config_dict
)
# Set tags on node provided in config blocks. Tags are additive, so even if # Set tags on node provided in config blocks. Tags are additive, so even if
# config has been built before, we don't have to reset tags in the parsed_node. # config has been built before, we don't have to reset tags in the parsed_node.
@@ -396,16 +393,16 @@ class ConfiguredParser(
# unrendered_config is used to compare the original database/schema/alias # unrendered_config is used to compare the original database/schema/alias
# values and to handle 'same_config' and 'same_contents' calls # values and to handle 'same_config' and 'same_contents' calls
parsed_node.unrendered_config = config.build_config_dict( parsed_node.unrendered_config = config_builder.build_config_dict(
rendered=False, patch_config_dict=patch_config_dict rendered=False, patch_config_dict=patch_config_dict
) )
parsed_node.config_call_dict = config._config_call_dict parsed_node.config_call_dict = config_builder._config_call_dict
parsed_node.unrendered_config_call_dict = config._unrendered_config_call_dict parsed_node.unrendered_config_call_dict = config_builder._unrendered_config_call_dict
# do this once before we parse the node database/schema/alias, so # do this once before we parse the node database/schema/alias, so
# parsed_node.config is what it would be if they did nothing # parsed_node.config is what it would be if they did nothing
self.update_parsed_node_config_dict(parsed_node, config_dict) self.clean_and_fix_config_dict(parsed_node, config_dict)
# This updates the node database/schema/alias/relation_name # This updates the node database/schema/alias/relation_name
self.update_parsed_node_relation_names(parsed_node, config_dict) self.update_parsed_node_relation_names(parsed_node, config_dict)
@@ -413,44 +410,36 @@ class ConfiguredParser(
if parsed_node.resource_type == NodeType.Test: if parsed_node.resource_type == NodeType.Test:
return return
# at this point, we've collected our hooks. Use the node context to # Use the node context to render each hook and collect refs/sources.
# render each hook and collect refs/sources
assert hasattr(parsed_node.config, "pre_hook") and hasattr(parsed_node.config, "post_hook") assert hasattr(parsed_node.config, "pre_hook") and hasattr(parsed_node.config, "post_hook")
hooks = list(itertools.chain(parsed_node.config.pre_hook, parsed_node.config.post_hook)) hooks = list(itertools.chain(parsed_node.config.pre_hook, parsed_node.config.post_hook))
# skip context rebuilding if there aren't any hooks # skip context rebuilding if there aren't any hooks
if not hooks: if not hooks:
return return
if not context: if not context:
context = self._context_for(parsed_node, config) context = self._context_for(parsed_node, config_builder)
for hook in hooks: for hook in hooks:
get_rendered(hook.sql, context, parsed_node, capture_macros=True) get_rendered(hook.sql, context, parsed_node, capture_macros=True)
def initial_config(self, fqn: List[str]) -> ContextConfig: def initial_config_builder(self, fqn: List[str]) -> ConfigBuilder:
config_version = min([self.project.config_version, self.root_project.config_version]) return ConfigBuilder(
if config_version == 2: self.root_project,
return ContextConfig( fqn,
self.root_project, self.resource_type,
fqn, self.project.project_name,
self.resource_type, )
self.project.project_name,
)
else:
raise DbtInternalError(
f"Got an unexpected project version={config_version}, expected 2"
)
def config_dict( def config_dict(
self, self,
config: ContextConfig, config_builder: ConfigBuilder,
) -> Dict[str, Any]: ) -> Dict[str, Any]:
config_dict = config.build_config_dict(base=True) config_dict = config_builder.build_config_dict(rendered=True)
self._mangle_hooks(config_dict)
return config_dict return config_dict
def render_update(self, node: FinalNode, config: ContextConfig) -> None: def render_update(self, node: FinalNode, config_builder: ConfigBuilder) -> None:
try: try:
context = self.render_with_context(node, config) context = self.render_with_context(node, config_builder)
self.update_parsed_node_config(node, config, context=context) self.update_parsed_node_config(node, config_builder, context=context)
except ValidationError as exc: except ValidationError as exc:
# we got a ValidationError - probably bad types in config() # we got a ValidationError - probably bad types in config()
raise ConfigUpdateError(exc, node=node) from exc raise ConfigUpdateError(exc, node=node) from exc
@@ -465,15 +454,15 @@ class ConfiguredParser(
compiled_path: str = self.get_compiled_path(block) compiled_path: str = self.get_compiled_path(block)
fqn = self.get_fqn(compiled_path, block.name) fqn = self.get_fqn(compiled_path, block.name)
config: ContextConfig = self.initial_config(fqn) config_builder: ConfigBuilder = self.initial_config_builder(fqn)
node = self._create_parsetime_node( node = self._create_parsetime_node(
block=block, block=block,
path=compiled_path, path=compiled_path,
config=config, config_builder=config_builder,
fqn=fqn, fqn=fqn,
) )
self.render_update(node, config) self.render_update(node, config_builder)
self.add_result_node(block, node) self.add_result_node(block, node)
return node return node

View File

@@ -1,7 +1,7 @@
from dataclasses import dataclass from dataclasses import dataclass
from typing import Iterable, Iterator, List, Tuple, Union from typing import Iterable, Iterator, List, Tuple, Union
from dbt.context.context_config import ContextConfig from dbt.context.context_config import ConfigBuilder
from dbt.contracts.files import FilePath from dbt.contracts.files import FilePath
from dbt.contracts.graph.nodes import HookNode from dbt.contracts.graph.nodes import HookNode
from dbt.node_types import NodeType, RunHookType from dbt.node_types import NodeType, RunHookType
@@ -92,7 +92,7 @@ class HookParser(SimpleParser[HookBlock, HookNode]):
self, self,
block: HookBlock, block: HookBlock,
path: str, path: str,
config: ContextConfig, config_builder: ConfigBuilder,
fqn: List[str], fqn: List[str],
name=None, name=None,
**kwargs, **kwargs,
@@ -101,7 +101,7 @@ class HookParser(SimpleParser[HookBlock, HookNode]):
return super()._create_parsetime_node( return super()._create_parsetime_node(
block=block, block=block,
path=path, path=path,
config=config, config_builder=config_builder,
fqn=fqn, fqn=fqn,
index=block.index, index=block.index,
name=name, name=name,

View File

@@ -10,7 +10,7 @@ import dbt.tracking as tracking
from dbt import utils from dbt import utils
from dbt.artifacts.resources import RefArgs from dbt.artifacts.resources import RefArgs
from dbt.clients.jinja import get_rendered from dbt.clients.jinja import get_rendered
from dbt.context.context_config import ContextConfig from dbt.context.context_config import ConfigBuilder
from dbt.contracts.graph.nodes import ModelNode from dbt.contracts.graph.nodes import ModelNode
from dbt.exceptions import ( from dbt.exceptions import (
ModelConfigError, ModelConfigError,
@@ -233,15 +233,15 @@ class ModelParser(SimpleSQLParser[ModelNode]):
config_keys_defaults=config_keys_defaults, config_keys_defaults=config_keys_defaults,
) )
def render_update(self, node: ModelNode, config: ContextConfig) -> None: def render_update(self, node: ModelNode, config_builder: ConfigBuilder) -> None:
self.manifest._parsing_info.static_analysis_path_count += 1 self.manifest._parsing_info.static_analysis_path_count += 1
flags = get_flags() flags = get_flags()
if node.language == ModelLanguage.python: if node.language == ModelLanguage.python:
try: try:
verify_python_model_code(node) verify_python_model_code(node)
context = self._context_for(node, config) context = self._context_for(node, config_builder)
self.parse_python_model(node, config, context) self.parse_python_model(node, config_builder, context)
self.update_parsed_node_config(node, config, context=context) self.update_parsed_node_config(node, config_builder, context=context)
except ValidationError as exc: except ValidationError as exc:
# we got a ValidationError - probably bad types in config() # we got a ValidationError - probably bad types in config()
@@ -250,7 +250,7 @@ class ModelParser(SimpleSQLParser[ModelNode]):
elif not flags.STATIC_PARSER: elif not flags.STATIC_PARSER:
# jinja rendering # jinja rendering
super().render_update(node, config) super().render_update(node, config_builder)
return return
# only sample for experimental parser correctness on normal runs, # only sample for experimental parser correctness on normal runs,
@@ -277,9 +277,9 @@ class ModelParser(SimpleSQLParser[ModelNode]):
statically_parsed: Optional[Union[str, Dict[str, List[Any]]]] = None statically_parsed: Optional[Union[str, Dict[str, List[Any]]]] = None
experimental_sample: Optional[Union[str, Dict[str, List[Any]]]] = None experimental_sample: Optional[Union[str, Dict[str, List[Any]]]] = None
exp_sample_node: Optional[ModelNode] = None exp_sample_node: Optional[ModelNode] = None
exp_sample_config: Optional[ContextConfig] = None exp_sample_config_builder: Optional[ConfigBuilder] = None
jinja_sample_node: Optional[ModelNode] = None jinja_sample_node: Optional[ModelNode] = None
jinja_sample_config: Optional[ContextConfig] = None jinja_sample_config_builder: Optional[ConfigBuilder] = None
result: List[str] = [] result: List[str] = []
# sample the experimental parser only during a normal run # sample the experimental parser only during a normal run
@@ -295,8 +295,10 @@ class ModelParser(SimpleSQLParser[ModelNode]):
if isinstance(experimental_sample, dict): if isinstance(experimental_sample, dict):
model_parser_copy = self.partial_deepcopy() model_parser_copy = self.partial_deepcopy()
exp_sample_node = deepcopy(node) exp_sample_node = deepcopy(node)
exp_sample_config = deepcopy(config) exp_sample_config_builder = deepcopy(config_builder)
model_parser_copy.populate(exp_sample_node, exp_sample_config, experimental_sample) model_parser_copy.populate(
exp_sample_node, exp_sample_config_builder, experimental_sample
)
# use the experimental parser exclusively if the flag is on # use the experimental parser exclusively if the flag is on
if flags.USE_EXPERIMENTAL_PARSER: if flags.USE_EXPERIMENTAL_PARSER:
statically_parsed = self.run_experimental_parser(node) statically_parsed = self.run_experimental_parser(node)
@@ -317,36 +319,36 @@ class ModelParser(SimpleSQLParser[ModelNode]):
# but we can't really guarantee that going forward. # but we can't really guarantee that going forward.
model_parser_copy = self.partial_deepcopy() model_parser_copy = self.partial_deepcopy()
jinja_sample_node = deepcopy(node) jinja_sample_node = deepcopy(node)
jinja_sample_config = deepcopy(config) jinja_sample_config_builder = deepcopy(config_builder)
# rendering mutates the node and the config # rendering mutates the node and the config
super(ModelParser, model_parser_copy).render_update( super(ModelParser, model_parser_copy).render_update(
jinja_sample_node, jinja_sample_config jinja_sample_node, jinja_sample_config_builder
) )
# update the unrendered config with values from the static parser. # update the unrendered config with values from the static parser.
# values from yaml files are in there already # values from yaml files are in there already
self.populate(node, config, statically_parsed) self.populate(node, config_builder, statically_parsed)
# if we took a jinja sample, compare now that the base node has been populated # if we took a jinja sample, compare now that the base node has been populated
if jinja_sample_node is not None and jinja_sample_config is not None: if jinja_sample_node is not None and jinja_sample_config_builder is not None:
result = _get_stable_sample_result( result = _get_stable_sample_result(
jinja_sample_node, jinja_sample_config, node, config jinja_sample_node, jinja_sample_config_builder, node, config_builder
) )
# if we took an experimental sample, compare now that the base node has been populated # if we took an experimental sample, compare now that the base node has been populated
if exp_sample_node is not None and exp_sample_config is not None: if exp_sample_node is not None and exp_sample_config_builder is not None:
result = _get_exp_sample_result( result = _get_exp_sample_result(
exp_sample_node, exp_sample_node,
exp_sample_config, exp_sample_config_builder,
node, node,
config, config_builder,
) )
self.manifest._parsing_info.static_analysis_parsed_path_count += 1 self.manifest._parsing_info.static_analysis_parsed_path_count += 1
# if the static parser didn't succeed, fall back to jinja # if the static parser didn't succeed, fall back to jinja
else: else:
# jinja rendering # jinja rendering
super().render_update(node, config) super().render_update(node, config_builder)
# if sampling, add the correct messages for tracking # if sampling, add the correct messages for tracking
if exp_sample and isinstance(experimental_sample, str): if exp_sample and isinstance(experimental_sample, str):
@@ -432,13 +434,15 @@ class ModelParser(SimpleSQLParser[ModelNode]):
# this method updates the model node rendered and unrendered config as well # this method updates the model node rendered and unrendered config as well
# as the node object. Used to populate these values when circumventing jinja # as the node object. Used to populate these values when circumventing jinja
# rendering like the static parser. # rendering like the static parser.
def populate(self, node: ModelNode, config: ContextConfig, statically_parsed: Dict[str, Any]): def populate(
self, node: ModelNode, config_builder: ConfigBuilder, statically_parsed: Dict[str, Any]
):
# manually fit configs in # manually fit configs in
config._config_call_dict = _get_config_call_dict(statically_parsed) config_builder._config_call_dict = _get_config_call_dict(statically_parsed)
# if there are hooks present this, it WILL render jinja. Will need to change # if there are hooks present this, it WILL render jinja. Will need to change
# when the experimental parser supports hooks # when the experimental parser supports hooks
self.update_parsed_node_config(node, config) self.update_parsed_node_config(node, config_builder)
# update the unrendered config with values from the file. # update the unrendered config with values from the file.
# values from yaml files are in there already # values from yaml files are in there already
@@ -488,11 +492,13 @@ def _shift_sources(static_parser_result: Dict[str, List[Any]]) -> Dict[str, List
# returns a list of string codes to be sent as a tracking event # returns a list of string codes to be sent as a tracking event
def _get_exp_sample_result( def _get_exp_sample_result(
sample_node: ModelNode, sample_node: ModelNode,
sample_config: ContextConfig, sample_config_builder: ConfigBuilder,
node: ModelNode, node: ModelNode,
config: ContextConfig, config_builder: ConfigBuilder,
) -> List[str]: ) -> List[str]:
result: List[Tuple[int, str]] = _get_sample_result(sample_node, sample_config, node, config) result: List[Tuple[int, str]] = _get_sample_result(
sample_node, sample_config_builder, node, config_builder
)
def process(codemsg): def process(codemsg):
code, msg = codemsg code, msg = codemsg
@@ -504,11 +510,13 @@ def _get_exp_sample_result(
# returns a list of string codes to be sent as a tracking event # returns a list of string codes to be sent as a tracking event
def _get_stable_sample_result( def _get_stable_sample_result(
sample_node: ModelNode, sample_node: ModelNode,
sample_config: ContextConfig, sample_config_builder: ConfigBuilder,
node: ModelNode, node: ModelNode,
config: ContextConfig, config_builder: ConfigBuilder,
) -> List[str]: ) -> List[str]:
result: List[Tuple[int, str]] = _get_sample_result(sample_node, sample_config, node, config) result: List[Tuple[int, str]] = _get_sample_result(
sample_node, sample_config_builder, node, config_builder
)
def process(codemsg): def process(codemsg):
code, msg = codemsg code, msg = codemsg
@@ -521,20 +529,20 @@ def _get_stable_sample_result(
# before being sent as a tracking event # before being sent as a tracking event
def _get_sample_result( def _get_sample_result(
sample_node: ModelNode, sample_node: ModelNode,
sample_config: ContextConfig, sample_config_builder: ConfigBuilder,
node: ModelNode, node: ModelNode,
config: ContextConfig, config_builder: ConfigBuilder,
) -> List[Tuple[int, str]]: ) -> List[Tuple[int, str]]:
result: List[Tuple[int, str]] = [] result: List[Tuple[int, str]] = []
# look for false positive configs # look for false positive configs
for k in sample_config._config_call_dict.keys(): for k in sample_config_builder._config_call_dict.keys():
if k not in config._config_call_dict.keys(): if k not in config_builder._config_call_dict.keys():
result += [(2, "false_positive_config_value")] result += [(2, "false_positive_config_value")]
break break
# look for missed configs # look for missed configs
for k in config._config_call_dict.keys(): for k in config_builder._config_call_dict.keys():
if k not in sample_config._config_call_dict.keys(): if k not in sample_config_builder._config_call_dict.keys():
result += [(3, "missed_config_value")] result += [(3, "missed_config_value")]
break break

View File

@@ -7,7 +7,7 @@ from dbt.adapters.factory import get_adapter, get_adapter_package_names
from dbt.artifacts.resources import NodeVersion, RefArgs from dbt.artifacts.resources import NodeVersion, RefArgs
from dbt.clients.jinja import add_rendered_test_kwargs, get_rendered from dbt.clients.jinja import add_rendered_test_kwargs, get_rendered
from dbt.context.configured import SchemaYamlVars, generate_schema_yml_context from dbt.context.configured import SchemaYamlVars, generate_schema_yml_context
from dbt.context.context_config import ContextConfig from dbt.context.context_config import ConfigBuilder
from dbt.context.macro_resolver import MacroResolver from dbt.context.macro_resolver import MacroResolver
from dbt.context.providers import generate_test_context from dbt.context.providers import generate_test_context
from dbt.contracts.files import FileHash from dbt.contracts.files import FileHash
@@ -88,7 +88,7 @@ class SchemaGenericTestParser(SimpleParser):
self, self,
target: Union[UnpatchedSourceDefinition, UnparsedNodeUpdate], target: Union[UnpatchedSourceDefinition, UnparsedNodeUpdate],
path: str, path: str,
config: ContextConfig, config_builder: ConfigBuilder,
tags: List[str], tags: List[str],
fqn: List[str], fqn: List[str],
name: str, name: str,
@@ -130,7 +130,7 @@ class SchemaGenericTestParser(SimpleParser):
"raw_code": raw_code, "raw_code": raw_code,
"language": "sql", "language": "sql",
"unique_id": self.generate_unique_id(name, test_hash), "unique_id": self.generate_unique_id(name, test_hash),
"config": self.config_dict(config), "config": self.config_dict(config_builder),
"test_metadata": test_metadata, "test_metadata": test_metadata,
"column_name": column_name, "column_name": column_name,
"checksum": FileHash.empty().to_dict(omit_none=True), "checksum": FileHash.empty().to_dict(omit_none=True),
@@ -200,11 +200,11 @@ class SchemaGenericTestParser(SimpleParser):
relative_path = str(path.relative_to(*path.parts[:1])) relative_path = str(path.relative_to(*path.parts[:1]))
fqn = self.get_fqn(relative_path, builder.fqn_name) fqn = self.get_fqn(relative_path, builder.fqn_name)
# this is the ContextConfig that is used in render_update # this is the ConfigBuilder that is used in render_update
config: ContextConfig = self.initial_config(fqn) config_builder: ConfigBuilder = self.initial_config_builder(fqn)
# Adding the builder's config to the ContextConfig # Adding the builder's config to the ConfigBuilder
# is needed to ensure the config makes it to the pre_model hook which dbt-snowflake needs # is needed to ensure the config makes it to the pre_model hook which dbt-snowflake needs
config.add_config_call(builder.config) config_builder.add_config_call(builder.config)
# builder.args contains keyword args for the test macro, # builder.args contains keyword args for the test macro,
# not configs which have been separated out in the builder. # not configs which have been separated out in the builder.
# The keyword args are not completely rendered until compilation. # The keyword args are not completely rendered until compilation.
@@ -223,7 +223,7 @@ class SchemaGenericTestParser(SimpleParser):
node = self.create_test_node( node = self.create_test_node(
target=target, target=target,
path=compiled_path, path=compiled_path,
config=config, config_builder=config_builder,
fqn=fqn, fqn=fqn,
tags=tags, tags=tags,
name=builder.fqn_name, name=builder.fqn_name,
@@ -233,7 +233,7 @@ class SchemaGenericTestParser(SimpleParser):
file_key_name=file_key_name, file_key_name=file_key_name,
description=builder.description, description=builder.description,
) )
self.render_test_update(node, config, builder, schema_file_id) self.render_test_update(node, config_builder, builder, schema_file_id)
return node return node
@@ -278,7 +278,7 @@ class SchemaGenericTestParser(SimpleParser):
# In the future we will look at generalizing this # In the future we will look at generalizing this
# more to handle additional macros or to use static # more to handle additional macros or to use static
# parsing to avoid jinja overhead. # parsing to avoid jinja overhead.
def render_test_update(self, node, config, builder, schema_file_id): def render_test_update(self, node, config_builder, builder, schema_file_id):
macro_unique_id = self.macro_resolver.get_macro_id( macro_unique_id = self.macro_resolver.get_macro_id(
node.package_name, "test_" + builder.name node.package_name, "test_" + builder.name
) )
@@ -287,9 +287,9 @@ class SchemaGenericTestParser(SimpleParser):
node.depends_on.add_macro(macro_unique_id) node.depends_on.add_macro(macro_unique_id)
if macro_unique_id in ["macro.dbt.test_not_null", "macro.dbt.test_unique"]: if macro_unique_id in ["macro.dbt.test_not_null", "macro.dbt.test_unique"]:
config_call_dict = builder.get_static_config() config_call_dict = builder.get_static_config()
config._config_call_dict = config_call_dict config_builder._config_call_dict = config_call_dict
# This sets the config from dbt_project # This sets the config from dbt_project
self.update_parsed_node_config(node, config) self.update_parsed_node_config(node, config_builder)
# source node tests are processed at patch_source time # source node tests are processed at patch_source time
if isinstance(builder.target, UnpatchedSourceDefinition): if isinstance(builder.target, UnpatchedSourceDefinition):
sources = [builder.target.fqn[-2], builder.target.fqn[-1]] sources = [builder.target.fqn[-2], builder.target.fqn[-1]]
@@ -303,7 +303,7 @@ class SchemaGenericTestParser(SimpleParser):
node, node,
self.root_project, self.root_project,
self.manifest, self.manifest,
config, config_builder,
self.macro_resolver, self.macro_resolver,
) )
# update with rendered test kwargs (which collects any refs) # update with rendered test kwargs (which collects any refs)
@@ -312,7 +312,7 @@ class SchemaGenericTestParser(SimpleParser):
add_rendered_test_kwargs(context, node, capture_macros=True) add_rendered_test_kwargs(context, node, capture_macros=True)
# the parsed node is not rendered in the native context. # the parsed node is not rendered in the native context.
get_rendered(node.raw_code, context, node, capture_macros=True) get_rendered(node.raw_code, context, node, capture_macros=True)
self.update_parsed_node_config(node, config) self.update_parsed_node_config(node, config_builder)
# env_vars should have been updated in the context env_var method # env_vars should have been updated in the context env_var method
except ValidationError as exc: except ValidationError as exc:
# we got a ValidationError - probably bad types in config() # we got a ValidationError - probably bad types in config()
@@ -351,14 +351,14 @@ class SchemaGenericTestParser(SimpleParser):
def render_with_context( def render_with_context(
self, self,
node: GenericTestNode, node: GenericTestNode,
config: ContextConfig, config_builder: ConfigBuilder,
) -> None: ) -> None:
"""Given the parsed node and a ContextConfig to use during """Given the parsed node and a ConfigBuilder to use during
parsing, collect all the refs that might be squirreled away in the test parsing, collect all the refs that might be squirreled away in the test
arguments. This includes the implicit "model" argument. arguments. This includes the implicit "model" argument.
""" """
# make a base context that doesn't have the magic kwargs field # make a base context that doesn't have the magic kwargs field
context = self._context_for(node, config) context = self._context_for(node, config_builder)
# update it with the rendered test kwargs (which collects any refs) # update it with the rendered test kwargs (which collects any refs)
add_rendered_test_kwargs(context, node, capture_macros=True) add_rendered_test_kwargs(context, node, capture_macros=True)

View File

@@ -25,8 +25,8 @@ from dbt.artifacts.resources import (
from dbt.artifacts.resources.v1.semantic_model import SemanticLayerElementConfig from dbt.artifacts.resources.v1.semantic_model import SemanticLayerElementConfig
from dbt.clients.jinja import get_rendered from dbt.clients.jinja import get_rendered
from dbt.context.context_config import ( from dbt.context.context_config import (
BaseContextConfigGenerator, BaseConfigGenerator,
ContextConfigGenerator, RenderedConfigGenerator,
UnrenderedConfigGenerator, UnrenderedConfigGenerator,
) )
from dbt.context.providers import ( from dbt.context.providers import (
@@ -96,6 +96,7 @@ class ExposureParser(YamlReader):
fqn = self.schema_parser.get_fqn_prefix(path) fqn = self.schema_parser.get_fqn_prefix(path)
fqn.append(unparsed.name) fqn.append(unparsed.name)
# Also validates
config = self._generate_exposure_config( config = self._generate_exposure_config(
target=unparsed, target=unparsed,
fqn=fqn, fqn=fqn,
@@ -103,8 +104,6 @@ class ExposureParser(YamlReader):
rendered=True, rendered=True,
) )
config = config.finalize_and_validate()
unrendered_config = self._generate_exposure_config( unrendered_config = self._generate_exposure_config(
target=unparsed, target=unparsed,
fqn=fqn, fqn=fqn,
@@ -155,9 +154,9 @@ class ExposureParser(YamlReader):
def _generate_exposure_config( def _generate_exposure_config(
self, target: UnparsedExposure, fqn: List[str], package_name: str, rendered: bool self, target: UnparsedExposure, fqn: List[str], package_name: str, rendered: bool
): ):
generator: BaseContextConfigGenerator generator: BaseConfigGenerator
if rendered: if rendered:
generator = ContextConfigGenerator(self.root_project) generator = RenderedConfigGenerator(self.root_project)
else: else:
generator = UnrenderedConfigGenerator(self.root_project) generator = UnrenderedConfigGenerator(self.root_project)
@@ -166,12 +165,11 @@ class ExposureParser(YamlReader):
# apply exposure configs # apply exposure configs
precedence_configs.update(target.config) precedence_configs.update(target.config)
return generator.calculate_node_config( return generator.generate_node_config(
config_call_dict={}, config_call_dict={},
fqn=fqn, fqn=fqn,
resource_type=NodeType.Exposure, resource_type=NodeType.Exposure,
project_name=package_name, project_name=package_name,
base=False,
patch_config_dict=precedence_configs, patch_config_dict=precedence_configs,
) )
@@ -384,6 +382,7 @@ class MetricParser(YamlReader):
fqn = self.schema_parser.get_fqn_prefix(path) fqn = self.schema_parser.get_fqn_prefix(path)
fqn.append(unparsed.name) fqn.append(unparsed.name)
# Also validates
config = self._generate_metric_config( config = self._generate_metric_config(
target=unparsed, target=unparsed,
fqn=fqn, fqn=fqn,
@@ -391,8 +390,6 @@ class MetricParser(YamlReader):
rendered=True, rendered=True,
) )
config = config.finalize_and_validate()
unrendered_config = self._generate_metric_config( unrendered_config = self._generate_metric_config(
target=unparsed, target=unparsed,
fqn=fqn, fqn=fqn,
@@ -441,23 +438,22 @@ class MetricParser(YamlReader):
def _generate_metric_config( def _generate_metric_config(
self, target: UnparsedMetric, fqn: List[str], package_name: str, rendered: bool self, target: UnparsedMetric, fqn: List[str], package_name: str, rendered: bool
): ):
generator: BaseContextConfigGenerator generator: BaseConfigGenerator
if rendered: if rendered:
generator = ContextConfigGenerator(self.root_project) generator = RenderedConfigGenerator(self.root_project)
else: else:
generator = UnrenderedConfigGenerator(self.root_project) generator = UnrenderedConfigGenerator(self.root_project)
# configs with precendence set # configs with precedence set
precedence_configs = dict() precedence_configs = dict()
# first apply metric configs # first apply metric configs
precedence_configs.update(target.config) precedence_configs.update(target.config)
config = generator.calculate_node_config( config = generator.generate_node_config(
config_call_dict={}, config_call_dict={},
fqn=fqn, fqn=fqn,
resource_type=NodeType.Metric, resource_type=NodeType.Metric,
project_name=package_name, project_name=package_name,
base=False,
patch_config_dict=precedence_configs, patch_config_dict=precedence_configs,
) )
return config return config
@@ -613,9 +609,9 @@ class SemanticModelParser(YamlReader):
def _generate_semantic_model_config( def _generate_semantic_model_config(
self, target: UnparsedSemanticModel, fqn: List[str], package_name: str, rendered: bool self, target: UnparsedSemanticModel, fqn: List[str], package_name: str, rendered: bool
): ):
generator: BaseContextConfigGenerator generator: BaseConfigGenerator
if rendered: if rendered:
generator = ContextConfigGenerator(self.root_project) generator = RenderedConfigGenerator(self.root_project)
else: else:
generator = UnrenderedConfigGenerator(self.root_project) generator = UnrenderedConfigGenerator(self.root_project)
@@ -624,12 +620,11 @@ class SemanticModelParser(YamlReader):
# first apply semantic model configs # first apply semantic model configs
precedence_configs.update(target.config) precedence_configs.update(target.config)
config = generator.calculate_node_config( config = generator.generate_node_config(
config_call_dict={}, config_call_dict={},
fqn=fqn, fqn=fqn,
resource_type=NodeType.SemanticModel, resource_type=NodeType.SemanticModel,
project_name=package_name, project_name=package_name,
base=False,
patch_config_dict=precedence_configs, patch_config_dict=precedence_configs,
) )
@@ -647,6 +642,7 @@ class SemanticModelParser(YamlReader):
measures = self._get_measures(unparsed.measures) measures = self._get_measures(unparsed.measures)
dimensions = self._get_dimensions(unparsed.dimensions) dimensions = self._get_dimensions(unparsed.dimensions)
# Also validates
config = self._generate_semantic_model_config( config = self._generate_semantic_model_config(
target=unparsed, target=unparsed,
fqn=fqn, fqn=fqn,
@@ -745,9 +741,9 @@ class SavedQueryParser(YamlReader):
def _generate_saved_query_config( def _generate_saved_query_config(
self, target: UnparsedSavedQuery, fqn: List[str], package_name: str, rendered: bool self, target: UnparsedSavedQuery, fqn: List[str], package_name: str, rendered: bool
): ):
generator: BaseContextConfigGenerator generator: BaseConfigGenerator
if rendered: if rendered:
generator = ContextConfigGenerator(self.root_project) generator = RenderedConfigGenerator(self.root_project)
else: else:
generator = UnrenderedConfigGenerator(self.root_project) generator = UnrenderedConfigGenerator(self.root_project)
@@ -756,12 +752,11 @@ class SavedQueryParser(YamlReader):
# first apply semantic model configs # first apply semantic model configs
precedence_configs.update(target.config) precedence_configs.update(target.config)
config = generator.calculate_node_config( config = generator.generate_node_config(
config_call_dict={}, config_call_dict={},
fqn=fqn, fqn=fqn,
resource_type=NodeType.SavedQuery, resource_type=NodeType.SavedQuery,
project_name=package_name, project_name=package_name,
base=False,
patch_config_dict=precedence_configs, patch_config_dict=precedence_configs,
) )
@@ -805,6 +800,7 @@ class SavedQueryParser(YamlReader):
fqn = self.schema_parser.get_fqn_prefix(path) fqn = self.schema_parser.get_fqn_prefix(path)
fqn.append(unparsed.name) fqn.append(unparsed.name)
# Also validates
config = self._generate_saved_query_config( config = self._generate_saved_query_config(
target=unparsed, target=unparsed,
fqn=fqn, fqn=fqn,
@@ -812,8 +808,6 @@ class SavedQueryParser(YamlReader):
rendered=True, rendered=True,
) )
config = config.finalize_and_validate()
unrendered_config = self._generate_saved_query_config( unrendered_config = self._generate_saved_query_config(
target=unparsed, target=unparsed,
fqn=fqn, fqn=fqn,

View File

@@ -16,7 +16,7 @@ from dbt.clients.jinja_static import statically_parse_ref_or_source
from dbt.clients.yaml_helper import load_yaml_text from dbt.clients.yaml_helper import load_yaml_text
from dbt.config import RuntimeConfig from dbt.config import RuntimeConfig
from dbt.context.configured import SchemaYamlVars, generate_schema_yml_context from dbt.context.configured import SchemaYamlVars, generate_schema_yml_context
from dbt.context.context_config import ContextConfig from dbt.context.context_config import ConfigBuilder
from dbt.contracts.files import SchemaSourceFile, SourceFile from dbt.contracts.files import SchemaSourceFile, SourceFile
from dbt.contracts.graph.manifest import Manifest from dbt.contracts.graph.manifest import Manifest
from dbt.contracts.graph.nodes import ( from dbt.contracts.graph.nodes import (
@@ -304,7 +304,7 @@ class SchemaParser(SimpleParser[YamlBlock, ModelNode]):
snapshot_node = parser._create_parsetime_node( snapshot_node = parser._create_parsetime_node(
block, block,
compiled_path, compiled_path,
parser.initial_config(fqn), parser.initial_config_builder(fqn),
fqn, fqn,
snapshot["name"], snapshot["name"],
) )
@@ -699,20 +699,20 @@ class PatchParser(YamlReader, Generic[NonSourceTarget, Parsed]):
unique_id=node.unique_id, unique_id=node.unique_id,
field_value=patch.config["access"], field_value=patch.config["access"],
) )
# Get the ContextConfig that's used in calculating the config # Get the ConfigBuilder that's used in calculating the config
# This must match the model resource_type that's being patched # This must match the model resource_type that's being patched
config = ContextConfig( config_builder = ConfigBuilder(
self.schema_parser.root_project, self.schema_parser.root_project,
node.fqn, node.fqn,
node.resource_type, node.resource_type,
self.schema_parser.project.project_name, self.schema_parser.project.project_name,
) )
# We need to re-apply the config_call_dict after the patch config # We need to re-apply the config_call_dict after the patch config
config._config_call_dict = node.config_call_dict config_builder._config_call_dict = node.config_call_dict
config._unrendered_config_call_dict = node.unrendered_config_call_dict config_builder._unrendered_config_call_dict = node.unrendered_config_call_dict
self.schema_parser.update_parsed_node_config( self.schema_parser.update_parsed_node_config(
node, node,
config, config_builder,
patch_config_dict=patch.config, patch_config_dict=patch.config,
patch_file_id=patch.file_id, patch_file_id=patch.file_id,
) )

View File

@@ -1,4 +1,4 @@
from dbt.context.context_config import ContextConfig from dbt.context.context_config import ConfigBuilder
from dbt.contracts.graph.nodes import SeedNode from dbt.contracts.graph.nodes import SeedNode
from dbt.node_types import NodeType from dbt.node_types import NodeType
from dbt.parser.base import SimpleSQLParser from dbt.parser.base import SimpleSQLParser
@@ -24,5 +24,5 @@ class SeedParser(SimpleSQLParser[SeedNode]):
def get_compiled_path(cls, block: FileBlock): def get_compiled_path(cls, block: FileBlock):
return block.path.relative_path return block.path.relative_path
def render_with_context(self, parsed_node: SeedNode, config: ContextConfig) -> None: def render_with_context(self, parsed_node: SeedNode, config_builder: ConfigBuilder) -> None:
"""Seeds don't need to do any rendering.""" """Seeds don't need to do any rendering."""

View File

@@ -8,8 +8,8 @@ from dbt.adapters.factory import get_adapter
from dbt.artifacts.resources import FreshnessThreshold, SourceConfig, Time from dbt.artifacts.resources import FreshnessThreshold, SourceConfig, Time
from dbt.config import RuntimeConfig from dbt.config import RuntimeConfig
from dbt.context.context_config import ( from dbt.context.context_config import (
BaseContextConfigGenerator, BaseConfigGenerator,
ContextConfigGenerator, RenderedConfigGenerator,
UnrenderedConfigGenerator, UnrenderedConfigGenerator,
) )
from dbt.contracts.graph.manifest import Manifest, SourceKey from dbt.contracts.graph.manifest import Manifest, SourceKey
@@ -164,13 +164,12 @@ class SourcePatcher:
# make sure we don't do duplicate tags from source + table # make sure we don't do duplicate tags from source + table
tags = sorted(set(itertools.chain(source.tags, table.tags))) tags = sorted(set(itertools.chain(source.tags, table.tags)))
# This will also validate
config = self._generate_source_config( config = self._generate_source_config(
target=target, target=target,
rendered=True, rendered=True,
) )
config = config.finalize_and_validate()
unrendered_config = self._generate_source_config( unrendered_config = self._generate_source_config(
target=target, target=target,
rendered=False, rendered=False,
@@ -306,9 +305,9 @@ class SourcePatcher:
return node return node
def _generate_source_config(self, target: UnpatchedSourceDefinition, rendered: bool): def _generate_source_config(self, target: UnpatchedSourceDefinition, rendered: bool):
generator: BaseContextConfigGenerator generator: BaseConfigGenerator
if rendered: if rendered:
generator = ContextConfigGenerator(self.root_project) generator = RenderedConfigGenerator(self.root_project)
else: else:
generator = UnrenderedConfigGenerator(self.root_project) generator = UnrenderedConfigGenerator(self.root_project)
@@ -321,12 +320,11 @@ class SourcePatcher:
# it works while source configs can only include `enabled`. # it works while source configs can only include `enabled`.
precedence_configs.update(target.table.config) precedence_configs.update(target.table.config)
return generator.calculate_node_config( return generator.generate_node_config(
config_call_dict={}, config_call_dict={},
fqn=target.fqn, fqn=target.fqn,
resource_type=NodeType.Source, resource_type=NodeType.Source,
project_name=target.package_name, project_name=target.package_name,
base=False,
patch_config_dict=precedence_configs, patch_config_dict=precedence_configs,
) )

View File

@@ -9,7 +9,7 @@ from typing import Any, Dict, List, Optional, Set
from dbt import utils from dbt import utils
from dbt.artifacts.resources import ModelConfig, UnitTestConfig, UnitTestFormat from dbt.artifacts.resources import ModelConfig, UnitTestConfig, UnitTestFormat
from dbt.config import RuntimeConfig from dbt.config import RuntimeConfig
from dbt.context.context_config import ContextConfig from dbt.context.context_config import ConfigBuilder
from dbt.context.providers import generate_parse_exposure, get_rendered from dbt.context.providers import generate_parse_exposure, get_rendered
from dbt.contracts.files import FileHash, SchemaSourceFile from dbt.contracts.files import FileHash, SchemaSourceFile
from dbt.contracts.graph.manifest import Manifest from dbt.contracts.graph.manifest import Manifest
@@ -314,13 +314,15 @@ class UnitTestParser(YamlReader):
def _build_unit_test_config( def _build_unit_test_config(
self, unit_test_fqn: List[str], config_dict: Dict[str, Any] self, unit_test_fqn: List[str], config_dict: Dict[str, Any]
) -> UnitTestConfig: ) -> UnitTestConfig:
config = ContextConfig( config_builder = ConfigBuilder(
self.schema_parser.root_project, self.schema_parser.root_project,
unit_test_fqn, unit_test_fqn,
NodeType.Unit, NodeType.Unit,
self.schema_parser.project.project_name, self.schema_parser.project.project_name,
) )
unit_test_config_dict = config.build_config_dict(patch_config_dict=config_dict) unit_test_config_dict = config_builder.build_config_dict(
rendered=True, patch_config_dict=config_dict
)
unit_test_config_dict = self.render_entry(unit_test_config_dict) unit_test_config_dict = self.render_entry(unit_test_config_dict)
return UnitTestConfig.from_dict(unit_test_config_dict) return UnitTestConfig.from_dict(unit_test_config_dict)

View File

@@ -51,7 +51,7 @@ setup(
# Pin to the patch or minor version, and bump in each new minor version of dbt-core. # Pin to the patch or minor version, and bump in each new minor version of dbt-core.
"agate>=1.7.0,<1.10", "agate>=1.7.0,<1.10",
"Jinja2>=3.1.3,<4", "Jinja2>=3.1.3,<4",
"mashumaro[msgpack]>=3.9,<3.15", "mashumaro[msgpack]>=3.15,<4.0",
# ---- # ----
# dbt-core uses these packages in standard ways. Pin to the major version, and check compatibility # dbt-core uses these packages in standard ways. Pin to the major version, and check compatibility
# with major versions in each new minor version of dbt-core. # with major versions in each new minor version of dbt-core.

View File

@@ -1,6 +1,6 @@
git+https://github.com/dbt-labs/dbt-adapters.git@main git+https://github.com/dbt-labs/dbt-adapters.git@mashumaro_fixes
git+https://github.com/dbt-labs/dbt-adapters.git@main#subdirectory=dbt-tests-adapter git+https://github.com/dbt-labs/dbt-adapters.git@main#subdirectory=dbt-tests-adapter
git+https://github.com/dbt-labs/dbt-common.git@main git+https://github.com/dbt-labs/dbt-common.git@mashumaro_fixes
git+https://github.com/dbt-labs/dbt-postgres.git@main git+https://github.com/dbt-labs/dbt-postgres.git@main
# black must match what's in .pre-commit-config.yaml to be sure local env matches CI # black must match what's in .pre-commit-config.yaml to be sure local env matches CI
black==24.3.0 black==24.3.0

View File

@@ -1802,11 +1802,14 @@
"version": { "version": {
"anyOf": [ "anyOf": [
{ {
"type": "string" "type": "integer"
}, },
{ {
"type": "number" "type": "number"
}, },
{
"type": "string"
},
{ {
"type": "null" "type": "null"
} }
@@ -2456,11 +2459,14 @@
"version": { "version": {
"anyOf": [ "anyOf": [
{ {
"type": "string" "type": "integer"
}, },
{ {
"type": "number" "type": "number"
}, },
{
"type": "string"
},
{ {
"type": "null" "type": "null"
} }
@@ -3250,11 +3256,14 @@
"version": { "version": {
"anyOf": [ "anyOf": [
{ {
"type": "string" "type": "integer"
}, },
{ {
"type": "number" "type": "number"
}, },
{
"type": "string"
},
{ {
"type": "null" "type": "null"
} }
@@ -4063,11 +4072,14 @@
"version": { "version": {
"anyOf": [ "anyOf": [
{ {
"type": "string" "type": "integer"
}, },
{ {
"type": "number" "type": "number"
}, },
{
"type": "string"
},
{ {
"type": "null" "type": "null"
} }
@@ -4292,11 +4304,14 @@
"version": { "version": {
"anyOf": [ "anyOf": [
{ {
"type": "string" "type": "integer"
}, },
{ {
"type": "number" "type": "number"
}, },
{
"type": "string"
},
{ {
"type": "null" "type": "null"
} }
@@ -4306,11 +4321,14 @@
"latest_version": { "latest_version": {
"anyOf": [ "anyOf": [
{ {
"type": "string" "type": "integer"
}, },
{ {
"type": "number" "type": "number"
}, },
{
"type": "string"
},
{ {
"type": "null" "type": "null"
} }
@@ -5471,11 +5489,14 @@
"version": { "version": {
"anyOf": [ "anyOf": [
{ {
"type": "string" "type": "integer"
}, },
{ {
"type": "number" "type": "number"
}, },
{
"type": "string"
},
{ {
"type": "null" "type": "null"
} }
@@ -6125,11 +6146,14 @@
"version": { "version": {
"anyOf": [ "anyOf": [
{ {
"type": "string" "type": "integer"
}, },
{ {
"type": "number" "type": "number"
}, },
{
"type": "string"
},
{ {
"type": "null" "type": "null"
} }
@@ -7114,11 +7138,14 @@
"version": { "version": {
"anyOf": [ "anyOf": [
{ {
"type": "string" "type": "integer"
}, },
{ {
"type": "number" "type": "number"
}, },
{
"type": "string"
},
{ {
"type": "null" "type": "null"
} }
@@ -7968,12 +7995,6 @@
}, },
"partitions": { "partitions": {
"anyOf": [ "anyOf": [
{
"type": "array",
"items": {
"type": "string"
}
},
{ {
"type": "array", "type": "array",
"items": { "items": {
@@ -8008,6 +8029,12 @@
"additionalProperties": true "additionalProperties": true
} }
}, },
{
"type": "array",
"items": {
"type": "string"
}
},
{ {
"type": "null" "type": "null"
} }
@@ -8684,11 +8711,14 @@
"version": { "version": {
"anyOf": [ "anyOf": [
{ {
"type": "string" "type": "integer"
}, },
{ {
"type": "number" "type": "number"
}, },
{
"type": "string"
},
{ {
"type": "null" "type": "null"
} }
@@ -9802,11 +9832,14 @@
"version": { "version": {
"anyOf": [ "anyOf": [
{ {
"type": "string" "type": "integer"
}, },
{ {
"type": "number" "type": "number"
}, },
{
"type": "string"
},
{ {
"type": "null" "type": "null"
} }
@@ -11651,11 +11684,14 @@
"version": { "version": {
"anyOf": [ "anyOf": [
{ {
"type": "string" "type": "integer"
}, },
{ {
"type": "number" "type": "number"
}, },
{
"type": "string"
},
{ {
"type": "null" "type": "null"
} }
@@ -12305,11 +12341,14 @@
"version": { "version": {
"anyOf": [ "anyOf": [
{ {
"type": "string" "type": "integer"
}, },
{ {
"type": "number" "type": "number"
}, },
{
"type": "string"
},
{ {
"type": "null" "type": "null"
} }
@@ -13099,11 +13138,14 @@
"version": { "version": {
"anyOf": [ "anyOf": [
{ {
"type": "string" "type": "integer"
}, },
{ {
"type": "number" "type": "number"
}, },
{
"type": "string"
},
{ {
"type": "null" "type": "null"
} }
@@ -13912,11 +13954,14 @@
"version": { "version": {
"anyOf": [ "anyOf": [
{ {
"type": "string" "type": "integer"
}, },
{ {
"type": "number" "type": "number"
}, },
{
"type": "string"
},
{ {
"type": "null" "type": "null"
} }
@@ -14141,11 +14186,14 @@
"version": { "version": {
"anyOf": [ "anyOf": [
{ {
"type": "string" "type": "integer"
}, },
{ {
"type": "number" "type": "number"
}, },
{
"type": "string"
},
{ {
"type": "null" "type": "null"
} }
@@ -14155,11 +14203,14 @@
"latest_version": { "latest_version": {
"anyOf": [ "anyOf": [
{ {
"type": "string" "type": "integer"
}, },
{ {
"type": "number" "type": "number"
}, },
{
"type": "string"
},
{ {
"type": "null" "type": "null"
} }
@@ -15320,11 +15371,14 @@
"version": { "version": {
"anyOf": [ "anyOf": [
{ {
"type": "string" "type": "integer"
}, },
{ {
"type": "number" "type": "number"
}, },
{
"type": "string"
},
{ {
"type": "null" "type": "null"
} }
@@ -15974,11 +16028,14 @@
"version": { "version": {
"anyOf": [ "anyOf": [
{ {
"type": "string" "type": "integer"
}, },
{ {
"type": "number" "type": "number"
}, },
{
"type": "string"
},
{ {
"type": "null" "type": "null"
} }
@@ -16963,11 +17020,14 @@
"version": { "version": {
"anyOf": [ "anyOf": [
{ {
"type": "string" "type": "integer"
}, },
{ {
"type": "number" "type": "number"
}, },
{
"type": "string"
},
{ {
"type": "null" "type": "null"
} }
@@ -17808,12 +17868,6 @@
}, },
"partitions": { "partitions": {
"anyOf": [ "anyOf": [
{
"type": "array",
"items": {
"type": "string"
}
},
{ {
"type": "array", "type": "array",
"items": { "items": {
@@ -17848,6 +17902,12 @@
"additionalProperties": true "additionalProperties": true
} }
}, },
{
"type": "array",
"items": {
"type": "string"
}
},
{ {
"type": "null" "type": "null"
} }
@@ -18322,11 +18382,14 @@
"version": { "version": {
"anyOf": [ "anyOf": [
{ {
"type": "string" "type": "integer"
}, },
{ {
"type": "number" "type": "number"
}, },
{
"type": "string"
},
{ {
"type": "null" "type": "null"
} }
@@ -19433,11 +19496,14 @@
"version": { "version": {
"anyOf": [ "anyOf": [
{ {
"type": "string" "type": "integer"
}, },
{ {
"type": "number" "type": "number"
}, },
{
"type": "string"
},
{ {
"type": "null" "type": "null"
} }
@@ -19868,11 +19934,14 @@
"version": { "version": {
"anyOf": [ "anyOf": [
{ {
"type": "string" "type": "integer"
}, },
{ {
"type": "number" "type": "number"
}, },
{
"type": "string"
},
{ {
"type": "null" "type": "null"
} }
@@ -20595,11 +20664,14 @@
"version": { "version": {
"anyOf": [ "anyOf": [
{ {
"type": "string" "type": "integer"
}, },
{ {
"type": "number" "type": "number"
}, },
{
"type": "string"
},
{ {
"type": "null" "type": "null"
} }
@@ -20973,10 +21045,13 @@
"items": { "items": {
"anyOf": [ "anyOf": [
{ {
"type": "string" "type": "integer"
}, },
{ {
"type": "number" "type": "number"
},
{
"type": "string"
} }
] ]
} }
@@ -20994,10 +21069,13 @@
"items": { "items": {
"anyOf": [ "anyOf": [
{ {
"type": "string" "type": "integer"
}, },
{ {
"type": "number" "type": "number"
},
{
"type": "string"
} }
] ]
} }
@@ -21020,11 +21098,14 @@
"version": { "version": {
"anyOf": [ "anyOf": [
{ {
"type": "string" "type": "integer"
}, },
{ {
"type": "number" "type": "number"
}, },
{
"type": "string"
},
{ {
"type": "null" "type": "null"
} }
@@ -21499,11 +21580,14 @@
"version": { "version": {
"anyOf": [ "anyOf": [
{ {
"type": "string" "type": "integer"
}, },
{ {
"type": "number" "type": "number"
}, },
{
"type": "string"
},
{ {
"type": "null" "type": "null"
} }
@@ -22233,11 +22317,14 @@
"version": { "version": {
"anyOf": [ "anyOf": [
{ {
"type": "string" "type": "integer"
}, },
{ {
"type": "number" "type": "number"
}, },
{
"type": "string"
},
{ {
"type": "null" "type": "null"
} }
@@ -22618,10 +22705,13 @@
"items": { "items": {
"anyOf": [ "anyOf": [
{ {
"type": "string" "type": "integer"
}, },
{ {
"type": "number" "type": "number"
},
{
"type": "string"
} }
] ]
} }
@@ -22639,10 +22729,13 @@
"items": { "items": {
"anyOf": [ "anyOf": [
{ {
"type": "string" "type": "integer"
}, },
{ {
"type": "number" "type": "number"
},
{
"type": "string"
} }
] ]
} }
@@ -22665,11 +22758,14 @@
"version": { "version": {
"anyOf": [ "anyOf": [
{ {
"type": "string" "type": "integer"
}, },
{ {
"type": "number" "type": "number"
}, },
{
"type": "string"
},
{ {
"type": "null" "type": "null"
} }

View File

@@ -0,0 +1,48 @@
import pytest
from dbt.tests.util import run_dbt
sources_yml = """
sources:
- name: TEST
schema: STAGE
tables:
- name: TABLE
external:
partitions:
- name: dl_partition
data_type: string
expression: split_part(METADATA$FILENAME, '/', 2)
"""
# Macro that walks the graph's source nodes and prints any external
# partitions. Invoked via `dbt run-operation`, so it only works if the
# partitions survived manifest serialization/deserialization.
get_partitions_sql = """
{% macro get_partitions() -%}
{% set source_nodes = graph.sources.values() if graph.sources else [] %}
{% for node in source_nodes %}
{% if node.external %}
{% if node.external.partitions %}
{{print(node.external.partitions)}}
{% endif %}
{% endif %}
{% endfor %}
{%- endmacro %}
"""
class TestGraphSerialization:
    """Functional test: a source with external partitions must parse into the
    manifest and remain readable (via ``node.external.partitions``) when the
    graph is consumed by a run-operation macro."""

    @pytest.fixture(scope="class")
    def models(self):
        # Project models dir: just the source definition with partitions.
        return {
            "sources.yml": sources_yml,
        }

    @pytest.fixture(scope="class")
    def macros(self):
        # Macro used to inspect the deserialized graph at runtime.
        return {"get_partitions.sql": get_partitions_sql}

    def test_graph_serialization(self, project):
        manifest = run_dbt(["parse"])
        assert manifest
        assert len(manifest.sources) == 1
        # Fails if the partitions did not round-trip through the serialized
        # manifest (the macro dereferences node.external.partitions).
        run_dbt(["run-operation", "get_partitions"])

View File

@@ -0,0 +1,49 @@
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
from dbt_common.dataclass_schema import dbtClassMixin
@dataclass
class ExternalPartition(dbtClassMixin):
    """Test mirror of dbt's ExternalPartition resource: one partition column
    of an external source table."""

    # All fields default to empty values so instances can be built from
    # partial yml-style input.
    name: str = ""
    description: str = ""
    data_type: str = ""
    meta: Dict[str, Any] = field(default_factory=dict)
@dataclass
class ExternalTable(dbtClassMixin):
    """Test mirror of dbt's ExternalTable resource.

    ``partitions`` is the interesting field: a Union of a dataclass list and a
    string list, which the mashumaro-based mixin must serialize without
    collapsing the dataclass branch.
    """

    location: Optional[str] = None
    file_format: Optional[str] = None
    row_format: Optional[str] = None
    tbl_properties: Optional[str] = None
    # NOTE(review): Union member order appears significant to deserialization
    # (List[ExternalPartition] listed before List[str]) — confirm against
    # mashumaro's Union resolution rules.
    partitions: Optional[Union[List[ExternalPartition], List[str]]] = None
def test_partitions_serialization():
    """External-table partitions must serialize to plain dicts and survive a
    dict round trip, and msgpack serialization must succeed.

    Regression coverage for the
    ``Optional[Union[List[ExternalPartition], List[str]]]`` field: the
    ``List[ExternalPartition]`` branch of the Union must win for dataclass
    input in both directions. The original assertions only checked the first
    element's type and the truthiness of the msgpack bytes; this also checks
    every element, the serialized values, and deserialization.
    """
    part1 = ExternalPartition(
        name="partition 1",
        description="partition 1",
        data_type="string",
    )
    part2 = ExternalPartition(
        name="partition 2",
        description="partition 2",
        data_type="string",
    )
    ext_table = ExternalTable(
        location="my_location",
        file_format="my file format",
        row_format="row format",
        partitions=[part1, part2],
    )

    # Every partition — not just the first — must serialize as a dict.
    ext_table_dict = ext_table.to_dict()
    assert isinstance(ext_table_dict["partitions"][0], dict)
    assert isinstance(ext_table_dict["partitions"][1], dict)
    assert ext_table_dict["partitions"][0]["name"] == "partition 1"
    assert ext_table_dict["partitions"][1]["name"] == "partition 2"

    # Deserialization must pick the ExternalPartition branch of the Union,
    # not coerce the dicts into List[str].
    round_tripped = ExternalTable.from_dict(ext_table_dict)
    assert isinstance(round_tripped.partitions[0], ExternalPartition)
    assert round_tripped == ext_table

    # msgpack serialization must also succeed and yield non-empty bytes.
    ext_table_msgpack = ext_table.to_msgpack()
    assert isinstance(ext_table_msgpack, bytes) and ext_table_msgpack

View File

@@ -1,8 +1,8 @@
import pytest import pytest
from dbt.artifacts.resources import ExposureConfig from dbt.artifacts.resources import ExposureConfig
from dbt.exceptions import SchemaConfigError
from dbt.tests.util import get_manifest, run_dbt, update_config_file from dbt.tests.util import get_manifest, run_dbt, update_config_file
from dbt_common.dataclass_schema import ValidationError
from tests.functional.exposures.fixtures import ( from tests.functional.exposures.fixtures import (
disabled_models_exposure_yml, disabled_models_exposure_yml,
enabled_yaml_level_exposure_yml, enabled_yaml_level_exposure_yml,
@@ -126,7 +126,7 @@ class TestInvalidConfig(ExposureConfigTests):
} }
def test_exposure_config_yaml_level(self, project): def test_exposure_config_yaml_level(self, project):
with pytest.raises(ValidationError) as excinfo: with pytest.raises(SchemaConfigError) as excinfo:
run_dbt(["parse"]) run_dbt(["parse"])
expected_msg = "'True and False' is not of type 'boolean'" expected_msg = "'True and False' is not of type 'boolean'"
assert expected_msg in str(excinfo.value) assert expected_msg in str(excinfo.value)

View File

@@ -1,9 +1,8 @@
import pytest import pytest
from dbt.artifacts.resources import MetricConfig from dbt.artifacts.resources import MetricConfig
from dbt.exceptions import CompilationError, ParsingError from dbt.exceptions import CompilationError, ParsingError, SchemaConfigError
from dbt.tests.util import get_manifest, run_dbt, update_config_file from dbt.tests.util import get_manifest, run_dbt, update_config_file
from dbt_common.dataclass_schema import ValidationError
from tests.functional.metrics.fixtures import ( from tests.functional.metrics.fixtures import (
disabled_metric_level_schema_yml, disabled_metric_level_schema_yml,
enabled_metric_level_schema_yml, enabled_metric_level_schema_yml,
@@ -170,7 +169,7 @@ class TestInvalidMetric(MetricConfigTests):
} }
def test_invalid_config_metric(self, project): def test_invalid_config_metric(self, project):
with pytest.raises(ValidationError) as excinfo: with pytest.raises(SchemaConfigError) as excinfo:
run_dbt(["parse"]) run_dbt(["parse"])
expected_msg = "'True and False' is not of type 'boolean'" expected_msg = "'True and False' is not of type 'boolean'"
assert expected_msg in str(excinfo.value) assert expected_msg in str(excinfo.value)

View File

@@ -1,8 +1,8 @@
import pytest import pytest
from dbt.artifacts.resources import SourceConfig from dbt.artifacts.resources import SourceConfig
from dbt.exceptions import SchemaConfigError
from dbt.tests.util import get_manifest, run_dbt, update_config_file from dbt.tests.util import get_manifest, run_dbt, update_config_file
from dbt_common.dataclass_schema import ValidationError
from tests.functional.sources.fixtures import ( from tests.functional.sources.fixtures import (
all_configs_everywhere_schema_yml, all_configs_everywhere_schema_yml,
all_configs_not_table_schema_yml, all_configs_not_table_schema_yml,
@@ -175,7 +175,7 @@ class TestInvalidSourceConfig(SourceConfigTests):
} }
def test_invalid_config_source(self, project): def test_invalid_config_source(self, project):
with pytest.raises(ValidationError) as excinfo: with pytest.raises(SchemaConfigError) as excinfo:
run_dbt(["parse"]) run_dbt(["parse"])
expected_msg = "'True and False' is not of type 'boolean'" expected_msg = "'True and False' is not of type 'boolean'"
assert expected_msg in str(excinfo.value) assert expected_msg in str(excinfo.value)

View File

@@ -473,7 +473,7 @@ def test_model_parse_context(config_postgres, manifest_fx, get_adapter, get_incl
model=mock_model(), model=mock_model(),
config=config_postgres, config=config_postgres,
manifest=manifest_fx, manifest=manifest_fx,
context_config=mock.MagicMock(), config_builder=mock.MagicMock(),
) )
assert_has_keys(REQUIRED_MODEL_KEYS, MAYBE_KEYS, ctx) assert_has_keys(REQUIRED_MODEL_KEYS, MAYBE_KEYS, ctx)

View File

@@ -8,11 +8,11 @@ import yaml
from dbt import tracking from dbt import tracking
from dbt.artifacts.resources import ModelConfig, RefArgs from dbt.artifacts.resources import ModelConfig, RefArgs
from dbt.context.context_config import ConfigBuilder
from dbt.artifacts.resources.v1.model import ( from dbt.artifacts.resources.v1.model import (
ModelBuildAfter, ModelBuildAfter,
ModelFreshnessDependsOnOptions, ModelFreshnessDependsOnOptions,
) )
from dbt.context.context_config import ContextConfig
from dbt.contracts.files import FileHash, FilePath, SchemaSourceFile, SourceFile from dbt.contracts.files import FileHash, FilePath, SchemaSourceFile, SourceFile
from dbt.contracts.graph.manifest import Manifest from dbt.contracts.graph.manifest import Manifest
from dbt.contracts.graph.model_config import NodeConfig, SnapshotConfig, TestConfig from dbt.contracts.graph.model_config import NodeConfig, SnapshotConfig, TestConfig
@@ -1425,7 +1425,7 @@ class StaticModelParserUnitTest(BaseParserTest):
checksum=None, checksum=None,
unrendered_config={"materialized": "table"}, unrendered_config={"materialized": "table"},
) )
self.example_config = ContextConfig( self.example_config_builder = ConfigBuilder(
self.root_project_config, self.root_project_config,
self.example_node.fqn, self.example_node.fqn,
self.example_node.resource_type, self.example_node.resource_type,
@@ -1455,90 +1455,92 @@ class StaticModelParserUnitTest(BaseParserTest):
def test_sample_results(self): def test_sample_results(self):
# --- missed ref --- # # --- missed ref --- #
node = deepcopy(self.example_node) node = deepcopy(self.example_node)
config = deepcopy(self.example_config) config_builder = deepcopy(self.example_config_builder)
sample_node = deepcopy(self.example_node) sample_node = deepcopy(self.example_node)
sample_config = deepcopy(self.example_config) sample_config_builder = deepcopy(self.example_config_builder)
sample_node.refs = [] sample_node.refs = []
node.refs = ["myref"] node.refs = ["myref"]
result = _get_sample_result(sample_node, sample_config, node, config) result = _get_sample_result(sample_node, sample_config_builder, node, config_builder)
self.assertEqual([(7, "missed_ref_value")], result) self.assertEqual([(7, "missed_ref_value")], result)
# --- false positive ref --- # # --- false positive ref --- #
node = deepcopy(self.example_node) node = deepcopy(self.example_node)
config = deepcopy(self.example_config) config_builder = deepcopy(self.example_config_builder)
sample_node = deepcopy(self.example_node) sample_node = deepcopy(self.example_node)
sample_config = deepcopy(self.example_config) sample_config_builder = deepcopy(self.example_config_builder)
sample_node.refs = ["myref"] sample_node.refs = ["myref"]
node.refs = [] node.refs = []
result = _get_sample_result(sample_node, sample_config, node, config) result = _get_sample_result(sample_node, sample_config_builder, node, config_builder)
self.assertEqual([(6, "false_positive_ref_value")], result) self.assertEqual([(6, "false_positive_ref_value")], result)
# --- missed source --- # # --- missed source --- #
node = deepcopy(self.example_node) node = deepcopy(self.example_node)
config = deepcopy(self.example_config) config_builder = deepcopy(self.example_config_builder)
sample_node = deepcopy(self.example_node) sample_node = deepcopy(self.example_node)
sample_config = deepcopy(self.example_config) sample_config_builder = deepcopy(self.example_config_builder)
sample_node.sources = [] sample_node.sources = []
node.sources = [["abc", "def"]] node.sources = [["abc", "def"]]
result = _get_sample_result(sample_node, sample_config, node, config) result = _get_sample_result(sample_node, sample_config_builder, node, config_builder)
self.assertEqual([(5, "missed_source_value")], result) self.assertEqual([(5, "missed_source_value")], result)
# --- false positive source --- # # --- false positive source --- #
node = deepcopy(self.example_node) node = deepcopy(self.example_node)
config = deepcopy(self.example_config) config_builder = deepcopy(self.example_config_builder)
sample_node = deepcopy(self.example_node) sample_node = deepcopy(self.example_node)
sample_config = deepcopy(self.example_config) sample_config_builder = deepcopy(self.example_config_builder)
sample_node.sources = [["abc", "def"]] sample_node.sources = [["abc", "def"]]
node.sources = [] node.sources = []
result = _get_sample_result(sample_node, sample_config, node, config) result = _get_sample_result(sample_node, sample_config_builder, node, config_builder)
self.assertEqual([(4, "false_positive_source_value")], result) self.assertEqual([(4, "false_positive_source_value")], result)
# --- missed config --- # # --- missed config --- #
node = deepcopy(self.example_node) node = deepcopy(self.example_node)
config = deepcopy(self.example_config) config_builder = deepcopy(self.example_config_builder)
sample_node = deepcopy(self.example_node) sample_node = deepcopy(self.example_node)
sample_config = deepcopy(self.example_config) sample_config_builder = deepcopy(self.example_config_builder)
sample_config._config_call_dict = {} sample_config_builder._config_call_dict = {}
config._config_call_dict = {"key": "value"} config_builder._config_call_dict = {"key": "value"}
result = _get_sample_result(sample_node, sample_config, node, config) result = _get_sample_result(sample_node, sample_config_builder, node, config_builder)
self.assertEqual([(3, "missed_config_value")], result) self.assertEqual([(3, "missed_config_value")], result)
# --- false positive config --- # # --- false positive config --- #
node = deepcopy(self.example_node) node = deepcopy(self.example_node)
config = deepcopy(self.example_config) config_builder = deepcopy(self.example_config_builder)
sample_node = deepcopy(self.example_node) sample_node = deepcopy(self.example_node)
sample_config = deepcopy(self.example_config) sample_config_builder = deepcopy(self.example_config_builder)
sample_config._config_call_dict = {"key": "value"} sample_config_builder._config_call_dict = {"key": "value"}
config._config_call_dict = {} config_builder._config_call_dict = {}
result = _get_sample_result(sample_node, sample_config, node, config) result = _get_sample_result(sample_node, sample_config_builder, node, config_builder)
self.assertEqual([(2, "false_positive_config_value")], result) self.assertEqual([(2, "false_positive_config_value")], result)
def test_exp_sample_results(self): def test_exp_sample_results(self):
node = deepcopy(self.example_node) node = deepcopy(self.example_node)
config = deepcopy(self.example_config) config_builder = deepcopy(self.example_config_builder)
sample_node = deepcopy(self.example_node) sample_node = deepcopy(self.example_node)
sample_config = deepcopy(self.example_config) sample_config_builder = deepcopy(self.example_config_builder)
result = _get_exp_sample_result(sample_node, sample_config, node, config) result = _get_exp_sample_result(sample_node, sample_config_builder, node, config_builder)
self.assertEqual(["00_experimental_exact_match"], result) self.assertEqual(["00_experimental_exact_match"], result)
def test_stable_sample_results(self): def test_stable_sample_results(self):
node = deepcopy(self.example_node) node = deepcopy(self.example_node)
config = deepcopy(self.example_config) config_builder = deepcopy(self.example_config_builder)
sample_node = deepcopy(self.example_node) sample_node = deepcopy(self.example_node)
sample_config = deepcopy(self.example_config) sample_config_builder = deepcopy(self.example_config_builder)
result = _get_stable_sample_result(sample_node, sample_config, node, config) result = _get_stable_sample_result(
sample_node, sample_config_builder, node, config_builder
)
self.assertEqual(["80_stable_exact_match"], result) self.assertEqual(["80_stable_exact_match"], result)

View File

@@ -470,7 +470,7 @@ def make_semantic_model(
return SemanticModel( return SemanticModel(
name=name, name=name,
resource_type=NodeType.SemanticModel, resource_type=NodeType.SemanticModel,
model=model, model=model.name,
node_relation=NodeRelation( node_relation=NodeRelation(
alias=model.alias, alias=model.alias,
schema_name="dbt", schema_name="dbt",