Compare commits

..

16 Commits

Author               SHA1        Message                                                       Date
Jeremy Cohen         5cce911842  Ongoing experiment                                            2023-01-29 21:32:19 +01:00
lostmygithubaccount  158aa81b0c  update per suggestions                                        2022-11-23 09:06:33 -08:00
lostmygithubaccount  5ddb088049  Merge remote-tracking branch 'origin/main' into cody/ibis    2022-11-22 21:54:05 -08:00
lostmygithubaccount  3edc9e53ad  initial implementation based on prql pr                      2022-11-20 17:55:34 -08:00
Maximilian Roos      e0c32f425d  Merge branch 'main' into prql                                 2022-10-11 11:18:13 -07:00
Maximilian Roos      90223ed279  Merge branch 'main' into prql                                 2022-10-06 13:00:37 -07:00
Maximilian Roos      472940423c  Remove unused PrqlNode & friends                              2022-10-05 18:35:06 -07:00
Maximilian Roos      dddb0bff5a  Merge branch 'main' into prql                                 2022-10-05 18:02:20 -07:00
Maximilian Roos      bc8b65095e  Add language on error nodes                                   2022-10-05 11:34:15 -07:00
Maximilian Roos      86eb68f40d  Add test to test_graph.py                                     2022-10-05 11:34:15 -07:00
Maximilian Roos      8eece383ea  flake                                                         2022-10-05 11:34:15 -07:00
Maximilian Roos      c9572c3106  Always use the mock method to align the snapshot tests       2022-10-05 11:34:15 -07:00
Maximilian Roos      ebff2ceb72  Revert to importing builtins from typing                      2022-10-05 11:34:15 -07:00
Maximilian Roos      5a8fd1e90d  Ignore types in the import hacks (tests still fail b/c typing_extensions is not installed)  2022-10-05 11:34:15 -07:00
Maximilian Roos      fa3f17200f  Add a mock return from prql_python                            2022-10-05 11:34:15 -07:00
Maximilian Roos      506f2c939a  A very-WIP implementation of the PRQL parser                  2022-10-05 11:34:08 -07:00
149 changed files with 1606 additions and 19285 deletions


@@ -1,7 +0,0 @@
kind: "Dependency"
body: "Bump mashumaro[msgpack] from 3.0.4 to 3.1.1 in /core"
time: 2022-10-20T00:07:53.00000Z
custom:
Author: dependabot[bot]
Issue: 4904
PR: 6108


@@ -1,7 +0,0 @@
kind: Features
body: Added an md5 function to the base context
time: 2022-11-14T18:52:07.788593+02:00
custom:
Author: haritamar
Issue: "6246"
PR: "6247"


@@ -1,7 +0,0 @@
kind: Features
body: Exposures support metrics in lineage
time: 2022-11-30T11:29:13.256034-05:00
custom:
Author: michelleark
Issue: "6057"
PR: "6342"


@@ -1,7 +0,0 @@
kind: Fixes
body: Clarify Error Message for how many models are allowed in a Python file
time: 2022-11-15T08:10:21.527884-05:00
custom:
Author: justbldwn
Issue: "6245"
PR: "6251"


@@ -1,7 +0,0 @@
kind: Under the Hood
body: Add github actions workflow to generate high level CLI API docs
time: 2022-11-16T13:00:37.916202-06:00
custom:
Author: stu-k
Issue: "5942"
PR: "6187"


@@ -1,166 +0,0 @@
# **what?**
# On push, if anything in core/dbt/docs or core/dbt/cli has been
# created or modified, regenerate the CLI API docs using sphinx.
# **why?**
# We watch for changes in core/dbt/cli because the CLI API docs rely on click
# and all supporting flags/params to be generated. We watch for changes in
# core/dbt/docs since any changes to sphinx configuration or any of the
# .rst files there could result in a differently build final index.html file.
# **when?**
# Whenever a change has been pushed to a branch, and only if there is a diff
# between the PR branch and main's core/dbt/cli and or core/dbt/docs dirs.
# TODO: add bot comment to PR informing contributor that the docs have been committed
# TODO: figure out why github action triggered pushes cause github to fail to report
# the status of jobs
name: Generate CLI API docs
on:
pull_request:
permissions:
contents: write
pull-requests: write
env:
CLI_DIR: ${{ github.workspace }}/core/dbt/cli
DOCS_DIR: ${{ github.workspace }}/core/dbt/docs
DOCS_BUILD_DIR: ${{ github.workspace }}/core/dbt/docs/build
jobs:
check_gen:
name: check if generation needed
runs-on: ubuntu-latest
outputs:
cli_dir_changed: ${{ steps.check_cli.outputs.cli_dir_changed }}
docs_dir_changed: ${{ steps.check_docs.outputs.docs_dir_changed }}
steps:
- name: "[DEBUG] print variables"
run: |
echo "env.CLI_DIR: ${{ env.CLI_DIR }}"
echo "env.DOCS_BUILD_DIR: ${{ env.DOCS_BUILD_DIR }}"
echo "env.DOCS_DIR: ${{ env.DOCS_DIR }}"
echo ">>>>> git log"
git log --pretty=oneline | head -5
- name: git checkout
uses: actions/checkout@v3
with:
fetch-depth: 0
ref: ${{ github.head_ref }}
- name: set shas
id: set_shas
run: |
THIS_SHA=$(git rev-parse @)
LAST_SHA=$(git rev-parse @~1)
echo "this sha: $THIS_SHA"
echo "last sha: $LAST_SHA"
echo "this_sha=$THIS_SHA" >> $GITHUB_OUTPUT
echo "last_sha=$LAST_SHA" >> $GITHUB_OUTPUT
- name: check for changes in core/dbt/cli
id: check_cli
run: |
CLI_DIR_CHANGES=$(git diff \
${{ steps.set_shas.outputs.last_sha }} \
${{ steps.set_shas.outputs.this_sha }} \
-- ${{ env.CLI_DIR }})
if [ -n "$CLI_DIR_CHANGES" ]; then
echo "changes found"
echo $CLI_DIR_CHANGES
echo "cli_dir_changed=true" >> $GITHUB_OUTPUT
exit 0
fi
echo "cli_dir_changed=false" >> $GITHUB_OUTPUT
echo "no changes found"
- name: check for changes in core/dbt/docs
id: check_docs
if: steps.check_cli.outputs.cli_dir_changed == 'false'
run: |
DOCS_DIR_CHANGES=$(git diff --name-only \
${{ steps.set_shas.outputs.last_sha }} \
${{ steps.set_shas.outputs.this_sha }} \
-- ${{ env.DOCS_DIR }} ':!${{ env.DOCS_BUILD_DIR }}')
DOCS_BUILD_DIR_CHANGES=$(git diff --name-only \
${{ steps.set_shas.outputs.last_sha }} \
${{ steps.set_shas.outputs.this_sha }} \
-- ${{ env.DOCS_BUILD_DIR }})
if [ -n "$DOCS_DIR_CHANGES" ] && [ -z "$DOCS_BUILD_DIR_CHANGES" ]; then
echo "changes found"
echo $DOCS_DIR_CHANGES
echo "docs_dir_changed=true" >> $GITHUB_OUTPUT
exit 0
fi
echo "docs_dir_changed=false" >> $GITHUB_OUTPUT
echo "no changes found"
gen_docs:
name: generate docs
runs-on: ubuntu-latest
needs: [check_gen]
if: |
needs.check_gen.outputs.cli_dir_changed == 'true'
|| needs.check_gen.outputs.docs_dir_changed == 'true'
steps:
- name: "[DEBUG] print variables"
run: |
echo "env.DOCS_DIR: ${{ env.DOCS_DIR }}"
echo "github head_ref: ${{ github.head_ref }}"
- name: git checkout
uses: actions/checkout@v3
with:
ref: ${{ github.head_ref }}
- name: install python
uses: actions/setup-python@v4.3.0
with:
python-version: 3.8
- name: install dev requirements
run: |
python3 -m venv env
source env/bin/activate
python -m pip install --upgrade pip
pip install -r requirements.txt -r dev-requirements.txt
- name: generate docs
run: |
source env/bin/activate
cd ${{ env.DOCS_DIR }}
echo "cleaning existing docs"
make clean
echo "creating docs"
make html
- name: debug
run: |
echo ">>>>> status"
git status
echo ">>>>> remotes"
git remote -v
echo ">>>>> branch"
git branch -v
echo ">>>>> log"
git log --pretty=oneline | head -5
- name: commit docs
run: |
git config user.name 'Github Build Bot'
git config user.email 'buildbot@fishtownanalytics.com'
git commit -am "Add generated CLI API docs"
git push -u origin ${{ github.head_ref }}
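
A minimal local sketch of the change check this workflow performs between the last two commits, assuming git is available on PATH; has_changes is a hypothetical helper written for illustration, not part of the workflow:

import subprocess

def has_changes(path: str, base: str = "HEAD~1", head: str = "HEAD") -> bool:
    """Return True if `git diff --name-only base head -- path` reports any files."""
    out = subprocess.run(
        ["git", "diff", "--name-only", base, head, "--", path],
        capture_output=True, text=True, check=True,
    )
    return bool(out.stdout.strip())

# e.g. regenerate the docs only when the CLI or docs sources changed:
# if has_changes("core/dbt/cli") or has_changes("core/dbt/docs"):
#     subprocess.run(["make", "-C", "core/dbt/docs", "clean", "html"], check=True)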


@@ -119,7 +119,7 @@ jobs:
       fail-fast: false
       matrix:
         python-version: ["3.7", "3.8", "3.9", "3.10"]
-        os: [ubuntu-20.04]
+        os: [ubuntu-latest]
       include:
         - python-version: 3.8
           os: windows-latest


@@ -9,4 +9,13 @@ permissions:
 jobs:
   stale:
-    uses: dbt-labs/actions/.github/workflows/stale-bot-matrix.yml@main
+    runs-on: ubuntu-latest
+    steps:
+      # pinned at v4 (https://github.com/actions/stale/releases/tag/v4.0.0)
+      - uses: actions/stale@cdf15f641adb27a71842045a94023bef6945e3aa
+        with:
+          stale-issue-message: "This issue has been marked as Stale because it has been open for 180 days with no activity. If you would like the issue to remain open, please remove the stale label or comment on the issue, or it will be closed in 7 days."
+          stale-pr-message: "This PR has been marked as Stale because it has been open for 180 days with no activity. If you would like the PR to remain open, please remove the stale label or comment on the PR, or it will be closed in 7 days."
+          close-issue-message: "Although we are closing this issue as stale, it's not gone forever. Issues can be reopened if there is renewed community interest; add a comment to notify the maintainers."
+          # mark issues/PRs stale when they haven't seen activity in 180 days
+          days-before-stale: 180


@@ -22,7 +22,7 @@ jobs:
   # run the performance measurements on the current or default branch
   test-schema:
     name: Test Log Schema
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     env:
       # turns warnings into errors
       RUSTFLAGS: "-D warnings"

.gitignore

@@ -11,7 +11,6 @@ __pycache__/
 env*/
 dbt_env/
 build/
-!core/dbt/docs/build
 develop-eggs/
 dist/
 downloads/


@@ -2,7 +2,7 @@
 # Eventually the hooks described here will be run as tests before merging each PR.
 # TODO: remove global exclusion of tests when testing overhaul is complete
-exclude: ^(test/|core/dbt/docs/build/)
+exclude: ^test/
 # Force all unspecified python hooks to run python 3.8
 default_language_version:


@@ -29,10 +29,12 @@ from dbt.exceptions import (
 from dbt.graph import Graph
 from dbt.events.functions import fire_event
 from dbt.events.types import FoundStats, CompilingNode, WritingInjectedSQLForNode
-from dbt.node_types import NodeType, ModelLanguage
+from dbt.node_types import NodeType
 from dbt.events.format import pluralize
 import dbt.tracking
+from dbt.parser.languages import get_language_provider_by_name

 graph_file_name = "graph.gpickle"
@@ -56,7 +58,6 @@ def print_compile_stats(stats):
         NodeType.Source: "source",
         NodeType.Exposure: "exposure",
         NodeType.Metric: "metric",
-        NodeType.Entity: "entity",
     }

     results = {k: 0 for k in names.keys()}
@@ -92,8 +93,6 @@ def _generate_stats(manifest: Manifest):
         stats[exposure.resource_type] += 1
     for metric in manifest.metrics.values():
         stats[metric.resource_type] += 1
-    for entity in manifest.entities.values():
-        stats[entity.resource_type] += 1
     for macro in manifest.macros.values():
         stats[macro.resource_type] += 1
     return stats
@@ -366,42 +365,19 @@ class Compiler:
             {
                 "compiled": False,
                 "compiled_code": None,
+                "compiled_language": None,
                 "extra_ctes_injected": False,
                 "extra_ctes": [],
             }
         )
         compiled_node = _compiled_type_for(node).from_dict(data)
-        if compiled_node.language == ModelLanguage.python:
-            # TODO could we also 'minify' this code at all? just aesthetic, not functional
-            # quoating seems like something very specific to sql so far
-            # for all python implementations we are seeing there's no quating.
-            # TODO try to find better way to do this, given that
-            original_quoting = self.config.quoting
-            self.config.quoting = {key: False for key in original_quoting.keys()}
-            context = self._create_node_context(compiled_node, manifest, extra_context)
-            postfix = jinja.get_rendered(
-                "{{ py_script_postfix(model) }}",
-                context,
-                node,
-            )
-            # we should NOT jinja render the python model's 'raw code'
-            compiled_node.compiled_code = f"{node.raw_code}\n\n{postfix}"
-            # restore quoting settings in the end since context is lazy evaluated
-            self.config.quoting = original_quoting
-        else:
-            context = self._create_node_context(compiled_node, manifest, extra_context)
-            compiled_node.compiled_code = jinja.get_rendered(
-                node.raw_code,
-                context,
-                node,
-            )
+        context = self._create_node_context(compiled_node, manifest, extra_context)
+        provider = get_language_provider_by_name(node.language)
+        compiled_node.compiled_code = provider.get_compiled_code(node, context)

         compiled_node.relation_name = self._get_relation_name(node)
+        compiled_node.compiled_language = provider.compiled_language()
         compiled_node.compiled = True

         return compiled_node
@@ -517,6 +493,8 @@ class Compiler:
         fire_event(WritingInjectedSQLForNode(unique_id=node.unique_id))
         if node.compiled_code:
+            # TODO: should compiled_path depend on the compiled_language?
+            # e.g. "model.prql" (source) -> "model.sql" (compiled)
             node.compiled_path = node.write_node(
                 self.config.target_path, "compiled", node.compiled_code
             )
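
The new compile path delegates rendering to a per-language provider. The dbt.parser.languages module itself is not shown in this compare, so the following is only a hypothetical sketch of the interface implied by get_compiled_code and compiled_language above:

from typing import Protocol

class LanguageProvider(Protocol):
    # assumed interface, inferred from the calls in the diff above
    def get_compiled_code(self, node, context) -> str: ...
    def compiled_language(self) -> str: ...

class SqlProvider:
    """Sketch of a SQL provider: render raw code through jinja, as the old else-branch did."""

    def get_compiled_code(self, node, context) -> str:
        from dbt.clients import jinja  # real dbt module; usage here is illustrative
        return jinja.get_rendered(node.raw_code, context, node)

    def compiled_language(self) -> str:
        return "sql"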


@@ -381,7 +381,6 @@ class PartialProject(RenderComponents):
     sources: Dict[str, Any]
     tests: Dict[str, Any]
     metrics: Dict[str, Any]
-    entities: Dict[str, Any]
     exposures: Dict[str, Any]
     vars_value: VarProvider
@@ -392,7 +391,6 @@ class PartialProject(RenderComponents):
         sources = cfg.sources
         tests = cfg.tests
         metrics = cfg.metrics
-        entities = cfg.entities
         exposures = cfg.exposures
         if cfg.vars is None:
             vars_dict: Dict[str, Any] = {}
@@ -448,7 +446,6 @@ class PartialProject(RenderComponents):
             sources=sources,
             tests=tests,
             metrics=metrics,
-            entities=entities,
             exposures=exposures,
             vars=vars_value,
             config_version=cfg.config_version,
@@ -553,7 +550,6 @@ class Project:
     sources: Dict[str, Any]
     tests: Dict[str, Any]
     metrics: Dict[str, Any]
-    entities: Dict[str, Any]
     exposures: Dict[str, Any]
     vars: VarProvider
     dbt_version: List[VersionSpecifier]
@@ -628,7 +624,6 @@ class Project:
             "sources": self.sources,
             "tests": self.tests,
             "metrics": self.metrics,
-            "entities": self.entities,
             "exposures": self.exposures,
             "vars": self.vars.to_dict(),
             "require-dbt-version": [v.to_version_string() for v in self.dbt_version],


@@ -116,7 +116,6 @@ class RuntimeConfig(Project, Profile, AdapterRequiredConfig):
             sources=project.sources,
             tests=project.tests,
             metrics=project.metrics,
-            entities=project.entities,
             exposures=project.exposures,
             vars=project.vars,
             config_version=project.config_version,
@@ -312,7 +311,6 @@ class RuntimeConfig(Project, Profile, AdapterRequiredConfig):
             "sources": self._get_config_paths(self.sources),
             "tests": self._get_config_paths(self.tests),
             "metrics": self._get_config_paths(self.metrics),
-            "entities": self._get_config_paths(self.entities),
             "exposures": self._get_config_paths(self.exposures),
         }
@@ -508,7 +506,6 @@ class UnsetProfileConfig(RuntimeConfig):
             "sources": self.sources,
             "tests": self.tests,
             "metrics": self.metrics,
-            "entities": self.entities,
             "exposures": self.exposures,
             "vars": self.vars.to_dict(),
             "require-dbt-version": [v.to_version_string() for v in self.dbt_version],
@@ -571,7 +568,6 @@ class UnsetProfileConfig(RuntimeConfig):
             sources=project.sources,
             tests=project.tests,
             metrics=project.metrics,
-            entities=project.entities,
             exposures=project.exposures,
             vars=project.vars,
             config_version=project.config_version,


@@ -4,7 +4,6 @@ from typing import Any, Dict, NoReturn, Optional, Mapping, Iterable, Set, List
 from dbt import flags
 from dbt import tracking
-from dbt import utils
 from dbt.clients.jinja import get_rendered
 from dbt.clients.yaml_helper import yaml, safe_load, SafeLoader, Loader, Dumper  # noqa: F401
 from dbt.constants import SECRET_ENV_PREFIX, DEFAULT_ENV_PLACEHOLDER
@@ -688,19 +687,6 @@ class BaseContext(metaclass=ContextMeta):
             dict_diff.update({k: dict_a[k]})
         return dict_diff

-    @contextmember
-    @staticmethod
-    def local_md5(value: str) -> str:
-        """Calculates an MD5 hash of the given string.
-        It's called "local_md5" to emphasize that it runs locally in dbt (in jinja context) and not an MD5 SQL command.
-
-        :param value: The value to hash
-
-        Usage:
-            {% set value_hash = local_md5("hello world") %}
-        """
-        return utils.md5(value)

 def generate_base_context(cli_vars: Dict[str, Any]) -> Dict[str, Any]:
     ctx = BaseContext(cli_vars)
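
The local_md5 context member shown removed above wraps dbt.utils.md5; a standalone equivalent is a short sketch over hashlib (assuming UTF-8 encoding of the input, as dbt's helper does):

import hashlib

def local_md5(value: str) -> str:
    """MD5 of a string, computed locally in Python rather than via an MD5 SQL function."""
    return hashlib.md5(value.encode("utf-8")).hexdigest()

# jinja usage from the removed docstring: {% set value_hash = local_md5("hello world") %}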


@@ -45,8 +45,6 @@ class UnrenderedConfig(ConfigSource):
             model_configs = unrendered.get("tests")
         elif resource_type == NodeType.Metric:
             model_configs = unrendered.get("metrics")
-        elif resource_type == NodeType.Entity:
-            model_configs = unrendered.get("entities")
         elif resource_type == NodeType.Exposure:
             model_configs = unrendered.get("exposures")
         else:
@@ -72,8 +70,6 @@ class RenderedConfig(ConfigSource):
             model_configs = self.project.tests
         elif resource_type == NodeType.Metric:
             model_configs = self.project.metrics
-        elif resource_type == NodeType.Entity:
-            model_configs = self.project.entities
         elif resource_type == NodeType.Exposure:
             model_configs = self.project.exposures
         else:


@@ -37,7 +37,6 @@ from dbt.contracts.graph.parsed import (
     ParsedMacro,
     ParsedExposure,
     ParsedMetric,
-    ParsedEntity,
     ParsedSeedNode,
     ParsedSourceDefinition,
 )
@@ -302,10 +301,12 @@ class BaseMetricResolver(BaseResolver):
         self.validate_args(name, package)
         return self.resolve(name, package)

+
 class Config(Protocol):
     def __init__(self, model, context_config: Optional[ContextConfig]):
         ...

+
 # Implementation of "config(..)" calls in models
 class ParseConfigObject(Config):
     def __init__(self, model, context_config: Optional[ContextConfig]):
@@ -1315,7 +1316,7 @@ class ModelContext(ProviderContext):
         # only doing this in sql model for backward compatible
         if (
             getattr(self.model, "extra_ctes_injected", None)
-            and self.model.language == ModelLanguage.sql  # type: ignore[union-attr]
+            and self.model.compiled_language == ModelLanguage.sql  # type: ignore[union-attr]
         ):
             # TODO CT-211
             return self.model.compiled_code  # type: ignore[union-attr]
@@ -1434,14 +1435,6 @@ class ExposureSourceResolver(BaseResolver):
         return ""

-class ExposureMetricResolver(BaseResolver):
-    def __call__(self, *args) -> str:
-        if len(args) not in (1, 2):
-            metric_invalid_args(self.model, args)
-        self.model.metrics.append(list(args))
-        return ""

 def generate_parse_exposure(
     exposure: ParsedExposure,
     config: RuntimeConfig,
@@ -1462,12 +1455,6 @@ def generate_parse_exposure(
             project,
             manifest,
         ),
-        "metric": ExposureMetricResolver(
-            None,
-            exposure,
-            project,
-            manifest,
-        ),
     }
@@ -1491,6 +1478,7 @@ class MetricRefResolver(BaseResolver):
                 "the name argument to ref() must be a string"
             )

+
 def generate_parse_metrics(
     metric: ParsedMetric,
     config: RuntimeConfig,
@@ -1513,41 +1501,6 @@ def generate_parse_metrics(
         ),
     }

-class EntityRefResolver(BaseResolver):
-    def __call__(self, *args) -> str:
-        package = None
-        if len(args) == 1:
-            name = args[0]
-        elif len(args) == 2:
-            package, name = args
-        else:
-            ref_invalid_args(self.model, args)
-        self.validate_args(name, package)
-        self.model.refs.append(list(args))
-        return ""
-
-    def validate_args(self, name, package):
-        if not isinstance(name, str):
-            raise ParsingException(
-                f"In the entity associated with {self.model.original_file_path} "
-                "the name argument to ref() must be a string"
-            )
-
-def generate_parse_entities(
-    entity: ParsedEntity,
-    config: RuntimeConfig,
-    manifest: Manifest,
-    package_name: str,
-) -> Dict[str, Any]:
-    project = config.load_dependencies()[package_name]
-    return {
-        "ref": EntityRefResolver(
-            None,
-            entity,
-            project,
-            manifest,
-        ),
-    }

 # This class is currently used by the schema parser in order
 # to limit the number of macros in the context by using


@@ -22,6 +22,7 @@ class ParseFileType(StrEnum):
     Documentation = "docs"
     Schema = "schema"
     Hook = "hook"  # not a real filetype, from dbt_project.yml
+    language: str = "sql"

 parse_file_type_to_parser = {
@@ -194,6 +195,7 @@ class SourceFile(BaseSourceFile):
     docs: List[str] = field(default_factory=list)
     macros: List[str] = field(default_factory=list)
     env_vars: List[str] = field(default_factory=list)
+    language: str = "sql"

     @classmethod
     def big_seed(cls, path: FilePath) -> "SourceFile":
@@ -227,7 +229,6 @@ class SchemaSourceFile(BaseSourceFile):
     sources: List[str] = field(default_factory=list)
     exposures: List[str] = field(default_factory=list)
     metrics: List[str] = field(default_factory=list)
-    entities: List[str] = field(default_factory=list)
     # node patches contain models, seeds, snapshots, analyses
     ndp: List[str] = field(default_factory=list)
     # any macro patches in this file by macro unique_id.
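
The new language field defaults to "sql"; how it is populated is not shown in this compare. One purely illustrative sketch would infer it from the file extension:

from pathlib import Path

# hypothetical mapping; the actual routing logic is not part of this diff
EXTENSION_LANGUAGES = {".sql": "sql", ".py": "python", ".prql": "prql"}

def infer_language(path: str, default: str = "sql") -> str:
    return EXTENSION_LANGUAGES.get(Path(path).suffix, default)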


@@ -7,7 +7,6 @@ from dbt.contracts.graph.parsed import (
     ParsedModelNode,
     ParsedExposure,
     ParsedMetric,
-    ParsedEntity,
     ParsedResource,
     ParsedRPCNode,
     ParsedSqlNode,
@@ -43,6 +42,7 @@ class CompiledNodeMixin(dbtClassMixin):
 @dataclass
 class CompiledNode(ParsedNode, CompiledNodeMixin):
     compiled_code: Optional[str] = None
+    compiled_language: Optional[str] = None  # TODO: ModelLanguage
     extra_ctes_injected: bool = False
     extra_ctes: List[InjectedCTE] = field(default_factory=list)
     relation_name: Optional[str] = None
@@ -234,5 +234,4 @@ GraphMemberNode = Union[
     CompileResultNode,
     ParsedExposure,
     ParsedMetric,
-    ParsedEntity,
 ]


@@ -36,7 +36,6 @@ from dbt.contracts.graph.parsed import (
     ParsedGenericTestNode,
     ParsedExposure,
     ParsedMetric,
-    ParsedEntity,
     HasUniqueID,
     UnpatchedSourceDefinition,
     ManifestNodes,
@@ -217,39 +216,8 @@ class MetricLookup(dbtClassMixin):
         )
         return manifest.metrics[unique_id]

-class EntityLookup(dbtClassMixin):
-    def __init__(self, manifest: "Manifest"):
-        self.storage: Dict[str, Dict[PackageName, UniqueID]] = {}
-        self.populate(manifest)
-
-    def get_unique_id(self, search_name, package: Optional[PackageName]):
-        return find_unique_id_for_package(self.storage, search_name, package)
-
-    def find(self, search_name, package: Optional[PackageName], manifest: "Manifest"):
-        unique_id = self.get_unique_id(search_name, package)
-        if unique_id is not None:
-            return self.perform_lookup(unique_id, manifest)
-        return None
-
-    def add_entity(self, entity: ParsedEntity):
-        if entity.search_name not in self.storage:
-            self.storage[entity.search_name] = {}
-        self.storage[entity.search_name][entity.package_name] = entity.unique_id
-
-    def populate(self, manifest):
-        for entity in manifest.entities.values():
-            if hasattr(entity, "name"):
-                self.add_entity(entity)
-
-    def perform_lookup(self, unique_id: UniqueID, manifest: "Manifest") -> ParsedEntity:
-        if unique_id not in manifest.entities:
-            raise dbt.exceptions.InternalException(
-                f"Entity {unique_id} found in cache but not found in manifest"
-            )
-        return manifest.entities[unique_id]
-
-# This handles both models/seeds/snapshots and sources/metrics/entities/exposures
+# This handles both models/seeds/snapshots and sources/metrics/exposures
 class DisabledLookup(dbtClassMixin):
     def __init__(self, manifest: "Manifest"):
         self.storage: Dict[str, Dict[PackageName, List[Any]]] = {}
@@ -499,7 +467,6 @@ class Disabled(Generic[D]):
 MaybeMetricNode = Optional[Union[ParsedMetric, Disabled[ParsedMetric]]]

-MaybeEntityNode = Optional[Union[ParsedEntity, Disabled[ParsedEntity]]]

 MaybeDocumentation = Optional[ParsedDocumentation]
@@ -644,7 +611,6 @@ class Manifest(MacroMethods, DataClassMessagePackMixin, dbtClassMixin):
     docs: MutableMapping[str, ParsedDocumentation] = field(default_factory=dict)
     exposures: MutableMapping[str, ParsedExposure] = field(default_factory=dict)
     metrics: MutableMapping[str, ParsedMetric] = field(default_factory=dict)
-    entities: MutableMapping[str, ParsedEntity] = field(default_factory=dict)
     selectors: MutableMapping[str, Any] = field(default_factory=dict)
     files: MutableMapping[str, AnySourceFile] = field(default_factory=dict)
     metadata: ManifestMetadata = field(default_factory=ManifestMetadata)
@@ -666,9 +632,6 @@ class Manifest(MacroMethods, DataClassMessagePackMixin, dbtClassMixin):
     _metric_lookup: Optional[MetricLookup] = field(
         default=None, metadata={"serialize": lambda x: None, "deserialize": lambda x: None}
     )
-    _entity_lookup: Optional[EntityLookup] = field(
-        default=None, metadata={"serialize": lambda x: None, "deserialize": lambda x: None}
-    )
     _disabled_lookup: Optional[DisabledLookup] = field(
         default=None, metadata={"serialize": lambda x: None, "deserialize": lambda x: None}
     )
@@ -719,9 +682,6 @@ class Manifest(MacroMethods, DataClassMessagePackMixin, dbtClassMixin):
     def update_metric(self, new_metric: ParsedMetric):
         _update_into(self.metrics, new_metric)

-    def update_entity(self, new_entity: ParsedEntity):
-        _update_into(self.entities, new_entity)
-
     def update_node(self, new_node: ManifestNode):
         _update_into(self.nodes, new_node)
@@ -737,7 +697,6 @@ class Manifest(MacroMethods, DataClassMessagePackMixin, dbtClassMixin):
         self.flat_graph = {
             "exposures": {k: v.to_dict(omit_none=False) for k, v in self.exposures.items()},
             "metrics": {k: v.to_dict(omit_none=False) for k, v in self.metrics.items()},
-            "entities": {k: v.to_dict(omit_none=False) for k, v in self.entities.items()},
             "nodes": {k: v.to_dict(omit_none=False) for k, v in self.nodes.items()},
             "sources": {k: v.to_dict(omit_none=False) for k, v in self.sources.items()},
         }
@@ -800,7 +759,6 @@ class Manifest(MacroMethods, DataClassMessagePackMixin, dbtClassMixin):
             self.nodes.values(),
             self.sources.values(),
             self.metrics.values(),
-            self.entities.values(),
         )
         for resource in all_resources:
             resource_type_plural = resource.resource_type.pluralize()
@@ -829,7 +787,6 @@ class Manifest(MacroMethods, DataClassMessagePackMixin, dbtClassMixin):
             docs={k: _deepcopy(v) for k, v in self.docs.items()},
             exposures={k: _deepcopy(v) for k, v in self.exposures.items()},
             metrics={k: _deepcopy(v) for k, v in self.metrics.items()},
-            entities={k: _deepcopy(v) for k, v in self.entities.items()},
             selectors={k: _deepcopy(v) for k, v in self.selectors.items()},
             metadata=self.metadata,
             disabled={k: _deepcopy(v) for k, v in self.disabled.items()},
@@ -846,7 +803,6 @@ class Manifest(MacroMethods, DataClassMessagePackMixin, dbtClassMixin):
                 self.sources.values(),
                 self.exposures.values(),
                 self.metrics.values(),
-                self.entities.values(),
             )
         )
         forward_edges, backward_edges = build_node_edges(edge_members)
@@ -872,7 +828,6 @@ class Manifest(MacroMethods, DataClassMessagePackMixin, dbtClassMixin):
             docs=self.docs,
             exposures=self.exposures,
             metrics=self.metrics,
-            entities=self.entities,
             selectors=self.selectors,
             metadata=self.metadata,
             disabled=self.disabled,
@@ -894,8 +849,6 @@ class Manifest(MacroMethods, DataClassMessagePackMixin, dbtClassMixin):
             return self.exposures[unique_id]
         elif unique_id in self.metrics:
             return self.metrics[unique_id]
-        elif unique_id in self.entities:
-            return self.entities[unique_id]
         else:
             # something terrible has happened
             raise dbt.exceptions.InternalException(
@@ -932,12 +885,6 @@ class Manifest(MacroMethods, DataClassMessagePackMixin, dbtClassMixin):
             self._metric_lookup = MetricLookup(self)
         return self._metric_lookup

-    @property
-    def entity_lookup(self) -> EntityLookup:
-        if self._entity_lookup is None:
-            self._entity_lookup = EntityLookup(self)
-        return self._entity_lookup
-
     def rebuild_ref_lookup(self):
         self._ref_lookup = RefableLookup(self)
@@ -1038,32 +985,6 @@ class Manifest(MacroMethods, DataClassMessagePackMixin, dbtClassMixin):
                 return Disabled(disabled[0])
         return None

-    def resolve_entity(
-        self,
-        target_entity_name: str,
-        target_entity_package: Optional[str],
-        current_project: str,
-        node_package: str,
-    ) -> MaybeEntityNode:
-        entity: Optional[ParsedEntity] = None
-        disabled: Optional[List[ParsedEntity]] = None
-
-        candidates = _search_packages(current_project, node_package, target_entity_package)
-        for pkg in candidates:
-            entity = self.entity_lookup.find(target_entity_name, pkg, self)
-
-            if entity is not None and entity.config.enabled:
-                return entity
-
-            # it's possible that the node is disabled
-            if disabled is None:
-                disabled = self.disabled_lookup.find(f"{target_entity_name}", pkg)
-        if disabled:
-            return Disabled(disabled[0])
-        return None
-
     # Called by DocsRuntimeContext.doc
     def resolve_doc(
         self,
@@ -1178,8 +1099,6 @@ class Manifest(MacroMethods, DataClassMessagePackMixin, dbtClassMixin):
                 source_file.add_test(node.unique_id, test_from)
             if isinstance(node, ParsedMetric):
                 source_file.metrics.append(node.unique_id)
-            if isinstance(node, ParsedEntity):
-                source_file.entities.append(node.unique_id)
             if isinstance(node, ParsedExposure):
                 source_file.exposures.append(node.unique_id)
         else:
@@ -1195,11 +1114,6 @@ class Manifest(MacroMethods, DataClassMessagePackMixin, dbtClassMixin):
         self.metrics[metric.unique_id] = metric
         source_file.metrics.append(metric.unique_id)

-    def add_entity(self, source_file: SchemaSourceFile, entity: ParsedEntity):
-        _check_duplicates(entity, self.entities)
-        self.entities[entity.unique_id] = entity
-        source_file.entities.append(entity.unique_id)
-
     def add_disabled_nofile(self, node: GraphMemberNode):
         # There can be multiple disabled nodes for the same unique_id
         if node.unique_id in self.disabled:
@@ -1215,8 +1129,6 @@ class Manifest(MacroMethods, DataClassMessagePackMixin, dbtClassMixin):
                 source_file.add_test(node.unique_id, test_from)
             if isinstance(node, ParsedMetric):
                 source_file.metrics.append(node.unique_id)
-            if isinstance(node, ParsedEntity):
-                source_file.entities.append(node.unique_id)
             if isinstance(node, ParsedExposure):
                 source_file.exposures.append(node.unique_id)
         else:
@@ -1244,7 +1156,6 @@ class Manifest(MacroMethods, DataClassMessagePackMixin, dbtClassMixin):
             self.docs,
             self.exposures,
             self.metrics,
-            self.entities,
             self.selectors,
             self.files,
             self.metadata,
@@ -1257,7 +1168,6 @@ class Manifest(MacroMethods, DataClassMessagePackMixin, dbtClassMixin):
             self._source_lookup,
             self._ref_lookup,
             self._metric_lookup,
-            self._entity_lookup,
             self._disabled_lookup,
             self._analysis_lookup,
         )
@@ -1299,9 +1209,6 @@ class WritableManifest(ArtifactMixin):
     metrics: Mapping[UniqueID, ParsedMetric] = field(
         metadata=dict(description=("The metrics defined in the dbt project and its dependencies"))
     )
-    entities: Mapping[UniqueID, ParsedEntity] = field(
-        metadata=dict(description=("The entities defined in the dbt project and its dependencies"))
-    )
     selectors: Mapping[UniqueID, Any] = field(
         metadata=dict(description=("The selectors defined in selectors.yml"))
     )
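
The removed EntityLookup mirrors the manifest's other lookups: a two-level index from search name to package name to unique_id. A minimal standalone sketch of that pattern (names here are illustrative, not dbt's):

from typing import Dict, Optional

class NameLookup:
    def __init__(self) -> None:
        # search_name -> package_name -> unique_id
        self.storage: Dict[str, Dict[str, str]] = {}

    def add(self, search_name: str, package_name: str, unique_id: str) -> None:
        self.storage.setdefault(search_name, {})[package_name] = unique_id

    def get_unique_id(self, search_name: str, package: Optional[str]) -> Optional[str]:
        candidates = self.storage.get(search_name, {})
        if package is not None:
            return candidates.get(package)
        # no package specified: fall back to any single match
        return next(iter(candidates.values()), None)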


@@ -367,9 +367,6 @@ class BaseConfig(AdditionalPropertiesAllowed, Replaceable):
 class MetricConfig(BaseConfig):
     enabled: bool = True

-@dataclass
-class EntityConfig(BaseConfig):
-    enabled: bool = True

 @dataclass
 class ExposureConfig(BaseConfig):
@@ -607,7 +604,6 @@ class SnapshotConfig(EmptySnapshotConfig):
 RESOURCE_TYPES: Dict[NodeType, Type[BaseConfig]] = {
     NodeType.Metric: MetricConfig,
-    NodeType.Entity: EntityConfig,
     NodeType.Exposure: ExposureConfig,
     NodeType.Source: SourceConfig,
     NodeType.Seed: SeedConfig,


@@ -38,7 +38,6 @@ from dbt.contracts.graph.unparsed import (
     MaturityType,
     MetricFilter,
     MetricTime,
-    EntityDimension
 )
 from dbt.contracts.util import Replaceable, AdditionalPropertiesMixin
 from dbt.events.proto_types import NodeInfo
@@ -59,7 +58,6 @@ from .model_config import (
     TestConfig,
     SourceConfig,
     MetricConfig,
-    EntityConfig,
     ExposureConfig,
     EmptySnapshotConfig,
     SnapshotConfig,
@@ -211,7 +209,6 @@ class ParsedNodeDefaults(NodeInfoMixin, ParsedNodeMandatory):
     refs: List[List[str]] = field(default_factory=list)
     sources: List[List[str]] = field(default_factory=list)
     metrics: List[List[str]] = field(default_factory=list)
-    entities: List[List[str]] = field(default_factory=list)
     depends_on: DependsOn = field(default_factory=DependsOn)
     description: str = field(default="")
     columns: Dict[str, ColumnInfo] = field(default_factory=dict)
@@ -255,7 +252,7 @@ class ParsedNode(ParsedNodeDefaults, ParsedNodeMixins, SerializableType):
     @classmethod
     def _deserialize(cls, dct: Dict[str, int]):
         # The serialized ParsedNodes do not differ from each other
-        # in fields that would allow 'from_dict' to distinguish
+        # in fields that would allow 'from_dict' to distinguis
         # between them.
         resource_type = dct["resource_type"]
         if resource_type == "model":
@@ -767,7 +764,6 @@ class ParsedExposure(UnparsedBaseNode, HasUniqueID, HasFqn):
     depends_on: DependsOn = field(default_factory=DependsOn)
     refs: List[List[str]] = field(default_factory=list)
     sources: List[List[str]] = field(default_factory=list)
-    metrics: List[List[str]] = field(default_factory=list)
     created_at: float = field(default_factory=lambda: time.time())

     @property
@@ -848,7 +844,7 @@ class ParsedMetric(UnparsedBaseNode, HasUniqueID, HasFqn):
     resource_type: NodeType = NodeType.Metric
     meta: Dict[str, Any] = field(default_factory=dict)
     tags: List[str] = field(default_factory=list)
-    config: EntityConfig = field(default_factory=EntityConfig)
+    config: MetricConfig = field(default_factory=MetricConfig)
     unrendered_config: Dict[str, Any] = field(default_factory=dict)
     sources: List[List[str]] = field(default_factory=list)
     depends_on: DependsOn = field(default_factory=DependsOn)
@@ -921,60 +917,6 @@ class ParsedMetric(UnparsedBaseNode, HasUniqueID, HasFqn):
             and True
         )

-@dataclass
-class ParsedEntity(UnparsedBaseNode, HasUniqueID, HasFqn):
-    name: str
-    model: str
-    description: str
-    dimensions: Dict[str, EntityDimension] = field(default_factory=dict)
-    model_unique_id: Optional[str] = None
-    resource_type: NodeType = NodeType.Metric
-    meta: Dict[str, Any] = field(default_factory=dict)
-    tags: List[str] = field(default_factory=list)
-    config: MetricConfig = field(default_factory=MetricConfig)
-    unrendered_config: Dict[str, Any] = field(default_factory=dict)
-    sources: List[List[str]] = field(default_factory=list)
-    depends_on: DependsOn = field(default_factory=DependsOn)
-    refs: List[List[str]] = field(default_factory=list)
-    entities: List[List[str]] = field(default_factory=list)
-    created_at: float = field(default_factory=lambda: time.time())
-
-    @property
-    def depends_on_nodes(self):
-        return self.depends_on.nodes
-
-    @property
-    def search_name(self):
-        return self.name
-
-    def same_model(self, old: "ParsedEntity") -> bool:
-        return self.model == old.model
-
-    def same_description(self, old: "ParsedEntity") -> bool:
-        return self.description == old.description
-
-    def same_dimensions(self, old: "ParsedEntity") -> bool:
-        return self.dimensions == old.dimensions
-
-    def same_config(self, old: "ParsedEntity") -> bool:
-        return self.config.same_contents(
-            self.unrendered_config,
-            old.unrendered_config,
-        )
-
-    def same_contents(self, old: Optional["ParsedEntity"]) -> bool:
-        # existing when it didn't before is a change!
-        # metadata/tags changes are not "changes"
-        if old is None:
-            return True
-
-        return (
-            self.same_model(old)
-            and self.same_description(old)
-            and self.same_dimensions(old)
-            and self.same_config(old)
-            and True
-        )

 ManifestNodes = Union[
     ParsedAnalysisNode,
@@ -995,6 +937,5 @@ ParsedResource = Union[
     ParsedNode,
     ParsedExposure,
     ParsedMetric,
-    ParsedEntity,
     ParsedSourceDefinition,
 ]


@@ -523,47 +523,3 @@ class UnparsedMetric(dbtClassMixin, Replaceable):
         if data.get("model") is not None and data.get("calculation_method") == "derived":
             raise ValidationError("Derived metrics cannot have a 'model' property")

-@dataclass
-class EntityDimension(dbtClassMixin, Mergeable):
-    """This class is used for the dimension information at the entity level. It
-    closely matches the implementation of columns for models."""
-
-    name: str
-    description: str = ""
-    column_name: Optional[str] = None
-    date_type: Optional[str] = None
-    default_timestamp: Optional[bool] = None
-    primary_key: Optional[bool] = None
-    time_grains: Optional[List[str]] = field(default_factory=list)
-    tags: List[str] = field(default_factory=list)
-    meta: Dict[str, Any] = field(default_factory=dict)
-
-@dataclass
-class EntityInheritence(EntityDimension):
-    """This class is used for entity dimension inheritence. This class is optional
-    but if it is present then include needs to be present. Exclude cannot be present
-    without some idea of what is being included, whereas exclude is fully optional.
-    The acceptable inputs for include are either a list of columns/dimensions or *
-    to represent all fields. The acceptable inputs for exclude are a list of columns/
-    dimensions
-    """
-
-    include: Union[List[str],str] = field(default_factory=list)
-    exclude: Optional[List[str]] = field(default_factory=list)
-
-@dataclass
-class UnparsedEntity(dbtClassMixin, Replaceable):
-    """This class is used for entity information"""
-
-    name: str
-    model: str
-    description: str = ""
-    dimensions: Optional[Union[Optional[Sequence[EntityDimension]],Optional[EntityInheritence]]] = None
-    # dimensions: Optional[Sequence[EntityDimension]] = None
-    meta: Dict[str, Any] = field(default_factory=dict)
-    tags: List[str] = field(default_factory=list)
-    config: Dict[str, Any] = field(default_factory=dict)
-
-    @classmethod
-    def validate(cls, data):
-        super(UnparsedEntity, cls).validate(data)
-        errors = []
-        ## TODO: Add validation here around include/exclude and others
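
A sketch of the include/exclude validation the TODO above leaves open, based only on the EntityInheritence docstring (exclude requires include; include is a list of dimensions or "*"); this is illustrative, not code from the branch:

def validate_inheritance(include, exclude) -> None:
    # hypothetical: exclude is only meaningful alongside include
    if exclude and not include:
        raise ValueError("'exclude' requires 'include' (a list of dimensions or '*')")
    # hypothetical: a string include must be the literal '*'
    if isinstance(include, str) and include != "*":
        raise ValueError("'include' must be a list of dimensions or '*'")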


@@ -208,7 +208,6 @@ class Project(HyphenatedDbtClassMixin, Replaceable):
     sources: Dict[str, Any] = field(default_factory=dict)
     tests: Dict[str, Any] = field(default_factory=dict)
     metrics: Dict[str, Any] = field(default_factory=dict)
-    entities: Dict[str, Any] = field(default_factory=dict)
     exposures: Dict[str, Any] = field(default_factory=dict)
     vars: Optional[Dict[str, Any]] = field(
         default=None,

Binary file not shown.

Binary file not shown.


@@ -1,4 +0,0 @@
# Sphinx build info version 1
# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
config: 1ee31fc16e025fb98598189ba2cb5fcb
tags: 645f666f9bcd5a90fca523b33c5a78b7


@@ -1,4 +0,0 @@
dbt-core's API documentation
============================
.. dbt_click:: dbt.cli.main:cli


@@ -1,134 +0,0 @@
/*
* _sphinx_javascript_frameworks_compat.js
* ~~~~~~~~~~
*
* Compatability shim for jQuery and underscores.js.
*
* WILL BE REMOVED IN Sphinx 6.0
* xref RemovedInSphinx60Warning
*
*/
/**
* select a different prefix for underscore
*/
$u = _.noConflict();
/**
* small helper function to urldecode strings
*
* See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/decodeURIComponent#Decoding_query_parameters_from_a_URL
*/
jQuery.urldecode = function(x) {
if (!x) {
return x
}
return decodeURIComponent(x.replace(/\+/g, ' '));
};
/**
* small helper function to urlencode strings
*/
jQuery.urlencode = encodeURIComponent;
/**
* This function returns the parsed url parameters of the
* current request. Multiple values per key are supported,
* it will always return arrays of strings for the value parts.
*/
jQuery.getQueryParameters = function(s) {
if (typeof s === 'undefined')
s = document.location.search;
var parts = s.substr(s.indexOf('?') + 1).split('&');
var result = {};
for (var i = 0; i < parts.length; i++) {
var tmp = parts[i].split('=', 2);
var key = jQuery.urldecode(tmp[0]);
var value = jQuery.urldecode(tmp[1]);
if (key in result)
result[key].push(value);
else
result[key] = [value];
}
return result;
};
/**
* highlight a given string on a jquery object by wrapping it in
* span elements with the given class name.
*/
jQuery.fn.highlightText = function(text, className) {
function highlight(node, addItems) {
if (node.nodeType === 3) {
var val = node.nodeValue;
var pos = val.toLowerCase().indexOf(text);
if (pos >= 0 &&
!jQuery(node.parentNode).hasClass(className) &&
!jQuery(node.parentNode).hasClass("nohighlight")) {
var span;
var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg");
if (isInSVG) {
span = document.createElementNS("http://www.w3.org/2000/svg", "tspan");
} else {
span = document.createElement("span");
span.className = className;
}
span.appendChild(document.createTextNode(val.substr(pos, text.length)));
node.parentNode.insertBefore(span, node.parentNode.insertBefore(
document.createTextNode(val.substr(pos + text.length)),
node.nextSibling));
node.nodeValue = val.substr(0, pos);
if (isInSVG) {
var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect");
var bbox = node.parentElement.getBBox();
rect.x.baseVal.value = bbox.x;
rect.y.baseVal.value = bbox.y;
rect.width.baseVal.value = bbox.width;
rect.height.baseVal.value = bbox.height;
rect.setAttribute('class', className);
addItems.push({
"parent": node.parentNode,
"target": rect});
}
}
}
else if (!jQuery(node).is("button, select, textarea")) {
jQuery.each(node.childNodes, function() {
highlight(this, addItems);
});
}
}
var addItems = [];
var result = this.each(function() {
highlight(this, addItems);
});
for (var i = 0; i < addItems.length; ++i) {
jQuery(addItems[i].parent).before(addItems[i].target);
}
return result;
};
/*
* backward compatibility for jQuery.browser
* This will be supported until firefox bug is fixed.
*/
if (!jQuery.browser) {
jQuery.uaMatch = function(ua) {
ua = ua.toLowerCase();
var match = /(chrome)[ \/]([\w.]+)/.exec(ua) ||
/(webkit)[ \/]([\w.]+)/.exec(ua) ||
/(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) ||
/(msie) ([\w.]+)/.exec(ua) ||
ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) ||
[];
return {
browser: match[ 1 ] || "",
version: match[ 2 ] || "0"
};
};
jQuery.browser = {};
jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true;
}


@@ -1,701 +0,0 @@
@import url("basic.css");
/* -- page layout ----------------------------------------------------------- */
body {
font-family: Georgia, serif;
font-size: 17px;
background-color: #fff;
color: #000;
margin: 0;
padding: 0;
}
div.document {
width: 940px;
margin: 30px auto 0 auto;
}
div.documentwrapper {
float: left;
width: 100%;
}
div.bodywrapper {
margin: 0 0 0 220px;
}
div.sphinxsidebar {
width: 220px;
font-size: 14px;
line-height: 1.5;
}
hr {
border: 1px solid #B1B4B6;
}
div.body {
background-color: #fff;
color: #3E4349;
padding: 0 30px 0 30px;
}
div.body > .section {
text-align: left;
}
div.footer {
width: 940px;
margin: 20px auto 30px auto;
font-size: 14px;
color: #888;
text-align: right;
}
div.footer a {
color: #888;
}
p.caption {
font-family: inherit;
font-size: inherit;
}
div.relations {
display: none;
}
div.sphinxsidebar a {
color: #444;
text-decoration: none;
border-bottom: 1px dotted #999;
}
div.sphinxsidebar a:hover {
border-bottom: 1px solid #999;
}
div.sphinxsidebarwrapper {
padding: 18px 10px;
}
div.sphinxsidebarwrapper p.logo {
padding: 0;
margin: -10px 0 0 0px;
text-align: center;
}
div.sphinxsidebarwrapper h1.logo {
margin-top: -10px;
text-align: center;
margin-bottom: 5px;
text-align: left;
}
div.sphinxsidebarwrapper h1.logo-name {
margin-top: 0px;
}
div.sphinxsidebarwrapper p.blurb {
margin-top: 0;
font-style: normal;
}
div.sphinxsidebar h3,
div.sphinxsidebar h4 {
font-family: Georgia, serif;
color: #444;
font-size: 24px;
font-weight: normal;
margin: 0 0 5px 0;
padding: 0;
}
div.sphinxsidebar h4 {
font-size: 20px;
}
div.sphinxsidebar h3 a {
color: #444;
}
div.sphinxsidebar p.logo a,
div.sphinxsidebar h3 a,
div.sphinxsidebar p.logo a:hover,
div.sphinxsidebar h3 a:hover {
border: none;
}
div.sphinxsidebar p {
color: #555;
margin: 10px 0;
}
div.sphinxsidebar ul {
margin: 10px 0;
padding: 0;
color: #000;
}
div.sphinxsidebar ul li.toctree-l1 > a {
font-size: 120%;
}
div.sphinxsidebar ul li.toctree-l2 > a {
font-size: 110%;
}
div.sphinxsidebar input {
border: 1px solid #CCC;
font-family: Georgia, serif;
font-size: 1em;
}
div.sphinxsidebar hr {
border: none;
height: 1px;
color: #AAA;
background: #AAA;
text-align: left;
margin-left: 0;
width: 50%;
}
div.sphinxsidebar .badge {
border-bottom: none;
}
div.sphinxsidebar .badge:hover {
border-bottom: none;
}
/* To address an issue with donation coming after search */
div.sphinxsidebar h3.donation {
margin-top: 10px;
}
/* -- body styles ----------------------------------------------------------- */
a {
color: #004B6B;
text-decoration: underline;
}
a:hover {
color: #6D4100;
text-decoration: underline;
}
div.body h1,
div.body h2,
div.body h3,
div.body h4,
div.body h5,
div.body h6 {
font-family: Georgia, serif;
font-weight: normal;
margin: 30px 0px 10px 0px;
padding: 0;
}
div.body h1 { margin-top: 0; padding-top: 0; font-size: 240%; }
div.body h2 { font-size: 180%; }
div.body h3 { font-size: 150%; }
div.body h4 { font-size: 130%; }
div.body h5 { font-size: 100%; }
div.body h6 { font-size: 100%; }
a.headerlink {
color: #DDD;
padding: 0 4px;
text-decoration: none;
}
a.headerlink:hover {
color: #444;
background: #EAEAEA;
}
div.body p, div.body dd, div.body li {
line-height: 1.4em;
}
div.admonition {
margin: 20px 0px;
padding: 10px 30px;
background-color: #EEE;
border: 1px solid #CCC;
}
div.admonition tt.xref, div.admonition code.xref, div.admonition a tt {
background-color: #FBFBFB;
border-bottom: 1px solid #fafafa;
}
div.admonition p.admonition-title {
font-family: Georgia, serif;
font-weight: normal;
font-size: 24px;
margin: 0 0 10px 0;
padding: 0;
line-height: 1;
}
div.admonition p.last {
margin-bottom: 0;
}
div.highlight {
background-color: #fff;
}
dt:target, .highlight {
background: #FAF3E8;
}
div.warning {
background-color: #FCC;
border: 1px solid #FAA;
}
div.danger {
background-color: #FCC;
border: 1px solid #FAA;
-moz-box-shadow: 2px 2px 4px #D52C2C;
-webkit-box-shadow: 2px 2px 4px #D52C2C;
box-shadow: 2px 2px 4px #D52C2C;
}
div.error {
background-color: #FCC;
border: 1px solid #FAA;
-moz-box-shadow: 2px 2px 4px #D52C2C;
-webkit-box-shadow: 2px 2px 4px #D52C2C;
box-shadow: 2px 2px 4px #D52C2C;
}
div.caution {
background-color: #FCC;
border: 1px solid #FAA;
}
div.attention {
background-color: #FCC;
border: 1px solid #FAA;
}
div.important {
background-color: #EEE;
border: 1px solid #CCC;
}
div.note {
background-color: #EEE;
border: 1px solid #CCC;
}
div.tip {
background-color: #EEE;
border: 1px solid #CCC;
}
div.hint {
background-color: #EEE;
border: 1px solid #CCC;
}
div.seealso {
background-color: #EEE;
border: 1px solid #CCC;
}
div.topic {
background-color: #EEE;
}
p.admonition-title {
display: inline;
}
p.admonition-title:after {
content: ":";
}
pre, tt, code {
font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace;
font-size: 0.9em;
}
.hll {
background-color: #FFC;
margin: 0 -12px;
padding: 0 12px;
display: block;
}
img.screenshot {
}
tt.descname, tt.descclassname, code.descname, code.descclassname {
font-size: 0.95em;
}
tt.descname, code.descname {
padding-right: 0.08em;
}
img.screenshot {
-moz-box-shadow: 2px 2px 4px #EEE;
-webkit-box-shadow: 2px 2px 4px #EEE;
box-shadow: 2px 2px 4px #EEE;
}
table.docutils {
border: 1px solid #888;
-moz-box-shadow: 2px 2px 4px #EEE;
-webkit-box-shadow: 2px 2px 4px #EEE;
box-shadow: 2px 2px 4px #EEE;
}
table.docutils td, table.docutils th {
border: 1px solid #888;
padding: 0.25em 0.7em;
}
table.field-list, table.footnote {
border: none;
-moz-box-shadow: none;
-webkit-box-shadow: none;
box-shadow: none;
}
table.footnote {
margin: 15px 0;
width: 100%;
border: 1px solid #EEE;
background: #FDFDFD;
font-size: 0.9em;
}
table.footnote + table.footnote {
margin-top: -15px;
border-top: none;
}
table.field-list th {
padding: 0 0.8em 0 0;
}
table.field-list td {
padding: 0;
}
table.field-list p {
margin-bottom: 0.8em;
}
/* Cloned from
* https://github.com/sphinx-doc/sphinx/commit/ef60dbfce09286b20b7385333d63a60321784e68
*/
.field-name {
-moz-hyphens: manual;
-ms-hyphens: manual;
-webkit-hyphens: manual;
hyphens: manual;
}
table.footnote td.label {
width: .1px;
padding: 0.3em 0 0.3em 0.5em;
}
table.footnote td {
padding: 0.3em 0.5em;
}
dl {
margin: 0;
padding: 0;
}
dl dd {
margin-left: 30px;
}
blockquote {
margin: 0 0 0 30px;
padding: 0;
}
ul, ol {
/* Matches the 30px from the narrow-screen "li > ul" selector below */
margin: 10px 0 10px 30px;
padding: 0;
}
pre {
background: #EEE;
padding: 7px 30px;
margin: 15px 0px;
line-height: 1.3em;
}
div.viewcode-block:target {
background: #ffd;
}
dl pre, blockquote pre, li pre {
margin-left: 0;
padding-left: 30px;
}
tt, code {
background-color: #ecf0f3;
color: #222;
/* padding: 1px 2px; */
}
tt.xref, code.xref, a tt {
background-color: #FBFBFB;
border-bottom: 1px solid #fff;
}
a.reference {
text-decoration: none;
border-bottom: 1px dotted #004B6B;
}
/* Don't put an underline on images */
a.image-reference, a.image-reference:hover {
border-bottom: none;
}
a.reference:hover {
border-bottom: 1px solid #6D4100;
}
a.footnote-reference {
text-decoration: none;
font-size: 0.7em;
vertical-align: top;
border-bottom: 1px dotted #004B6B;
}
a.footnote-reference:hover {
border-bottom: 1px solid #6D4100;
}
a:hover tt, a:hover code {
background: #EEE;
}
@media screen and (max-width: 870px) {
div.sphinxsidebar {
display: none;
}
div.document {
width: 100%;
}
div.documentwrapper {
margin-left: 0;
margin-top: 0;
margin-right: 0;
margin-bottom: 0;
}
div.bodywrapper {
margin-top: 0;
margin-right: 0;
margin-bottom: 0;
margin-left: 0;
}
ul {
margin-left: 0;
}
li > ul {
/* Matches the 30px from the "ul, ol" selector above */
margin-left: 30px;
}
.document {
width: auto;
}
.footer {
width: auto;
}
.bodywrapper {
margin: 0;
}
.footer {
width: auto;
}
.github {
display: none;
}
}
@media screen and (max-width: 875px) {
body {
margin: 0;
padding: 20px 30px;
}
div.documentwrapper {
float: none;
background: #fff;
}
div.sphinxsidebar {
display: block;
float: none;
width: 102.5%;
margin: 50px -30px -20px -30px;
padding: 10px 20px;
background: #333;
color: #FFF;
}
div.sphinxsidebar h3, div.sphinxsidebar h4, div.sphinxsidebar p,
div.sphinxsidebar h3 a {
color: #fff;
}
div.sphinxsidebar a {
color: #AAA;
}
div.sphinxsidebar p.logo {
display: none;
}
div.document {
width: 100%;
margin: 0;
}
div.footer {
display: none;
}
div.bodywrapper {
margin: 0;
}
div.body {
min-height: 0;
padding: 0;
}
.rtd_doc_footer {
display: none;
}
.document {
width: auto;
}
.footer {
width: auto;
}
.footer {
width: auto;
}
.github {
display: none;
}
}
/* misc. */
.revsys-inline {
display: none!important;
}
/* Make nested-list/multi-paragraph items look better in Releases changelog
* pages. Without this, docutils' magical list fuckery causes inconsistent
* formatting between different release sub-lists.
*/
div#changelog > div.section > ul > li > p:only-child {
margin-bottom: 0;
}
/* Hide fugly table cell borders in ..bibliography:: directive output */
table.docutils.citation, table.docutils.citation td, table.docutils.citation th {
border: none;
/* Below needed in some edge cases; if not applied, bottom shadows appear */
-moz-box-shadow: none;
-webkit-box-shadow: none;
box-shadow: none;
}
/* relbar */
.related {
line-height: 30px;
width: 100%;
font-size: 0.9rem;
}
.related.top {
border-bottom: 1px solid #EEE;
margin-bottom: 20px;
}
.related.bottom {
border-top: 1px solid #EEE;
}
.related ul {
padding: 0;
margin: 0;
list-style: none;
}
.related li {
display: inline;
}
nav#rellinks {
float: right;
}
nav#rellinks li+li:before {
content: "|";
}
nav#breadcrumbs li+li:before {
content: "\00BB";
}
/* Hide certain items when printing */
@media print {
div.related {
display: none;
}
}


@@ -1,900 +0,0 @@
/*
* basic.css
* ~~~~~~~~~
*
* Sphinx stylesheet -- basic theme.
*
* :copyright: Copyright 2007-2022 by the Sphinx team, see AUTHORS.
* :license: BSD, see LICENSE for details.
*
*/
/* -- main layout ----------------------------------------------------------- */
div.clearer {
clear: both;
}
div.section::after {
display: block;
content: '';
clear: left;
}
/* -- relbar ---------------------------------------------------------------- */
div.related {
width: 100%;
font-size: 90%;
}
div.related h3 {
display: none;
}
div.related ul {
margin: 0;
padding: 0 0 0 10px;
list-style: none;
}
div.related li {
display: inline;
}
div.related li.right {
float: right;
margin-right: 5px;
}
/* -- sidebar --------------------------------------------------------------- */
div.sphinxsidebarwrapper {
padding: 10px 5px 0 10px;
}
div.sphinxsidebar {
float: left;
width: 230px;
margin-left: -100%;
font-size: 90%;
word-wrap: break-word;
overflow-wrap : break-word;
}
div.sphinxsidebar ul {
list-style: none;
}
div.sphinxsidebar ul ul,
div.sphinxsidebar ul.want-points {
margin-left: 20px;
list-style: square;
}
div.sphinxsidebar ul ul {
margin-top: 0;
margin-bottom: 0;
}
div.sphinxsidebar form {
margin-top: 10px;
}
div.sphinxsidebar input {
border: 1px solid #98dbcc;
font-family: sans-serif;
font-size: 1em;
}
div.sphinxsidebar #searchbox form.search {
overflow: hidden;
}
div.sphinxsidebar #searchbox input[type="text"] {
float: left;
width: 80%;
padding: 0.25em;
box-sizing: border-box;
}
div.sphinxsidebar #searchbox input[type="submit"] {
float: left;
width: 20%;
border-left: none;
padding: 0.25em;
box-sizing: border-box;
}
img {
border: 0;
max-width: 100%;
}
/* -- search page ----------------------------------------------------------- */
ul.search {
margin: 10px 0 0 20px;
padding: 0;
}
ul.search li {
padding: 5px 0 5px 20px;
background-image: url(file.png);
background-repeat: no-repeat;
background-position: 0 7px;
}
ul.search li a {
font-weight: bold;
}
ul.search li p.context {
color: #888;
margin: 2px 0 0 30px;
text-align: left;
}
ul.keywordmatches li.goodmatch a {
font-weight: bold;
}
/* -- index page ------------------------------------------------------------ */
table.contentstable {
width: 90%;
margin-left: auto;
margin-right: auto;
}
table.contentstable p.biglink {
line-height: 150%;
}
a.biglink {
font-size: 1.3em;
}
span.linkdescr {
font-style: italic;
padding-top: 5px;
font-size: 90%;
}
/* -- general index --------------------------------------------------------- */
table.indextable {
width: 100%;
}
table.indextable td {
text-align: left;
vertical-align: top;
}
table.indextable ul {
margin-top: 0;
margin-bottom: 0;
list-style-type: none;
}
table.indextable > tbody > tr > td > ul {
padding-left: 0em;
}
table.indextable tr.pcap {
height: 10px;
}
table.indextable tr.cap {
margin-top: 10px;
background-color: #f2f2f2;
}
img.toggler {
margin-right: 3px;
margin-top: 3px;
cursor: pointer;
}
div.modindex-jumpbox {
border-top: 1px solid #ddd;
border-bottom: 1px solid #ddd;
margin: 1em 0 1em 0;
padding: 0.4em;
}
div.genindex-jumpbox {
border-top: 1px solid #ddd;
border-bottom: 1px solid #ddd;
margin: 1em 0 1em 0;
padding: 0.4em;
}
/* -- domain module index --------------------------------------------------- */
table.modindextable td {
padding: 2px;
border-collapse: collapse;
}
/* -- general body styles --------------------------------------------------- */
div.body {
min-width: 360px;
max-width: 800px;
}
div.body p, div.body dd, div.body li, div.body blockquote {
-moz-hyphens: auto;
-ms-hyphens: auto;
-webkit-hyphens: auto;
hyphens: auto;
}
a.headerlink {
visibility: hidden;
}
h1:hover > a.headerlink,
h2:hover > a.headerlink,
h3:hover > a.headerlink,
h4:hover > a.headerlink,
h5:hover > a.headerlink,
h6:hover > a.headerlink,
dt:hover > a.headerlink,
caption:hover > a.headerlink,
p.caption:hover > a.headerlink,
div.code-block-caption:hover > a.headerlink {
visibility: visible;
}
div.body p.caption {
text-align: inherit;
}
div.body td {
text-align: left;
}
.first {
margin-top: 0 !important;
}
p.rubric {
margin-top: 30px;
font-weight: bold;
}
img.align-left, figure.align-left, .figure.align-left, object.align-left {
clear: left;
float: left;
margin-right: 1em;
}
img.align-right, figure.align-right, .figure.align-right, object.align-right {
clear: right;
float: right;
margin-left: 1em;
}
img.align-center, figure.align-center, .figure.align-center, object.align-center {
display: block;
margin-left: auto;
margin-right: auto;
}
img.align-default, figure.align-default, .figure.align-default {
display: block;
margin-left: auto;
margin-right: auto;
}
.align-left {
text-align: left;
}
.align-center {
text-align: center;
}
.align-default {
text-align: center;
}
.align-right {
text-align: right;
}
/* -- sidebars -------------------------------------------------------------- */
div.sidebar,
aside.sidebar {
margin: 0 0 0.5em 1em;
border: 1px solid #ddb;
padding: 7px;
background-color: #ffe;
width: 40%;
float: right;
clear: right;
overflow-x: auto;
}
p.sidebar-title {
font-weight: bold;
}
nav.contents,
aside.topic,
div.admonition, div.topic, blockquote {
clear: left;
}
/* -- topics ---------------------------------------------------------------- */
nav.contents,
aside.topic,
div.topic {
border: 1px solid #ccc;
padding: 7px;
margin: 10px 0 10px 0;
}
p.topic-title {
font-size: 1.1em;
font-weight: bold;
margin-top: 10px;
}
/* -- admonitions ----------------------------------------------------------- */
div.admonition {
margin-top: 10px;
margin-bottom: 10px;
padding: 7px;
}
div.admonition dt {
font-weight: bold;
}
p.admonition-title {
margin: 0px 10px 5px 0px;
font-weight: bold;
}
div.body p.centered {
text-align: center;
margin-top: 25px;
}
/* -- content of sidebars/topics/admonitions -------------------------------- */
div.sidebar > :last-child,
aside.sidebar > :last-child,
nav.contents > :last-child,
aside.topic > :last-child,
div.topic > :last-child,
div.admonition > :last-child {
margin-bottom: 0;
}
div.sidebar::after,
aside.sidebar::after,
nav.contents::after,
aside.topic::after,
div.topic::after,
div.admonition::after,
blockquote::after {
display: block;
content: '';
clear: both;
}
/* -- tables ---------------------------------------------------------------- */
table.docutils {
margin-top: 10px;
margin-bottom: 10px;
border: 0;
border-collapse: collapse;
}
table.align-center {
margin-left: auto;
margin-right: auto;
}
table.align-default {
margin-left: auto;
margin-right: auto;
}
table caption span.caption-number {
font-style: italic;
}
table caption span.caption-text {
}
table.docutils td, table.docutils th {
padding: 1px 8px 1px 5px;
border-top: 0;
border-left: 0;
border-right: 0;
border-bottom: 1px solid #aaa;
}
th {
text-align: left;
padding-right: 5px;
}
table.citation {
border-left: solid 1px gray;
margin-left: 1px;
}
table.citation td {
border-bottom: none;
}
th > :first-child,
td > :first-child {
margin-top: 0px;
}
th > :last-child,
td > :last-child {
margin-bottom: 0px;
}
/* -- figures --------------------------------------------------------------- */
div.figure, figure {
margin: 0.5em;
padding: 0.5em;
}
div.figure p.caption, figcaption {
padding: 0.3em;
}
div.figure p.caption span.caption-number,
figcaption span.caption-number {
font-style: italic;
}
div.figure p.caption span.caption-text,
figcaption span.caption-text {
}
/* -- field list styles ----------------------------------------------------- */
table.field-list td, table.field-list th {
border: 0 !important;
}
.field-list ul {
margin: 0;
padding-left: 1em;
}
.field-list p {
margin: 0;
}
.field-name {
-moz-hyphens: manual;
-ms-hyphens: manual;
-webkit-hyphens: manual;
hyphens: manual;
}
/* -- hlist styles ---------------------------------------------------------- */
table.hlist {
margin: 1em 0;
}
table.hlist td {
vertical-align: top;
}
/* -- object description styles --------------------------------------------- */
.sig {
font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace;
}
.sig-name, code.descname {
background-color: transparent;
font-weight: bold;
}
.sig-name {
font-size: 1.1em;
}
code.descname {
font-size: 1.2em;
}
.sig-prename, code.descclassname {
background-color: transparent;
}
.optional {
font-size: 1.3em;
}
.sig-paren {
font-size: larger;
}
.sig-param.n {
font-style: italic;
}
/* C++ specific styling */
.sig-inline.c-texpr,
.sig-inline.cpp-texpr {
font-family: unset;
}
.sig.c .k, .sig.c .kt,
.sig.cpp .k, .sig.cpp .kt {
color: #0033B3;
}
.sig.c .m,
.sig.cpp .m {
color: #1750EB;
}
.sig.c .s, .sig.c .sc,
.sig.cpp .s, .sig.cpp .sc {
color: #067D17;
}
/* -- other body styles ----------------------------------------------------- */
ol.arabic {
list-style: decimal;
}
ol.loweralpha {
list-style: lower-alpha;
}
ol.upperalpha {
list-style: upper-alpha;
}
ol.lowerroman {
list-style: lower-roman;
}
ol.upperroman {
list-style: upper-roman;
}
:not(li) > ol > li:first-child > :first-child,
:not(li) > ul > li:first-child > :first-child {
margin-top: 0px;
}
:not(li) > ol > li:last-child > :last-child,
:not(li) > ul > li:last-child > :last-child {
margin-bottom: 0px;
}
ol.simple ol p,
ol.simple ul p,
ul.simple ol p,
ul.simple ul p {
margin-top: 0;
}
ol.simple > li:not(:first-child) > p,
ul.simple > li:not(:first-child) > p {
margin-top: 0;
}
ol.simple p,
ul.simple p {
margin-bottom: 0;
}
aside.footnote > span,
div.citation > span {
float: left;
}
aside.footnote > span:last-of-type,
div.citation > span:last-of-type {
padding-right: 0.5em;
}
aside.footnote > p {
margin-left: 2em;
}
div.citation > p {
margin-left: 4em;
}
aside.footnote > p:last-of-type,
div.citation > p:last-of-type {
margin-bottom: 0em;
}
aside.footnote > p:last-of-type:after,
div.citation > p:last-of-type:after {
content: "";
clear: both;
}
dl.field-list {
display: grid;
grid-template-columns: fit-content(30%) auto;
}
dl.field-list > dt {
font-weight: bold;
word-break: break-word;
padding-left: 0.5em;
padding-right: 5px;
}
dl.field-list > dd {
padding-left: 0.5em;
margin-top: 0em;
margin-left: 0em;
margin-bottom: 0em;
}
dl {
margin-bottom: 15px;
}
dd > :first-child {
margin-top: 0px;
}
dd ul, dd table {
margin-bottom: 10px;
}
dd {
margin-top: 3px;
margin-bottom: 10px;
margin-left: 30px;
}
dl > dd:last-child,
dl > dd:last-child > :last-child {
margin-bottom: 0;
}
dt:target, span.highlighted {
background-color: #fbe54e;
}
rect.highlighted {
fill: #fbe54e;
}
dl.glossary dt {
font-weight: bold;
font-size: 1.1em;
}
.versionmodified {
font-style: italic;
}
.system-message {
background-color: #fda;
padding: 5px;
border: 3px solid red;
}
.footnote:target {
background-color: #ffa;
}
.line-block {
display: block;
margin-top: 1em;
margin-bottom: 1em;
}
.line-block .line-block {
margin-top: 0;
margin-bottom: 0;
margin-left: 1.5em;
}
.guilabel, .menuselection {
font-family: sans-serif;
}
.accelerator {
text-decoration: underline;
}
.classifier {
font-style: oblique;
}
.classifier:before {
font-style: normal;
margin: 0 0.5em;
content: ":";
display: inline-block;
}
abbr, acronym {
border-bottom: dotted 1px;
cursor: help;
}
/* -- code displays --------------------------------------------------------- */
pre {
overflow: auto;
overflow-y: hidden; /* fixes display issues on Chrome browsers */
}
pre, div[class*="highlight-"] {
clear: both;
}
span.pre {
-moz-hyphens: none;
-ms-hyphens: none;
-webkit-hyphens: none;
hyphens: none;
white-space: nowrap;
}
div[class*="highlight-"] {
margin: 1em 0;
}
td.linenos pre {
border: 0;
background-color: transparent;
color: #aaa;
}
table.highlighttable {
display: block;
}
table.highlighttable tbody {
display: block;
}
table.highlighttable tr {
display: flex;
}
table.highlighttable td {
margin: 0;
padding: 0;
}
table.highlighttable td.linenos {
padding-right: 0.5em;
}
table.highlighttable td.code {
flex: 1;
overflow: hidden;
}
.highlight .hll {
display: block;
}
div.highlight pre,
table.highlighttable pre {
margin: 0;
}
div.code-block-caption + div {
margin-top: 0;
}
div.code-block-caption {
margin-top: 1em;
padding: 2px 5px;
font-size: small;
}
div.code-block-caption code {
background-color: transparent;
}
table.highlighttable td.linenos,
span.linenos,
div.highlight span.gp { /* gp: Generic.Prompt */
user-select: none;
-webkit-user-select: text; /* Safari fallback only */
-webkit-user-select: none; /* Chrome/Safari */
-moz-user-select: none; /* Firefox */
-ms-user-select: none; /* IE10+ */
}
div.code-block-caption span.caption-number {
padding: 0.1em 0.3em;
font-style: italic;
}
div.code-block-caption span.caption-text {
}
div.literal-block-wrapper {
margin: 1em 0;
}
code.xref, a code {
background-color: transparent;
font-weight: bold;
}
h1 code, h2 code, h3 code, h4 code, h5 code, h6 code {
background-color: transparent;
}
.viewcode-link {
float: right;
}
.viewcode-back {
float: right;
font-family: sans-serif;
}
div.viewcode-block:target {
margin: -1px -10px;
padding: 0 10px;
}
/* -- math display ---------------------------------------------------------- */
img.math {
vertical-align: middle;
}
div.body div.math p {
text-align: center;
}
span.eqno {
float: right;
}
span.eqno a.headerlink {
position: absolute;
z-index: 1;
}
div.math:hover a.headerlink {
visibility: visible;
}
/* -- printout stylesheet --------------------------------------------------- */
@media print {
div.document,
div.documentwrapper,
div.bodywrapper {
margin: 0 !important;
width: 100%;
}
div.sphinxsidebar,
div.related,
div.footer,
#top-link {
display: none;
}
}


@@ -1 +0,0 @@
/* This file intentionally left blank. */


@@ -1,156 +0,0 @@
/*
* doctools.js
* ~~~~~~~~~~~
*
* Base JavaScript utilities for all Sphinx HTML documentation.
*
* :copyright: Copyright 2007-2022 by the Sphinx team, see AUTHORS.
* :license: BSD, see LICENSE for details.
*
*/
"use strict";
const BLACKLISTED_KEY_CONTROL_ELEMENTS = new Set([
"TEXTAREA",
"INPUT",
"SELECT",
"BUTTON",
]);
const _ready = (callback) => {
if (document.readyState !== "loading") {
callback();
} else {
document.addEventListener("DOMContentLoaded", callback);
}
};
/**
* Small JavaScript module for the documentation.
*/
const Documentation = {
init: () => {
Documentation.initDomainIndexTable();
Documentation.initOnKeyListeners();
},
/**
* i18n support
*/
TRANSLATIONS: {},
PLURAL_EXPR: (n) => (n === 1 ? 0 : 1),
LOCALE: "unknown",
// gettext and ngettext don't access this so that the functions
// can safely be bound to a different name (_ = Documentation.gettext)
gettext: (string) => {
const translated = Documentation.TRANSLATIONS[string];
switch (typeof translated) {
case "undefined":
return string; // no translation
case "string":
return translated; // translation exists
default:
return translated[0]; // (singular, plural) translation tuple exists
}
},
ngettext: (singular, plural, n) => {
const translated = Documentation.TRANSLATIONS[singular];
if (typeof translated !== "undefined")
return translated[Documentation.PLURAL_EXPR(n)];
return n === 1 ? singular : plural;
},
addTranslations: (catalog) => {
Object.assign(Documentation.TRANSLATIONS, catalog.messages);
Documentation.PLURAL_EXPR = new Function(
"n",
`return (${catalog.plural_expr})`
);
Documentation.LOCALE = catalog.locale;
},
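// A hypothetical catalog, matching the shape consumed above (messages map,
// plural expression, locale); illustration only, not shipped with Sphinx:
//   Documentation.addTranslations({
//     messages: { "Search Results": "Suchergebnisse" },
//     plural_expr: "n !== 1",
//     locale: "de",
//   });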
/**
* helper function to focus on search bar
*/
focusSearchBar: () => {
document.querySelectorAll("input[name=q]")[0]?.focus();
},
/**
* Initialise the domain index toggle buttons
*/
initDomainIndexTable: () => {
const toggler = (el) => {
const idNumber = el.id.substr(7);
const toggledRows = document.querySelectorAll(`tr.cg-${idNumber}`);
if (el.src.substr(-9) === "minus.png") {
el.src = `${el.src.substr(0, el.src.length - 9)}plus.png`;
toggledRows.forEach((el) => (el.style.display = "none"));
} else {
el.src = `${el.src.substr(0, el.src.length - 8)}minus.png`;
toggledRows.forEach((el) => (el.style.display = ""));
}
};
const togglerElements = document.querySelectorAll("img.toggler");
togglerElements.forEach((el) =>
el.addEventListener("click", (event) => toggler(event.currentTarget))
);
togglerElements.forEach((el) => (el.style.display = ""));
if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) togglerElements.forEach(toggler);
},
initOnKeyListeners: () => {
// only install a listener if it is really needed
if (
!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS &&
!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS
)
return;
document.addEventListener("keydown", (event) => {
// bail for input elements
if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return;
// bail with special keys
if (event.altKey || event.ctrlKey || event.metaKey) return;
if (!event.shiftKey) {
switch (event.key) {
case "ArrowLeft":
if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break;
const prevLink = document.querySelector('link[rel="prev"]');
if (prevLink && prevLink.href) {
window.location.href = prevLink.href;
event.preventDefault();
}
break;
case "ArrowRight":
if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break;
const nextLink = document.querySelector('link[rel="next"]');
if (nextLink && nextLink.href) {
window.location.href = nextLink.href;
event.preventDefault();
}
break;
}
}
// some keyboard layouts may need Shift to get /
switch (event.key) {
case "/":
if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break;
Documentation.focusSearchBar();
event.preventDefault();
}
});
},
};
// quick alias for translations
const _ = Documentation.gettext;
_ready(Documentation.init);


@@ -1,14 +0,0 @@
var DOCUMENTATION_OPTIONS = {
URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'),
VERSION: '',
LANGUAGE: 'en',
COLLAPSE_INDEX: false,
BUILDER: 'html',
FILE_SUFFIX: '.html',
LINK_SUFFIX: '.html',
HAS_SOURCE: true,
SOURCELINK_SUFFIX: '.txt',
NAVIGATION_WITH_KEYS: false,
SHOW_SEARCH_SUMMARY: true,
ENABLE_SEARCH_SHORTCUTS: true,
};
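// (editor's note) These values are consumed by the other Sphinx scripts in this
// diff: NAVIGATION_WITH_KEYS gates the ArrowLeft/ArrowRight handlers in
// doctools.js, and URL_ROOT/FILE_SUFFIX shape the result links in searchtools.js.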

Binary file not shown. (deleted image, 286 B)

File diff suppressed because it is too large.

File diff suppressed because one or more lines are too long


@@ -1,199 +0,0 @@
/*
* language_data.js
* ~~~~~~~~~~~~~~~~
*
* This script contains the language-specific data used by searchtools.js,
* namely the list of stopwords, stemmer, scorer and splitter.
*
* :copyright: Copyright 2007-2022 by the Sphinx team, see AUTHORS.
* :license: BSD, see LICENSE for details.
*
*/
var stopwords = ["a", "and", "are", "as", "at", "be", "but", "by", "for", "if", "in", "into", "is", "it", "near", "no", "not", "of", "on", "or", "such", "that", "the", "their", "then", "there", "these", "they", "this", "to", "was", "will", "with"];
/* Non-minified version is copied as a separate JS file, if available */
/**
* Porter Stemmer
*/
var Stemmer = function() {
var step2list = {
ational: 'ate',
tional: 'tion',
enci: 'ence',
anci: 'ance',
izer: 'ize',
bli: 'ble',
alli: 'al',
entli: 'ent',
eli: 'e',
ousli: 'ous',
ization: 'ize',
ation: 'ate',
ator: 'ate',
alism: 'al',
iveness: 'ive',
fulness: 'ful',
ousness: 'ous',
aliti: 'al',
iviti: 'ive',
biliti: 'ble',
logi: 'log'
};
var step3list = {
icate: 'ic',
ative: '',
alize: 'al',
iciti: 'ic',
ical: 'ic',
ful: '',
ness: ''
};
var c = "[^aeiou]"; // consonant
var v = "[aeiouy]"; // vowel
var C = c + "[^aeiouy]*"; // consonant sequence
var V = v + "[aeiou]*"; // vowel sequence
var mgr0 = "^(" + C + ")?" + V + C; // [C]VC... is m>0
var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1
var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1
var s_v = "^(" + C + ")?" + v; // vowel in stem
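// Illustrative examples (not in the original source): under Step 1a below,
// "caresses" -> "caress" and "ponies" -> "poni"; the regexes above encode the
// Porter measure m, the number of vowel-consonant sequences in a stem.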
this.stemWord = function (w) {
var stem;
var suffix;
var firstch;
var origword = w;
if (w.length < 3)
return w;
var re;
var re2;
var re3;
var re4;
firstch = w.substr(0,1);
if (firstch == "y")
w = firstch.toUpperCase() + w.substr(1);
// Step 1a
re = /^(.+?)(ss|i)es$/;
re2 = /^(.+?)([^s])s$/;
if (re.test(w))
w = w.replace(re,"$1$2");
else if (re2.test(w))
w = w.replace(re2,"$1$2");
// Step 1b
re = /^(.+?)eed$/;
re2 = /^(.+?)(ed|ing)$/;
if (re.test(w)) {
var fp = re.exec(w);
re = new RegExp(mgr0);
if (re.test(fp[1])) {
re = /.$/;
w = w.replace(re,"");
}
}
else if (re2.test(w)) {
var fp = re2.exec(w);
stem = fp[1];
re2 = new RegExp(s_v);
if (re2.test(stem)) {
w = stem;
re2 = /(at|bl|iz)$/;
re3 = new RegExp("([^aeiouylsz])\\1$");
re4 = new RegExp("^" + C + v + "[^aeiouwxy]$");
if (re2.test(w))
w = w + "e";
else if (re3.test(w)) {
re = /.$/;
w = w.replace(re,"");
}
else if (re4.test(w))
w = w + "e";
}
}
// Step 1c
re = /^(.+?)y$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
re = new RegExp(s_v);
if (re.test(stem))
w = stem + "i";
}
// Step 2
re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
suffix = fp[2];
re = new RegExp(mgr0);
if (re.test(stem))
w = stem + step2list[suffix];
}
// Step 3
re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
suffix = fp[2];
re = new RegExp(mgr0);
if (re.test(stem))
w = stem + step3list[suffix];
}
// Step 4
re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/;
re2 = /^(.+?)(s|t)(ion)$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
re = new RegExp(mgr1);
if (re.test(stem))
w = stem;
}
else if (re2.test(w)) {
var fp = re2.exec(w);
stem = fp[1] + fp[2];
re2 = new RegExp(mgr1);
if (re2.test(stem))
w = stem;
}
// Step 5
re = /^(.+?)e$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
re = new RegExp(mgr1);
re2 = new RegExp(meq1);
re3 = new RegExp("^" + C + v + "[^aeiouwxy]$");
if (re.test(stem) || (re2.test(stem) && !(re3.test(stem))))
w = stem;
}
re = /ll$/;
re2 = new RegExp(mgr1);
if (re.test(w) && re2.test(w)) {
re = /.$/;
w = w.replace(re,"");
}
// and turn initial Y back to y
if (firstch == "y")
w = firstch.toLowerCase() + w.substr(1);
return w;
}
}

Binary file not shown. (deleted image, 90 B)
Binary file not shown. (deleted image, 90 B)

View File

@@ -1,83 +0,0 @@
pre { line-height: 125%; }
td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; }
span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; }
td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; }
span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; }
.highlight .hll { background-color: #ffffcc }
.highlight { background: #f8f8f8; }
.highlight .c { color: #8f5902; font-style: italic } /* Comment */
.highlight .err { color: #a40000; border: 1px solid #ef2929 } /* Error */
.highlight .g { color: #000000 } /* Generic */
.highlight .k { color: #004461; font-weight: bold } /* Keyword */
.highlight .l { color: #000000 } /* Literal */
.highlight .n { color: #000000 } /* Name */
.highlight .o { color: #582800 } /* Operator */
.highlight .x { color: #000000 } /* Other */
.highlight .p { color: #000000; font-weight: bold } /* Punctuation */
.highlight .ch { color: #8f5902; font-style: italic } /* Comment.Hashbang */
.highlight .cm { color: #8f5902; font-style: italic } /* Comment.Multiline */
.highlight .cp { color: #8f5902 } /* Comment.Preproc */
.highlight .cpf { color: #8f5902; font-style: italic } /* Comment.PreprocFile */
.highlight .c1 { color: #8f5902; font-style: italic } /* Comment.Single */
.highlight .cs { color: #8f5902; font-style: italic } /* Comment.Special */
.highlight .gd { color: #a40000 } /* Generic.Deleted */
.highlight .ge { color: #000000; font-style: italic } /* Generic.Emph */
.highlight .gr { color: #ef2929 } /* Generic.Error */
.highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */
.highlight .gi { color: #00A000 } /* Generic.Inserted */
.highlight .go { color: #888888 } /* Generic.Output */
.highlight .gp { color: #745334 } /* Generic.Prompt */
.highlight .gs { color: #000000; font-weight: bold } /* Generic.Strong */
.highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */
.highlight .gt { color: #a40000; font-weight: bold } /* Generic.Traceback */
.highlight .kc { color: #004461; font-weight: bold } /* Keyword.Constant */
.highlight .kd { color: #004461; font-weight: bold } /* Keyword.Declaration */
.highlight .kn { color: #004461; font-weight: bold } /* Keyword.Namespace */
.highlight .kp { color: #004461; font-weight: bold } /* Keyword.Pseudo */
.highlight .kr { color: #004461; font-weight: bold } /* Keyword.Reserved */
.highlight .kt { color: #004461; font-weight: bold } /* Keyword.Type */
.highlight .ld { color: #000000 } /* Literal.Date */
.highlight .m { color: #990000 } /* Literal.Number */
.highlight .s { color: #4e9a06 } /* Literal.String */
.highlight .na { color: #c4a000 } /* Name.Attribute */
.highlight .nb { color: #004461 } /* Name.Builtin */
.highlight .nc { color: #000000 } /* Name.Class */
.highlight .no { color: #000000 } /* Name.Constant */
.highlight .nd { color: #888888 } /* Name.Decorator */
.highlight .ni { color: #ce5c00 } /* Name.Entity */
.highlight .ne { color: #cc0000; font-weight: bold } /* Name.Exception */
.highlight .nf { color: #000000 } /* Name.Function */
.highlight .nl { color: #f57900 } /* Name.Label */
.highlight .nn { color: #000000 } /* Name.Namespace */
.highlight .nx { color: #000000 } /* Name.Other */
.highlight .py { color: #000000 } /* Name.Property */
.highlight .nt { color: #004461; font-weight: bold } /* Name.Tag */
.highlight .nv { color: #000000 } /* Name.Variable */
.highlight .ow { color: #004461; font-weight: bold } /* Operator.Word */
.highlight .pm { color: #000000; font-weight: bold } /* Punctuation.Marker */
.highlight .w { color: #f8f8f8; text-decoration: underline } /* Text.Whitespace */
.highlight .mb { color: #990000 } /* Literal.Number.Bin */
.highlight .mf { color: #990000 } /* Literal.Number.Float */
.highlight .mh { color: #990000 } /* Literal.Number.Hex */
.highlight .mi { color: #990000 } /* Literal.Number.Integer */
.highlight .mo { color: #990000 } /* Literal.Number.Oct */
.highlight .sa { color: #4e9a06 } /* Literal.String.Affix */
.highlight .sb { color: #4e9a06 } /* Literal.String.Backtick */
.highlight .sc { color: #4e9a06 } /* Literal.String.Char */
.highlight .dl { color: #4e9a06 } /* Literal.String.Delimiter */
.highlight .sd { color: #8f5902; font-style: italic } /* Literal.String.Doc */
.highlight .s2 { color: #4e9a06 } /* Literal.String.Double */
.highlight .se { color: #4e9a06 } /* Literal.String.Escape */
.highlight .sh { color: #4e9a06 } /* Literal.String.Heredoc */
.highlight .si { color: #4e9a06 } /* Literal.String.Interpol */
.highlight .sx { color: #4e9a06 } /* Literal.String.Other */
.highlight .sr { color: #4e9a06 } /* Literal.String.Regex */
.highlight .s1 { color: #4e9a06 } /* Literal.String.Single */
.highlight .ss { color: #4e9a06 } /* Literal.String.Symbol */
.highlight .bp { color: #3465a4 } /* Name.Builtin.Pseudo */
.highlight .fm { color: #000000 } /* Name.Function.Magic */
.highlight .vc { color: #000000 } /* Name.Variable.Class */
.highlight .vg { color: #000000 } /* Name.Variable.Global */
.highlight .vi { color: #000000 } /* Name.Variable.Instance */
.highlight .vm { color: #000000 } /* Name.Variable.Magic */
.highlight .il { color: #990000 } /* Literal.Number.Integer.Long */


@@ -1,566 +0,0 @@
/*
* searchtools.js
* ~~~~~~~~~~~~~~~~
*
* Sphinx JavaScript utilities for the full-text search.
*
* :copyright: Copyright 2007-2022 by the Sphinx team, see AUTHORS.
* :license: BSD, see LICENSE for details.
*
*/
"use strict";
/**
* Simple result scoring code.
*/
if (typeof Scorer === "undefined") {
var Scorer = {
// Implement the following function to further tweak the score for each result
// The function takes a result array [docname, title, anchor, descr, score, filename]
// and returns the new score.
/*
score: result => {
const [docname, title, anchor, descr, score, filename] = result
return score
},
*/
// query matches the full name of an object
objNameMatch: 11,
// or matches in the last dotted part of the object name
objPartialMatch: 6,
// Additive scores depending on the priority of the object
objPrio: {
0: 15, // used to be importantResults
1: 5, // used to be objectResults
2: -5, // used to be unimportantResults
},
// Used when the priority is not in the mapping.
objPrioDefault: 0,
// query found in title
title: 15,
partialTitle: 7,
// query found in terms
term: 5,
partialTerm: 2,
};
}
const _removeChildren = (element) => {
while (element && element.lastChild) element.removeChild(element.lastChild);
};
/**
* See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions#escaping
*/
const _escapeRegExp = (string) =>
string.replace(/[.*+\-?^${}()|[\]\\]/g, "\\$&"); // $& means the whole matched string
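// e.g. (illustration) _escapeRegExp("c++") === "c\\+\\+", so raw user query
// terms can be embedded safely in the partial-match regexes built in
// performTermsSearch below.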
const _displayItem = (item, searchTerms) => {
const docBuilder = DOCUMENTATION_OPTIONS.BUILDER;
const docUrlRoot = DOCUMENTATION_OPTIONS.URL_ROOT;
const docFileSuffix = DOCUMENTATION_OPTIONS.FILE_SUFFIX;
const docLinkSuffix = DOCUMENTATION_OPTIONS.LINK_SUFFIX;
const showSearchSummary = DOCUMENTATION_OPTIONS.SHOW_SEARCH_SUMMARY;
const [docName, title, anchor, descr, score, _filename] = item;
let listItem = document.createElement("li");
let requestUrl;
let linkUrl;
if (docBuilder === "dirhtml") {
// dirhtml builder
let dirname = docName + "/";
if (dirname.match(/\/index\/$/))
dirname = dirname.substring(0, dirname.length - 6);
else if (dirname === "index/") dirname = "";
requestUrl = docUrlRoot + dirname;
linkUrl = requestUrl;
} else {
// normal html builders
requestUrl = docUrlRoot + docName + docFileSuffix;
linkUrl = docName + docLinkSuffix;
}
let linkEl = listItem.appendChild(document.createElement("a"));
linkEl.href = linkUrl + anchor;
linkEl.dataset.score = score;
linkEl.innerHTML = title;
if (descr)
listItem.appendChild(document.createElement("span")).innerHTML =
" (" + descr + ")";
else if (showSearchSummary)
fetch(requestUrl)
.then((responseData) => responseData.text())
.then((data) => {
if (data)
listItem.appendChild(
Search.makeSearchSummary(data, searchTerms)
);
});
Search.output.appendChild(listItem);
};
const _finishSearch = (resultCount) => {
Search.stopPulse();
Search.title.innerText = _("Search Results");
if (!resultCount)
Search.status.innerText = Documentation.gettext(
"Your search did not match any documents. Please make sure that all words are spelled correctly and that you've selected enough categories."
);
else
Search.status.innerText = _(
`Search finished, found ${resultCount} page(s) matching the search query.`
);
};
const _displayNextItem = (
results,
resultCount,
searchTerms
) => {
// results left, load the summary and display it
// this is intended to be dynamic (don't sub resultsCount)
if (results.length) {
_displayItem(results.pop(), searchTerms);
setTimeout(
() => _displayNextItem(results, resultCount, searchTerms),
5
);
}
// search finished, update title and status message
else _finishSearch(resultCount);
};
/**
* Default splitQuery function. Can be overridden in ``sphinx.search`` with a
* custom function per language.
*
* The regular expression works by splitting the string on consecutive characters
* that are not Unicode letters, numbers, underscores, or emoji characters.
* This is the same as ``\W+`` in Python, preserving the surrogate pair area.
*/
if (typeof splitQuery === "undefined") {
var splitQuery = (query) => query
.split(/[^\p{Letter}\p{Number}_\p{Emoji_Presentation}]+/gu)
.filter(term => term) // remove remaining empty strings
}
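// e.g. (illustration) splitQuery("dbt run-operation") -> ["dbt", "run", "operation"]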
/**
* Search Module
*/
const Search = {
_index: null,
_queued_query: null,
_pulse_status: -1,
htmlToText: (htmlString) => {
const htmlElement = new DOMParser().parseFromString(htmlString, 'text/html');
htmlElement.querySelectorAll(".headerlink").forEach((el) => { el.remove() });
const docContent = htmlElement.querySelector('[role="main"]');
// querySelector() returns null (not undefined) when no main block exists
if (docContent) return docContent.textContent;
console.warn(
"Content block not found. Sphinx search tries to obtain it via '[role=main]'. Could you check your theme or template."
);
return "";
},
init: () => {
const query = new URLSearchParams(window.location.search).get("q");
document
.querySelectorAll('input[name="q"]')
.forEach((el) => (el.value = query));
if (query) Search.performSearch(query);
},
loadIndex: (url) =>
(document.body.appendChild(document.createElement("script")).src = url),
setIndex: (index) => {
Search._index = index;
if (Search._queued_query !== null) {
const query = Search._queued_query;
Search._queued_query = null;
Search.query(query);
}
},
hasIndex: () => Search._index !== null,
deferQuery: (query) => (Search._queued_query = query),
stopPulse: () => (Search._pulse_status = -1),
startPulse: () => {
if (Search._pulse_status >= 0) return;
const pulse = () => {
Search._pulse_status = (Search._pulse_status + 1) % 4;
Search.dots.innerText = ".".repeat(Search._pulse_status);
if (Search._pulse_status >= 0) window.setTimeout(pulse, 500);
};
pulse();
},
/**
* perform a search for something (or wait until index is loaded)
*/
performSearch: (query) => {
// create the required interface elements
const searchText = document.createElement("h2");
searchText.textContent = _("Searching");
const searchSummary = document.createElement("p");
searchSummary.classList.add("search-summary");
searchSummary.innerText = "";
const searchList = document.createElement("ul");
searchList.classList.add("search");
const out = document.getElementById("search-results");
Search.title = out.appendChild(searchText);
Search.dots = Search.title.appendChild(document.createElement("span"));
Search.status = out.appendChild(searchSummary);
Search.output = out.appendChild(searchList);
const searchProgress = document.getElementById("search-progress");
// Some themes don't use the search progress node
if (searchProgress) {
searchProgress.innerText = _("Preparing search...");
}
Search.startPulse();
// index already loaded, the browser was quick!
if (Search.hasIndex()) Search.query(query);
else Search.deferQuery(query);
},
/**
* execute search (requires search index to be loaded)
*/
query: (query) => {
const filenames = Search._index.filenames;
const docNames = Search._index.docnames;
const titles = Search._index.titles;
const allTitles = Search._index.alltitles;
const indexEntries = Search._index.indexentries;
// stem the search terms and add them to the correct list
const stemmer = new Stemmer();
const searchTerms = new Set();
const excludedTerms = new Set();
const highlightTerms = new Set();
const objectTerms = new Set(splitQuery(query.toLowerCase().trim()));
splitQuery(query.trim()).forEach((queryTerm) => {
const queryTermLower = queryTerm.toLowerCase();
// maybe skip this "word"
// stopwords array is from language_data.js
if (
stopwords.indexOf(queryTermLower) !== -1 ||
queryTerm.match(/^\d+$/)
)
return;
// stem the word
let word = stemmer.stemWord(queryTermLower);
// select the correct list
if (word[0] === "-") excludedTerms.add(word.substr(1));
else {
searchTerms.add(word);
highlightTerms.add(queryTermLower);
}
});
if (SPHINX_HIGHLIGHT_ENABLED) { // set in sphinx_highlight.js
localStorage.setItem("sphinx_highlight_terms", [...highlightTerms].join(" "))
}
// console.debug("SEARCH: searching for:");
// console.info("required: ", [...searchTerms]);
// console.info("excluded: ", [...excludedTerms]);
// array of [docname, title, anchor, descr, score, filename]
let results = [];
_removeChildren(document.getElementById("search-progress"));
const queryLower = query.toLowerCase();
for (const [title, foundTitles] of Object.entries(allTitles)) {
if (title.toLowerCase().includes(queryLower) && (queryLower.length >= title.length/2)) {
for (const [file, id] of foundTitles) {
let score = Math.round(100 * queryLower.length / title.length)
results.push([
docNames[file],
titles[file] !== title ? `${titles[file]} > ${title}` : title,
id !== null ? "#" + id : "",
null,
score,
filenames[file],
]);
}
}
}
// search for explicit entries in index directives
for (const [entry, foundEntries] of Object.entries(indexEntries)) {
if (entry.includes(queryLower) && (queryLower.length >= entry.length/2)) {
for (const [file, id] of foundEntries) {
let score = Math.round(100 * queryLower.length / entry.length)
results.push([
docNames[file],
titles[file],
id ? "#" + id : "",
null,
score,
filenames[file],
]);
}
}
}
// lookup as object
objectTerms.forEach((term) =>
results.push(...Search.performObjectSearch(term, objectTerms))
);
// lookup as search terms in fulltext
results.push(...Search.performTermsSearch(searchTerms, excludedTerms));
// let the scorer override scores with a custom scoring function
if (Scorer.score) results.forEach((item) => (item[4] = Scorer.score(item)));
// now sort the results by score (in opposite order of appearance, since the
// display function below uses pop() to retrieve items) and then
// alphabetically
results.sort((a, b) => {
const leftScore = a[4];
const rightScore = b[4];
if (leftScore === rightScore) {
// same score: sort alphabetically
const leftTitle = a[1].toLowerCase();
const rightTitle = b[1].toLowerCase();
if (leftTitle === rightTitle) return 0;
return leftTitle > rightTitle ? -1 : 1; // inverted is intentional
}
return leftScore > rightScore ? 1 : -1;
});
// remove duplicate search results
// note the reversing of results, so that in the case of duplicates, the highest-scoring entry is kept
let seen = new Set();
results = results.reverse().reduce((acc, result) => {
let resultStr = result.slice(0, 4).concat([result[5]]).map(v => String(v)).join(',');
if (!seen.has(resultStr)) {
acc.push(result);
seen.add(resultStr);
}
return acc;
}, []);
results = results.reverse();
// for debugging
//Search.lastresults = results.slice(); // a copy
// console.info("search results:", Search.lastresults);
// print the results
_displayNextItem(results, results.length, searchTerms);
},
/**
* search for object names
*/
performObjectSearch: (object, objectTerms) => {
const filenames = Search._index.filenames;
const docNames = Search._index.docnames;
const objects = Search._index.objects;
const objNames = Search._index.objnames;
const titles = Search._index.titles;
const results = [];
const objectSearchCallback = (prefix, match) => {
const name = match[4]
const fullname = (prefix ? prefix + "." : "") + name;
const fullnameLower = fullname.toLowerCase();
if (fullnameLower.indexOf(object) < 0) return;
let score = 0;
const parts = fullnameLower.split(".");
// check for different match types: exact matches of full name or
// "last name" (i.e. last dotted part)
if (fullnameLower === object || parts.slice(-1)[0] === object)
score += Scorer.objNameMatch;
else if (parts.slice(-1)[0].indexOf(object) > -1)
score += Scorer.objPartialMatch; // matches in last name
const objName = objNames[match[1]][2];
const title = titles[match[0]];
// If more than one term searched for, we require other words to be
// found in the name/title/description
const otherTerms = new Set(objectTerms);
otherTerms.delete(object);
if (otherTerms.size > 0) {
const haystack = `${prefix} ${name} ${objName} ${title}`.toLowerCase();
if (
[...otherTerms].some((otherTerm) => haystack.indexOf(otherTerm) < 0)
)
return;
}
let anchor = match[3];
if (anchor === "") anchor = fullname;
else if (anchor === "-") anchor = objNames[match[1]][1] + "-" + fullname;
const descr = objName + _(", in ") + title;
// add custom score for some objects according to scorer
if (Scorer.objPrio.hasOwnProperty(match[2]))
score += Scorer.objPrio[match[2]];
else score += Scorer.objPrioDefault;
results.push([
docNames[match[0]],
fullname,
"#" + anchor,
descr,
score,
filenames[match[0]],
]);
};
Object.keys(objects).forEach((prefix) =>
objects[prefix].forEach((array) =>
objectSearchCallback(prefix, array)
)
);
return results;
},
/**
* search for full-text terms in the index
*/
performTermsSearch: (searchTerms, excludedTerms) => {
// prepare search
const terms = Search._index.terms;
const titleTerms = Search._index.titleterms;
const filenames = Search._index.filenames;
const docNames = Search._index.docnames;
const titles = Search._index.titles;
const scoreMap = new Map();
const fileMap = new Map();
// perform the search on the required terms
searchTerms.forEach((word) => {
const files = [];
const arr = [
{ files: terms[word], score: Scorer.term },
{ files: titleTerms[word], score: Scorer.title },
];
// add support for partial matches
if (word.length > 2) {
const escapedWord = _escapeRegExp(word);
Object.keys(terms).forEach((term) => {
if (term.match(escapedWord) && !terms[word])
arr.push({ files: terms[term], score: Scorer.partialTerm });
});
Object.keys(titleTerms).forEach((term) => {
if (term.match(escapedWord) && !titleTerms[word])
arr.push({ files: titleTerms[term], score: Scorer.partialTitle });
});
}
// no match but word was a required one
if (arr.every((record) => record.files === undefined)) return;
// found search word in contents
arr.forEach((record) => {
if (record.files === undefined) return;
let recordFiles = record.files;
if (recordFiles.length === undefined) recordFiles = [recordFiles];
files.push(...recordFiles);
// set score for the word in each file
recordFiles.forEach((file) => {
if (!scoreMap.has(file)) scoreMap.set(file, {});
scoreMap.get(file)[word] = record.score;
});
});
// create the mapping
files.forEach((file) => {
if (fileMap.has(file) && fileMap.get(file).indexOf(word) === -1)
fileMap.get(file).push(word);
else fileMap.set(file, [word]);
});
});
// now check if the files don't contain excluded terms
const results = [];
for (const [file, wordList] of fileMap) {
// check if all requirements are matched
// as search terms with length < 3 are discarded
const filteredTermCount = [...searchTerms].filter(
(term) => term.length > 2
).length;
if (
wordList.length !== searchTerms.size &&
wordList.length !== filteredTermCount
)
continue;
// ensure that none of the excluded terms is in the search result
if (
[...excludedTerms].some(
(term) =>
terms[term] === file ||
titleTerms[term] === file ||
(terms[term] || []).includes(file) ||
(titleTerms[term] || []).includes(file)
)
)
break;
// select one (max) score for the file.
const score = Math.max(...wordList.map((w) => scoreMap.get(file)[w]));
// add result to the result list
results.push([
docNames[file],
titles[file],
"",
null,
score,
filenames[file],
]);
}
return results;
},
/**
* helper function to return a node containing the
* search summary for a given text. keywords is a list
* of stemmed words.
*/
makeSearchSummary: (htmlText, keywords) => {
const text = Search.htmlToText(htmlText);
if (text === "") return null;
const textLower = text.toLowerCase();
const actualStartPosition = [...keywords]
.map((k) => textLower.indexOf(k.toLowerCase()))
.filter((i) => i > -1)
.slice(-1)[0];
const startWithContext = Math.max(actualStartPosition - 120, 0);
const top = startWithContext === 0 ? "" : "...";
const tail = startWithContext + 240 < text.length ? "..." : "";
let summary = document.createElement("p");
summary.classList.add("context");
summary.textContent = top + text.substr(startWithContext, 240).trim() + tail;
return summary;
},
};
_ready(Search.init);


@@ -1,144 +0,0 @@
/* Highlighting utilities for Sphinx HTML documentation. */
"use strict";
const SPHINX_HIGHLIGHT_ENABLED = true
/**
* highlight a given string on a node by wrapping it in
* span elements with the given class name.
*/
const _highlight = (node, addItems, text, className) => {
if (node.nodeType === Node.TEXT_NODE) {
const val = node.nodeValue;
const parent = node.parentNode;
const pos = val.toLowerCase().indexOf(text);
if (
pos >= 0 &&
!parent.classList.contains(className) &&
!parent.classList.contains("nohighlight")
) {
let span;
const closestNode = parent.closest("body, svg, foreignObject");
const isInSVG = closestNode && closestNode.matches("svg");
if (isInSVG) {
span = document.createElementNS("http://www.w3.org/2000/svg", "tspan");
} else {
span = document.createElement("span");
span.classList.add(className);
}
span.appendChild(document.createTextNode(val.substr(pos, text.length)));
parent.insertBefore(
span,
parent.insertBefore(
document.createTextNode(val.substr(pos + text.length)),
node.nextSibling
)
);
node.nodeValue = val.substr(0, pos);
if (isInSVG) {
const rect = document.createElementNS(
"http://www.w3.org/2000/svg",
"rect"
);
const bbox = parent.getBBox();
rect.x.baseVal.value = bbox.x;
rect.y.baseVal.value = bbox.y;
rect.width.baseVal.value = bbox.width;
rect.height.baseVal.value = bbox.height;
rect.setAttribute("class", className);
addItems.push({ parent: parent, target: rect });
}
}
} else if (node.matches && !node.matches("button, select, textarea")) {
node.childNodes.forEach((el) => _highlight(el, addItems, text, className));
}
};
const _highlightText = (thisNode, text, className) => {
let addItems = [];
_highlight(thisNode, addItems, text, className);
addItems.forEach((obj) =>
obj.parent.insertAdjacentElement("beforebegin", obj.target)
);
};
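// e.g. (illustration) _highlightText(document.body, "dbt", "highlighted") wraps
// occurrences of "dbt" in <span class="highlighted"> elements (tspans inside
// SVG), skipping button/select/textarea contents and already-highlighted nodes.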
/**
* Small JavaScript module for the documentation.
*/
const SphinxHighlight = {
/**
* highlight the search words provided in localstorage in the text
*/
highlightSearchWords: () => {
if (!SPHINX_HIGHLIGHT_ENABLED) return; // bail if no highlight
// get and clear terms from localstorage
const url = new URL(window.location);
const highlight =
localStorage.getItem("sphinx_highlight_terms")
|| url.searchParams.get("highlight")
|| "";
localStorage.removeItem("sphinx_highlight_terms")
url.searchParams.delete("highlight");
window.history.replaceState({}, "", url);
// get individual terms from highlight string
const terms = highlight.toLowerCase().split(/\s+/).filter(x => x);
if (terms.length === 0) return; // nothing to do
// There should never be more than one element matching "div.body"
const divBody = document.querySelectorAll("div.body");
const body = divBody.length ? divBody[0] : document.querySelector("body");
window.setTimeout(() => {
terms.forEach((term) => _highlightText(body, term, "highlighted"));
}, 10);
const searchBox = document.getElementById("searchbox");
if (searchBox === null) return;
searchBox.appendChild(
document
.createRange()
.createContextualFragment(
'<p class="highlight-link">' +
'<a href="javascript:SphinxHighlight.hideSearchWords()">' +
_("Hide Search Matches") +
"</a></p>"
)
);
},
/**
* helper function to hide the search marks again
*/
hideSearchWords: () => {
document
.querySelectorAll("#searchbox .highlight-link")
.forEach((el) => el.remove());
document
.querySelectorAll("span.highlighted")
.forEach((el) => el.classList.remove("highlighted"));
localStorage.removeItem("sphinx_highlight_terms")
},
initEscapeListener: () => {
// only install a listener if it is really needed
if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) return;
document.addEventListener("keydown", (event) => {
// bail for input elements
if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return;
// bail with special keys
if (event.shiftKey || event.altKey || event.ctrlKey || event.metaKey) return;
if (DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS && (event.key === "Escape")) {
SphinxHighlight.hideSearchWords();
event.preventDefault();
}
});
},
};
_ready(SphinxHighlight.highlightSearchWords);
_ready(SphinxHighlight.initEscapeListener);

File diff suppressed because it is too large.

File diff suppressed because one or more lines are too long


@@ -1,102 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Index &#8212; dbt-core documentation</title>
<link rel="stylesheet" type="text/css" href="_static/pygments.css" />
<link rel="stylesheet" type="text/css" href="_static/alabaster.css" />
<script data-url_root="./" id="documentation_options" src="_static/documentation_options.js"></script>
<script src="_static/jquery.js"></script>
<script src="_static/underscore.js"></script>
<script src="_static/_sphinx_javascript_frameworks_compat.js"></script>
<script src="_static/doctools.js"></script>
<script src="_static/sphinx_highlight.js"></script>
<link rel="index" title="Index" href="#" />
<link rel="search" title="Search" href="search.html" />
<link rel="stylesheet" href="_static/custom.css" type="text/css" />
<meta name="viewport" content="width=device-width, initial-scale=0.9, maximum-scale=0.9" />
</head><body>
<div class="document">
<div class="documentwrapper">
<div class="bodywrapper">
<div class="body" role="main">
<h1 id="index">Index</h1>
<div class="genindex-jumpbox">
</div>
</div>
</div>
</div>
<div class="sphinxsidebar" role="navigation" aria-label="main navigation">
<div class="sphinxsidebarwrapper">
<h1 class="logo"><a href="index.html">dbt-core</a></h1>
<h3>Navigation</h3>
<div class="relations">
<h3>Related Topics</h3>
<ul>
<li><a href="index.html">Documentation overview</a><ul>
</ul></li>
</ul>
</div>
<div id="searchbox" style="display: none" role="search">
<h3 id="searchlabel">Quick search</h3>
<div class="searchformwrapper">
<form class="search" action="search.html" method="get">
<input type="text" name="q" aria-labelledby="searchlabel" autocomplete="off" autocorrect="off" autocapitalize="off" spellcheck="false"/>
<input type="submit" value="Go" />
</form>
</div>
</div>
<script>document.getElementById('searchbox').style.display = "block"</script>
</div>
</div>
<div class="clearer"></div>
</div>
<div class="footer">
&copy;2022, dbt Labs.
|
Powered by <a href="http://sphinx-doc.org/">Sphinx 5.3.0</a>
&amp; <a href="https://github.com/bitprophet/alabaster">Alabaster 0.7.12</a>
</div>
</body>
</html>


@@ -1,855 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" /><meta name="generator" content="Docutils 0.19: https://docutils.sourceforge.io/" />
<title>dbt-core's API documentation &#8212; dbt-core documentation</title>
<link rel="stylesheet" type="text/css" href="_static/pygments.css" />
<link rel="stylesheet" type="text/css" href="_static/alabaster.css" />
<script data-url_root="./" id="documentation_options" src="_static/documentation_options.js"></script>
<script src="_static/jquery.js"></script>
<script src="_static/underscore.js"></script>
<script src="_static/_sphinx_javascript_frameworks_compat.js"></script>
<script src="_static/doctools.js"></script>
<script src="_static/sphinx_highlight.js"></script>
<link rel="index" title="Index" href="genindex.html" />
<link rel="search" title="Search" href="search.html" />
<link rel="stylesheet" href="_static/custom.css" type="text/css" />
<meta name="viewport" content="width=device-width, initial-scale=0.9, maximum-scale=0.9" />
</head><body>
<div class="document">
<div class="documentwrapper">
<div class="bodywrapper">
<div class="body" role="main">
<section id="dbt-core-s-api-documentation">
<h1>dbt-core's API documentation<a class="headerlink" href="#dbt-core-s-api-documentation" title="Permalink to this heading"></a></h1>
<section id="dbt-section">
<h2>Command: build<a class="headerlink" href="#dbt-section" title="Permalink to this heading"></a></h2>
<section id="build|defer">
<h3>defer<a class="headerlink" href="#build|defer" title="Permalink to this heading"></a></h3>
<p>Type: boolean</p>
<p>If set, defer to the state variable for resolving unselected nodes.</p>
</section>
<section id="build|exclude">
<h3>exclude<a class="headerlink" href="#build|exclude" title="Permalink to this heading"></a></h3>
<p>Type: string</p>
<p>Specify the nodes to exclude.</p>
</section>
<section id="build|fail_fast">
<h3>fail_fast<a class="headerlink" href="#build|fail_fast" title="Permalink to this heading"></a></h3>
<p>Type: boolean</p>
<p>Stop execution on first failure.</p>
</section>
<section id="build|full_refresh">
<h3>full_refresh<a class="headerlink" href="#build|full_refresh" title="Permalink to this heading"></a></h3>
<p>Type: boolean</p>
<p>If specified, dbt will drop incremental models and fully recalculate the incremental table from the model definition.</p>
</section>
<section id="build|indirect_selection">
<h3>indirect_selection<a class="headerlink" href="#build|indirect_selection" title="Permalink to this heading"></a></h3>
<p>Type: choice: [eager, cautious]</p>
<p>Select all tests that are adjacent to selected resources, even if they themselves have not been explicitly selected.</p>
</section>
<section id="build|log_path">
<h3>log_path<a class="headerlink" href="#build|log_path" title="Permalink to this heading"></a></h3>
<p>Type: path</p>
<p>Configure the log-path. Only applies this setting for the current run. Overrides the DBT_LOG_PATH if it is set.</p>
</section>
<section id="build|models">
<h3>models<a class="headerlink" href="#build|models" title="Permalink to this heading"></a></h3>
<p>Type: string</p>
<p>Specify the nodes to include.</p>
</section>
<section id="build|profile">
<h3>profile<a class="headerlink" href="#build|profile" title="Permalink to this heading"></a></h3>
<p>Type: string</p>
<p>Which profile to load. Overrides setting in dbt_project.yml.</p>
</section>
<section id="build|profiles_dir">
<h3>profiles_dir<a class="headerlink" href="#build|profiles_dir" title="Permalink to this heading"></a></h3>
<p>Type: path</p>
<p>Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/</p>
</section>
<section id="build|project_dir">
<h3>project_dir<a class="headerlink" href="#build|project_dir" title="Permalink to this heading"></a></h3>
<p>Type: path</p>
<p>Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.</p>
</section>
<section id="build|selector">
<h3>selector<a class="headerlink" href="#build|selector" title="Permalink to this heading"></a></h3>
<p>Type: string</p>
<p>The selector name to use, as defined in selectors.yml</p>
</section>
<section id="build|show">
<h3>show<a class="headerlink" href="#build|show" title="Permalink to this heading"></a></h3>
<p>Type: boolean</p>
<p>Show a sample of the loaded data in the terminal</p>
</section>
<section id="build|state">
<h3>state<a class="headerlink" href="#build|state" title="Permalink to this heading"></a></h3>
<p>Type: path</p>
<p>If set, use the given directory as the source for json files to compare with this project.</p>
</section>
<section id="build|store_failures">
<h3>store_failures<a class="headerlink" href="#build|store_failures" title="Permalink to this heading"></a></h3>
<p>Type: boolean</p>
<p>Store test results (failing rows) in the database</p>
</section>
<section id="build|target">
<h3>target<a class="headerlink" href="#build|target" title="Permalink to this heading"></a></h3>
<p>Type: string</p>
<p>Which target to load for the given profile</p>
</section>
<section id="build|target_path">
<h3>target_path<a class="headerlink" href="#build|target_path" title="Permalink to this heading"></a></h3>
<p>Type: path</p>
<p>Configure the target-path. Only applies this setting for the current run. Overrides the DBT_TARGET_PATH if it is set.</p>
</section>
<section id="build|threads">
<h3>threads<a class="headerlink" href="#build|threads" title="Permalink to this heading"></a></h3>
<p>Type: int</p>
<p>Specify number of threads to use while executing models. Overrides settings in profiles.yml.</p>
</section>
<section id="build|vars">
<h3>vars<a class="headerlink" href="#build|vars" title="Permalink to this heading"></a></h3>
<p>Type: YAML</p>
<p>Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, eg. {my_variable: my_value}</p>
</section>
<section id="build|version_check">
<h3>version_check<a class="headerlink" href="#build|version_check" title="Permalink to this heading"></a></h3>
<p>Type: boolean</p>
<p>Ensure dbt's version matches the one specified in the dbt_project.yml file (require-dbt-version)</p>
</section>
<h2>Command: clean<a class="headerlink" href="#dbt-section" title="Permalink to this heading"></a></h2>
<section id="clean|profile">
<h3>profile<a class="headerlink" href="#clean|profile" title="Permalink to this heading"></a></h3>
<p>Type: string</p>
<p>Which profile to load. Overrides setting in dbt_project.yml.</p>
</section>
<section id="clean|profiles_dir">
<h3>profiles_dir<a class="headerlink" href="#clean|profiles_dir" title="Permalink to this heading"></a></h3>
<p>Type: path</p>
<p>Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/</p>
</section>
<section id="clean|project_dir">
<h3>project_dir<a class="headerlink" href="#clean|project_dir" title="Permalink to this heading"></a></h3>
<p>Type: path</p>
<p>Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.</p>
</section>
<section id="clean|target">
<h3>target<a class="headerlink" href="#clean|target" title="Permalink to this heading"></a></h3>
<p>Type: string</p>
<p>Which target to load for the given profile</p>
</section>
<section id="clean|vars">
<h3>vars<a class="headerlink" href="#clean|vars" title="Permalink to this heading"></a></h3>
<p>Type: YAML</p>
<p>Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, eg. {my_variable: my_value}</p>
</section>
<h2>Command: compile<a class="headerlink" href="#dbt-section" title="Permalink to this heading"></a></h2>
<section id="compile|defer">
<h3>defer<a class="headerlink" href="#compile|defer" title="Permalink to this heading"></a></h3>
<p>Type: boolean</p>
<p>If set, defer to the state variable for resolving unselected nodes.</p>
</section>
<section id="compile|exclude">
<h3>exclude<a class="headerlink" href="#compile|exclude" title="Permalink to this heading"></a></h3>
<p>Type: string</p>
<p>Specify the nodes to exclude.</p>
</section>
<section id="compile|full_refresh">
<h3>full_refresh<a class="headerlink" href="#compile|full_refresh" title="Permalink to this heading"></a></h3>
<p>Type: boolean</p>
<p>If specified, dbt will drop incremental models and fully-recalculate the incremental table from the model definition.</p>
</section>
<section id="compile|log_path">
<h3>log_path<a class="headerlink" href="#compile|log_path" title="Permalink to this heading"></a></h3>
<p>Type: path</p>
<p>Configure the log-path. Only applies this setting for the current run. Overrides the DBT_LOG_PATH if it is set.</p>
</section>
<section id="compile|models">
<h3>models<a class="headerlink" href="#compile|models" title="Permalink to this heading"></a></h3>
<p>Type: string</p>
<p>Specify the nodes to include.</p>
</section>
<section id="compile|parse_only">
<h3>parse_only<a class="headerlink" href="#compile|parse_only" title="Permalink to this heading"></a></h3>
<p>Type: boolean</p>
<p>TODO: No help text currently available</p>
</section>
<section id="compile|profile">
<h3>profile<a class="headerlink" href="#compile|profile" title="Permalink to this heading"></a></h3>
<p>Type: string</p>
<p>Which profile to load. Overrides setting in dbt_project.yml.</p>
</section>
<section id="compile|profiles_dir">
<h3>profiles_dir<a class="headerlink" href="#compile|profiles_dir" title="Permalink to this heading"></a></h3>
<p>Type: path</p>
<p>Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/</p>
</section>
<section id="compile|project_dir">
<h3>project_dir<a class="headerlink" href="#compile|project_dir" title="Permalink to this heading"></a></h3>
<p>Type: path</p>
<p>Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.</p>
</section>
<section id="compile|selector">
<h3>selector<a class="headerlink" href="#compile|selector" title="Permalink to this heading"></a></h3>
<p>Type: string</p>
<p>The selector name to use, as defined in selectors.yml</p>
</section>
<section id="compile|state">
<h3>state<a class="headerlink" href="#compile|state" title="Permalink to this heading"></a></h3>
<p>Type: path</p>
<p>If set, use the given directory as the source for json files to compare with this project.</p>
</section>
<section id="compile|target">
<h3>target<a class="headerlink" href="#compile|target" title="Permalink to this heading"></a></h3>
<p>Type: string</p>
<p>Which target to load for the given profile</p>
</section>
<section id="compile|target_path">
<h3>target_path<a class="headerlink" href="#compile|target_path" title="Permalink to this heading"></a></h3>
<p>Type: path</p>
<p>Configure the target-path. Only applies this setting for the current run. Overrides the DBT_TARGET_PATH if it is set.</p>
</section>
<section id="compile|threads">
<h3>threads<a class="headerlink" href="#compile|threads" title="Permalink to this heading"></a></h3>
<p>Type: int</p>
<p>Specify number of threads to use while executing models. Overrides settings in profiles.yml.</p>
</section>
<section id="compile|vars">
<h3>vars<a class="headerlink" href="#compile|vars" title="Permalink to this heading"></a></h3>
<p>Type: YAML</p>
<p>Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, eg. {my_variable: my_value}</p>
</section>
<section id="compile|version_check">
<h3>version_check<a class="headerlink" href="#compile|version_check" title="Permalink to this heading"></a></h3>
<p>Type: boolean</p>
<p>Ensure dbt's version matches the one specified in the dbt_project.yml file (require-dbt-version)</p>
</section>
<h2>Command: debug<a class="headerlink" href="#dbt-section" title="Permalink to this heading"></a></h2>
<section id="debug|config_dir">
<h3>config_dir<a class="headerlink" href="#debug|config_dir" title="Permalink to this heading"></a></h3>
<p>Type: string</p>
<p>If specified, DBT will show path information for this project</p>
</section>
<section id="debug|profile">
<h3>profile<a class="headerlink" href="#debug|profile" title="Permalink to this heading"></a></h3>
<p>Type: string</p>
<p>Which profile to load. Overrides setting in dbt_project.yml.</p>
</section>
<section id="debug|profiles_dir">
<h3>profiles_dir<a class="headerlink" href="#debug|profiles_dir" title="Permalink to this heading"></a></h3>
<p>Type: path</p>
<p>Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/</p>
</section>
<section id="debug|project_dir">
<h3>project_dir<a class="headerlink" href="#debug|project_dir" title="Permalink to this heading"></a></h3>
<p>Type: path</p>
<p>Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.</p>
</section>
<section id="debug|target">
<h3>target<a class="headerlink" href="#debug|target" title="Permalink to this heading"></a></h3>
<p>Type: string</p>
<p>Which target to load for the given profile</p>
</section>
<section id="debug|vars">
<h3>vars<a class="headerlink" href="#debug|vars" title="Permalink to this heading"></a></h3>
<p>Type: YAML</p>
<p>Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, eg. {my_variable: my_value}</p>
</section>
<section id="debug|version_check">
<h3>version_check<a class="headerlink" href="#debug|version_check" title="Permalink to this heading"></a></h3>
<p>Type: boolean</p>
<p>Ensure dbt's version matches the one specified in the dbt_project.yml file (require-dbt-version)</p>
</section>
<h2>Command: deps<a class="headerlink" href="#dbt-section" title="Permalink to this heading"></a></h2>
<section id="deps|profile">
<h3>profile<a class="headerlink" href="#deps|profile" title="Permalink to this heading"></a></h3>
<p>Type: string</p>
<p>Which profile to load. Overrides setting in dbt_project.yml.</p>
</section>
<section id="deps|profiles_dir">
<h3>profiles_dir<a class="headerlink" href="#deps|profiles_dir" title="Permalink to this heading"></a></h3>
<p>Type: path</p>
<p>Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/</p>
</section>
<section id="deps|project_dir">
<h3>project_dir<a class="headerlink" href="#deps|project_dir" title="Permalink to this heading"></a></h3>
<p>Type: path</p>
<p>Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.</p>
</section>
<section id="deps|target">
<h3>target<a class="headerlink" href="#deps|target" title="Permalink to this heading"></a></h3>
<p>Type: string</p>
<p>Which target to load for the given profile</p>
</section>
<section id="deps|vars">
<h3>vars<a class="headerlink" href="#deps|vars" title="Permalink to this heading"></a></h3>
<p>Type: YAML</p>
<p>Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, eg. {my_variable: my_value}</p>
</section>
<h2>Command: docs<a class="headerlink" href="#dbt-section" title="Permalink to this heading"></a></h2>
<h2>Command: init<a class="headerlink" href="#dbt-section" title="Permalink to this heading"></a></h2>
<section id="init|profile">
<h3>profile<a class="headerlink" href="#init|profile" title="Permalink to this heading"></a></h3>
<p>Type: string</p>
<p>Which profile to load. Overrides setting in dbt_project.yml.</p>
</section>
<section id="init|profiles_dir">
<h3>profiles_dir<a class="headerlink" href="#init|profiles_dir" title="Permalink to this heading"></a></h3>
<p>Type: path</p>
<p>Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/</p>
</section>
<section id="init|project_dir">
<h3>project_dir<a class="headerlink" href="#init|project_dir" title="Permalink to this heading"></a></h3>
<p>Type: path</p>
<p>Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.</p>
</section>
<section id="init|skip_profile_setup">
<h3>skip_profile_setup<a class="headerlink" href="#init|skip_profile_setup" title="Permalink to this heading"></a></h3>
<p>Type: boolean</p>
<p>Skip interactive profile setup.</p>
</section>
<section id="init|target">
<h3>target<a class="headerlink" href="#init|target" title="Permalink to this heading"></a></h3>
<p>Type: string</p>
<p>Which target to load for the given profile</p>
</section>
<section id="init|vars">
<h3>vars<a class="headerlink" href="#init|vars" title="Permalink to this heading"></a></h3>
<p>Type: YAML</p>
<p>Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, eg. {my_variable: my_value}</p>
</section>
<h2>Command: list<a class="headerlink" href="#dbt-section" title="Permalink to this heading"></a></h2>
<section id="list|exclude">
<h3>exclude<a class="headerlink" href="#list|exclude" title="Permalink to this heading"></a></h3>
<p>Type: string</p>
<p>Specify the nodes to exclude.</p>
</section>
<section id="list|indirect_selection">
<h3>indirect_selection<a class="headerlink" href="#list|indirect_selection" title="Permalink to this heading"></a></h3>
<p>Type: choice: [eager, cautious]</p>
<p>Select all tests that are adjacent to selected resources, even if those resources have been explicitly selected.</p>
</section>
<section id="list|models">
<h3>models<a class="headerlink" href="#list|models" title="Permalink to this heading"></a></h3>
<p>Type: string</p>
<p>Specify the nodes to include.</p>
</section>
<section id="list|output">
<h3>output<a class="headerlink" href="#list|output" title="Permalink to this heading"></a></h3>
<p>Type: choice: [json, name, path, selector]</p>
<p>TODO: No current help text</p>
</section>
<section id="list|output_keys">
<h3>output_keys<a class="headerlink" href="#list|output_keys" title="Permalink to this heading"></a></h3>
<p>Type: string</p>
<p>TODO: No current help text</p>
</section>
<section id="list|profile">
<h3>profile<a class="headerlink" href="#list|profile" title="Permalink to this heading"></a></h3>
<p>Type: string</p>
<p>Which profile to load. Overrides setting in dbt_project.yml.</p>
</section>
<section id="list|profiles_dir">
<h3>profiles_dir<a class="headerlink" href="#list|profiles_dir" title="Permalink to this heading"></a></h3>
<p>Type: path</p>
<p>Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/</p>
</section>
<section id="list|project_dir">
<h3>project_dir<a class="headerlink" href="#list|project_dir" title="Permalink to this heading"></a></h3>
<p>Type: path</p>
<p>Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.</p>
</section>
<section id="list|resource_type">
<h3>resource_type<a class="headerlink" href="#list|resource_type" title="Permalink to this heading"></a></h3>
<p>Type: choice: [metric, source, analysis, model, test, exposure, snapshot, seed, default, all]</p>
<p>TODO: No current help text</p>
</section>
<section id="list|selector">
<h3>selector<a class="headerlink" href="#list|selector" title="Permalink to this heading"></a></h3>
<p>Type: string</p>
<p>The selector name to use, as defined in selectors.yml</p>
</section>
<section id="list|state">
<h3>state<a class="headerlink" href="#list|state" title="Permalink to this heading"></a></h3>
<p>Type: path</p>
<p>If set, use the given directory as the source for json files to compare with this project.</p>
</section>
<section id="list|target">
<h3>target<a class="headerlink" href="#list|target" title="Permalink to this heading"></a></h3>
<p>Type: string</p>
<p>Which target to load for the given profile</p>
</section>
<section id="list|vars">
<h3>vars<a class="headerlink" href="#list|vars" title="Permalink to this heading"></a></h3>
<p>Type: YAML</p>
<p>Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, eg. {my_variable: my_value}</p>
</section>
<h2>Command: parse<a class="headerlink" href="#dbt-section" title="Permalink to this heading"></a></h2>
<section id="parse|compile">
<h3>compile<a class="headerlink" href="#parse|compile" title="Permalink to this heading"></a></h3>
<p>Type: boolean</p>
<p>TODO: No help text currently available</p>
</section>
<section id="parse|log_path">
<h3>log_path<a class="headerlink" href="#parse|log_path" title="Permalink to this heading"></a></h3>
<p>Type: path</p>
<p>Configure the log-path. Only applies this setting for the current run. Overrides the DBT_LOG_PATH if it is set.</p>
</section>
<section id="parse|profile">
<h3>profile<a class="headerlink" href="#parse|profile" title="Permalink to this heading"></a></h3>
<p>Type: string</p>
<p>Which profile to load. Overrides setting in dbt_project.yml.</p>
</section>
<section id="parse|profiles_dir">
<h3>profiles_dir<a class="headerlink" href="#parse|profiles_dir" title="Permalink to this heading"></a></h3>
<p>Type: path</p>
<p>Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/</p>
</section>
<section id="parse|project_dir">
<h3>project_dir<a class="headerlink" href="#parse|project_dir" title="Permalink to this heading"></a></h3>
<p>Type: path</p>
<p>Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.</p>
</section>
<section id="parse|target">
<h3>target<a class="headerlink" href="#parse|target" title="Permalink to this heading"></a></h3>
<p>Type: string</p>
<p>Which target to load for the given profile</p>
</section>
<section id="parse|target_path">
<h3>target_path<a class="headerlink" href="#parse|target_path" title="Permalink to this heading"></a></h3>
<p>Type: path</p>
<p>Configure the target-path. Only applies this setting for the current run. Overrides the DBT_TARGET_PATH if it is set.</p>
</section>
<section id="parse|threads">
<h3>threads<a class="headerlink" href="#parse|threads" title="Permalink to this heading"></a></h3>
<p>Type: int</p>
<p>Specify number of threads to use while executing models. Overrides settings in profiles.yml.</p>
</section>
<section id="parse|vars">
<h3>vars<a class="headerlink" href="#parse|vars" title="Permalink to this heading"></a></h3>
<p>Type: YAML</p>
<p>Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, eg. {my_variable: my_value}</p>
</section>
<section id="parse|version_check">
<h3>version_check<a class="headerlink" href="#parse|version_check" title="Permalink to this heading"></a></h3>
<p>Type: boolean</p>
<p>Ensure dbt's version matches the one specified in the dbt_project.yml file (require-dbt-version)</p>
</section>
<section id="parse|write_manifest">
<h3>write_manifest<a class="headerlink" href="#parse|write_manifest" title="Permalink to this heading"></a></h3>
<p>Type: boolean</p>
<p>TODO: No help text currently available</p>
</section>
<h2>Command: run<a class="headerlink" href="#dbt-section" title="Permalink to this heading"></a></h2>
<section id="run|defer">
<h3>defer<a class="headerlink" href="#run|defer" title="Permalink to this heading"></a></h3>
<p>Type: boolean</p>
<p>If set, defer to the state variable for resolving unselected nodes.</p>
</section>
<section id="run|exclude">
<h3>exclude<a class="headerlink" href="#run|exclude" title="Permalink to this heading"></a></h3>
<p>Type: string</p>
<p>Specify the nodes to exclude.</p>
</section>
<section id="run|fail_fast">
<h3>fail_fast<a class="headerlink" href="#run|fail_fast" title="Permalink to this heading"></a></h3>
<p>Type: boolean</p>
<p>Stop execution on first failure.</p>
</section>
<section id="run|full_refresh">
<h3>full_refresh<a class="headerlink" href="#run|full_refresh" title="Permalink to this heading"></a></h3>
<p>Type: boolean</p>
<p>If specified, dbt will drop incremental models and fully-recalculate the incremental table from the model definition.</p>
</section>
<section id="run|log_path">
<h3>log_path<a class="headerlink" href="#run|log_path" title="Permalink to this heading"></a></h3>
<p>Type: path</p>
<p>Configure the log-path. Only applies this setting for the current run. Overrides the DBT_LOG_PATH if it is set.</p>
</section>
<section id="run|models">
<h3>models<a class="headerlink" href="#run|models" title="Permalink to this heading"></a></h3>
<p>Type: string</p>
<p>Specify the nodes to include.</p>
</section>
<section id="run|profile">
<h3>profile<a class="headerlink" href="#run|profile" title="Permalink to this heading"></a></h3>
<p>Type: string</p>
<p>Which profile to load. Overrides setting in dbt_project.yml.</p>
</section>
<section id="run|profiles_dir">
<h3>profiles_dir<a class="headerlink" href="#run|profiles_dir" title="Permalink to this heading"></a></h3>
<p>Type: path</p>
<p>Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/</p>
</section>
<section id="run|project_dir">
<h3>project_dir<a class="headerlink" href="#run|project_dir" title="Permalink to this heading"></a></h3>
<p>Type: path</p>
<p>Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.</p>
</section>
<section id="run|selector">
<h3>selector<a class="headerlink" href="#run|selector" title="Permalink to this heading"></a></h3>
<p>Type: string</p>
<p>The selector name to use, as defined in selectors.yml</p>
</section>
<section id="run|state">
<h3>state<a class="headerlink" href="#run|state" title="Permalink to this heading"></a></h3>
<p>Type: path</p>
<p>If set, use the given directory as the source for json files to compare with this project.</p>
</section>
<section id="run|target">
<h3>target<a class="headerlink" href="#run|target" title="Permalink to this heading"></a></h3>
<p>Type: string</p>
<p>Which target to load for the given profile</p>
</section>
<section id="run|target_path">
<h3>target_path<a class="headerlink" href="#run|target_path" title="Permalink to this heading"></a></h3>
<p>Type: path</p>
<p>Configure the target-path. Only applies this setting for the current run. Overrides the DBT_TARGET_PATH if it is set.</p>
</section>
<section id="run|threads">
<h3>threads<a class="headerlink" href="#run|threads" title="Permalink to this heading"></a></h3>
<p>Type: int</p>
<p>Specify number of threads to use while executing models. Overrides settings in profiles.yml.</p>
</section>
<section id="run|vars">
<h3>vars<a class="headerlink" href="#run|vars" title="Permalink to this heading"></a></h3>
<p>Type: YAML</p>
<p>Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, eg. {my_variable: my_value}</p>
</section>
<section id="run|version_check">
<h3>version_check<a class="headerlink" href="#run|version_check" title="Permalink to this heading"></a></h3>
<p>Type: boolean</p>
<p>Ensure dbt's version matches the one specified in the dbt_project.yml file (require-dbt-version)</p>
</section>
<h2>Command: run_operation<a class="headerlink" href="#dbt-section" title="Permalink to this heading"></a></h2>
<section id="run-operation|args">
<h3>args<a class="headerlink" href="#run-operation|args" title="Permalink to this heading"></a></h3>
<p>Type: YAML</p>
<p>Supply arguments to the macro. This dictionary will be mapped to the keyword arguments defined in the selected macro. This argument should be a YAML string, eg. {my_variable: my_value}</p>
</section>
<section id="run-operation|profile">
<h3>profile<a class="headerlink" href="#run-operation|profile" title="Permalink to this heading"></a></h3>
<p>Type: string</p>
<p>Which profile to load. Overrides setting in dbt_project.yml.</p>
</section>
<section id="run-operation|profiles_dir">
<h3>profiles_dir<a class="headerlink" href="#run-operation|profiles_dir" title="Permalink to this heading"></a></h3>
<p>Type: path</p>
<p>Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/</p>
</section>
<section id="run-operation|project_dir">
<h3>project_dir<a class="headerlink" href="#run-operation|project_dir" title="Permalink to this heading"></a></h3>
<p>Type: path</p>
<p>Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.</p>
</section>
<section id="run-operation|target">
<h3>target<a class="headerlink" href="#run-operation|target" title="Permalink to this heading"></a></h3>
<p>Type: string</p>
<p>Which target to load for the given profile</p>
</section>
<section id="run-operation|vars">
<h3>vars<a class="headerlink" href="#run-operation|vars" title="Permalink to this heading"></a></h3>
<p>Type: YAML</p>
<p>Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, eg. {my_variable: my_value}</p>
</section>
<h2>Command: seed<a class="headerlink" href="#dbt-section" title="Permalink to this heading"></a></h2>
<section id="seed|exclude">
<h3>exclude<a class="headerlink" href="#seed|exclude" title="Permalink to this heading"></a></h3>
<p>Type: string</p>
<p>Specify the nodes to exclude.</p>
</section>
<section id="seed|full_refresh">
<h3>full_refresh<a class="headerlink" href="#seed|full_refresh" title="Permalink to this heading"></a></h3>
<p>Type: boolean</p>
<p>If specified, dbt will drop incremental models and fully-recalculate the incremental table from the model definition.</p>
</section>
<section id="seed|log_path">
<h3>log_path<a class="headerlink" href="#seed|log_path" title="Permalink to this heading"></a></h3>
<p>Type: path</p>
<p>Configure the log-path. Only applies this setting for the current run. Overrides the DBT_LOG_PATH if it is set.</p>
</section>
<section id="seed|models">
<h3>models<a class="headerlink" href="#seed|models" title="Permalink to this heading"></a></h3>
<p>Type: string</p>
<p>Specify the nodes to include.</p>
</section>
<section id="seed|profile">
<h3>profile<a class="headerlink" href="#seed|profile" title="Permalink to this heading"></a></h3>
<p>Type: string</p>
<p>Which profile to load. Overrides setting in dbt_project.yml.</p>
</section>
<section id="seed|profiles_dir">
<h3>profiles_dir<a class="headerlink" href="#seed|profiles_dir" title="Permalink to this heading"></a></h3>
<p>Type: path</p>
<p>Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/</p>
</section>
<section id="seed|project_dir">
<h3>project_dir<a class="headerlink" href="#seed|project_dir" title="Permalink to this heading"></a></h3>
<p>Type: path</p>
<p>Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.</p>
</section>
<section id="seed|selector">
<h3>selector<a class="headerlink" href="#seed|selector" title="Permalink to this heading"></a></h3>
<p>Type: string</p>
<p>The selector name to use, as defined in selectors.yml</p>
</section>
<section id="seed|show">
<h3>show<a class="headerlink" href="#seed|show" title="Permalink to this heading"></a></h3>
<p>Type: boolean</p>
<p>Show a sample of the loaded data in the terminal</p>
</section>
<section id="seed|state">
<h3>state<a class="headerlink" href="#seed|state" title="Permalink to this heading"></a></h3>
<p>Type: path</p>
<p>If set, use the given directory as the source for json files to compare with this project.</p>
</section>
<section id="seed|target">
<h3>target<a class="headerlink" href="#seed|target" title="Permalink to this heading"></a></h3>
<p>Type: string</p>
<p>Which target to load for the given profile</p>
</section>
<section id="seed|target_path">
<h3>target_path<a class="headerlink" href="#seed|target_path" title="Permalink to this heading"></a></h3>
<p>Type: path</p>
<p>Configure the target-path. Only applies this setting for the current run. Overrides the DBT_TARGET_PATH if it is set.</p>
</section>
<section id="seed|threads">
<h3>threads<a class="headerlink" href="#seed|threads" title="Permalink to this heading"></a></h3>
<p>Type: int</p>
<p>Specify number of threads to use while executing models. Overrides settings in profiles.yml.</p>
</section>
<section id="seed|vars">
<h3>vars<a class="headerlink" href="#seed|vars" title="Permalink to this heading"></a></h3>
<p>Type: YAML</p>
<p>Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, eg. {my_variable: my_value}</p>
</section>
<section id="seed|version_check">
<h3>version_check<a class="headerlink" href="#seed|version_check" title="Permalink to this heading"></a></h3>
<p>Type: boolean</p>
<p>Ensure dbt's version matches the one specified in the dbt_project.yml file (require-dbt-version)</p>
</section>
<h2>Command: snapshot<a class="headerlink" href="#dbt-section" title="Permalink to this heading"></a></h2>
<section id="snapshot|defer">
<h3>defer<a class="headerlink" href="#snapshot|defer" title="Permalink to this heading"></a></h3>
<p>Type: boolean</p>
<p>If set, defer to the state variable for resolving unselected nodes.</p>
</section>
<section id="snapshot|exclude">
<h3>exclude<a class="headerlink" href="#snapshot|exclude" title="Permalink to this heading"></a></h3>
<p>Type: string</p>
<p>Specify the nodes to exclude.</p>
</section>
<section id="snapshot|models">
<h3>models<a class="headerlink" href="#snapshot|models" title="Permalink to this heading"></a></h3>
<p>Type: string</p>
<p>Specify the nodes to include.</p>
</section>
<section id="snapshot|profile">
<h3>profile<a class="headerlink" href="#snapshot|profile" title="Permalink to this heading"></a></h3>
<p>Type: string</p>
<p>Which profile to load. Overrides setting in dbt_project.yml.</p>
</section>
<section id="snapshot|profiles_dir">
<h3>profiles_dir<a class="headerlink" href="#snapshot|profiles_dir" title="Permalink to this heading"></a></h3>
<p>Type: path</p>
<p>Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/</p>
</section>
<section id="snapshot|project_dir">
<h3>project_dir<a class="headerlink" href="#snapshot|project_dir" title="Permalink to this heading"></a></h3>
<p>Type: path</p>
<p>Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.</p>
</section>
<section id="snapshot|selector">
<h3>selector<a class="headerlink" href="#snapshot|selector" title="Permalink to this heading"></a></h3>
<p>Type: string</p>
<p>The selector name to use, as defined in selectors.yml</p>
</section>
<section id="snapshot|state">
<h3>state<a class="headerlink" href="#snapshot|state" title="Permalink to this heading"></a></h3>
<p>Type: path</p>
<p>If set, use the given directory as the source for json files to compare with this project.</p>
</section>
<section id="snapshot|target">
<h3>target<a class="headerlink" href="#snapshot|target" title="Permalink to this heading"></a></h3>
<p>Type: string</p>
<p>Which target to load for the given profile</p>
</section>
<section id="snapshot|threads">
<h3>threads<a class="headerlink" href="#snapshot|threads" title="Permalink to this heading"></a></h3>
<p>Type: int</p>
<p>Specify number of threads to use while executing models. Overrides settings in profiles.yml.</p>
</section>
<section id="snapshot|vars">
<h3>vars<a class="headerlink" href="#snapshot|vars" title="Permalink to this heading"></a></h3>
<p>Type: YAML</p>
<p>Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, eg. {my_variable: my_value}</p>
</section>
<h2>Command: source<a class="headerlink" href="#dbt-section" title="Permalink to this heading"></a></h2>
<h2>Command: test<a class="headerlink" href="#dbt-section" title="Permalink to this heading"></a></h2>
<section id="test|defer">
<h3>defer<a class="headerlink" href="#test|defer" title="Permalink to this heading"></a></h3>
<p>Type: boolean</p>
<p>If set, defer to the state variable for resolving unselected nodes.</p>
</section>
<section id="test|exclude">
<h3>exclude<a class="headerlink" href="#test|exclude" title="Permalink to this heading"></a></h3>
<p>Type: string</p>
<p>Specify the nodes to exclude.</p>
</section>
<section id="test|fail_fast">
<h3>fail_fast<a class="headerlink" href="#test|fail_fast" title="Permalink to this heading"></a></h3>
<p>Type: boolean</p>
<p>Stop execution on first failure.</p>
</section>
<section id="test|indirect_selection">
<h3>indirect_selection<a class="headerlink" href="#test|indirect_selection" title="Permalink to this heading"></a></h3>
<p>Type: choice: [eager, cautious]</p>
<p>Select all tests that are adjacent to selected resources, even if those resources have been explicitly selected.</p>
</section>
<section id="test|log_path">
<h3>log_path<a class="headerlink" href="#test|log_path" title="Permalink to this heading"></a></h3>
<p>Type: path</p>
<p>Configure the log-path. Only applies this setting for the current run. Overrides the DBT_LOG_PATH if it is set.</p>
</section>
<section id="test|models">
<h3>models<a class="headerlink" href="#test|models" title="Permalink to this heading"></a></h3>
<p>Type: string</p>
<p>Specify the nodes to include.</p>
</section>
<section id="test|profile">
<h3>profile<a class="headerlink" href="#test|profile" title="Permalink to this heading"></a></h3>
<p>Type: string</p>
<p>Which profile to load. Overrides setting in dbt_project.yml.</p>
</section>
<section id="test|profiles_dir">
<h3>profiles_dir<a class="headerlink" href="#test|profiles_dir" title="Permalink to this heading"></a></h3>
<p>Type: path</p>
<p>Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/</p>
</section>
<section id="test|project_dir">
<h3>project_dir<a class="headerlink" href="#test|project_dir" title="Permalink to this heading"></a></h3>
<p>Type: path</p>
<p>Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.</p>
</section>
<section id="test|selector">
<h3>selector<a class="headerlink" href="#test|selector" title="Permalink to this heading"></a></h3>
<p>Type: string</p>
<p>The selector name to use, as defined in selectors.yml</p>
</section>
<section id="test|state">
<h3>state<a class="headerlink" href="#test|state" title="Permalink to this heading"></a></h3>
<p>Type: path</p>
<p>If set, use the given directory as the source for json files to compare with this project.</p>
</section>
<section id="test|store_failures">
<h3>store_failures<a class="headerlink" href="#test|store_failures" title="Permalink to this heading"></a></h3>
<p>Type: boolean</p>
<p>Store test results (failing rows) in the database</p>
</section>
<section id="test|target">
<h3>target<a class="headerlink" href="#test|target" title="Permalink to this heading"></a></h3>
<p>Type: string</p>
<p>Which target to load for the given profile</p>
</section>
<section id="test|target_path">
<h3>target_path<a class="headerlink" href="#test|target_path" title="Permalink to this heading"></a></h3>
<p>Type: path</p>
<p>Configure the target-path. Only applies this setting for the current run. Overrides the DBT_TARGET_PATH if it is set.</p>
</section>
<section id="test|threads">
<h3>threads<a class="headerlink" href="#test|threads" title="Permalink to this heading"></a></h3>
<p>Type: int</p>
<p>Specify number of threads to use while executing models. Overrides settings in profiles.yml.</p>
</section>
<section id="test|vars">
<h3>vars<a class="headerlink" href="#test|vars" title="Permalink to this heading"></a></h3>
<p>Type: YAML</p>
<p>Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, eg. {my_variable: my_value}</p>
</section>
<section id="test|version_check">
<h3>version_check<a class="headerlink" href="#test|version_check" title="Permalink to this heading"></a></h3>
<p>Type: boolean</p>
<p>Ensure dbt's version matches the one specified in the dbt_project.yml file (require-dbt-version)</p>
</section>
</section>
</section>
</div>
</div>
</div>
<div class="sphinxsidebar" role="navigation" aria-label="main navigation">
<div class="sphinxsidebarwrapper">
<h1 class="logo"><a href="#">dbt-core</a></h1>
<h3>Navigation</h3>
<div class="relations">
<h3>Related Topics</h3>
<ul>
<li><a href="#">Documentation overview</a><ul>
</ul></li>
</ul>
</div>
<div id="searchbox" style="display: none" role="search">
<h3 id="searchlabel">Quick search</h3>
<div class="searchformwrapper">
<form class="search" action="search.html" method="get">
<input type="text" name="q" aria-labelledby="searchlabel" autocomplete="off" autocorrect="off" autocapitalize="off" spellcheck="false"/>
<input type="submit" value="Go" />
</form>
</div>
</div>
<script>document.getElementById('searchbox').style.display = "block"</script>
</div>
</div>
<div class="clearer"></div>
</div>
<div class="footer">
&copy;2022, dbt Labs.
|
Powered by <a href="http://sphinx-doc.org/">Sphinx 5.3.0</a>
&amp; <a href="https://github.com/bitprophet/alabaster">Alabaster 0.7.12</a>
|
<a href="_sources/index.rst.txt"
rel="nofollow">Page source</a>
</div>
</body>
</html>

Binary file not shown.

View File

@@ -1,121 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Search &#8212; dbt-core documentation</title>
<link rel="stylesheet" type="text/css" href="_static/pygments.css" />
<link rel="stylesheet" type="text/css" href="_static/alabaster.css" />
<script data-url_root="./" id="documentation_options" src="_static/documentation_options.js"></script>
<script src="_static/jquery.js"></script>
<script src="_static/underscore.js"></script>
<script src="_static/_sphinx_javascript_frameworks_compat.js"></script>
<script src="_static/doctools.js"></script>
<script src="_static/sphinx_highlight.js"></script>
<script src="_static/searchtools.js"></script>
<script src="_static/language_data.js"></script>
<link rel="index" title="Index" href="genindex.html" />
<link rel="search" title="Search" href="#" />
<script src="searchindex.js" defer></script>
<link rel="stylesheet" href="_static/custom.css" type="text/css" />
<meta name="viewport" content="width=device-width, initial-scale=0.9, maximum-scale=0.9" />
</head><body>
<div class="document">
<div class="documentwrapper">
<div class="bodywrapper">
<div class="body" role="main">
<h1 id="search-documentation">Search</h1>
<noscript>
<div class="admonition warning">
<p>
Please activate JavaScript to enable the search
functionality.
</p>
</div>
</noscript>
<p>
Searching for multiple words only shows matches that contain
all words.
</p>
<form action="" method="get">
<input type="text" name="q" aria-labelledby="search-documentation" value="" autocomplete="off" autocorrect="off" autocapitalize="off" spellcheck="false"/>
<input type="submit" value="search" />
<span id="search-progress" style="padding-left: 10px"></span>
</form>
<div id="search-results">
</div>
</div>
</div>
</div>
<div class="sphinxsidebar" role="navigation" aria-label="main navigation">
<div class="sphinxsidebarwrapper">
<h1 class="logo"><a href="index.html">dbt-core</a></h1>
<h3>Navigation</h3>
<div class="relations">
<h3>Related Topics</h3>
<ul>
<li><a href="index.html">Documentation overview</a><ul>
</ul></li>
</ul>
</div>
</div>
</div>
<div class="clearer"></div>
</div>
<div class="footer">
&copy;2022, dbt Labs.
|
Powered by <a href="http://sphinx-doc.org/">Sphinx 5.3.0</a>
&amp; <a href="https://github.com/bitprophet/alabaster">Alabaster 0.7.12</a>
</div>
</body>
</html>

File diff suppressed because one or more lines are too long

View File

@@ -7,7 +7,7 @@ import typing as t
 # For the full list of built-in configuration values, see the documentation:
 # https://www.sphinx-doc.org/en/master/usage/configuration.html
-sys.path.insert(0, os.path.abspath("../../.."))
+sys.path.insert(0, os.path.abspath("../.."))
 sys.path.insert(0, os.path.abspath("./_ext"))
 # -- Project information -----------------------------------------------------

View File

@@ -908,6 +908,7 @@ class PartialParsingDeletedMetric(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
     unique_id: str = betterproto.string_field(2)
 
+
 @dataclass
 class ManifestWrongMetadataVersion(betterproto.Message):
     """I022"""
@@ -1248,12 +1249,6 @@ class JinjaLogWarning(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
     msg: str = betterproto.string_field(2)
 
-
-@dataclass
-class PartialParsingDeletedEntity(betterproto.Message):
-    """I062"""
-    info: "EventInfo" = betterproto.message_field(1)
-    unique_id: str = betterproto.string_field(2)
 
 @dataclass
 class GitSparseCheckoutSubdirectory(betterproto.Message):

View File

@@ -1526,14 +1526,6 @@ class JinjaLogWarning(WarnLevel, pt.JinjaLogWarning):
     def message(self) -> str:
         return self.msg
 
-
-@dataclass
-class PartialParsingDeletedEntity(DebugLevel, pt.PartialParsingDeletedEntity):
-    def code(self):
-        return "I062"
-
-    def message(self) -> str:
-        return f"Partial parsing: deleted entity {self.unique_id}"
 
 # =======================================================
 # M - Deps generation

View File

@@ -20,7 +20,7 @@ from .selector_spec import (
 INTERSECTION_DELIMITER = ","
 
-DEFAULT_INCLUDES: List[str] = ["fqn:*", "source:*", "exposure:*", "metric:*", "entity:*"]
+DEFAULT_INCLUDES: List[str] = ["fqn:*", "source:*", "exposure:*", "metric:*"]
 DEFAULT_EXCLUDES: List[str] = []

View File

@@ -5,7 +5,7 @@ from queue import PriorityQueue
 from typing import Dict, Set, List, Generator, Optional
 
 from .graph import UniqueId
-from dbt.contracts.graph.parsed import ParsedSourceDefinition, ParsedExposure, ParsedMetric, ParsedEntity
+from dbt.contracts.graph.parsed import ParsedSourceDefinition, ParsedExposure, ParsedMetric
 from dbt.contracts.graph.compiled import GraphMemberNode
 from dbt.contracts.graph.manifest import Manifest
 from dbt.node_types import NodeType
@@ -48,7 +48,7 @@ class GraphQueue:
         if node.resource_type != NodeType.Model:
             return False
         # must be a Model - tell mypy this won't be a Source or Exposure or Metric
-        assert not isinstance(node, (ParsedSourceDefinition, ParsedExposure, ParsedMetric, ParsedEntity))
+        assert not isinstance(node, (ParsedSourceDefinition, ParsedExposure, ParsedMetric))
         if node.is_ephemeral:
             return False
         return True
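
The assert in the hunk above is a type-narrowing idiom rather than a runtime guard: after "assert not isinstance(...)", mypy drops the excluded Union members and the later attribute access type-checks. A standalone sketch of the same idiom, using stand-in classes rather than dbt's real node types:

from typing import Union

class ModelNode:
    is_ephemeral: bool = False

class SourceNode:
    pass  # no is_ephemeral attribute

def include_in_graph(node: Union[ModelNode, SourceNode]) -> bool:
    # after this assert, mypy narrows `node` from the Union to ModelNode,
    # so the attribute access below type-checks
    assert not isinstance(node, SourceNode)
    return not node.is_ephemeral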

View File

@@ -161,9 +161,6 @@ class NodeSelector(MethodManager):
         elif unique_id in self.manifest.metrics:
             metric = self.manifest.metrics[unique_id]
             return metric.config.enabled
-        elif unique_id in self.manifest.entities:
-            metric = self.manifest.entities[unique_id]
-            return metric.config.enabled
         node = self.manifest.nodes[unique_id]
         return not node.empty and node.config.enabled
@@ -183,8 +180,6 @@
             node = self.manifest.exposures[unique_id]
         elif unique_id in self.manifest.metrics:
             node = self.manifest.metrics[unique_id]
-        elif unique_id in self.manifest.entities:
-            node = self.manifest.entities[unique_id]
         else:
             raise InternalException(f"Node {unique_id} not found in the manifest!")
         return self.node_is_match(node)

View File

@@ -19,7 +19,6 @@ from dbt.contracts.graph.parsed import (
     ParsedSingularTestNode,
     ParsedExposure,
     ParsedMetric,
-    ParsedEntity,
     ParsedGenericTestNode,
     ParsedSourceDefinition,
 )
@@ -49,7 +48,6 @@ class MethodName(StrEnum):
     State = "state"
     Exposure = "exposure"
     Metric = "metric"
-    Entity = "entity"
     Result = "result"
     SourceStatus = "source_status"
@@ -78,7 +76,7 @@ def is_selected_node(fqn: List[str], node_selector: str):
     return True
 
-SelectorTarget = Union[ParsedSourceDefinition, ManifestNode, ParsedExposure, ParsedMetric, ParsedEntity]
+SelectorTarget = Union[ParsedSourceDefinition, ManifestNode, ParsedExposure, ParsedMetric]
 
 class SelectorMethod(metaclass=abc.ABCMeta):
@@ -129,16 +127,6 @@
                 continue
             yield unique_id, metric
 
-    def entity_nodes(
-        self, included_nodes: Set[UniqueId]
-    ) -> Iterator[Tuple[UniqueId, ParsedEntity]]:
-        for key, metric in self.manifest.entities.items():
-            unique_id = UniqueId(key)
-            if unique_id not in included_nodes:
-                continue
-            yield unique_id, metric
-
     def all_nodes(
         self, included_nodes: Set[UniqueId]
     ) -> Iterator[Tuple[UniqueId, SelectorTarget]]:
@@ -147,7 +135,6 @@
             self.source_nodes(included_nodes),
             self.exposure_nodes(included_nodes),
             self.metric_nodes(included_nodes),
-            self.entity_nodes(included_nodes),
         )
 
     def configurable_nodes(
@@ -158,12 +145,11 @@
     def non_source_nodes(
         self,
         included_nodes: Set[UniqueId],
-    ) -> Iterator[Tuple[UniqueId, Union[ParsedExposure, ManifestNode, ParsedMetric, ParsedEntity]]]:
+    ) -> Iterator[Tuple[UniqueId, Union[ParsedExposure, ManifestNode, ParsedMetric]]]:
         yield from chain(
             self.parsed_nodes(included_nodes),
             self.exposure_nodes(included_nodes),
             self.metric_nodes(included_nodes),
-            self.entity_nodes(included_nodes),
         )
 
     @abc.abstractmethod
@@ -292,30 +278,6 @@ class MetricSelectorMethod(SelectorMethod):
             yield node
 
-
-class EntitySelectorMethod(SelectorMethod):
-    """TODO: Add a description of what this selector method is doing"""
-
-    def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[UniqueId]:
-        parts = selector.split(".")
-        target_package = SELECTOR_GLOB
-        if len(parts) == 1:
-            target_name = parts[0]
-        elif len(parts) == 2:
-            target_package, target_name = parts
-        else:
-            msg = (
-                'Invalid entity selector value "{}". Entities must be of '
-                "the form ${{entity_name}} or "
-                "${{entity_package.entity_name}}"
-            ).format(selector)
-            raise RuntimeException(msg)
-
-        for node, real_node in self.entity_nodes(included_nodes):
-            if target_package not in (real_node.package_name, SELECTOR_GLOB):
-                continue
-            if target_name not in (real_node.name, SELECTOR_GLOB):
-                continue
-            yield node
 
 class PathSelectorMethod(SelectorMethod):
     def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[UniqueId]:
@@ -574,8 +536,6 @@ class StateSelectorMethod(SelectorMethod):
                 previous_node = manifest.exposures[node]
             elif node in manifest.metrics:
                 previous_node = manifest.metrics[node]
-            elif node in manifest.entities:
-                previous_node = manifest.entities[node]
 
             if checker(previous_node, real_node):
                 yield node
@@ -664,7 +624,6 @@ class MethodManager:
         MethodName.State: StateSelectorMethod,
         MethodName.Exposure: ExposureSelectorMethod,
        MethodName.Metric: MetricSelectorMethod,
-        MethodName.Entity: EntitySelectorMethod,
         MethodName.Result: ResultSelectorMethod,
         MethodName.SourceStatus: SourceStatusSelectorMethod,
     }

View File

@@ -78,9 +78,12 @@ class SelectionCriteria:
     @classmethod
     def default_method(cls, value: str) -> MethodName:
+        from dbt.parser.languages import get_file_extensions
+
+        extensions = tuple(get_file_extensions() + [".csv"])
         if _probably_path(value):
             return MethodName.Path
-        elif value.lower().endswith((".sql", ".py", ".csv")):
+        elif value.lower().endswith(extensions):
             return MethodName.File
         else:
             return MethodName.FQN
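
With this hunk, file-based selection keys off whatever extensions the registered language providers report, rather than a hardcoded tuple. A rough standalone sketch of the dispatch; the helper names mirror the hunk, and the extension list is an assumption based on the providers registered further down:

import os

def probably_path(value: str) -> bool:
    # simplified stand-in for dbt's _probably_path heuristic
    return os.path.sep in value

def default_method(value: str, extensions: tuple) -> str:
    if probably_path(value):
        return "path"
    elif value.lower().endswith(extensions):
        return "file"
    return "fqn"

# assuming providers report .sql/.py/.prql/.ibis, a PRQL file now resolves to
# file-based selection, e.g. for `dbt run -s my_model.prql`:
print(default_method("my_model.prql", (".sql", ".py", ".prql", ".ibis", ".csv")))  # -> file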

View File

@@ -32,7 +32,7 @@ def source(*args, dbt_load_df_function):
 {%- set config_dict = {} -%}
 {%- for key in model.config.config_keys_used -%}
 {# weird type testing with enum, would be much easier to write this logic in Python! #}
-{%- if key == 'language' -%}
+{%- if key in ('language', 'compiled_language') -%}
 {%- set value = 'python' -%}
 {%- endif -%}
 {%- set value = model.config[key] -%}

View File

@@ -18,7 +18,6 @@ class NodeType(StrEnum):
     Macro = "macro"
     Exposure = "exposure"
     Metric = "metric"
-    Entity = "entity"
 
     @classmethod
     def executable(cls) -> List["NodeType"]:
@@ -53,14 +52,11 @@ class NodeType(StrEnum):
             cls.Analysis,
             cls.Exposure,
             cls.Metric,
-            cls.Entity,
         ]
 
     def pluralize(self) -> str:
         if self is self.Analysis:
             return "analyses"
-        if self is self.Entity:
-            return "entities"
         return f"{self}s"
@@ -70,5 +66,8 @@ class RunHookType(StrEnum):
 
 class ModelLanguage(StrEnum):
+    # TODO: how to make this dynamic?
     python = "python"
     sql = "sql"
+    ibis = "ibis"
+    prql = "prql"
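
The TODO above asks how to keep ModelLanguage in sync with the pluggable providers instead of hardcoding members. One possible answer, purely illustrative and not what this branch does, is Python's functional Enum API fed by the provider registry:

from enum import Enum

def build_model_language(names):
    # str-mixin enum built at runtime, e.g. from get_language_names()
    return Enum("ModelLanguage", {name: name for name in names}, type=str)

ModelLanguage = build_model_language(["python", "sql", "ibis", "prql"])
assert ModelLanguage["prql"].value == "prql"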

View File

@@ -23,6 +23,8 @@ from dbt import hooks
from dbt.node_types import NodeType, ModelLanguage from dbt.node_types import NodeType, ModelLanguage
from dbt.parser.search import FileBlock from dbt.parser.search import FileBlock
from dbt.parser.languages import get_language_providers, get_language_provider_by_name
 # internally, the parser may store a less-restrictive type that will be
 # transformed into the final type. But it will have to be derived from
 # ParsedNode to be operable.
@@ -157,7 +159,7 @@ class ConfiguredParser(
             config[key] = [hooks.get_hook_dict(h) for h in config[key]]

     def _create_error_node(
-        self, name: str, path: str, original_file_path: str, raw_code: str, language: str = "sql"
+        self, name: str, path: str, original_file_path: str, raw_code: str, language: str
     ) -> UnparsedNode:
         """If we hit an error before we've actually parsed a node, provide some
         level of useful information by attaching this to the exception.
@@ -189,11 +191,14 @@ class ConfiguredParser(
         """
         if name is None:
             name = block.name
-        if block.path.relative_path.endswith(".py"):
-            language = ModelLanguage.python
-        else:
-            # this is not ideal but we have a lot of tests to adjust if don't do it
-            language = ModelLanguage.sql
+        # this is pretty silly, but we need "sql" to be the default
+        # even for seeds etc (.csv)
+        # otherwise this breaks a lot of tests
+        language = ModelLanguage.sql
+        for provider in get_language_providers():
+            if block.path.relative_path.endswith(provider.file_ext()):
+                language = ModelLanguage[provider.name()]

         dct = {
             "alias": name,
@@ -223,23 +228,13 @@ class ConfiguredParser(
                 path=path,
                 original_file_path=block.path.original_file_path,
                 raw_code=block.contents,
+                language=language,
             )
             raise ParsingException(msg, node=node)

     def _context_for(self, parsed_node: IntermediateNode, config: ContextConfig) -> Dict[str, Any]:
         return generate_parser_model_context(parsed_node, self.root_project, self.manifest, config)

-    def render_with_context(self, parsed_node: IntermediateNode, config: ContextConfig):
-        # Given the parsed node and a ContextConfig to use during parsing,
-        # render the node's sql with macro capture enabled.
-        # Note: this mutates the config object when config calls are rendered.
-        context = self._context_for(parsed_node, config)
-        # this goes through the process of rendering, but just throws away
-        # the rendered result. The "macro capture" is the point?
-        get_rendered(parsed_node.raw_code, context, parsed_node, capture_macros=True)
-        return context
-
     # This is taking the original config for the node, converting it to a dict,
     # updating the config with new config passed in, then re-creating the
     # config from the dict in the node.
@@ -358,7 +353,10 @@ class ConfiguredParser(
     def render_update(self, node: IntermediateNode, config: ContextConfig) -> None:
         try:
-            context = self.render_with_context(node, config)
+            provider = get_language_provider_by_name(node.language)
+            provider.validate_raw_code(node)
+            context = self._context_for(node, config)
+            context = provider.update_context(node, config, context)
             self.update_parsed_node_config(node, config, context=context)
         except ValidationError as exc:
             # we got a ValidationError - probably bad types in config()
@@ -405,6 +403,18 @@ class SimpleParser(
         return node

+# TODO: rename these to be more generic (not just SQL)
+# The full inheritance order for models is:
+#   dbt.parser.models.ModelParser,
+#   dbt.parser.base.SimpleSQLParser,
+#   dbt.parser.base.SQLParser,
+#   dbt.parser.base.ConfiguredParser,
+#   dbt.parser.base.Parser,
+#   dbt.parser.base.BaseParser
+# These fine-grained class distinctions exist to support other parsers,
+# e.g. SnapshotParser overrides both 'parse_file' + 'transform'
 class SQLParser(
     ConfiguredParser[FileBlock, IntermediateNode, FinalNode], Generic[IntermediateNode, FinalNode]
 ):
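
One implication of the new detection loop in `_create()` above: each provider's `name()` must also exist as a `ModelLanguage` member, since the language is resolved via `ModelLanguage[provider.name()]`. A rough sketch of that resolution, assuming this branch extends the enum with `prql` and `ibis` members (the enum change itself is not shown in this diff):

# Illustrative sketch only, assuming ModelLanguage gained 'prql' and 'ibis' members
from dbt.node_types import ModelLanguage
from dbt.parser.languages import get_language_providers

def detect_language(relative_path: str) -> ModelLanguage:
    language = ModelLanguage.sql  # the default, even for seeds etc. (.csv)
    for provider in get_language_providers():
        if relative_path.endswith(provider.file_ext()):
            language = ModelLanguage[provider.name()]
    return language

# detect_language("staging/stg_orders.sql")  -> ModelLanguage.sql
# detect_language("marts/top_users.prql")    -> ModelLanguage.prql (assumed member)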

View File: core/dbt/parser/languages/__init__.py

@@ -0,0 +1,25 @@
from .provider import LanguageProvider  # noqa
from .jinja_sql import JinjaSQLProvider  # noqa
from .python import PythonProvider  # noqa

# TODO: how to make this discovery/registration pluggable?
from .prql import PrqlProvider  # noqa
from .ibis import IbisProvider  # noqa


def get_language_providers():
    return LanguageProvider.__subclasses__()


def get_language_names():
    return [provider.name() for provider in get_language_providers()]


def get_file_extensions():
    return [provider.file_ext() for provider in get_language_providers()]


def get_language_provider_by_name(language_name: str) -> LanguageProvider:
    return next(
        iter(provider for provider in get_language_providers() if provider.name() == language_name)
    )
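
Since registration is just `LanguageProvider.__subclasses__()`, importing a provider module is what makes a language discoverable. Roughly, with the imports above in place (exact ordering follows import order, so treat this as illustrative):

# Sketch of what the helpers return with the providers imported above:
#
#   >>> get_language_names()
#   ['sql', 'python', 'prql', 'ibis']
#   >>> get_file_extensions()
#   ['.sql', '.py', '.prql', '.ibis']
#   >>> get_language_provider_by_name("prql").compiled_language()
#   'sql'
#
# Note: get_language_provider_by_name() raises StopIteration for an unknown
# name, since next() is called without a default.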

View File: core/dbt/parser/languages/ibis.py

@@ -0,0 +1,116 @@
import ibis
import ast

from dbt.parser.languages.provider import LanguageProvider, dbt_function_calls
from dbt.parser.languages.python import PythonParseVisitor
from dbt.contracts.graph.compiled import ManifestNode
from dbt.exceptions import ParsingException, validator_error_message

from typing import Any, Dict


class IbisProvider(LanguageProvider):
    @classmethod
    def name(self) -> str:
        return "ibis"

    @classmethod
    def file_ext(self) -> str:
        return ".ibis"

    @classmethod
    def compiled_language(self) -> str:
        return "sql"

    @classmethod
    def validate_raw_code(self, node) -> None:
        # don't require the 'model' function for now
        pass

    @classmethod
    def extract_dbt_function_calls(self, node: Any) -> dbt_function_calls:
        """
        List all references (refs, sources, configs) in a given block.
        """
        try:
            tree = ast.parse(node.raw_code, filename=node.original_file_path)
        except SyntaxError as exc:
            msg = validator_error_message(exc)
            raise ParsingException(f"{msg}\n{exc.text}", node=node) from exc

        # don't worry about the 'model' function for now
        # dbtValidator = PythonValidationVisitor()
        # dbtValidator.visit(tree)
        # dbtValidator.check_error(node)

        dbtParser = PythonParseVisitor(node)
        dbtParser.visit(tree)
        return dbtParser.dbt_function_calls

    @classmethod
    def needs_compile_time_connection(self) -> bool:
        # TODO: this is technically true, but Ibis won't actually use dbt's
        # connection, it will make its own
        return True

    @classmethod
    def get_compiled_code(self, node: ManifestNode, context: Dict[str, Any]) -> str:
        resolved_references = self.get_resolved_references(node, context)

        def ref(*args, dbt_load_df_function):
            refs = resolved_references["refs"]
            key = tuple(args)
            return dbt_load_df_function(refs[key])

        def source(*args, dbt_load_df_function):
            sources = resolved_references["sources"]
            key = tuple(args)
            return dbt_load_df_function(sources[key])

        config_dict = {}
        for key in node.config.get("config_keys_used", []):
            value = node.config[key]
            config_dict.update({key: value})

        class config:
            def __init__(self, *args, **kwargs):
                pass

            @staticmethod
            def get(key, default=None):
                return config_dict.get(key, default)

        class this:
            """dbt.this() or dbt.this.identifier"""

            database = node.database
            schema = node.schema
            identifier = node.identifier

            def __repr__(self):
                return node.relation_name

        class dbtObj:
            def __init__(self, load_df_function) -> None:
                self.source = lambda *args: source(*args, dbt_load_df_function=load_df_function)
                self.ref = lambda *args: ref(*args, dbt_load_df_function=load_df_function)
                self.config = config
                self.this = this()
                # self.is_incremental = TODO

        # https://ibis-project.org/docs/dev/backends/PostgreSQL/#ibis.backends.postgres.Backend.do_connect
        # TODO: this would need to live in the adapter somehow
        target = context["target"]
        con = ibis.postgres.connect(
            database=target["database"],
            user=target["user"],
        )

        # use for dbt.ref(), dbt.source(), etc
        dbt = dbtObj(con.table)  # noqa

        # TODO: this is unsafe in so many ways
        exec(node.raw_code)
        compiled = str(eval(f"ibis.{context['target']['type']}.compile(model)"))
        return compiled
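
To make the exec/eval handoff above concrete: a `.ibis` model is plain Python that binds an Ibis expression to the name `model`, with `dbt.ref`/`dbt.source` returning backend table expressions. A hypothetical example (model and column names invented):

# models/active_employees.ibis -- hypothetical example, not part of this diff
# dbt.ref(...) resolves through resolved_references and returns con.table(...)
employees = dbt.ref("employees")

# 'model' is what get_compiled_code() compiles via ibis.<target_type>.compile(model)
model = employees.filter(employees.salary > 100)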

View File: core/dbt/parser/languages/jinja_sql.py

@@ -0,0 +1,34 @@
from dbt.clients import jinja
from dbt.context.context_config import ContextConfig
from dbt.parser.languages.provider import LanguageProvider
from dbt.contracts.graph.compiled import ManifestNode

from typing import Dict, Any


class JinjaSQLProvider(LanguageProvider):
    @classmethod
    def name(self) -> str:
        return "sql"

    @classmethod
    def update_context(
        cls, node: Any, config: ContextConfig, context: Dict[str, Any]
    ) -> Dict[str, Any]:
        # this goes through the process of rendering, but we don't keep the rendered result
        # the goal is to capture macros + update context as side effect
        jinja.get_rendered(node.raw_code, context, node, capture_macros=True)
        return context

    @classmethod
    def get_compiled_code(self, node: ManifestNode, context: Dict[str, Any]) -> str:
        compiled_code = jinja.get_rendered(
            node.raw_code,
            context,
            node,
        )
        return compiled_code

    @classmethod
    def needs_compile_time_connection(self) -> bool:
        return True

View File: core/dbt/parser/languages/provider.py

@@ -0,0 +1,97 @@
from __future__ import annotations

from typing import Dict, Tuple, List, Any
import abc

# for type hints
from dbt.contracts.graph.compiled import ManifestNode
from dbt.context.providers import RelationProxy
from dbt.context.context_config import ContextConfig

dbt_function_calls = List[Tuple[str, List[str], Dict[str, Any]]]
references_type = Dict[str, Dict[Tuple[str, ...], RelationProxy]]


class LanguageProvider(metaclass=abc.ABCMeta):
    """
    A LanguageProvider is a class that can parse & compile a given language.
    """

    @classmethod
    def name(self) -> str:
        return ""

    @classmethod
    def file_ext(self) -> str:
        return f".{self.name()}"

    @classmethod
    def compiled_language(self) -> str:
        return self.name()

    @classmethod
    @abc.abstractmethod
    # TODO add type hints
    def extract_dbt_function_calls(self, node: Any) -> dbt_function_calls:
        """
        List all dbt function calls (ref, source, config) and their args/kwargs
        """
        raise NotImplementedError("extract_dbt_function_calls")

    @classmethod
    def validate_raw_code(self, node: Any) -> None:
        pass

    @classmethod
    def update_context(
        cls, node: Any, config: ContextConfig, context: Dict[str, Any]
    ) -> Dict[str, Any]:
        dbt_function_calls = cls.extract_dbt_function_calls(node)
        config_keys_used = []
        for (func, args, kwargs) in dbt_function_calls:
            if func == "get":
                config_keys_used.append(args[0])
                continue
            context[func](*args, **kwargs)
        if config_keys_used:
            # this is being used in macro build_config_dict
            context["config"](config_keys_used=config_keys_used)
        return context

    @classmethod
    @abc.abstractmethod
    def needs_compile_time_connection(self) -> bool:
        """
        Does this modeling language support introspective queries (requiring a
        database connection) at compile time?
        """
        raise NotImplementedError("needs_compile_time_connection")

    @classmethod
    def get_resolved_references(
        self, node: ManifestNode, context: Dict[str, Any]
    ) -> references_type:
        resolved_references: references_type = {
            "sources": {},
            "refs": {},
        }
        # TODO: do we need to support custom 'ref' + 'source' resolution logic for
        # non-JinjaSQL languages? (i.e. user-defined 'ref' + 'source' macros)
        # this approach will not work for that
        refs: List[List[str]] = node.refs
        sources: List[List[str]] = node.sources
        for ref in refs:
            resolved_ref: RelationProxy = context["ref"](*ref)
            resolved_references["refs"].update({tuple(ref): resolved_ref})
        for source in sources:
            resolved_src: RelationProxy = context["source"](*source)
            resolved_references["sources"].update({tuple(source): resolved_src})
        return resolved_references

    @classmethod
    @abc.abstractmethod
    def get_compiled_code(self, node: ManifestNode, context: Dict[str, Any]) -> str:
        """
        For a given ManifestNode, return its compiled code.
        """
        raise NotImplementedError("get_compiled_code")
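
As a sanity check on the interface, here is a minimal provider (a hypothetical sketch, not part of this diff): a language whose files are literal SQL with no templating and no refs. Because discovery is subclass-based, defining and importing the class is all the registration needed:

from dbt.parser.languages.provider import LanguageProvider, dbt_function_calls

class PlainSQLProvider(LanguageProvider):
    """Hypothetical provider for literal .plainsql files: no jinja, no refs."""

    @classmethod
    def name(self) -> str:
        return "plainsql"  # file_ext() defaults to ".plainsql"

    @classmethod
    def compiled_language(self) -> str:
        return "sql"  # materializations treat the compiled output as SQL

    @classmethod
    def extract_dbt_function_calls(self, node) -> dbt_function_calls:
        return []  # nothing to register: no refs, sources, or config calls

    @classmethod
    def needs_compile_time_connection(self) -> bool:
        return False

    @classmethod
    def get_compiled_code(self, node, context) -> str:
        return node.raw_code  # compilation is the identity function

(On this branch the name would also need a matching ModelLanguage member for parsing to succeed.)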

View File: core/dbt/parser/languages/prql.py

@@ -0,0 +1,170 @@
"""
This will be in the `dbt-prql` package, but including here during inital code review, so
we can test it without coordinating dependencies.
"""
from __future__ import annotations
import logging
import re
from dbt.parser.languages.provider import LanguageProvider, dbt_function_calls, references_type
# import prql_python
# This mocks the prqlc output for two cases which we currently use in tests, so we can
# test this without configuring dependencies. (Obv fix as we expand the tests, way
# before we merge.)
class prql_python: # type: ignore
@staticmethod
def to_sql(prql) -> str:
query_1 = "from employees"
query_1_compiled = """
SELECT
employees.*
FROM
employees
""".strip()
query_2 = """
from (dbt source.whatever.some_tbl)
join (dbt ref.test.foo) [id]
filter salary > 100
""".strip()
# hard coded for Jerco's Postgres database
query_2_resolved = """
from ("jerco"."salesforce"."in_process")
join ("jerco"."dbt_jcohen"."foo") [id]
filter salary > 100
""".strip()
query_2_compiled = """
SELECT
"jerco"."whatever"."some_tbl".*,
"jerco"."dbt_jcohen"."foo".*,
id
FROM
"jerco"."salesforce"."in_process"
JOIN "jerco"."dbt_jcohen"."foo" USING(id)
WHERE
salary > 100
""".strip()
lookup = dict(
{
query_1: query_1_compiled,
query_2: query_2_compiled,
query_2_resolved: query_2_compiled,
}
)
return lookup[prql]
logger = logging.getLogger(__name__)
word_regex = r"[\w\.\-_]+"
# TODO support single-argument form of 'ref'
references_regex = rf"\bdbt `?(\w+)\.({word_regex})\.({word_regex})`?"
def hack_compile(prql: str, references: references_type) -> str:
"""
>>> print(compile(
... "from (dbt source.salesforce.in_process) | join (dbt ref.foo.bar) [id]",
... references=dict(
... sources={('salesforce', 'in_process'): 'salesforce_schema.in_process_tbl'},
... refs={('foo', 'bar'): 'foo_schema.bar_tbl'}
... )
... ))
SELECT
"{{ source('salesforce', 'in_process') }}".*,
"{{ ref('foo', 'bar') }}".*,
id
FROM
{{ source('salesforce', 'in_process') }}
JOIN {{ ref('foo', 'bar') }} USING(id)
"""
subs = []
for k, v in references["sources"].items():
key = ".".join(k)
lookup = f"dbt source.{key}"
subs.append((lookup, str(v)))
for k, v in references["refs"].items():
key = ".".join(k)
lookup = f"dbt ref.{key}"
subs.append((lookup, str(v)))
for lookup, resolved in subs:
prql = prql.replace(lookup, resolved)
sql = prql_python.to_sql(prql)
return sql
def hack_list_references(prql):
"""
List all references (e.g. sources / refs) in a given block.
We need to decide:
— What should prqlc return given `dbt source.foo.bar`, so dbt-prql can find the
references?
 Should it just fill in something that looks like jinja for expediancy? (We
don't support jinja though)
>>> references = list_references("from (dbt source.salesforce.in_process) | join (dbt ref.foo.bar)")
>>> dict(references)
{'source': [('salesforce', 'in_process')], 'ref': [('foo', 'bar')]}
"""
out = []
for t, package, model in _hack_references_of_prql_query(prql):
out.append((t, [package, model], {}))
return out
def _hack_references_of_prql_query(prql) -> list[tuple[str, str, str]]:
"""
List the references in a prql query.
This would be implemented by prqlc.
>>> _hack_references_of_prql_query("from (dbt source.salesforce.in_process) | join (dbt ref.foo.bar)")
[('source', 'salesforce', 'in_process'), ('ref', 'foo', 'bar')]
"""
return re.findall(references_regex, prql)
class PrqlProvider(LanguageProvider):
def __init__(self) -> None:
# TODO: Uncomment when dbt-prql is released
# if not dbt_prql:
# raise ImportError(
# "dbt_prql is required and not found; try running `pip install dbt_prql`"
# )
pass
@classmethod
def name(self) -> str:
return "prql"
@classmethod
def compiled_language(self) -> str:
return "sql"
@classmethod
def extract_dbt_function_calls(self, node) -> dbt_function_calls:
return hack_list_references(node.raw_code)
@classmethod
def needs_compile_time_connection(self) -> bool:
return False
@classmethod
def get_compiled_code(self, node, context) -> str:
resolved_references = self.get_resolved_references(node, context)
return hack_compile(node.raw_code, references=resolved_references)
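
Given the reference syntax the regex recognizes, a dbt-flavored PRQL model (drawn from the second mocked query above) would look like:

# models/high_earners.prql -- example matching the mocked query above
from (dbt source.whatever.some_tbl)
join (dbt ref.test.foo) [id]
filter salary > 100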

View File: core/dbt/parser/languages/python.py

@@ -0,0 +1,195 @@
import ast

from dbt.parser.languages.provider import LanguageProvider, dbt_function_calls
from dbt.exceptions import UndefinedMacroException, ParsingException, validator_error_message
from dbt.contracts.graph.compiled import ManifestNode

from typing import Dict, Any

dbt_function_key_words = set(["ref", "source", "config", "get"])
dbt_function_full_names = set(["dbt.ref", "dbt.source", "dbt.config", "dbt.config.get"])


class PythonValidationVisitor(ast.NodeVisitor):
    def __init__(self):
        super().__init__()
        self.dbt_errors = []
        self.num_model_def = 0

    def visit_FunctionDef(self, node: ast.FunctionDef) -> None:
        if node.name == "model":
            self.num_model_def += 1
            if node.args.args and not node.args.args[0].arg == "dbt":
                self.dbt_errors.append("'dbt' not provided for model as the first argument")
            if len(node.args.args) != 2:
                self.dbt_errors.append(
                    "model function should have two args, `dbt` and a session to current warehouse"
                )
            # check we have a return and only one
            if not isinstance(node.body[-1], ast.Return) or isinstance(
                node.body[-1].value, ast.Tuple
            ):
                self.dbt_errors.append(
                    "In current version, model function should return only one dataframe object"
                )

    def check_error(self, node):
        if self.num_model_def != 1:
            raise ParsingException("dbt allows only one model defined per python file", node=node)
        if len(self.dbt_errors) != 0:
            raise ParsingException("\n".join(self.dbt_errors), node=node)


class PythonParseVisitor(ast.NodeVisitor):
    def __init__(self, dbt_node):
        super().__init__()
        self.dbt_node = dbt_node
        self.dbt_function_calls = []
        self.packages = []

    @classmethod
    def _flatten_attr(cls, node):
        if isinstance(node, ast.Attribute):
            return str(cls._flatten_attr(node.value)) + "." + node.attr
        elif isinstance(node, ast.Name):
            return str(node.id)
        else:
            pass

    def _safe_eval(self, node):
        try:
            return ast.literal_eval(node)
        except (SyntaxError, ValueError, TypeError, MemoryError, RecursionError) as exc:
            msg = validator_error_message(
                f"Error when trying to literal_eval an arg to dbt.ref(), dbt.source(), dbt.config() or dbt.config.get() \n{exc}\n"
                "https://docs.python.org/3/library/ast.html#ast.literal_eval\n"
                "In dbt python model, `dbt.ref`, `dbt.source`, `dbt.config`, `dbt.config.get` function args only support Python literal structures"
            )
            raise ParsingException(msg, node=self.dbt_node) from exc

    def _get_call_literals(self, node):
        # List of literals
        arg_literals = []
        kwarg_literals = {}

        # TODO: Make sure this throws (and that we catch it)
        # for non-literal inputs
        for arg in node.args:
            rendered = self._safe_eval(arg)
            arg_literals.append(rendered)

        for keyword in node.keywords:
            key = keyword.arg
            rendered = self._safe_eval(keyword.value)
            kwarg_literals[key] = rendered

        return arg_literals, kwarg_literals

    def visit_Call(self, node: ast.Call) -> None:
        # check whether the current call could be a dbt function call
        if isinstance(node.func, ast.Attribute) and node.func.attr in dbt_function_key_words:
            func_name = self._flatten_attr(node.func)
            # check whether the current call really is a dbt function call
            if func_name in dbt_function_full_names:
                # drop the dot-dbt prefix
                func_name = func_name.split(".")[-1]
                args, kwargs = self._get_call_literals(node)
                self.dbt_function_calls.append((func_name, args, kwargs))

        # no matter what happened above, we should keep visiting the rest of the tree
        # visit args and kwargs to see if there's a call in them
        for obj in node.args + [kwarg.value for kwarg in node.keywords]:
            if isinstance(obj, ast.Call):
                self.visit_Call(obj)
            # support dbt.ref in list args, kwargs
            elif isinstance(obj, ast.List) or isinstance(obj, ast.Tuple):
                for el in obj.elts:
                    if isinstance(el, ast.Call):
                        self.visit_Call(el)
            # support dbt.ref in dict args, kwargs
            elif isinstance(obj, ast.Dict):
                for value in obj.values:
                    if isinstance(value, ast.Call):
                        self.visit_Call(value)

        # visit node.func.value if we are at a call attr
        if isinstance(node.func, ast.Attribute):
            self.attribute_helper(node.func)

    def attribute_helper(self, node: ast.Attribute) -> None:
        while isinstance(node, ast.Attribute):
            node = node.value  # type: ignore
        if isinstance(node, ast.Call):
            self.visit_Call(node)

    def visit_Import(self, node: ast.Import) -> None:
        for n in node.names:
            self.packages.append(n.name.split(".")[0])

    def visit_ImportFrom(self, node: ast.ImportFrom) -> None:
        if node.module:
            self.packages.append(node.module.split(".")[0])


class PythonProvider(LanguageProvider):
    @classmethod
    def name(self) -> str:
        return "python"

    @classmethod
    def file_ext(self) -> str:
        return ".py"

    @classmethod
    def extract_dbt_function_calls(self, node) -> dbt_function_calls:
        """
        List all references (refs, sources, configs) in a given block.
        """
        try:
            tree = ast.parse(node.raw_code, filename=node.original_file_path)
        except SyntaxError as exc:
            msg = validator_error_message(exc)
            raise ParsingException(f"{msg}\n{exc.text}", node=node) from exc

        # We run a separate validator and parser because defining visit_FunctionDef
        # on the parser would keep it from visiting the Call nodes any more
        dbtValidator = PythonValidationVisitor()
        dbtValidator.visit(tree)
        dbtValidator.check_error(node)

        dbtParser = PythonParseVisitor(node)
        dbtParser.visit(tree)
        return dbtParser.dbt_function_calls

    @classmethod
    def validate_raw_code(self, node) -> None:
        from dbt.clients.jinja import get_rendered

        # TODO: add a test for this
        try:
            rendered_python = get_rendered(
                node.raw_code,
                {},
                node,
            )
            if rendered_python != node.raw_code:
                raise ParsingException("")
        except (UndefinedMacroException, ParsingException):
            raise ParsingException("No jinja in python model code is allowed", node=node)

    @classmethod
    def get_compiled_code(self, node: ManifestNode, context: Dict[str, Any]) -> str:
        # needed for compilation - bad!!
        from dbt.clients import jinja

        postfix = jinja.get_rendered(
            "{{ py_script_postfix(model) }}",
            context,
            node,
        )
        # we should NOT jinja render the python model's 'raw code'
        return f"{node.raw_code}\n\n{postfix}"

    @classmethod
    def needs_compile_time_connection(self) -> bool:
        return False
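
For reference, the smallest model file that satisfies `PythonValidationVisitor` above: exactly one function named `model`, taking `dbt` plus a session argument, ending in a single `return`:

# models/my_model.py -- minimal example of what the validator accepts
def model(dbt, session):
    dbt.config(materialized="table")       # recorded by PythonParseVisitor
    upstream = dbt.ref("upstream_model")   # ref/source args must be Python literals
    return upstream                        # exactly one returned dataframe object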

View File: core/dbt/parser/manifest.py

@@ -68,7 +68,6 @@ from dbt.contracts.graph.parsed import (
     ColumnInfo,
     ParsedExposure,
     ParsedMetric,
-    ParsedEntity,
 )
 from dbt.contracts.util import Writable
 from dbt.exceptions import (
@@ -345,7 +344,7 @@ class ManifestLoader:
                     project, project_parser_files[project.project_name], parser_types
                 )

-        # Now that we've loaded most of the nodes (except for schema tests, sources, metrics, entities)
+        # Now that we've loaded most of the nodes (except for schema tests, sources, metrics)
         # load up the Lookup objects to resolve them by name, so the SourceFiles store
         # the unique_id instead of the name. Sources are loaded from yaml files, so
         # aren't in place yet
@@ -389,7 +388,6 @@ class ManifestLoader:
             self.process_refs(self.root_project.project_name)
             self.process_docs(self.root_project)
             self.process_metrics(self.root_project)
-            self.process_entities(self.root_project)

         # update tracking data
         self._perf_info.process_manifest_elapsed = time.perf_counter() - start_process
@@ -548,7 +546,6 @@ class ManifestLoader:
                 ManifestWrongMetadataVersion(version=self.manifest.metadata.dbt_version)
             )
             self.manifest.metadata.dbt_version = __version__
-        breakpoint()
         manifest_msgpack = self.manifest.to_msgpack()
         make_directory(os.path.dirname(path))
         with open(path, "wb") as fp:
@@ -839,10 +836,6 @@ class ManifestLoader:
            if metric.created_at < self.started_at:
                continue
            _process_refs_for_metric(self.manifest, current_project, metric)
-        for entity in self.manifest.entities.values():
-            if entity.created_at < self.started_at:
-                continue
-            _process_refs_for_entity(self.manifest, current_project, entity)

     # Takes references in 'metrics' array of nodes and exposures, finds the target
     # node, and updates 'depends_on.nodes' with the unique id
@@ -858,27 +851,6 @@ class ManifestLoader:
            if metric.created_at < self.started_at:
                continue
            _process_metrics_for_node(self.manifest, current_project, metric)
-        for exposure in self.manifest.exposures.values():
-            if exposure.created_at < self.started_at:
-                continue
-            _process_metrics_for_node(self.manifest, current_project, exposure)
-
-    # Takes references in 'entities' array of nodes and exposures, finds the target
-    # node, and updates 'depends_on.nodes' with the unique id
-    def process_entities(self, config: RuntimeConfig):
-        current_project = config.project_name
-        for node in self.manifest.nodes.values():
-            if node.created_at < self.started_at:
-                continue
-            _process_entities_for_node(self.manifest, current_project, node)
-        for entity in self.manifest.entities.values():
-            if entity.created_at < self.started_at:
-                continue
-            _process_entities_for_node(self.manifest, current_project, entity)
-        for exposure in self.manifest.exposures.values():
-            if exposure.created_at < self.started_at:
-                continue
-            _process_entities_for_node(self.manifest, current_project, exposure)

     # nodes: node and column descriptions
     # sources: source and table descriptions, column descriptions
@@ -935,16 +907,6 @@ class ManifestLoader:
                 config.project_name,
             )
             _process_docs_for_metrics(ctx, metric)
-        for entity in self.manifest.entities.values():
-            if entity.created_at < self.started_at:
-                continue
-            ctx = generate_runtime_docs_context(
-                config,
-                entity,
-                self.manifest,
-                config.project_name,
-            )
-            _process_docs_for_entities(ctx, entity)

     # Loops through all nodes and exposures, for each element in
     # 'sources' array finds the source node and updates the
@@ -1131,8 +1093,6 @@ def _process_docs_for_exposure(context: Dict[str, Any], exposure: ParsedExposure
 def _process_docs_for_metrics(context: Dict[str, Any], metric: ParsedMetric) -> None:
     metric.description = get_rendered(metric.description, context)

-def _process_docs_for_entities(context: Dict[str, Any], entity: ParsedEntity) -> None:
-    entity.description = get_rendered(entity.description, context)

 def _process_refs_for_exposure(manifest: Manifest, current_project: str, exposure: ParsedExposure):
     """Given a manifest and exposure in that manifest, process its refs"""
@@ -1218,51 +1178,9 @@ def _process_refs_for_metric(manifest: Manifest, current_project: str, metric: P
         metric.depends_on.nodes.append(target_model_id)
     manifest.update_metric(metric)

-def _process_refs_for_entity(manifest: Manifest, current_project: str, entity: ParsedEntity):
-    """Given a manifest and an entity in that manifest, process its refs"""
-    for ref in entity.refs:
-        target_model: Optional[Union[Disabled, ManifestNode]] = None
-        target_model_name: str
-        target_model_package: Optional[str] = None
-
-        if len(ref) == 1:
-            target_model_name = ref[0]
-        elif len(ref) == 2:
-            target_model_package, target_model_name = ref
-        else:
-            raise dbt.exceptions.InternalException(
-                f"Refs should always be 1 or 2 arguments - got {len(ref)}"
-            )
-
-        target_model = manifest.resolve_ref(
-            target_model_name,
-            target_model_package,
-            current_project,
-            entity.package_name,
-        )
-
-        if target_model is None or isinstance(target_model, Disabled):
-            # This may raise. Even if it doesn't, we don't want to add
-            # this entity to the graph b/c there is no destination entity
-            entity.config.enabled = False
-            invalid_target_fail_unless_test(
-                node=entity,
-                target_name=target_model_name,
-                target_kind="node",
-                target_package=target_model_package,
-                disabled=(isinstance(target_model, Disabled)),
-            )
-            continue
-
-        target_model_id = target_model.unique_id
-        entity.depends_on.nodes.append(target_model_id)
-    manifest.update_entity(entity)

 def _process_metrics_for_node(
-    manifest: Manifest,
-    current_project: str,
-    node: Union[ManifestNode, ParsedMetric, ParsedExposure],
+    manifest: Manifest, current_project: str, node: Union[ManifestNode, ParsedMetric]
 ):
     """Given a manifest and a node in that manifest, process its metrics"""
     for metric in node.metrics:
@@ -1303,49 +1221,6 @@ def _process_metrics_for_node(
         node.depends_on.nodes.append(target_metric_id)

-def _process_entities_for_node(
-    manifest: Manifest,
-    current_project: str,
-    node: Union[ManifestNode, ParsedEntity, ParsedExposure],
-):
-    """Given a manifest and a node in that manifest, process its entities"""
-    for entity in node.entities:
-        target_entity: Optional[Union[Disabled, ParsedEntity]] = None
-        target_entity_name: str
-        target_entity_package: Optional[str] = None
-
-        if len(entity) == 1:
-            target_entity_name = entity[0]
-        elif len(entity) == 2:
-            target_entity_package, target_entity_name = entity
-        else:
-            raise dbt.exceptions.InternalException(
-                f"Entity references should always be 1 or 2 arguments - got {len(entity)}"
-            )
-
-        target_entity = manifest.resolve_entity(
-            target_entity_name,
-            target_entity_package,
-            current_project,
-            node.package_name,
-        )
-
-        if target_entity is None or isinstance(target_entity, Disabled):
-            # This may raise. Even if it doesn't, we don't want to add
-            # this node to the graph b/c there is no destination node
-            node.config.enabled = False
-            invalid_target_fail_unless_test(
-                node=node,
-                target_name=target_entity_name,
-                target_kind="source",
-                target_package=target_entity_package,
-                disabled=(isinstance(target_entity, Disabled)),
-            )
-            continue
-
-        target_entity_id = target_entity.unique_id
-        node.depends_on.nodes.append(target_entity_id)

 def _process_refs_for_node(manifest: Manifest, current_project: str, node: ManifestNode):
     """Given a manifest and a node in that manifest, process its refs"""
@@ -1417,7 +1292,7 @@ def _process_sources_for_exposure(
         exposure.depends_on.nodes.append(target_source_id)
     manifest.update_exposure(exposure)

-## TODO: Remove this code because metrics can't be based on sources
 def _process_sources_for_metric(manifest: Manifest, current_project: str, metric: ParsedMetric):
     target_source: Optional[Union[Disabled, ParsedSourceDefinition]] = None
     for source_name, table_name in metric.sources:

View File: core/dbt/parser/models.py

@@ -17,7 +17,6 @@ from dbt.events.types import (
 from dbt.node_types import NodeType, ModelLanguage
 from dbt.parser.base import SimpleSQLParser
 from dbt.parser.search import FileBlock
-from dbt.clients.jinja import get_rendered
 import dbt.tracking as tracking
 from dbt import utils
 from dbt_extractor import ExtractionError, py_extract_from_source  # type: ignore
@@ -26,160 +25,6 @@ from itertools import chain
 import random
 from typing import Any, Dict, Iterator, List, Optional, Tuple, Union

-# New for Python models :p
-import ast
-from dbt.dataclass_schema import ValidationError
-from dbt.exceptions import ParsingException, validator_error_message, UndefinedMacroException
-
-dbt_function_key_words = set(["ref", "source", "config", "get"])
-dbt_function_full_names = set(["dbt.ref", "dbt.source", "dbt.config", "dbt.config.get"])
-
-
-class PythonValidationVisitor(ast.NodeVisitor):
-    def __init__(self):
-        super().__init__()
-        self.dbt_errors = []
-        self.num_model_def = 0
-
-    def visit_FunctionDef(self, node: ast.FunctionDef) -> None:
-        if node.name == "model":
-            self.num_model_def += 1
-            if node.args.args and not node.args.args[0].arg == "dbt":
-                self.dbt_errors.append("'dbt' not provided for model as the first argument")
-            if len(node.args.args) != 2:
-                self.dbt_errors.append(
-                    "model function should have two args, `dbt` and a session to current warehouse"
-                )
-            # check we have a return and only one
-            if not isinstance(node.body[-1], ast.Return) or isinstance(
-                node.body[-1].value, ast.Tuple
-            ):
-                self.dbt_errors.append(
-                    "In current version, model function should return only one dataframe object"
-                )
-
-    def check_error(self, node):
-        if self.num_model_def != 1:
-            raise ParsingException(
-                f"dbt allows exactly one model defined per python file, found {self.num_model_def}",
-                node=node,
-            )
-        if len(self.dbt_errors) != 0:
-            raise ParsingException("\n".join(self.dbt_errors), node=node)
-
-
-class PythonParseVisitor(ast.NodeVisitor):
-    def __init__(self, dbt_node):
-        super().__init__()
-        self.dbt_node = dbt_node
-        self.dbt_function_calls = []
-        self.packages = []
-
-    @classmethod
-    def _flatten_attr(cls, node):
-        if isinstance(node, ast.Attribute):
-            return str(cls._flatten_attr(node.value)) + "." + node.attr
-        elif isinstance(node, ast.Name):
-            return str(node.id)
-        else:
-            pass
-
-    def _safe_eval(self, node):
-        try:
-            return ast.literal_eval(node)
-        except (SyntaxError, ValueError, TypeError, MemoryError, RecursionError) as exc:
-            msg = validator_error_message(
-                f"Error when trying to literal_eval an arg to dbt.ref(), dbt.source(), dbt.config() or dbt.config.get() \n{exc}\n"
-                "https://docs.python.org/3/library/ast.html#ast.literal_eval\n"
-                "In dbt python model, `dbt.ref`, `dbt.source`, `dbt.config`, `dbt.config.get` function args only support Python literal structures"
-            )
-            raise ParsingException(msg, node=self.dbt_node) from exc
-
-    def _get_call_literals(self, node):
-        # List of literals
-        arg_literals = []
-        kwarg_literals = {}
-
-        # TODO : Make sure this throws (and that we catch it)
-        # for non-literal inputs
-        for arg in node.args:
-            rendered = self._safe_eval(arg)
-            arg_literals.append(rendered)
-
-        for keyword in node.keywords:
-            key = keyword.arg
-            rendered = self._safe_eval(keyword.value)
-            kwarg_literals[key] = rendered
-
-        return arg_literals, kwarg_literals
-
-    def visit_Call(self, node: ast.Call) -> None:
-        # check weather the current call could be a dbt function call
-        if isinstance(node.func, ast.Attribute) and node.func.attr in dbt_function_key_words:
-            func_name = self._flatten_attr(node.func)
-            # check weather the current call really is a dbt function call
-            if func_name in dbt_function_full_names:
-                # drop the dot-dbt prefix
-                func_name = func_name.split(".")[-1]
-                args, kwargs = self._get_call_literals(node)
-                self.dbt_function_calls.append((func_name, args, kwargs))
-
-        # no matter what happened above, we should keep visiting the rest of the tree
-        # visit args and kwargs to see if there's call in it
-        for obj in node.args + [kwarg.value for kwarg in node.keywords]:
-            if isinstance(obj, ast.Call):
-                self.visit_Call(obj)
-            # support dbt.ref in list args, kwargs
-            elif isinstance(obj, ast.List) or isinstance(obj, ast.Tuple):
-                for el in obj.elts:
-                    if isinstance(el, ast.Call):
-                        self.visit_Call(el)
-            # support dbt.ref in dict args, kwargs
-            elif isinstance(obj, ast.Dict):
-                for value in obj.values:
-                    if isinstance(value, ast.Call):
-                        self.visit_Call(value)
-
-        # visit node.func.value if we are at an call attr
-        if isinstance(node.func, ast.Attribute):
-            self.attribute_helper(node.func)
-
-    def attribute_helper(self, node: ast.Attribute) -> None:
-        while isinstance(node, ast.Attribute):
-            node = node.value  # type: ignore
-        if isinstance(node, ast.Call):
-            self.visit_Call(node)
-
-    def visit_Import(self, node: ast.Import) -> None:
-        for n in node.names:
-            self.packages.append(n.name.split(".")[0])
-
-    def visit_ImportFrom(self, node: ast.ImportFrom) -> None:
-        if node.module:
-            self.packages.append(node.module.split(".")[0])
-
-
-def merge_packages(original_packages_with_version, new_packages):
-    original_packages = [package.split("==")[0] for package in original_packages_with_version]
-    additional_packages = [package for package in new_packages if package not in original_packages]
-    return original_packages_with_version + list(set(additional_packages))
-
-
-def verify_python_model_code(node):
-    # TODO: add a test for this
-    try:
-        rendered_python = get_rendered(
-            node.raw_code,
-            {},
-            node,
-        )
-        if rendered_python != node.raw_code:
-            raise ParsingException("")
-    except (UndefinedMacroException, ParsingException):
-        raise ParsingException("No jinja in python model code is allowed", node=node)
-

 class ModelParser(SimpleSQLParser[ParsedModelNode]):
     def parse_from_dict(self, dct, validate=True) -> ParsedModelNode:
@@ -195,49 +40,16 @@ class ModelParser(SimpleSQLParser[ParsedModelNode]):
     def get_compiled_path(cls, block: FileBlock):
         return block.path.relative_path

-    def parse_python_model(self, node, config, context):
-        try:
-            tree = ast.parse(node.raw_code, filename=node.original_file_path)
-        except SyntaxError as exc:
-            msg = validator_error_message(exc)
-            raise ParsingException(f"{msg}\n{exc.text}", node=node) from exc
-
-        # We are doing a validator and a parser because visit_FunctionDef in parser
-        # would actually make the parser not doing the visit_Calls any more
-        dbtValidator = PythonValidationVisitor()
-        dbtValidator.visit(tree)
-        dbtValidator.check_error(node)
-
-        dbtParser = PythonParseVisitor(node)
-        dbtParser.visit(tree)
-
-        config_keys_used = []
-        for (func, args, kwargs) in dbtParser.dbt_function_calls:
-            if func == "get":
-                config_keys_used.append(args[0])
-                continue
-            context[func](*args, **kwargs)
-        if config_keys_used:
-            # this is being used in macro build_config_dict
-            context["config"](config_keys_used=config_keys_used)
-
     def render_update(self, node: ParsedModelNode, config: ContextConfig) -> None:
+        # TODO
+        if node.language != ModelLanguage.sql:
+            super().render_update(node, config)
+        # TODO move all the logic below into JinjaSQL provider
         self.manifest._parsing_info.static_analysis_path_count += 1
-        if node.language == ModelLanguage.python:
-            try:
-                verify_python_model_code(node)
-                context = self._context_for(node, config)
-                self.parse_python_model(node, config, context)
-                self.update_parsed_node_config(node, config, context=context)
-            except ValidationError as exc:
-                # we got a ValidationError - probably bad types in config()
-                msg = validator_error_message(exc)
-                raise ParsingException(msg, node=node) from exc
-            return
-        elif not flags.STATIC_PARSER:
+
+        if not flags.STATIC_PARSER:
             # jinja rendering
             super().render_update(node, config)
             fire_event(StaticParserCausedJinjaRendering(path=node.path))

View File: core/dbt/parser/partial_parsing.py

@@ -20,7 +20,6 @@ from dbt.events.types import (
     PartialParsingDeletedSource,
     PartialParsingDeletedExposure,
     PartialParsingDeletedMetric,
-    PartialParsingDeletedEntity,
 )
 from dbt.constants import DEFAULT_ENV_PLACEHOLDER
 from dbt.node_types import NodeType
@@ -247,7 +246,7 @@ class PartialParsing:
                 self.remove_source_override_target(source)

     def delete_disabled(self, unique_id, file_id):
-        # This node/metric/entity/exposure is disabled. Find it and remove it from disabled dictionary.
+        # This node/metric/exposure is disabled. Find it and remove it from disabled dictionary.
         for dis_index, dis_node in enumerate(self.saved_manifest.disabled[unique_id]):
             if dis_node.file_id == file_id:
                 node = dis_node
@@ -453,18 +452,6 @@ class PartialParsing:
                 if metric_element:
                     self.delete_schema_metric(schema_file, metric_element)
                     self.merge_patch(schema_file, "metrics", metric_element)
-        elif unique_id in self.saved_manifest.entities:
-            entity = self.saved_manifest.entities[unique_id]
-            file_id = entity.file_id
-            if file_id in self.saved_files and file_id not in self.file_diff["deleted"]:
-                schema_file = self.saved_files[file_id]
-                entities = []
-                if "entities" in schema_file.dict_from_yaml:
-                    entities = schema_file.dict_from_yaml["entities"]
-                entity_element = self.get_schema_element(entities, entity.name)
-                if entity_element:
-                    self.delete_schema_entity(schema_file, entity_element)
-                    self.merge_patch(schema_file, "entities", entity_element)
         elif unique_id in self.saved_manifest.macros:
             macro = self.saved_manifest.macros[unique_id]
             file_id = macro.file_id
@@ -770,29 +757,6 @@ class PartialParsing:
                     self.delete_schema_metric(schema_file, elem)
                     self.merge_patch(schema_file, dict_key, elem)

-        # entities
-        dict_key = "entities"
-        entity_diff = self.get_diff_for("entities", saved_yaml_dict, new_yaml_dict)
-        if entity_diff["changed"]:
-            for entity in entity_diff["changed"]:
-                self.delete_schema_entity(schema_file, entity)
-                self.merge_patch(schema_file, dict_key, entity)
-        if entity_diff["deleted"]:
-            for entity in entity_diff["deleted"]:
-                self.delete_schema_entity(schema_file, entity)
-        if entity_diff["added"]:
-            for entity in entity_diff["added"]:
-                self.merge_patch(schema_file, dict_key, entity)
-        # Handle schema file updates due to env_var changes
-        if dict_key in env_var_changes and dict_key in new_yaml_dict:
-            for name in env_var_changes[dict_key]:
-                if name in entity_diff["changed_or_deleted_names"]:
-                    continue
-                elem = self.get_schema_element(new_yaml_dict[dict_key], name)
-                if elem:
-                    self.delete_schema_entity(schema_file, elem)
-                    self.merge_patch(schema_file, dict_key, elem)

     # Take a "section" of the schema file yaml dictionary from saved and new schema files
     # and determine which parts have changed
     def get_diff_for(self, key, saved_yaml_dict, new_yaml_dict):
@@ -971,25 +935,6 @@ class PartialParsing:
             elif unique_id in self.saved_manifest.disabled:
                 self.delete_disabled(unique_id, schema_file.file_id)

-    # entities are created only from schema files, but also can be referred to by other nodes
-    def delete_schema_entity(self, schema_file, entity_dict):
-        entity_name = entity_dict["name"]
-        entities = schema_file.entities.copy()
-        for unique_id in entities:
-            if unique_id in self.saved_manifest.entities:
-                entity = self.saved_manifest.entities[unique_id]
-                if entity.name == entity_name:
-                    # Need to find everything that referenced this entity and schedule for parsing
-                    if unique_id in self.saved_manifest.child_map:
-                        self.schedule_nodes_for_parsing(self.saved_manifest.child_map[unique_id])
-                    self.deleted_manifest.entities[unique_id] = self.saved_manifest.entities.pop(
-                        unique_id
-                    )
-                    schema_file.entities.remove(unique_id)
-                    fire_event(PartialParsingDeletedEntity(unique_id=unique_id))
-            elif unique_id in self.saved_manifest.disabled:
-                self.delete_disabled(unique_id, schema_file.file_id)

     def get_schema_element(self, elem_list, elem_name):
         for element in elem_list:
             if "name" in element and element["name"] == elem_name:

View File: core/dbt/parser/read_files.py

@@ -171,11 +171,15 @@ def read_files(project, files, parser_files, saved_files):
         dbt_ignore_spec,
     )

+    from dbt.parser.languages import get_file_extensions
+
+    model_extensions = get_file_extensions()
+
     project_files["ModelParser"] = read_files_for_parser(
         project,
         files,
         project.model_paths,
-        [".sql", ".py"],
+        model_extensions,
         ParseFileType.Model,
         saved_files,
         dbt_ignore_spec,

View File: core/dbt/parser/schema.py

@@ -22,12 +22,11 @@ from dbt.context.configured import generate_schema_yml_context, SchemaYamlVars
 from dbt.context.providers import (
     generate_parse_exposure,
     generate_parse_metrics,
-    generate_parse_entities,
     generate_test_context,
 )
 from dbt.context.macro_resolver import MacroResolver
 from dbt.contracts.files import FileHash, SchemaSourceFile
-from dbt.contracts.graph.model_config import MetricConfig, ExposureConfig, EntityConfig
+from dbt.contracts.graph.model_config import MetricConfig, ExposureConfig
 from dbt.contracts.graph.parsed import (
     ParsedNodePatch,
     ColumnInfo,
@@ -36,7 +35,6 @@ from dbt.contracts.graph.parsed import (
     UnpatchedSourceDefinition,
     ParsedExposure,
     ParsedMetric,
-    ParsedEntity,
 )
 from dbt.contracts.graph.unparsed import (
     HasColumnDocs,
@@ -49,8 +47,6 @@ from dbt.contracts.graph.unparsed import (
     UnparsedNodeUpdate,
     UnparsedExposure,
     UnparsedMetric,
-    UnparsedEntity,
-    EntityDimension,
     UnparsedSourceDefinition,
 )
 from dbt.exceptions import (
@@ -92,7 +88,6 @@ schema_file_keys = (
     "analyses",
     "exposures",
     "metrics",
-    "entities",
 )
@@ -132,7 +127,6 @@ class ParserRef:
     def __init__(self):
         self.column_info: Dict[str, ColumnInfo] = {}

-    ## TODO: Mimic this for dimension information at the entity level
     def add(
         self,
         column: Union[HasDocs, UnparsedColumn],
@@ -168,7 +162,6 @@ class ParserRef:
         return refs
-

 def _trimmed(inp: str) -> str:
     if len(inp) < 50:
         return inp
@@ -277,6 +270,7 @@ class SchemaParser(SimpleParser[GenericTestBlock, ParsedGenericTestNode]):
                 path=path,
                 original_file_path=target.original_file_path,
                 raw_code=raw_code,
+                language="sql",
             )
             raise ParsingException(msg, node=node) from exc
@@ -507,8 +501,8 @@ class SchemaParser(SimpleParser[GenericTestBlock, ParsedGenericTestNode]):
         parser: YamlDocsReader

-        # There are 9 kinds of parsers:
-        # Model, Seed, Snapshot, Source, Macro, Analysis, Exposures, Metrics, & Entities
+        # There are 7 kinds of parsers:
+        # Model, Seed, Snapshot, Source, Macro, Analysis, Exposures

         # NonSourceParser.parse(), TestablePatchParser is a variety of
         # NodePatchParser
@@ -558,10 +552,6 @@ class SchemaParser(SimpleParser[GenericTestBlock, ParsedGenericTestNode]):
             metric_parser = MetricParser(self, yaml_block)
             metric_parser.parse()

-        # parse entities
-        if "entities" in dct:
-            entity_parser = EntityParser(self, yaml_block)
-            entity_parser.parse()

 def check_format_version(file_path, yaml_dct) -> None:
     if "version" not in yaml_dct:
@@ -1064,7 +1054,7 @@ class ExposureParser(YamlReader):
         )
         depends_on_jinja = "\n".join("{{ " + line + "}}" for line in unparsed.depends_on)
         get_rendered(depends_on_jinja, ctx, parsed, capture_macros=True)
-        # parsed now has a populated refs/sources/metrics
+        # parsed now has a populated refs/sources

         if parsed.config.enabled:
             self.manifest.add_exposure(self.yaml.file, parsed)
@@ -1220,120 +1210,3 @@ class MetricParser(YamlReader):
                 msg = error_context(self.yaml.path, self.key, data, exc)
                 raise ParsingException(msg) from exc
             self.parse_metric(unparsed)
-
-
-class EntityParser(YamlReader):
-    def __init__(self, schema_parser: SchemaParser, yaml: YamlBlock):
-        super().__init__(schema_parser, yaml, NodeType.Entity.pluralize())
-        self.schema_parser = schema_parser
-        self.yaml = yaml
-
-    def clean_dimensions(self, unparsed: UnparsedEntity):
-        """Mimicing the format of UnparsedColumn"""
-        print("filler")
-
-    def parse_entity(self, unparsed: UnparsedEntity):
-        package_name = self.project.project_name
-        unique_id = f"{NodeType.Entity}.{package_name}.{unparsed.name}"
-        path = self.yaml.path.relative_path
-
-        fqn = self.schema_parser.get_fqn_prefix(path)
-        fqn.append(unparsed.name)
-
-        config = self._generate_entity_config(
-            target=unparsed,
-            fqn=fqn,
-            package_name=package_name,
-            rendered=True,
-        )
-
-        config = config.finalize_and_validate()
-
-        unrendered_config = self._generate_entity_config(
-            target=unparsed,
-            fqn=fqn,
-            package_name=package_name,
-            rendered=False,
-        )
-
-        if not isinstance(config, EntityConfig):
-            raise InternalException(
-                f"Calculated a {type(config)} for an entity, but expected a EntityConfig"
-            )
-
-        ## TODO: Remove or migrate the dimension mapping to this area
-        # parsed_dimensions = {}
-        # for dimension in unparsed.dimensions:
-        #     breakpoint()
-        #     if dimension:
-        #         # breakpoint()
-        #         parsed_dimensions[dimension.name] = dimension
-
-        parsed = ParsedEntity(
-            package_name=package_name,
-            path=path,
-            original_file_path=self.yaml.path.original_file_path,
-            unique_id=unique_id,
-            fqn=fqn,
-            model=unparsed.model,
-            name=unparsed.name,
-            description=unparsed.description,
-            dimensions={dimension.name: dimension for dimension in unparsed.dimensions} if unparsed.dimensions else {},
-            meta=unparsed.meta,
-            tags=unparsed.tags,
-            config=config,
-            unrendered_config=unrendered_config,
-        )
-
-        ctx = generate_parse_entities(
-            parsed,
-            self.root_project,
-            self.schema_parser.manifest,
-            package_name,
-        )
-
-        if parsed.model is not None:
-            model_ref = "{{ " + parsed.model + " }}"
-            get_rendered(model_ref, ctx, parsed)
-
-        # if the metric is disabled we do not want it included in the manifest, only in the disabled dict
-        if parsed.config.enabled:
-            # self.manifest.add_metric(self.yaml.file, parsed)
-            self.manifest.add_entity(self.yaml.file, parsed)
-        else:
-            self.manifest.add_disabled(self.yaml.file, parsed)
-
-    def _generate_entity_config(
-        self, target: UnparsedEntity, fqn: List[str], package_name: str, rendered: bool
-    ):
-        generator: BaseContextConfigGenerator
-        if rendered:
-            generator = ContextConfigGenerator(self.root_project)
-        else:
-            generator = UnrenderedConfigGenerator(self.root_project)
-
-        # configs with precendence set
-        precedence_configs = dict()
-        # first apply metric configs
-        precedence_configs.update(target.config)
-
-        return generator.calculate_node_config(
-            config_call_dict={},
-            fqn=fqn,
-            resource_type=NodeType.Entity,
-            project_name=package_name,
-            base=False,
-            patch_config_dict=precedence_configs,
-        )
-
-    def parse(self):
-        for data in self.get_key_dicts():
-            breakpoint()
-            try:
-                UnparsedEntity.validate(data)
-                unparsed = UnparsedEntity.from_dict(data)
-            except (ValidationError, JSONValidationException) as exc:
-                msg = error_context(self.yaml.path, self.key, data, exc)
-                raise ParsingException(msg) from exc
-            self.parse_entity(unparsed)

View File: core/dbt/task/base.py

@@ -309,9 +309,16 @@ class BaseRunner(metaclass=ABCMeta):
             failures=None,
         )

+    # some modeling languages don't need database connections for compilation,
+    # only for runtime (materialization)
+    def needs_connection(self):
+        return True
+
     def compile_and_execute(self, manifest, ctx):
+        from contextlib import nullcontext
+
         result = None
-        with self.adapter.connection_for(self.node):
+        with self.adapter.connection_for(self.node) if self.needs_connection() else nullcontext():
             ctx.node._event_status["node_status"] = RunningStatus.Compiling
             fire_event(
                 NodeCompiling(
View File: core/dbt/task/compile.py

@@ -20,6 +20,12 @@ class CompileRunner(BaseRunner):
     def after_execute(self, result):
         pass

+    def needs_connection(self):
+        from dbt.parser.languages import get_language_provider_by_name
+
+        provider = get_language_provider_by_name(self.node.language)
+        return provider.needs_compile_time_connection()
+
     def execute(self, compiled_node, manifest):
         return RunResult(
             node=compiled_node,
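
Tracing the two runner changes end to end: `BaseRunner.compile_and_execute` now only borrows a warehouse connection when `needs_connection()` says so, and `CompileRunner` delegates that decision to the node's language provider. A condensed sketch of the flow (simplified, invented signatures):

# Condensed sketch of the connection gating, not the real runner code:
from contextlib import nullcontext

def compile_node_sketch(runner, adapter, node, provider):
    # PrqlProvider/PythonProvider report False, so compiling a .prql or .py
    # model opens no connection; JinjaSQLProvider (and IbisProvider) report True.
    needs = provider.needs_compile_time_connection()
    with adapter.connection_for(node) if needs else nullcontext():
        return provider.get_compiled_code(node, context={})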

View File: core/dbt/task/list.py

@@ -1,6 +1,6 @@
 import json

-from dbt.contracts.graph.parsed import ParsedExposure, ParsedSourceDefinition, ParsedMetric, ParsedEntity
+from dbt.contracts.graph.parsed import ParsedExposure, ParsedSourceDefinition, ParsedMetric
 from dbt.graph import ResourceTypeSelector
 from dbt.task.runnable import GraphRunnableTask, ManifestTask
 from dbt.task.test import TestSelector
@@ -23,7 +23,6 @@ class ListTask(GraphRunnableTask):
             NodeType.Source,
             NodeType.Exposure,
             NodeType.Metric,
-            NodeType.Entity,
         )
     )
     ALL_RESOURCE_VALUES = DEFAULT_RESOURCE_VALUES | frozenset((NodeType.Analysis,))
@@ -85,8 +84,6 @@ class ListTask(GraphRunnableTask):
                 yield self.manifest.exposures[node]
             elif node in self.manifest.metrics:
                 yield self.manifest.metrics[node]
-            elif node in self.manifest.entities:
-                yield self.manifest.entities[node]
             else:
                 raise RuntimeException(
                     f'Got an unexpected result from node selection: "{node}"'
@@ -110,11 +107,6 @@ class ListTask(GraphRunnableTask):
             # metrics are searched for by pkg.metric_name
             metric_selector = ".".join([node.package_name, node.name])
             yield f"metric:{metric_selector}"
-        elif node.resource_type == NodeType.Entity:
-            assert isinstance(node, ParsedEntity)
-            # entities are searched for by pkg.entity_name
-            entity_selector = ".".join([node.package_name, node.name])
-            yield f"entity:{entity_selector}"
         else:
             # everything else is from `fqn`
             yield ".".join(node.fqn)

View File: core/dbt/task/run.py

@@ -159,6 +159,9 @@ def _validate_materialization_relations_dict(inp: Dict[Any, Any], model) -> List
 class ModelRunner(CompileRunner):
+    def needs_connection(self):
+        return True
+
     def get_node_representation(self):
         display_quote_policy = {"database": False, "schema": False, "identifier": False}
         relation = self.adapter.Relation.create_from(
@@ -262,12 +265,12 @@ class ModelRunner(CompileRunner):
         context_config = context["config"]

         mat_has_supported_langs = hasattr(materialization_macro, "supported_languages")
-        model_lang_supported = model.language in materialization_macro.supported_languages
+        model_lang_supported = model.compiled_language in materialization_macro.supported_languages
         if mat_has_supported_langs and not model_lang_supported:
             str_langs = [str(lang) for lang in materialization_macro.supported_languages]
             raise ValidationException(
                 f'Materialization "{materialization_macro.name}" only supports languages {str_langs}; '
-                f'got "{model.language}"'
+                f'got "{model.language}" which compiles to "{model.compiled_language}"'
             )

         hook_ctx = self.adapter.pre_model_hook(context_config)

View File: core/setup.py

@@ -54,7 +54,7 @@ setup(
         "hologram>=0.0.14,<=0.0.15",
         "isodate>=0.6,<0.7",
         "logbook>=1.5,<1.6",
-        "mashumaro[msgpack]==3.1.1",
+        "mashumaro[msgpack]==3.0.4",
         "minimal-snowplow-tracker==0.0.2",
         "networkx>=2.3,<2.8.1;python_version<'3.8'",
         "networkx>=2.3,<3;python_version>='3.8'",

View File: schemas/dbt/manifest (generated JSON schema)

@@ -244,7 +244,7 @@
 "generated_at": {
 "type": "string",
 "format": "date-time",
-"default": "2022-11-30T05:36:16.443035Z"
+"default": "2022-11-01T18:01:47.759437Z"
 },
 "invocation_id": {
 "oneOf": [
@@ -255,7 +255,7 @@
 "type": "null"
 }
 ],
-"default": "ff51bdcd-689d-45b3-8dbb-5a8016382eef"
+"default": "94cf6dd0-d59b-4139-bf79-70055cb9bb34"
 },
 "env": {
 "type": "object",
@@ -519,7 +519,7 @@
 },
 "created_at": {
 "type": "number",
-"default": 1669786576.4447858
+"default": 1667325707.764821
 },
 "config_call_dict": {
 "type": "object",
@@ -1066,7 +1066,7 @@
 },
 "created_at": {
 "type": "number",
-"default": 1669786576.445715
+"default": 1667325707.767402
 },
 "config_call_dict": {
 "type": "object",
@@ -1425,7 +1425,7 @@
 },
 "created_at": {
 "type": "number",
-"default": 1669786576.4462662
+"default": 1667325707.768972
 },
 "config_call_dict": {
 "type": "object",
@@ -1672,7 +1672,7 @@
 },
 "created_at": {
 "type": "number",
-"default": 1669786576.446837
+"default": 1667325707.7706041
 },
 "config_call_dict": {
 "type": "object",
@@ -1929,7 +1929,7 @@
 },
 "created_at": {
 "type": "number",
-"default": 1669786576.447436
+"default": 1667325707.772256
 },
 "config_call_dict": {
 "type": "object",
@@ -2176,7 +2176,7 @@
 },
 "created_at": {
 "type": "number",
-"default": 1669786576.448
+"default": 1667325707.7739131
 },
 "config_call_dict": {
 "type": "object",
@@ -2419,7 +2419,7 @@
 },
 "created_at": {
 "type": "number",
-"default": 1669786576.448638
+"default": 1667325707.7757251
 },
 "config_call_dict": {
 "type": "object",
@@ -2714,7 +2714,7 @@
 },
 "created_at": {
 "type": "number",
-"default": 1669786576.449645
+"default": 1667325707.7787
 },
 "config_call_dict": {
 "type": "object",
@@ -3136,7 +3136,7 @@
 },
 "created_at": {
 "type": "number",
-"default": 1669786576.450196
+"default": 1667325707.780513
 },
 "config_call_dict": {
 "type": "object",
@@ -3379,7 +3379,7 @@
 },
 "created_at": {
 "type": "number",
-"default": 1669786576.450721
+"default": 1667325707.782298
 },
 "config_call_dict": {
 "type": "object",
@@ -3583,7 +3583,7 @@
 },
 "created_at": {
 "type": "number",
-"default": 1669786576.451207
+"default": 1667325707.7835789
 },
 "config_call_dict": {
 "type": "object",
@@ -3795,7 +3795,7 @@
 },
 "created_at": {
 "type": "number",
-"default": 1669786576.451676
+"default": 1667325707.784904
 },
 "config_call_dict": {
 "type": "object",
@@ -4017,7 +4017,7 @@
 },
 "created_at": {
 "type": "number",
-"default": 1669786576.452158
+"default": 1667325707.78629
 },
 "config_call_dict": {
 "type": "object",
@@ -4229,7 +4229,7 @@
 },
 "created_at": {
 "type": "number",
-"default": 1669786576.452618
+"default": 1667325707.78762
 },
 "config_call_dict": {
 "type": "object",
@@ -4441,7 +4441,7 @@
 },
 "created_at": {
 "type": "number",
-"default": 1669786576.453086
+"default": 1667325707.789003
 },
 "config_call_dict": {
 "type": "object",
@@ -4649,7 +4649,7 @@
 },
 "created_at": {
 "type": "number",
-"default": 1669786576.4535701
+"default": 1667325707.790516
 },
 "config_call_dict": {
 "type": "object",
@@ -4882,7 +4882,7 @@
 },
 "created_at": {
 "type": "number",
-"default": 1669786576.454068
+"default": 1667325707.792015
 },
 "config_call_dict": {
 "type": "object",
@@ -5081,7 +5081,7 @@
 },
 "created_at": {
 "type": "number",
-"default": 1669786576.454986
+"default": 1667325707.794882
 },
 "config_call_dict": {
 "type": "object",
@@ -5462,7 +5462,7 @@
 },
 "created_at": {
 "type": "number",
-"default": 1669786576.455929
+"default": 1667325707.797194
 }
 },
 "additionalProperties": false,
@@ -5577,7 +5577,7 @@
 "generated_at": {
 "type": "string",
 "format": "date-time",
-"default": "2022-11-30T05:36:16.440838Z"
+"default": "2022-11-01T18:01:47.754102Z"
 },
 "invocation_id": {
 "oneOf": [
@@ -5588,7 +5588,7 @@
 "type": "null"
 }
 ],
-"default": "ff51bdcd-689d-45b3-8dbb-5a8016382eef"
+"default": "94cf6dd0-d59b-4139-bf79-70055cb9bb34"
 },
 "env": {
 "type": "object",
@@ -5948,7 +5948,7 @@
 },
 "created_at": {
 "type": "number",
-"default": 1669786576.45632
+"default": 1667325707.798143
 },
 "supported_languages": {
 "oneOf": [
@@ -6199,23 +6199,13 @@
 },
 "default": []
 },
-"metrics": {
-"type": "array",
-"items": {
-"type": "array",
-"items": {
-"type": "string"
-}
-},
-"default": []
-},
 "created_at": {
 "type": "number",
-"default": 1669786576.456964
+"default": 1667325707.799795
 }
 },
 "additionalProperties": false,
-"description": "ParsedExposure(fqn: List[str], unique_id: str, package_name: str, path: str, original_file_path: str, name: str, type: dbt.contracts.graph.unparsed.ExposureType, owner: dbt.contracts.graph.unparsed.ExposureOwner, resource_type: dbt.node_types.NodeType = <NodeType.Exposure: 'exposure'>, description: str = '', label: Optional[str] = None, maturity: Optional[dbt.contracts.graph.unparsed.MaturityType] = None, meta: Dict[str, Any] = <factory>, tags: List[str] = <factory>, config: dbt.contracts.graph.model_config.ExposureConfig = <factory>, unrendered_config: Dict[str, Any] = <factory>, url: Optional[str] = None, depends_on: dbt.contracts.graph.parsed.DependsOn = <factory>, refs: List[List[str]] = <factory>, sources: List[List[str]] = <factory>, metrics: List[List[str]] = <factory>, created_at: float = <factory>)"
+"description": "ParsedExposure(fqn: List[str], unique_id: str, package_name: str, path: str, original_file_path: str, name: str, type: dbt.contracts.graph.unparsed.ExposureType, owner: dbt.contracts.graph.unparsed.ExposureOwner, resource_type: dbt.node_types.NodeType = <NodeType.Exposure: 'exposure'>, description: str = '', label: Optional[str] = None, maturity: Optional[dbt.contracts.graph.unparsed.MaturityType] = None, meta: Dict[str, Any] = <factory>, tags: List[str] = <factory>, config: dbt.contracts.graph.model_config.ExposureConfig = <factory>, unrendered_config: Dict[str, Any] = <factory>, url: Optional[str] = None, depends_on: dbt.contracts.graph.parsed.DependsOn = <factory>, refs: List[List[str]] = <factory>, sources: List[List[str]] = <factory>, created_at: float = <factory>)"
 },
 "ExposureOwner": {
 "type": "object",
@@ -6434,7 +6424,7 @@
 },
 "created_at": {
 "type": "number",
-"default": 1669786576.4576042
+"default": 1667325707.801514
 }
 },
 "additionalProperties": false,

View File

@@ -0,0 +1,9 @@
select * from {{ this.schema }}.seed

{{
    config({
        "unique_key": "col_A",
        "materialized": "incremental"
    })
}}
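Because this fixture is materialized as incremental with unique_key col_A, a second dbt run upserts on col_A instead of blindly appending. A toy sketch of the merge semantics in plain Python (not dbt's generated SQL):

# Toy upsert keyed on col_A: incoming rows replace existing rows with the same key.
existing = {1: {"col_A": 1, "col_B": 2}, 3: {"col_A": 3, "col_B": 4}}
incoming = {3: {"col_A": 3, "col_B": 40}, 5: {"col_A": 5, "col_B": 6}}

existing.update(incoming)  # key match -> replace; new key -> append
print(sorted(existing))    # [1, 3, 5]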

View File

@@ -0,0 +1,8 @@
select * from {{ this.schema }}.seed

{{
    config({
        "materialized": "table"
    })
}}

View File

@@ -0,0 +1,8 @@
select * from {{ this.schema }}.seed

{{
    config({
        "materialized": "table"
    })
}}

View File

@@ -0,0 +1,8 @@
select * from {{ this.schema }}.seed

{{
    config({
        "materialized": "table"
    })
}}

View File

@@ -0,0 +1,4 @@
col_A,col_B
1,2
3,4
5,6

View File

@@ -0,0 +1,74 @@
from test.integration.base import DBTIntegrationTest, use_profile
from pytest import mark


class TestAdapterDDL(DBTIntegrationTest):
    def setUp(self):
        DBTIntegrationTest.setUp(self)
        self.run_dbt(["seed"])

    @property
    def schema(self):
        return "adapter_ddl_063"

    @property
    def models(self):
        return "models"

    @property
    def project_config(self):
        return {
            "config-version": 2,
            "seeds": {
                "quote_columns": False,
            },
        }

    # 63 characters is the character limit for a table name in a postgres database
    # (assuming compiled without changes from source)
    @use_profile("postgres")
    def test_postgres_name_longer_than_63_fails(self):
        self.run_dbt(
            [
                "run",
                "-m",
                "my_name_is_64_characters_abcdefghijklmnopqrstuvwxyz0123456789012",
            ],
            expect_pass=False,
        )

    @mark.skip(
        reason="Backup table generation currently adds 12 characters to the relation name, meaning the current name limit is 51."
    )
    @use_profile("postgres")
    def test_postgres_name_shorter_or_equal_to_63_passes(self):
        self.run_dbt(
            [
                "run",
                "-m",
                "my_name_is_52_characters_abcdefghijklmnopqrstuvwxyz0",
                "my_name_is_63_characters_abcdefghijklmnopqrstuvwxyz012345678901",
            ],
            expect_pass=True,
        )

    @use_profile("postgres")
    def test_postgres_long_name_passes_when_temp_tables_are_generated(self):
        self.run_dbt(
            [
                "run",
                "-m",
                "my_name_is_51_characters_incremental_abcdefghijklmn",
            ],
            expect_pass=True,
        )

        # Run again to trigger incremental materialization
        self.run_dbt(
            [
                "run",
                "-m",
                "my_name_is_51_characters_incremental_abcdefghijklmn",
            ],
            expect_pass=True,
        )
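Background for these tests: Postgres truncates identifiers longer than 63 bytes (NAMEDATALEN - 1) instead of raising an error, so relations whose names dbt later suffixes (backup/temp tables) can silently collide. A rough sketch of the truncation rule:

# Postgres keeps only the first 63 bytes of an over-long identifier.
def pg_truncate_identifier(name: str, limit: int = 63) -> str:
    return name[:limit]


long_name = "my_name_is_64_characters_" + "abcdefghijklmnopqrstuvwxyz0123456789012"
assert len(long_name) == 64
print(pg_truncate_identifier(long_name))  # 63 chars -- may collide with suffixed relations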

View File

@@ -0,0 +1 @@
select * from {{ ref('people') }}

View File

@@ -0,0 +1 @@
select 1 as quite_long_column_name

View File

@@ -0,0 +1,11 @@
select * from {{ ref('people') }}

union all

select * from {{ ref('people') }}
where id in (1,2)

union all

select null as id, first_name, last_name, email, gender, ip_address from {{ ref('people') }}
where id in (3,4)

View File

@@ -0,0 +1,40 @@
version: 2

models:
  - name: fine_model
    columns:
      - name: id
        tests:
          - unique
          - not_null

  - name: problematic_model
    columns:
      - name: id
        tests:
          - unique:
              store_failures: true
          - not_null
      - name: first_name
        tests:
          # test truncation of really long test name
          - accepted_values:
              values:
                - Jack
                - Kathryn
                - Gerald
                - Bonnie
                - Harold
                - Jacqueline
                - Wanda
                - Craig
                # - Gary
                # - Rose

  - name: fine_model_but_with_a_no_good_very_long_name
    columns:
      - name: quite_long_column_name
        tests:
          # test truncation of really long test name with builtin
          - unique
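The truncation those comments refer to: dbt keeps generated test names within relation-name limits by cutting them off and appending a hash, which is how a name like accepted_values_problematic_mo_c533ab4ca65c1a9dbf14f79ded49b628 arises below. A rough sketch of the idea (the exact slicing and hash input are assumptions, not dbt's literal implementation):

import hashlib


def shorten_test_name(name: str, limit: int = 63) -> str:
    if len(name) <= limit:
        return name
    digest = hashlib.md5(name.encode("utf-8")).hexdigest()  # 32 hex chars
    return name[: limit - len(digest) - 1] + "_" + digest


test_name = "accepted_values_problematic_model_first_name__Jack__Kathryn__Gerald__Bonnie__Harold__Jacqueline__Wanda__Craig"
print(shorten_test_name(test_name))  # accepted_values_problematic_mo_<hash>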

View File

@@ -0,0 +1,3 @@
value_field,n_records
Gary,1
Rose,1

View File

@@ -0,0 +1,11 @@
id,first_name,last_name,email,gender,ip_address
1,Jack,Hunter,jhunter0@pbs.org,Male,59.80.20.168
2,Kathryn,Walker,kwalker1@ezinearticles.com,Female,194.121.179.35
3,Gerald,Ryan,gryan2@com.com,Male,11.3.212.243
4,Bonnie,Spencer,bspencer3@ameblo.jp,Female,216.32.196.175
5,Harold,Taylor,htaylor4@people.com.cn,Male,253.10.246.136
6,Jacqueline,Griffin,jgriffin5@t.co,Female,16.13.192.220
7,Wanda,Arnold,warnold6@google.nl,Female,232.116.150.64
8,Craig,Ortiz,cortiz7@sciencedaily.com,Male,199.126.106.13
9,Gary,Day,gday8@nih.gov,Male,35.81.68.186
10,Rose,Wright,rwright9@yahoo.co.jp,Female,236.82.178.100

View File

@@ -0,0 +1,3 @@
id,first_name,last_name,email,gender,ip_address
,Gerald,Ryan,gryan2@com.com,Male,11.3.212.243
,Bonnie,Spencer,bspencer3@ameblo.jp,Female,216.32.196.175

View File

@@ -0,0 +1,3 @@
unique_field,n_records
2,2
1,2

View File

@@ -0,0 +1,11 @@
id,first_name,last_name,email,gender,ip_address
1,Jack,Hunter,jhunter0@pbs.org,Male,59.80.20.168
2,Kathryn,Walker,kwalker1@ezinearticles.com,Female,194.121.179.35
3,Gerald,Ryan,gryan2@com.com,Male,11.3.212.243
4,Bonnie,Spencer,bspencer3@ameblo.jp,Female,216.32.196.175
5,Harold,Taylor,htaylor4@people.com.cn,Male,253.10.246.136
6,Jacqueline,Griffin,jgriffin5@t.co,Female,16.13.192.220
7,Wanda,Arnold,warnold6@google.nl,Female,232.116.150.64
8,Craig,Ortiz,cortiz7@sciencedaily.com,Male,199.126.106.13
9,Gary,Day,gday8@nih.gov,Male,35.81.68.186
10,Rose,Wright,rwright9@yahoo.co.jp,Female,236.82.178.100

View File

@@ -0,0 +1,91 @@
from test.integration.base import DBTIntegrationTest, use_profile


class TestStoreTestFailures(DBTIntegrationTest):
    @property
    def schema(self):
        return "test_store_test_failures_067"

    def tearDown(self):
        test_audit_schema = self.unique_schema() + "_dbt_test__audit"
        with self.adapter.connection_named('__test'):
            self._drop_schema_named(self.default_database, test_audit_schema)

        super().tearDown()

    @property
    def models(self):
        return "models"

    @property
    def project_config(self):
        return {
            "config-version": 2,
            "test-paths": ["tests"],
            "seeds": {
                "quote_columns": False,
                "test": {
                    "expected": self.column_type_overrides()
                },
            },
        }

    def column_type_overrides(self):
        return {}

    def run_tests_store_one_failure(self):
        test_audit_schema = self.unique_schema() + "_dbt_test__audit"
        self.run_dbt(["seed"])
        self.run_dbt(["run"])
        self.run_dbt(["test"], expect_pass=False)

        # one test is configured with store_failures: true, make sure it worked
        self.assertTablesEqual("unique_problematic_model_id", "expected_unique_problematic_model_id", test_audit_schema)

    def run_tests_store_failures_and_assert(self):
        test_audit_schema = self.unique_schema() + "_dbt_test__audit"
        self.run_dbt(["seed"])
        self.run_dbt(["run"])

        # make sure this works idempotently for all tests
        self.run_dbt(["test", "--store-failures"], expect_pass=False)
        results = self.run_dbt(["test", "--store-failures"], expect_pass=False)

        # compare test results
        actual = [(r.status, r.failures) for r in results]
        expected = [('pass', 0), ('pass', 0), ('pass', 0), ('pass', 0),
                    ('fail', 2), ('fail', 2), ('fail', 2), ('fail', 10)]
        self.assertEqual(sorted(actual), sorted(expected))

        # compare test results stored in database
        self.assertTablesEqual("failing_test", "expected_failing_test", test_audit_schema)
        self.assertTablesEqual("not_null_problematic_model_id", "expected_not_null_problematic_model_id", test_audit_schema)
        self.assertTablesEqual("unique_problematic_model_id", "expected_unique_problematic_model_id", test_audit_schema)
        self.assertTablesEqual("accepted_values_problematic_mo_c533ab4ca65c1a9dbf14f79ded49b628", "expected_accepted_values", test_audit_schema)


class PostgresTestStoreTestFailures(TestStoreTestFailures):
    @property
    def schema(self):
        return "067"  # otherwise too long + truncated

    def column_type_overrides(self):
        return {
            "expected_unique_problematic_model_id": {
                "+column_types": {
                    "n_records": "bigint",
                },
            },
            "expected_accepted_values": {
                "+column_types": {
                    "n_records": "bigint",
                },
            },
        }

    @use_profile('postgres')
    def test__postgres__store_and_assert(self):
        self.run_tests_store_one_failure()
        self.run_tests_store_failures_and_assert()
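Two invariants worth calling out in the class above: dbt test --store-failures is run twice and must produce identical results, because failure tables are rebuilt rather than appended to on each run, and failing rows land in a dedicated audit schema derived from the test schema name. A sketch of that naming convention as used throughout this file:

# store_failures writes failing rows to "<schema>_dbt_test__audit" in this test suite.
def audit_schema_for(unique_schema: str) -> str:
    return unique_schema + "_dbt_test__audit"


assert audit_schema_for("test_store_test_failures_067") == "test_store_test_failures_067_dbt_test__audit"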

View File

@@ -0,0 +1 @@
select * from {{ ref('fine_model') }}

View File

@@ -0,0 +1,2 @@
select * from {{ ref('fine_model') }}
where false

View File

@@ -0,0 +1,29 @@
{{
    config(
        materialized='incremental',
        unique_key='id',
        on_schema_change='append_new_columns'
    )
}}

{% set string_type = 'varchar(10)' %}

WITH source_data AS (SELECT * FROM {{ ref('model_a') }} )

{% if is_incremental() %}

SELECT id,
       cast(field1 as {{string_type}}) as field1,
       cast(field2 as {{string_type}}) as field2,
       cast(field3 as {{string_type}}) as field3,
       cast(field4 as {{string_type}}) as field4
FROM source_data WHERE id NOT IN (SELECT id from {{ this }} )

{% else %}

SELECT id,
       cast(field1 as {{string_type}}) as field1,
       cast(field2 as {{string_type}}) as field2
FROM source_data where id <= 3

{% endif %}
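What on_schema_change='append_new_columns' does for these models: columns present in the newly compiled result but missing from the existing table (here field3 and field4 on the incremental run) get added, and nothing is dropped. A minimal sketch of the column reconciliation (illustrative only, not dbt's macro):

def columns_to_append(source_columns, target_columns):
    existing = set(target_columns)
    # Preserve source order; only genuinely new columns are appended.
    return [c for c in source_columns if c not in existing]


print(columns_to_append(
    ["id", "field1", "field2", "field3", "field4"],  # incremental-run select
    ["id", "field1", "field2"],                      # table built on the first run
))  # -> ['field3', 'field4']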

View File

@@ -0,0 +1,28 @@
{{
    config(
        materialized='incremental',
        unique_key='id',
        on_schema_change='append_new_columns'
    )
}}

{% set string_type = 'varchar(10)' %}

WITH source_data AS (SELECT * FROM {{ ref('model_a') }} )

{% if is_incremental() %}

SELECT id,
       cast(field1 as {{string_type}}) as field1,
       cast(field3 as {{string_type}}) as field3,
       cast(field4 as {{string_type}}) as field4
FROM source_data WHERE id NOT IN (SELECT id from {{ this }} )

{% else %}

SELECT id,
       cast(field1 as {{string_type}}) as field1,
       cast(field2 as {{string_type}}) as field2
FROM source_data where id <= 3

{% endif %}

View File

@@ -0,0 +1,19 @@
{{
    config(materialized='table')
}}

{% set string_type = 'varchar(10)' %}

with source_data as (

    select * from {{ ref('model_a') }}

)

select id,
       cast(field1 as {{string_type}}) as field1,
       cast(CASE WHEN id > 3 THEN NULL ELSE field2 END as {{string_type}}) AS field2,
       cast(CASE WHEN id <= 3 THEN NULL ELSE field3 END as {{string_type}}) AS field3,
       cast(CASE WHEN id <= 3 THEN NULL ELSE field4 END as {{string_type}}) AS field4

from source_data

View File

@@ -0,0 +1,19 @@
{{
    config(materialized='table')
}}

{% set string_type = 'varchar(10)' %}

with source_data as (

    select * from {{ ref('model_a') }}

)

select id
       ,cast(field1 as {{string_type}}) as field1
       ,cast(field2 as {{string_type}}) as field2
       ,cast(CASE WHEN id <= 3 THEN NULL ELSE field3 END as {{string_type}}) AS field3
       ,cast(CASE WHEN id <= 3 THEN NULL ELSE field4 END as {{string_type}}) AS field4

from source_data

Some files were not shown because too many files have changed in this diff.