Mirror of https://github.com/dbt-labs/dbt-core (synced 2025-12-17 19:31:34 +00:00)

Compare commits: v1.0.3...jerco/sql- (104 commits)

Commit SHA1s in this compare (author and date metadata were not captured by the mirror view):
feb37c55c7, 9a0abc1bfc, 490d68e076, c45147fe6d, bc3468e649, 8fff6729a2, 08f50acb9e, 436a5f5cd4,
aca710048f, 673ad50e21, 8ee86a61a0, 0dda0a90cf, 220d8b888c, 42d5812577, dea4f5f8ff, 8f50eee330,
8fd8dfcf74, 10b27b9633, 5808ee6dd7, a66fe7f467, 18fef38702, 3ad61d5d81, bb1f5b43be, a642b20abc,
c112050455, 43e3fc22c4, 41c6177ae2, 72ecd1ce74, 2d0b975b6c, 8a0bc39a66, f3c7b6bfd1, 0391e4e53a,
3ad3c21886, 6e0ed751e1, c43c79a995, d6cc8b3042, 2f4a6e33ec, b9867e89cb, 13b18654f0, aafa1c7f47,
638e3ad299, d9cfeb1ea3, e6786a2bc3, 13571435a3, efb890db2d, f3735187a6, 3032594b26, 1df7a029b4,
f467fba151, 8791313ec5, 7798f932a0, a588607ec6, 348764d99d, 5aeb088a73, e943b9fc84, 892426eecb,
1d25b2b046, da70840be8, 7632782ecd, 6fae647097, fc8b8c11d5, 26a7922a34, c18b4f1f1a, fa31a67499,
742cd990ee, 8463af35c3, b34a4ab493, 417ccdc3b4, 7c46b784ef, 067b861d30, 9f6ed3cec3, 43edc887f9,
6d4c64a436, 0ed14fa236, 51f2daf4b0, 76f7bf9900, 3fc715f066, b6811da84f, 1dffccd9da, 9ed9936c84,
e75ae8c754, b68535b8cb, 5310498647, 22b1a09aa2, 6855fe06a7, affd8619c2, b67d5f396b, b3039fdc76,
9bdf5fe74a, c675c2d318, 2cd1f7d98e, ce9ac8ea10, b90ab74975, 6d3c3f1995, 74fbaa18cd, fc7c073691,
29f504e201, eeb490ed15, c220b1e42c, d973ae9ec6, f461683df5, 41ed976941, e93ad5f118, d75ed964f8
.bumpversion.cfg

@@ -1,12 +1,12 @@
[bumpversion]
current_version = 1.0.0rc3
current_version = 1.0.1
parse = (?P<major>\d+)
\.(?P<minor>\d+)
\.(?P<patch>\d+)
((?P<prekind>a|b|rc)
(?P<pre>\d+)  # pre-release version num
)?
serialize =
serialize =
{major}.{minor}.{patch}{prekind}{pre}
{major}.{minor}.{patch}
commit = False
@@ -15,7 +15,7 @@ tag = False
[bumpversion:part:prekind]
first_value = a
optional_value = final
values =
values =
a
b
rc
@@ -36,4 +36,4 @@ first_value = 1

[bumpversion:file:plugins/postgres/dbt/adapters/postgres/__version__.py]

[bumpversion:file:docker/requirements/requirements.txt]
[bumpversion:file:docker/Dockerfile]
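As an aside, the parse/serialize pair above is what distinguishes a pre-release like `1.0.0rc3` from a final release like `1.0.1`: the `(?P<prekind>...)(?P<pre>...)` group is optional. A minimal Python sketch of that behavior (illustrative only, not part of the diff):

```python
import re

# The parse pattern from .bumpversion.cfg above, joined into one regex (sketch).
VERSION_RE = re.compile(
    r"(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)"
    r"((?P<prekind>a|b|rc)(?P<pre>\d+))?"  # optional pre-release suffix
)

for version in ("1.0.0rc3", "1.0.1"):
    print(version, VERSION_RE.match(version).groupdict())
# 1.0.0rc3 matches with prekind='rc' and pre='3', so it serializes with the
# {major}.{minor}.{patch}{prekind}{pre} format; 1.0.1 has no suffix and falls
# back to the plain {major}.{minor}.{patch} format.
```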
16  .changes/0.0.0.md  Normal file

@@ -0,0 +1,16 @@
## Previous Releases

For information on prior major and minor releases, see their changelogs:

* [1.0](https://github.com/dbt-labs/dbt-core/blob/1.0.latest/CHANGELOG.md)
* [0.21](https://github.com/dbt-labs/dbt-core/blob/0.21.latest/CHANGELOG.md)
* [0.20](https://github.com/dbt-labs/dbt-core/blob/0.20.latest/CHANGELOG.md)
* [0.19](https://github.com/dbt-labs/dbt-core/blob/0.19.latest/CHANGELOG.md)
* [0.18](https://github.com/dbt-labs/dbt-core/blob/0.18.latest/CHANGELOG.md)
* [0.17](https://github.com/dbt-labs/dbt-core/blob/0.17.latest/CHANGELOG.md)
* [0.16](https://github.com/dbt-labs/dbt-core/blob/0.16.latest/CHANGELOG.md)
* [0.15](https://github.com/dbt-labs/dbt-core/blob/0.15.latest/CHANGELOG.md)
* [0.14](https://github.com/dbt-labs/dbt-core/blob/0.14.latest/CHANGELOG.md)
* [0.13](https://github.com/dbt-labs/dbt-core/blob/0.13.latest/CHANGELOG.md)
* [0.12](https://github.com/dbt-labs/dbt-core/blob/0.12.latest/CHANGELOG.md)
* [0.11 and earlier](https://github.com/dbt-labs/dbt-core/blob/0.11.latest/CHANGELOG.md)
31  .changes/1.0.1.md  Normal file

@@ -0,0 +1,31 @@
## dbt-core 1.1.0 (TBD)

### Features
- Added Support for Semantic Versioning ([#4644](https://github.com/dbt-labs/dbt-core/pull/4644))
- New Dockerfile to support specific db adapters and platforms. See docker/README.md for details ([#4495](https://github.com/dbt-labs/dbt-core/issues/4495), [#4487](https://github.com/dbt-labs/dbt-core/pull/4487))
- Allow unique_key to take a list ([#2479](https://github.com/dbt-labs/dbt-core/issues/2479), [#4618](https://github.com/dbt-labs/dbt-core/pull/4618))
- Add `--quiet` global flag and `print` Jinja function ([#3451](https://github.com/dbt-labs/dbt-core/issues/3451), [#4701](https://github.com/dbt-labs/dbt-core/pull/4701))

### Fixes
- User wasn't asked for permission to overwrite a profile entry when running init inside an existing project ([#4375](https://github.com/dbt-labs/dbt-core/issues/4375), [#4447](https://github.com/dbt-labs/dbt-core/pull/4447))
- Add project name validation to `dbt init` ([#4490](https://github.com/dbt-labs/dbt-core/issues/4490), [#4536](https://github.com/dbt-labs/dbt-core/pull/4536))
- Allow override of string and numeric types for adapters. ([#4603](https://github.com/dbt-labs/dbt-core/issues/4603))
- A change in secret environment variables won't trigger a full reparse ([#4650](https://github.com/dbt-labs/dbt-core/issues/4650), [#4665](https://github.com/dbt-labs/dbt-core/pull/4665))
- Fix misspellings and typos in docstrings ([#4545](https://github.com/dbt-labs/dbt-core/pull/4545))

### Under the hood
- Testing cleanup ([#4496](https://github.com/dbt-labs/dbt-core/pull/4496), [#4509](https://github.com/dbt-labs/dbt-core/pull/4509))
- Clean up test deprecation warnings ([#3988](https://github.com/dbt-labs/dbt-core/issues/3988), [#4556](https://github.com/dbt-labs/dbt-core/pull/4556))
- Use mashumaro for serialization in event logging ([#4504](https://github.com/dbt-labs/dbt-core/issues/4504), [#4505](https://github.com/dbt-labs/dbt-core/pull/4505))
- Drop support for Python 3.7.0 + 3.7.1 ([#4584](https://github.com/dbt-labs/dbt-core/issues/4584), [#4585](https://github.com/dbt-labs/dbt-core/pull/4585), [#4643](https://github.com/dbt-labs/dbt-core/pull/4643))
- Re-format codebase (except tests) using pre-commit hooks ([#3195](https://github.com/dbt-labs/dbt-core/issues/3195), [#4697](https://github.com/dbt-labs/dbt-core/pull/4697))
- Add deps module README ([#4686](https://github.com/dbt-labs/dbt-core/pull/4686/))
- Initial conversion of tests to pytest ([#4690](https://github.com/dbt-labs/dbt-core/issues/4690), [#4691](https://github.com/dbt-labs/dbt-core/pull/4691))
- Fix errors in Windows for tests/functions ([#4781](https://github.com/dbt-labs/dbt-core/issues/4781), [#4767](https://github.com/dbt-labs/dbt-core/pull/4767))

Contributors:
- [@NiallRees](https://github.com/NiallRees) ([#4447](https://github.com/dbt-labs/dbt-core/pull/4447))
- [@alswang18](https://github.com/alswang18) ([#4644](https://github.com/dbt-labs/dbt-core/pull/4644))
- [@emartens](https://github.com/ehmartens) ([#4701](https://github.com/dbt-labs/dbt-core/pull/4701))
- [@mdesmet](https://github.com/mdesmet) ([#4604](https://github.com/dbt-labs/dbt-core/pull/4604))
- [@kazanzhy](https://github.com/kazanzhy) ([#4545](https://github.com/dbt-labs/dbt-core/pull/4545))
40  .changes/README.md  Normal file

@@ -0,0 +1,40 @@
# CHANGELOG Automation

We use [changie](https://changie.dev/) to automate `CHANGELOG` generation. For installation and format/command specifics, see the documentation.

### Quick Tour

- All new change entries get generated under `/.changes/unreleased` as a yaml file
- `header.tpl.md` contains the contents of the header for the entire CHANGELOG file
- `0.0.0.md` contains the contents of the footer for the entire CHANGELOG file. changie looks to be in the process of supporting a footer file the same way it supports a header file; switch to that when available. For now, the 0.0.0 in the file name forces it to the bottom of the changelog no matter what version we are releasing.
- `.changie.yaml` contains the fields in a change, the format of a single change, as well as the format of the Contributors section for each version.

### Workflow

#### Daily workflow
Almost every code change we make associated with an issue will require a `CHANGELOG` entry. After you have created the PR in GitHub, run `changie new` and follow the command prompts to generate a yaml file with your change details. This only needs to be done once per PR.

The `changie new` command will ensure correct file format and file name. There is a one-to-one mapping of issues to changes; multiple issues cannot be lumped into a single entry. If you make a mistake, the yaml file may be directly modified and saved as long as the format is preserved.

Note: If your PR has been cleared by the Core Team as not needing a changelog entry, the `Skip Changelog` label may be put on the PR to bypass the GitHub action that blocks PRs from being merged when they are missing a `CHANGELOG` entry.

#### Prerelease Workflow
These commands batch up changes in `/.changes/unreleased` to be included in this prerelease and move those files to a directory named for the release version. The `--move-dir` will be created if it does not exist and is created in `/.changes`.

```
changie batch <version> --move-dir '<version>' --prerelease 'rc1'
changie merge
```

#### Final Release Workflow
These commands batch up changes in `/.changes/unreleased` as well as `/.changes/<version>` to be included in this final release and delete all prereleases. This rolls all prereleases up into a single final release. All `yaml` files in `/unreleased` and `<version>` will be deleted at this point.

```
changie batch <version> --include '<version>' --remove-prereleases
changie merge
```

### A Note on Manual Edits & Gotchas
- Changie generates markdown files in the `.changes` directory that are parsed together with the `changie merge` command. Every time `changie merge` is run, it regenerates the entire file. For this reason, any changes made directly to `CHANGELOG.md` will be overwritten on the next run of `changie merge`.
- If changes need to be made to the `CHANGELOG.md`, make the changes to the relevant `<version>.md` file located in the `/.changes` directory. You will then run `changie merge` to regenerate the `CHANGELOG.md`.
- Do not run `changie batch` again on released versions. Our final release workflow deletes all of the yaml files associated with individual changes. If for some reason modifications to the `CHANGELOG.md` are required after we've generated the final release `CHANGELOG.md`, the modifications need to be done manually to the `<version>.md` file in the `/.changes` directory.
6  .changes/header.tpl.md  Executable file

@@ -0,0 +1,6 @@
# dbt Core Changelog

- This file provides a full account of all changes to `dbt-core` and `dbt-postgres`
- Changes are listed under the (pre)release in which they first appear. Subsequent releases include changes from previous releases.
- "Breaking changes" listed under a version may require action from end users or external maintainers when upgrading to that version.
- Do not edit this file directly. This file is auto-generated using [changie](https://github.com/miniscruff/changie). For details on how to document a change, see [the contributing guide](CONTRIBUTING.md)
0  core/dbt/include/starter_project/data/.gitkeep → .changes/unreleased/.gitkeep  (Normal file → Executable file)
7  .changes/unreleased/Under the Hood-20220218-161319.yaml  Normal file

@@ -0,0 +1,7 @@
kind: Under the Hood
body: Automate changelog generation with changie
time: 2022-02-18T16:13:19.882436-06:00
custom:
  Author: emmyoop
  Issue: "4652"
  PR: "4743"
50  .changie.yaml  Executable file
@@ -0,0 +1,50 @@
|
||||
changesDir: .changes
|
||||
unreleasedDir: unreleased
|
||||
headerPath: header.tpl.md
|
||||
versionHeaderPath: ""
|
||||
changelogPath: CHANGELOG.md
|
||||
versionExt: md
|
||||
versionFormat: '## dbt-core {{.Version}} - {{.Time.Format "January 02, 2006"}}'
|
||||
kindFormat: '### {{.Kind}}'
|
||||
changeFormat: '- {{.Body}} ([#{{.Custom.Issue}}](https://github.com/dbt-labs/dbt-core/issues/{{.Custom.Issue}}), [#{{.Custom.PR}}](https://github.com/dbt-labs/dbt-core/pull/{{.Custom.PR}}))'
|
||||
kinds:
|
||||
- label: Fixes
|
||||
- label: Features
|
||||
- label: Under the Hood
|
||||
- label: Breaking Changes
|
||||
- label: Docs
|
||||
- label: Dependencies
|
||||
custom:
|
||||
- key: Author
|
||||
label: GitHub Name
|
||||
type: string
|
||||
minLength: 3
|
||||
- key: Issue
|
||||
label: GitHub Issue Number
|
||||
type: int
|
||||
minLength: 4
|
||||
- key: PR
|
||||
label: GitHub Pull Request Number
|
||||
type: int
|
||||
minLength: 4
|
||||
footerFormat: |
|
||||
Contributors:
|
||||
{{- $contributorDict := dict }}
|
||||
{{- $core_team := list "emmyoop" "nathaniel-may" "gshank" "leahwicz" "ChenyuLInx" "stu-k" "iknox-fa" "VersusFacit" "McKnight-42" "jtcohen6" }}
|
||||
{{- range $change := .Changes }}
|
||||
{{- $author := $change.Custom.Author }}
|
||||
{{- if not (has $author $core_team)}}
|
||||
{{- $pr := $change.Custom.PR }}
|
||||
{{- if hasKey $contributorDict $author }}
|
||||
{{- $prList := get $contributorDict $author }}
|
||||
{{- $prList = append $prList $pr }}
|
||||
{{- $contributorDict := set $contributorDict $author $prList }}
|
||||
{{- else }}
|
||||
{{- $prList := list $change.Custom.PR }}
|
||||
{{- $contributorDict := set $contributorDict $author $prList }}
|
||||
{{- end }}
|
||||
{{- end}}
|
||||
{{- end }}
|
||||
{{- range $k,$v := $contributorDict }}
|
||||
- [{{$k}}](https://github.com/{{$k}}) ({{ range $index, $element := $v }}{{if $index}}, {{end}}[#{{$element}}](https://github.com/dbt-labs/dbt-core/pull/{{$element}}){{end}})
|
||||
{{- end }}
|
||||
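The `footerFormat` template above is fairly dense Go templating. A rough Python paraphrase of what it does (illustrative only, with made-up change entries): it groups PR numbers by external author and skips anyone on the core team list.

```python
# Illustrative equivalent of the footerFormat template in .changie.yaml:
# group PR numbers by external author, skipping core team members.
core_team = {"emmyoop", "nathaniel-may", "gshank", "leahwicz", "ChenyuLInx",
             "stu-k", "iknox-fa", "VersusFacit", "McKnight-42", "jtcohen6"}

changes = [  # hypothetical parsed change entries
    {"Author": "NiallRees", "PR": "4447"},
    {"Author": "emmyoop", "PR": "4743"},
]

contributors = {}
for change in changes:
    if change["Author"] not in core_team:
        contributors.setdefault(change["Author"], []).append(change["PR"])

for author, prs in contributors.items():
    links = ", ".join(f"[#{pr}](https://github.com/dbt-labs/dbt-core/pull/{pr})" for pr in prs)
    print(f"- [{author}](https://github.com/{author}) ({links})")
```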
12  .flake8  Normal file
@@ -0,0 +1,12 @@
|
||||
[flake8]
|
||||
select =
|
||||
E
|
||||
W
|
||||
F
|
||||
ignore =
|
||||
W503 # makes Flake8 work like black
|
||||
W504
|
||||
E203 # makes Flake8 work like black
|
||||
E741
|
||||
E501 # long line checking is done in black
|
||||
exclude = test
|
||||
43  .github/CODEOWNERS  vendored  Normal file
@@ -0,0 +1,43 @@
|
||||
# This file contains the code owners for the dbt-core repo.
|
||||
# PRs will be automatically assigned for review to the associated
|
||||
# team(s) or person(s) that touches any files that are mapped to them.
|
||||
#
|
||||
# A statement takes precedence over the statements above it so more general
|
||||
# assignments are found at the top with specific assignments being lower in
|
||||
# the ordering (i.e. catch all assignment should be the first item)
|
||||
#
|
||||
# Consult GitHub documentation for formatting guidelines:
|
||||
# https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners#example-of-a-codeowners-file
|
||||
|
||||
# As a default for areas with no assignment,
|
||||
# the core team as a whole will be assigned
|
||||
* @dbt-labs/core
|
||||
|
||||
# Changes to GitHub configurations including Actions
|
||||
/.github/ @leahwicz
|
||||
|
||||
# Language core modules
|
||||
/core/dbt/config/ @dbt-labs/core-language
|
||||
/core/dbt/context/ @dbt-labs/core-language
|
||||
/core/dbt/contracts/ @dbt-labs/core-language
|
||||
/core/dbt/deps/ @dbt-labs/core-language
|
||||
/core/dbt/parser/ @dbt-labs/core-language
|
||||
|
||||
# Execution core modules
|
||||
/core/dbt/events/ @dbt-labs/core-execution @dbt-labs/core-language # eventually remove language but they have knowledge here now
|
||||
/core/dbt/graph/ @dbt-labs/core-execution
|
||||
/core/dbt/task/ @dbt-labs/core-execution
|
||||
|
||||
# Adapter interface, scaffold, Postgres plugin
|
||||
/core/dbt/adapters @dbt-labs/core-adapters
|
||||
/core/scripts/create_adapter_plugin.py @dbt-labs/core-adapters
|
||||
/plugins/ @dbt-labs/core-adapters
|
||||
|
||||
# Global project: default macros, including generic tests + materializations
|
||||
/core/dbt/include/global_project @dbt-labs/core-execution @dbt-labs/core-adapters
|
||||
|
||||
# Perf regression testing framework
|
||||
# This excludes the test project files itself since those aren't specific
|
||||
# framework changes (excluded by not setting an owner next to it- no owner)
|
||||
/performance @nathaniel-may
|
||||
/performance/projects
|
||||
14  .github/actions/latest-wrangler/Dockerfile  vendored  Normal file
@@ -0,0 +1,14 @@
|
||||
FROM python:3-slim AS builder
|
||||
ADD . /app
|
||||
WORKDIR /app
|
||||
|
||||
# We are installing a dependency here directly into our app source dir
|
||||
RUN pip install --target=/app requests packaging
|
||||
|
||||
# A distroless container image with Python and some basics like SSL certificates
|
||||
# https://github.com/GoogleContainerTools/distroless
|
||||
FROM gcr.io/distroless/python3-debian10
|
||||
COPY --from=builder /app /app
|
||||
WORKDIR /app
|
||||
ENV PYTHONPATH /app
|
||||
CMD ["/app/main.py"]
|
||||
50  .github/actions/latest-wrangler/README.md  vendored  Normal file

@@ -0,0 +1,50 @@
# Github package 'latest' tag wrangler for containers

## Usage

Plug in the necessary inputs to determine if the container being built should be tagged 'latest' at the package level, for example `dbt-redshift:latest`.

## Inputs
| Input | Description |
| - | - |
| `package` | Name of the GH package to check against |
| `new_version` | Semver of new container |
| `gh_token` | GH token with package read scope |
| `halt_on_missing` | Return non-zero exit code if requested package does not exist (defaults to false) |

## Outputs
| Output | Description |
| - | - |
| `latest` | Whether or not the new container should be tagged 'latest' |
| `minor_latest` | Whether or not the new container should be tagged major.minor.latest |

## Example workflow
```yaml
name: Ship it!
on:
  workflow_dispatch:
    inputs:
      package:
        description: The package to publish
        required: true
      version_number:
        description: The version number
        required: true

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v1
      - name: Wrangle latest tag
        id: is_latest
        uses: ./.github/actions/latest-wrangler
        with:
          package: ${{ github.event.inputs.package }}
          new_version: ${{ github.event.inputs.new_version }}
          gh_token: ${{ secrets.GITHUB_TOKEN }}
      - name: Print the results
        run: |
          echo "Is it latest? Survey says: ${{ steps.is_latest.outputs.latest }} !"
          echo "Is it minor.latest? Survey says: ${{ steps.is_latest.outputs.minor_latest }} !"
```
20  .github/actions/latest-wrangler/action.yml  vendored  Normal file

@@ -0,0 +1,20 @@
name: "Github package 'latest' tag wrangler for containers"
description: "Determines whether or not a given dbt container should be given a bare 'latest' tag (i.e. dbt-core:latest)"
inputs:
  package_name:
    description: "Package to check (i.e. dbt-core, dbt-redshift, etc)"
    required: true
  new_version:
    description: "Semver of the container being built (i.e. 1.0.4)"
    required: true
  gh_token:
    description: "Auth token for github (must have view packages scope)"
    required: true
outputs:
  latest:
    description: "Whether or not built container should be tagged latest (bool)"
  minor_latest:
    description: "Whether or not built container should be tagged minor.latest (bool)"
runs:
  using: "docker"
  image: "Dockerfile"
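Worth noting how these inputs reach the action's code: for Docker container actions, GitHub exposes each input as an upper-cased `INPUT_<NAME>` environment variable, which is what `main.py` below reads. (The action declares `package_name` while `main.py` reads `INPUT_PACKAGE` and the README documents `package`, which looks inconsistent in the source.) A minimal sketch of the mapping, with illustrative values only:

```python
import os

# Simulate how GitHub passes action inputs to a Docker container action:
# each input becomes an INPUT_<NAME> environment variable.
os.environ["INPUT_PACKAGE"] = "dbt-redshift"   # illustrative value
os.environ["INPUT_NEW_VERSION"] = "1.0.1"      # illustrative value
os.environ["INPUT_GH_TOKEN"] = "<token>"       # placeholder

# main.py then reads them back the same way:
package = os.environ["INPUT_PACKAGE"]
new_version = os.environ["INPUT_NEW_VERSION"]
print(package, new_version)
```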
26  .github/actions/latest-wrangler/examples/example_workflow.yml  vendored  Normal file
@@ -0,0 +1,26 @@
|
||||
name: Ship it!
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
package:
|
||||
description: The package to publish
|
||||
required: true
|
||||
version_number:
|
||||
description: The version number
|
||||
required: true
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- name: Wrangle latest tag
|
||||
id: is_latest
|
||||
uses: ./.github/actions/latest-wrangler
|
||||
with:
|
||||
package: ${{ github.event.inputs.package }}
|
||||
new_version: ${{ github.event.inputs.new_version }}
|
||||
gh_token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- name: Print the results
|
||||
run: |
|
||||
echo "Is it latest? Survey says: ${{ steps.is_latest.outputs.latest }} !"
|
||||
6
.github/actions/latest-wrangler/examples/example_workflow_dispatch.json
vendored
Normal file
6
.github/actions/latest-wrangler/examples/example_workflow_dispatch.json
vendored
Normal file
@@ -0,0 +1,6 @@
|
||||
{
|
||||
"inputs": {
|
||||
"version_number": "1.0.1",
|
||||
"package": "dbt-redshift"
|
||||
}
|
||||
}
|
||||
95  .github/actions/latest-wrangler/main.py  vendored  Normal file
@@ -0,0 +1,95 @@
|
||||
import os
|
||||
import sys
|
||||
import requests
|
||||
from distutils.util import strtobool
|
||||
from typing import Union
|
||||
from packaging.version import parse, Version
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
# get inputs
|
||||
package = os.environ["INPUT_PACKAGE"]
|
||||
new_version = parse(os.environ["INPUT_NEW_VERSION"])
|
||||
gh_token = os.environ["INPUT_GH_TOKEN"]
|
||||
halt_on_missing = strtobool(os.environ.get("INPUT_HALT_ON_MISSING", "False"))
|
||||
|
||||
# get package metadata from github
|
||||
package_request = requests.get(
|
||||
f"https://api.github.com/orgs/dbt-labs/packages/container/{package}/versions",
|
||||
auth=("", gh_token),
|
||||
)
|
||||
package_meta = package_request.json()
|
||||
|
||||
# Log info if we don't get a 200
|
||||
if package_request.status_code != 200:
|
||||
print(f"Call to GH API failed: {package_request.status_code} {package_meta['message']}")
|
||||
|
||||
# Make an early exit if there is no matching package in github
|
||||
if package_request.status_code == 404:
|
||||
if halt_on_missing:
|
||||
sys.exit(1)
|
||||
else:
|
||||
# everything is the latest if the package doesn't exist
|
||||
print(f"::set-output name=latest::{True}")
|
||||
print(f"::set-output name=minor_latest::{True}")
|
||||
sys.exit(0)
|
||||
|
||||
# TODO: verify package meta is "correct"
|
||||
# https://github.com/dbt-labs/dbt-core/issues/4640
|
||||
|
||||
# map versions and tags
|
||||
version_tag_map = {
|
||||
version["id"]: version["metadata"]["container"]["tags"] for version in package_meta
|
||||
}
|
||||
|
||||
# is pre-release
|
||||
pre_rel = True if any(x in str(new_version) for x in ["a", "b", "rc"]) else False
|
||||
|
||||
# semver of current latest
|
||||
for version, tags in version_tag_map.items():
|
||||
if "latest" in tags:
|
||||
# N.B. This seems counterintuitive, but we expect any version tagged
|
||||
# 'latest' to have exactly three associated tags:
|
||||
# latest, major.minor.latest, and major.minor.patch.
|
||||
# Subtracting everything that contains the string 'latest' gets us
|
||||
# the major.minor.patch which is what's needed for comparison.
|
||||
current_latest = parse([tag for tag in tags if "latest" not in tag][0])
|
||||
else:
|
||||
current_latest = False
|
||||
|
||||
# semver of current_minor_latest
|
||||
for version, tags in version_tag_map.items():
|
||||
if f"{new_version.major}.{new_version.minor}.latest" in tags:
|
||||
# Similar to above, only now we expect exactly two tags:
|
||||
# major.minor.patch and major.minor.latest
|
||||
current_minor_latest = parse([tag for tag in tags if "latest" not in tag][0])
|
||||
else:
|
||||
current_minor_latest = False
|
||||
|
||||
def is_latest(
|
||||
pre_rel: bool, new_version: Version, remote_latest: Union[bool, Version]
|
||||
) -> bool:
|
||||
"""Determine if a given contaier should be tagged 'latest' based on:
|
||||
- it's pre-release status
|
||||
- it's version
|
||||
- the version of a previously identified container tagged 'latest'
|
||||
|
||||
:param pre_rel: Whether or not the version of the new container is a pre-release
|
||||
:param new_version: The version of the new container
|
||||
:param remote_latest: The version of the previously identified container that's
|
||||
already tagged latest or False
|
||||
"""
|
||||
# is a pre-release = not latest
|
||||
if pre_rel:
|
||||
return False
|
||||
# + no latest tag found = is latest
|
||||
if not remote_latest:
|
||||
return True
|
||||
# + if remote version is lower than current = is latest, else not latest
|
||||
return True if remote_latest <= new_version else False
|
||||
|
||||
latest = is_latest(pre_rel, new_version, current_latest)
|
||||
minor_latest = is_latest(pre_rel, new_version, current_minor_latest)
|
||||
|
||||
print(f"::set-output name=latest::{latest}")
|
||||
print(f"::set-output name=minor_latest::{minor_latest}")
|
||||
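To make the tagging decision above concrete, here is a minimal sketch of the same `is_latest` logic with example versions (illustrative only; it needs the `packaging` library, which the action's Dockerfile installs):

```python
from packaging.version import parse

def is_latest(pre_rel, new_version, remote_latest):
    # Mirrors the decision logic in main.py above.
    if pre_rel:
        return False          # pre-releases are never tagged 'latest'
    if not remote_latest:
        return True           # no existing 'latest' tag found
    return remote_latest <= new_version

print(is_latest(False, parse("1.0.1"), parse("1.0.0")))    # True: newer than current latest
print(is_latest(True, parse("1.1.0rc1"), parse("1.0.1")))  # False: pre-release
print(is_latest(False, parse("1.0.1"), False))             # True: package had no 'latest' yet
```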
2  .github/pull_request_template.md  vendored
@@ -18,4 +18,4 @@ resolves #
|
||||
- [ ] I have signed the [CLA](https://docs.getdbt.com/docs/contributor-license-agreements)
|
||||
- [ ] I have run this code in development and it appears to resolve the stated issue
|
||||
- [ ] This PR includes tests, or tests are not required/relevant for this PR
|
||||
- [ ] I have updated the `CHANGELOG.md` and added information about my change
|
||||
- [ ] I have added information about my change to be included in the [CHANGELOG](https://github.com/dbt-labs/dbt-core/blob/main/CONTRIBUTING.md#Adding-CHANGELOG-Entry).
|
||||
|
||||
95  .github/scripts/integration-test-matrix.js  vendored
@@ -1,95 +0,0 @@
|
||||
module.exports = ({ context }) => {
|
||||
const defaultPythonVersion = "3.8";
|
||||
const supportedPythonVersions = ["3.7", "3.8", "3.9"];
|
||||
const supportedAdapters = ["postgres"];
|
||||
|
||||
// if PR, generate matrix based on files changed and PR labels
|
||||
if (context.eventName.includes("pull_request")) {
|
||||
// `changes` is a list of adapter names that have related
|
||||
// file changes in the PR
|
||||
// ex: ['postgres', 'snowflake']
|
||||
const changes = JSON.parse(process.env.CHANGES);
|
||||
const labels = context.payload.pull_request.labels.map(({ name }) => name);
|
||||
console.log("labels", labels);
|
||||
console.log("changes", changes);
|
||||
const testAllLabel = labels.includes("test all");
|
||||
const include = [];
|
||||
|
||||
for (const adapter of supportedAdapters) {
|
||||
if (
|
||||
changes.includes(adapter) ||
|
||||
testAllLabel ||
|
||||
labels.includes(`test ${adapter}`)
|
||||
) {
|
||||
for (const pythonVersion of supportedPythonVersions) {
|
||||
if (
|
||||
pythonVersion === defaultPythonVersion ||
|
||||
labels.includes(`test python${pythonVersion}`) ||
|
||||
testAllLabel
|
||||
) {
|
||||
// always run tests on ubuntu by default
|
||||
include.push({
|
||||
os: "ubuntu-latest",
|
||||
adapter,
|
||||
"python-version": pythonVersion,
|
||||
});
|
||||
|
||||
if (labels.includes("test windows") || testAllLabel) {
|
||||
include.push({
|
||||
os: "windows-latest",
|
||||
adapter,
|
||||
"python-version": pythonVersion,
|
||||
});
|
||||
}
|
||||
|
||||
if (labels.includes("test macos") || testAllLabel) {
|
||||
include.push({
|
||||
os: "macos-latest",
|
||||
adapter,
|
||||
"python-version": pythonVersion,
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
console.log("matrix", { include });
|
||||
|
||||
return {
|
||||
include,
|
||||
};
|
||||
}
|
||||
// if not PR, generate matrix of python version, adapter, and operating
|
||||
// system to run integration tests on
|
||||
|
||||
const include = [];
|
||||
// run for all adapters and python versions on ubuntu
|
||||
for (const adapter of supportedAdapters) {
|
||||
for (const pythonVersion of supportedPythonVersions) {
|
||||
include.push({
|
||||
os: 'ubuntu-latest',
|
||||
adapter: adapter,
|
||||
"python-version": pythonVersion,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// additionally include runs for all adapters, on macos and windows,
|
||||
// but only for the default python version
|
||||
for (const adapter of supportedAdapters) {
|
||||
for (const operatingSystem of ["windows-latest", "macos-latest"]) {
|
||||
include.push({
|
||||
os: operatingSystem,
|
||||
adapter: adapter,
|
||||
"python-version": defaultPythonVersion,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
console.log("matrix", { include });
|
||||
|
||||
return {
|
||||
include,
|
||||
};
|
||||
};
|
||||
34  .github/workflows/backport.yml  vendored  Normal file
@@ -0,0 +1,34 @@
|
||||
# **what?**
|
||||
# When a PR is merged, if it has the backport label, it will create
|
||||
# a new PR to backport those changes to the given branch. If it can't
|
||||
# cleanly do a backport, it will comment on the merged PR of the failure.
|
||||
#
|
||||
# Label naming convention: "backport <branch name to backport to>"
|
||||
# Example: backport 1.0.latest
|
||||
#
|
||||
# You MUST "Squash and merge" the original PR or this won't work.
|
||||
|
||||
# **why?**
|
||||
# Changes sometimes need to be backported to release branches.
|
||||
# This automates the backporting process
|
||||
|
||||
# **when?**
|
||||
# Once a PR is "Squash and merge"'d and it has been correctly labeled
|
||||
# according to the naming convention.
|
||||
|
||||
name: Backport
|
||||
on:
|
||||
pull_request:
|
||||
types:
|
||||
- closed
|
||||
- labeled
|
||||
|
||||
jobs:
|
||||
backport:
|
||||
runs-on: ubuntu-18.04
|
||||
name: Backport
|
||||
steps:
|
||||
- name: Backport
|
||||
uses: tibdex/backport@v1.1.1
|
||||
with:
|
||||
github_token: ${{ secrets.GITHUB_TOKEN }}
|
||||
62  .github/workflows/changelog-check.yml  vendored  Normal file
@@ -0,0 +1,62 @@
|
||||
# **what?**
|
||||
# Checks that a file has been committed under the /.changes directory
|
||||
# as a new CHANGELOG entry. Cannot check for a specific filename as
|
||||
# it is dynamically generated by change type and timestamp.
|
||||
# This workflow should not require any secrets since it runs for PRs
|
||||
# from forked repos.
|
||||
# By default, secrets are not passed to workflows running from
|
||||
# a forked repo.
|
||||
|
||||
# **why?**
|
||||
# Ensure code change gets reflected in the CHANGELOG.
|
||||
|
||||
# **when?**
|
||||
# This will run for all PRs going into main and *.latest.
|
||||
|
||||
name: Check Changelog Entry
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
workflow_dispatch:
|
||||
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
pull-requests: write
|
||||
|
||||
jobs:
|
||||
changelog:
|
||||
name: changelog
|
||||
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Check if changelog file was added
|
||||
# https://github.com/marketplace/actions/paths-changes-filter
|
||||
# For each filter, it sets output variable named by the filter to the text:
|
||||
# 'true' - if any of changed files matches any of filter rules
|
||||
# 'false' - if none of changed files matches any of filter rules
|
||||
# also, returns:
|
||||
# `changes` - JSON array with names of all filters matching any of the changed files
|
||||
uses: dorny/paths-filter@v2
|
||||
id: filter
|
||||
with:
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
filters: |
|
||||
changelog:
|
||||
- added: '.changes/unreleased/**.yaml'
|
||||
- name: Check a file has been added to .changes/unreleased if required
|
||||
uses: actions/github-script@v6
|
||||
if: steps.filter.outputs.changelog == 'false' && !contains( github.event.pull_request.labels.*.name, 'Skip Changelog')
|
||||
with:
|
||||
script: |
|
||||
github.rest.issues.createComment({
|
||||
issue_number: context.issue.number,
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
body: "Thank you for your pull request! We could not find a changelog entry for this change. For details on how to document a change, see [the contributing guide](CONTRIBUTING.md)."
|
||||
})
|
||||
core.setFailed('Changelog entry required to merge.')
|
||||
222  .github/workflows/integration.yml  vendored
@@ -1,222 +0,0 @@
|
||||
# **what?**
|
||||
# This workflow runs all integration tests for supported OS
|
||||
# and python versions and core adapters. If triggered by PR,
|
||||
# the workflow will only run tests for adapters related
|
||||
# to code changes. Use the `test all` and `test ${adapter}`
|
||||
# label to run all or additional tests. Use `ok to test`
|
||||
# label to mark PRs from forked repositories that are safe
|
||||
# to run integration tests for. Requires secrets to run
|
||||
# against different warehouses.
|
||||
|
||||
# **why?**
|
||||
# This checks the functionality of dbt from a user's perspective
|
||||
# and attempts to catch functional regressions.
|
||||
|
||||
# **when?**
|
||||
# This workflow will run on every push to a protected branch
|
||||
# and when manually triggered. It will also run for all PRs, including
|
||||
# PRs from forks. The workflow will be skipped until there is a label
|
||||
# to mark the PR as safe to run.
|
||||
|
||||
name: Adapter Integration Tests
|
||||
|
||||
on:
|
||||
# pushes to release branches
|
||||
push:
|
||||
branches:
|
||||
- "main"
|
||||
- "develop"
|
||||
- "*.latest"
|
||||
- "releases/*"
|
||||
# all PRs, important to note that `pull_request_target` workflows
|
||||
# will run in the context of the target branch of a PR
|
||||
pull_request_target:
|
||||
# manual trigger
|
||||
workflow_dispatch:
|
||||
|
||||
# explicitly turn off permissions for `GITHUB_TOKEN`
|
||||
permissions: read-all
|
||||
|
||||
# will cancel previous workflows triggered by the same event and for the same ref for PRs or same SHA otherwise
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event_name }}-${{ contains(github.event_name, 'pull_request') && github.event.pull_request.head.ref || github.sha }}
|
||||
cancel-in-progress: true
|
||||
|
||||
# sets default shell to bash, for all operating systems
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
|
||||
jobs:
|
||||
# generate test metadata about what files changed and the testing matrix to use
|
||||
test-metadata:
|
||||
# run if not a PR from a forked repository or has a label to mark as safe to test
|
||||
if: >-
|
||||
github.event_name != 'pull_request_target' ||
|
||||
github.event.pull_request.head.repo.full_name == github.repository ||
|
||||
contains(github.event.pull_request.labels.*.name, 'ok to test')
|
||||
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
outputs:
|
||||
matrix: ${{ steps.generate-matrix.outputs.result }}
|
||||
|
||||
steps:
|
||||
- name: Check out the repository (non-PR)
|
||||
if: github.event_name != 'pull_request_target'
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
- name: Check out the repository (PR)
|
||||
if: github.event_name == 'pull_request_target'
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
persist-credentials: false
|
||||
ref: ${{ github.event.pull_request.head.sha }}
|
||||
|
||||
- name: Check if relevant files changed
|
||||
# https://github.com/marketplace/actions/paths-changes-filter
|
||||
# For each filter, it sets output variable named by the filter to the text:
|
||||
# 'true' - if any of changed files matches any of filter rules
|
||||
# 'false' - if none of changed files matches any of filter rules
|
||||
# also, returns:
|
||||
# `changes` - JSON array with names of all filters matching any of the changed files
|
||||
uses: dorny/paths-filter@v2
|
||||
id: get-changes
|
||||
with:
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
filters: |
|
||||
postgres:
|
||||
- 'core/**'
|
||||
- 'plugins/postgres/**'
|
||||
- 'dev-requirements.txt'
|
||||
|
||||
- name: Generate integration test matrix
|
||||
id: generate-matrix
|
||||
uses: actions/github-script@v4
|
||||
env:
|
||||
CHANGES: ${{ steps.get-changes.outputs.changes }}
|
||||
with:
|
||||
script: |
|
||||
const script = require('./.github/scripts/integration-test-matrix.js')
|
||||
const matrix = script({ context })
|
||||
console.log(matrix)
|
||||
return matrix
|
||||
|
||||
test:
|
||||
name: ${{ matrix.adapter }} / python ${{ matrix.python-version }} / ${{ matrix.os }}
|
||||
|
||||
# run if not a PR from a forked repository or has a label to mark as safe to test
|
||||
# also checks that the matrix generated is not empty
|
||||
if: >-
|
||||
needs.test-metadata.outputs.matrix &&
|
||||
fromJSON( needs.test-metadata.outputs.matrix ).include[0] &&
|
||||
(
|
||||
github.event_name != 'pull_request_target' ||
|
||||
github.event.pull_request.head.repo.full_name == github.repository ||
|
||||
contains(github.event.pull_request.labels.*.name, 'ok to test')
|
||||
)
|
||||
|
||||
runs-on: ${{ matrix.os }}
|
||||
|
||||
needs: test-metadata
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix: ${{ fromJSON(needs.test-metadata.outputs.matrix) }}
|
||||
|
||||
env:
|
||||
TOXENV: integration-${{ matrix.adapter }}
|
||||
PYTEST_ADDOPTS: "-v --color=yes -n4 --csv integration_results.csv"
|
||||
DBT_INVOCATION_ENV: github-actions
|
||||
|
||||
steps:
|
||||
- name: Check out the repository
|
||||
if: github.event_name != 'pull_request_target'
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
# explicitly check out the branch for the PR,
|
||||
# this is necessary for the `pull_request_target` event
|
||||
- name: Check out the repository (PR)
|
||||
if: github.event_name == 'pull_request_target'
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
persist-credentials: false
|
||||
ref: ${{ github.event.pull_request.head.sha }}
|
||||
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
|
||||
- name: Set up postgres (linux)
|
||||
if: |
|
||||
matrix.adapter == 'postgres' &&
|
||||
runner.os == 'Linux'
|
||||
uses: ./.github/actions/setup-postgres-linux
|
||||
|
||||
- name: Set up postgres (macos)
|
||||
if: |
|
||||
matrix.adapter == 'postgres' &&
|
||||
runner.os == 'macOS'
|
||||
uses: ./.github/actions/setup-postgres-macos
|
||||
|
||||
- name: Set up postgres (windows)
|
||||
if: |
|
||||
matrix.adapter == 'postgres' &&
|
||||
runner.os == 'Windows'
|
||||
uses: ./.github/actions/setup-postgres-windows
|
||||
|
||||
- name: Install python dependencies
|
||||
run: |
|
||||
pip install --user --upgrade pip
|
||||
pip install tox
|
||||
pip --version
|
||||
tox --version
|
||||
|
||||
- name: Run tox (postgres)
|
||||
if: matrix.adapter == 'postgres'
|
||||
run: tox
|
||||
|
||||
- uses: actions/upload-artifact@v2
|
||||
if: always()
|
||||
with:
|
||||
name: logs
|
||||
path: ./logs
|
||||
|
||||
- name: Get current date
|
||||
if: always()
|
||||
id: date
|
||||
run: echo "::set-output name=date::$(date +'%Y-%m-%dT%H_%M_%S')" #no colons allowed for artifacts
|
||||
|
||||
- uses: actions/upload-artifact@v2
|
||||
if: always()
|
||||
with:
|
||||
name: integration_results_${{ matrix.python-version }}_${{ matrix.os }}_${{ matrix.adapter }}-${{ steps.date.outputs.date }}.csv
|
||||
path: integration_results.csv
|
||||
|
||||
require-label-comment:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
needs: test
|
||||
|
||||
permissions:
|
||||
pull-requests: write
|
||||
|
||||
steps:
|
||||
- name: Needs permission PR comment
|
||||
if: >-
|
||||
needs.test.result == 'skipped' &&
|
||||
github.event_name == 'pull_request_target' &&
|
||||
github.event.pull_request.head.repo.full_name != github.repository
|
||||
uses: unsplash/comment-on-pr@master
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
msg: |
|
||||
"You do not have permissions to run integration tests, @dbt-labs/core "\
|
||||
"needs to label this PR with `ok to test` in order to run integration tests!"
|
||||
check_for_duplicate_msg: true
|
||||
26  .github/workflows/jira-creation.yml  vendored  Normal file
@@ -0,0 +1,26 @@
|
||||
# **what?**
|
||||
# Mirrors issues into Jira. Includes the information: title,
|
||||
# GitHub Issue ID and URL
|
||||
|
||||
# **why?**
|
||||
# Jira is our tool for tracking and we need to see these issues in there
|
||||
|
||||
# **when?**
|
||||
# On issue creation or when an issue is labeled `Jira`
|
||||
|
||||
name: Jira Issue Creation
|
||||
|
||||
on:
|
||||
issues:
|
||||
types: [opened, labeled]
|
||||
|
||||
permissions:
|
||||
issues: write
|
||||
|
||||
jobs:
|
||||
call-label-action:
|
||||
uses: dbt-labs/jira-actions/.github/workflows/jira-creation.yml@main
|
||||
secrets:
|
||||
JIRA_BASE_URL: ${{ secrets.JIRA_BASE_URL }}
|
||||
JIRA_USER_EMAIL: ${{ secrets.JIRA_USER_EMAIL }}
|
||||
JIRA_API_TOKEN: ${{ secrets.JIRA_API_TOKEN }}
|
||||
26  .github/workflows/jira-label.yml  vendored  Normal file
@@ -0,0 +1,26 @@
|
||||
# **what?**
|
||||
# Calls mirroring Jira label Action. Includes adding a new label
|
||||
# to an existing issue or removing a label as well
|
||||
|
||||
# **why?**
|
||||
# Jira is our tool for tracking and we need to see these labels in there
|
||||
|
||||
# **when?**
|
||||
# On labels being added or removed from issues
|
||||
|
||||
name: Jira Label Mirroring
|
||||
|
||||
on:
|
||||
issues:
|
||||
types: [labeled, unlabeled]
|
||||
|
||||
permissions:
|
||||
issues: read
|
||||
|
||||
jobs:
|
||||
call-label-action:
|
||||
uses: dbt-labs/jira-actions/.github/workflows/jira-label.yml@main
|
||||
secrets:
|
||||
JIRA_BASE_URL: ${{ secrets.JIRA_BASE_URL }}
|
||||
JIRA_USER_EMAIL: ${{ secrets.JIRA_USER_EMAIL }}
|
||||
JIRA_API_TOKEN: ${{ secrets.JIRA_API_TOKEN }}
|
||||
24  .github/workflows/jira-transition.yml  vendored  Normal file
@@ -0,0 +1,24 @@
|
||||
# **what?**
|
||||
# Transition a Jira issue to a new state
|
||||
# Only supports these GitHub Issue transitions:
|
||||
# closed, deleted, reopened
|
||||
|
||||
# **why?**
|
||||
# Jira needs to be kept up-to-date
|
||||
|
||||
# **when?**
|
||||
# On issue closing, deletion, reopened
|
||||
|
||||
name: Jira Issue Transition
|
||||
|
||||
on:
|
||||
issues:
|
||||
types: [closed, deleted, reopened]
|
||||
|
||||
jobs:
|
||||
call-label-action:
|
||||
uses: dbt-labs/jira-actions/.github/workflows/jira-transition.yml@main
|
||||
secrets:
|
||||
JIRA_BASE_URL: ${{ secrets.JIRA_BASE_URL }}
|
||||
JIRA_USER_EMAIL: ${{ secrets.JIRA_USER_EMAIL }}
|
||||
JIRA_API_TOKEN: ${{ secrets.JIRA_API_TOKEN }}
|
||||
148  .github/workflows/main.yml  vendored
@@ -1,9 +1,8 @@
|
||||
# **what?**
|
||||
# Runs code quality checks, unit tests, and verifies python build on
|
||||
# all code committed to the repository. This workflow should not
|
||||
# require any secrets since it runs for PRs from forked repos.
|
||||
# By default, secrets are not passed to workflows running from
|
||||
# a forked repo.
|
||||
# Runs code quality checks, unit tests, integration tests and
|
||||
# verifies python build on all code committed to the repository. This workflow
|
||||
# should not require any secrets since it runs for PRs from forked repos. By
|
||||
# default, secrets are not passed to workflows running from a forked repo.
|
||||
|
||||
# **why?**
|
||||
# Ensure code for dbt meets a certain quality standard.
|
||||
@@ -18,7 +17,6 @@ on:
|
||||
push:
|
||||
branches:
|
||||
- "main"
|
||||
- "develop"
|
||||
- "*.latest"
|
||||
- "releases/*"
|
||||
pull_request:
|
||||
@@ -37,24 +35,13 @@ defaults:
|
||||
|
||||
jobs:
|
||||
code-quality:
|
||||
name: ${{ matrix.toxenv }}
|
||||
name: code-quality
|
||||
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
toxenv: [flake8, mypy]
|
||||
|
||||
env:
|
||||
TOXENV: ${{ matrix.toxenv }}
|
||||
PYTEST_ADDOPTS: "-v --color=yes"
|
||||
|
||||
steps:
|
||||
- name: Check out the repository
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v2
|
||||
@@ -62,12 +49,16 @@ jobs:
|
||||
- name: Install python dependencies
|
||||
run: |
|
||||
pip install --user --upgrade pip
|
||||
pip install tox
|
||||
pip --version
|
||||
tox --version
|
||||
pip install pre-commit
|
||||
pre-commit --version
|
||||
pip install mypy==0.782
|
||||
mypy --version
|
||||
pip install -r editable-requirements.txt
|
||||
dbt --version
|
||||
|
||||
- name: Run tox
|
||||
run: tox
|
||||
- name: Run pre-commit hooks
|
||||
run: pre-commit run --all-files --show-diff-on-failure
|
||||
|
||||
unit:
|
||||
name: unit test / python ${{ matrix.python-version }}
|
||||
@@ -86,8 +77,6 @@ jobs:
|
||||
steps:
|
||||
- name: Check out the repository
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
uses: actions/setup-python@v2
|
||||
@@ -97,8 +86,8 @@ jobs:
|
||||
- name: Install python dependencies
|
||||
run: |
|
||||
pip install --user --upgrade pip
|
||||
pip install tox
|
||||
pip --version
|
||||
pip install tox
|
||||
tox --version
|
||||
|
||||
- name: Run tox
|
||||
@@ -115,6 +104,75 @@ jobs:
|
||||
name: unit_results_${{ matrix.python-version }}-${{ steps.date.outputs.date }}.csv
|
||||
path: unit_results.csv
|
||||
|
||||
integration:
|
||||
name: integration test / python ${{ matrix.python-version }} / ${{ matrix.os }}
|
||||
|
||||
runs-on: ${{ matrix.os }}
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
python-version: [3.7, 3.8, 3.9]
|
||||
os: [ubuntu-latest]
|
||||
include:
|
||||
- python-version: 3.8
|
||||
os: windows-latest
|
||||
- python-version: 3.8
|
||||
os: macos-latest
|
||||
|
||||
env:
|
||||
TOXENV: integration
|
||||
PYTEST_ADDOPTS: "-v --color=yes -n4 --csv integration_results.csv"
|
||||
DBT_INVOCATION_ENV: github-actions
|
||||
|
||||
steps:
|
||||
- name: Check out the repository
|
||||
uses: actions/checkout@v2
|
||||
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
|
||||
- name: Set up postgres (linux)
|
||||
if: runner.os == 'Linux'
|
||||
uses: ./.github/actions/setup-postgres-linux
|
||||
|
||||
- name: Set up postgres (macos)
|
||||
if: runner.os == 'macOS'
|
||||
uses: ./.github/actions/setup-postgres-macos
|
||||
|
||||
- name: Set up postgres (windows)
|
||||
if: runner.os == 'Windows'
|
||||
uses: ./.github/actions/setup-postgres-windows
|
||||
|
||||
- name: Install python tools
|
||||
run: |
|
||||
pip install --user --upgrade pip
|
||||
pip --version
|
||||
pip install tox
|
||||
tox --version
|
||||
|
||||
- name: Run tests
|
||||
run: tox
|
||||
|
||||
- name: Get current date
|
||||
if: always()
|
||||
id: date
|
||||
run: echo "::set-output name=date::$(date +'%Y_%m_%dT%H_%M_%S')" #no colons allowed for artifacts
|
||||
|
||||
- uses: actions/upload-artifact@v2
|
||||
if: always()
|
||||
with:
|
||||
name: logs_${{ matrix.python-version }}_${{ matrix.os }}_${{ steps.date.outputs.date }}
|
||||
path: ./logs
|
||||
|
||||
- uses: actions/upload-artifact@v2
|
||||
if: always()
|
||||
with:
|
||||
name: integration_results_${{ matrix.python-version }}_${{ matrix.os }}_${{ steps.date.outputs.date }}.csv
|
||||
path: integration_results.csv
|
||||
|
||||
build:
|
||||
name: build packages
|
||||
|
||||
@@ -123,8 +181,6 @@ jobs:
|
||||
steps:
|
||||
- name: Check out the repository
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v2
|
||||
@@ -151,44 +207,6 @@ jobs:
|
||||
run: |
|
||||
check-wheel-contents dist/*.whl --ignore W007,W008
|
||||
|
||||
- uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: dist
|
||||
path: dist/
|
||||
|
||||
test-build:
|
||||
name: verify packages / python ${{ matrix.python-version }} / ${{ matrix.os }}
|
||||
|
||||
needs: build
|
||||
|
||||
runs-on: ${{ matrix.os }}
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
os: [ubuntu-latest, macos-latest, windows-latest]
|
||||
python-version: [3.7, 3.8, 3.9]
|
||||
|
||||
steps:
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
|
||||
- name: Install python dependencies
|
||||
run: |
|
||||
pip install --user --upgrade pip
|
||||
pip install --upgrade wheel
|
||||
pip --version
|
||||
|
||||
- uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: dist
|
||||
path: dist/
|
||||
|
||||
- name: Show distributions
|
||||
run: ls -lh dist/
|
||||
|
||||
- name: Install wheel distributions
|
||||
run: |
|
||||
find ./dist/*.whl -maxdepth 1 -type f | xargs pip install --force-reinstall --find-links=dist/
|
||||
|
||||
113  .github/workflows/release-docker.yml  vendored  Normal file
@@ -0,0 +1,113 @@
|
||||
# **what?**
|
||||
# This workflow will generate a series of docker images for dbt and push them to the github container registry
|
||||
|
||||
# **why?**
|
||||
# Docker images for dbt are used in a number of important places throughout the dbt ecosystem. This is how we keep those images up-to-date.
|
||||
|
||||
# **when?**
|
||||
# This is triggered manually
|
||||
|
||||
# **next steps**
|
||||
# - build this into the release workflow (or conversely, break out the different release methods into their own workflow files)
|
||||
|
||||
name: Docker release
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
package:
|
||||
description: The package to release. _One_ of [dbt-core, dbt-redshift, dbt-bigquery, dbt-snowflake, dbt-spark, dbt-postgres]
|
||||
required: true
|
||||
version_number:
|
||||
description: The release version number (i.e. 1.0.0b1). Do not include `latest` tags or a leading `v`!
|
||||
required: true
|
||||
|
||||
jobs:
|
||||
get_version_meta:
|
||||
name: Get version meta
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
major: ${{ steps.version.outputs.major }}
|
||||
minor: ${{ steps.version.outputs.minor }}
|
||||
patch: ${{ steps.version.outputs.patch }}
|
||||
latest: ${{ steps.latest.outputs.latest }}
|
||||
minor_latest: ${{ steps.latest.outputs.minor_latest }}
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- name: Split version
|
||||
id: version
|
||||
run: |
|
||||
IFS="." read -r MAJOR MINOR PATCH <<< ${{ github.event.inputs.version_number }}
|
||||
echo "::set-output name=major::$MAJOR"
|
||||
echo "::set-output name=minor::$MINOR"
|
||||
echo "::set-output name=patch::$PATCH"
|
||||
|
||||
- name: Is pkg 'latest'
|
||||
id: latest
|
||||
uses: ./.github/actions/latest-wrangler
|
||||
with:
|
||||
package: ${{ github.event.inputs.package }}
|
||||
new_version: ${{ github.event.inputs.version_number }}
|
||||
gh_token: ${{ secrets.GITHUB_TOKEN }}
|
||||
halt_on_missing: False
|
||||
|
||||
setup_image_builder:
|
||||
name: Set up docker image builder
|
||||
runs-on: ubuntu-latest
|
||||
needs: [get_version_meta]
|
||||
steps:
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v1
|
||||
|
||||
build_and_push:
|
||||
name: Build images and push to GHCR
|
||||
runs-on: ubuntu-latest
|
||||
needs: [setup_image_builder, get_version_meta]
|
||||
steps:
|
||||
- name: Get docker build arg
|
||||
id: build_arg
|
||||
run: |
|
||||
echo "::set-output name=build_arg_name::"$(echo ${{ github.event.inputs.package }} | sed 's/\-/_/g')
|
||||
echo "::set-output name=build_arg_value::"$(echo ${{ github.event.inputs.package }} | sed 's/postgres/core/g')
|
||||
|
||||
- name: Log in to the GHCR
|
||||
uses: docker/login-action@v1
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Build and push MAJOR.MINOR.PATCH tag
|
||||
uses: docker/build-push-action@v2
|
||||
with:
|
||||
file: docker/Dockerfile
|
||||
push: True
|
||||
target: ${{ github.event.inputs.package }}
|
||||
build-args: |
|
||||
${{ steps.build_arg.outputs.build_arg_name }}_ref=${{ steps.build_arg.outputs.build_arg_value }}@v${{ github.event.inputs.version_number }}
|
||||
tags: |
|
||||
ghcr.io/dbt-labs/${{ github.event.inputs.package }}:${{ github.event.inputs.version_number }}
|
||||
|
||||
- name: Build and push MINOR.latest tag
|
||||
uses: docker/build-push-action@v2
|
||||
if: ${{ needs.get_version_meta.outputs.minor_latest == 'True' }}
|
||||
with:
|
||||
file: docker/Dockerfile
|
||||
push: True
|
||||
target: ${{ github.event.inputs.package }}
|
||||
build-args: |
|
||||
${{ steps.build_arg.outputs.build_arg_name }}_ref=${{ steps.build_arg.outputs.build_arg_value }}@v${{ github.event.inputs.version_number }}
|
||||
tags: |
|
||||
ghcr.io/dbt-labs/${{ github.event.inputs.package }}:${{ needs.get_version_meta.outputs.major }}.${{ needs.get_version_meta.outputs.minor }}.latest
|
||||
|
||||
- name: Build and push latest tag
|
||||
uses: docker/build-push-action@v2
|
||||
if: ${{ needs.get_version_meta.outputs.latest == 'True' }}
|
||||
with:
|
||||
file: docker/Dockerfile
|
||||
push: True
|
||||
target: ${{ github.event.inputs.package }}
|
||||
build-args: |
|
||||
${{ steps.build_arg.outputs.build_arg_name }}_ref=${{ steps.build_arg.outputs.build_arg_value }}@v${{ github.event.inputs.version_number }}
|
||||
tags: |
|
||||
ghcr.io/dbt-labs/${{ github.event.inputs.package }}:latest
|
||||
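The `sed` one-liners in the "Get docker build arg" step above are easy to misread; a Python paraphrase of the same transformation, using `dbt-postgres` as an illustrative input:

```python
package = "dbt-postgres"  # example value of github.event.inputs.package

# sed 's/\-/_/g'          -> the build-arg *name* uses underscores
build_arg_name = package.replace("-", "_")             # "dbt_postgres"
# sed 's/postgres/core/g' -> the postgres image is built from the dbt-core ref
build_arg_value = package.replace("postgres", "core")  # "dbt-core"

print(f"{build_arg_name}_ref={build_arg_value}@v1.0.1")  # version number is illustrative
```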
17  .github/workflows/release.yml  vendored
@@ -1,5 +1,5 @@
|
||||
# **what?**
|
||||
# Take the given commit, run unit tests specifically on that sha, build and
|
||||
# Take the given commit, run unit tests specifically on that sha, build and
|
||||
# package it, and then release to GitHub and PyPi with that specific build
|
||||
|
||||
# **why?**
|
||||
@@ -95,7 +95,9 @@ jobs:
|
||||
- uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: dist
|
||||
path: dist/
|
||||
path: |
|
||||
dist/
|
||||
!dist/dbt-${{github.event.inputs.version_number}}.tar.gz
|
||||
|
||||
test-build:
|
||||
name: verify packages
|
||||
@@ -140,9 +142,8 @@ jobs:
|
||||
run: |
|
||||
dbt --version
|
||||
|
||||
|
||||
github-release:
|
||||
name: GitHub Release
|
||||
name: GitHub Release
|
||||
|
||||
needs: test-build
|
||||
|
||||
@@ -153,7 +154,7 @@ jobs:
|
||||
with:
|
||||
name: dist
|
||||
path: '.'
|
||||
|
||||
|
||||
# Need to set an output variable because env variables can't be taken as input
|
||||
# This is needed for the next step with releasing to GitHub
|
||||
- name: Find release type
|
||||
@@ -177,7 +178,7 @@ jobs:
|
||||
dbt_core-${{github.event.inputs.version_number}}-py3-none-any.whl
|
||||
dbt-postgres-${{github.event.inputs.version_number}}.tar.gz
|
||||
dbt-core-${{github.event.inputs.version_number}}.tar.gz
|
||||
|
||||
|
||||
pypi-release:
|
||||
name: Pypi release
|
||||
|
||||
@@ -186,12 +187,12 @@ jobs:
|
||||
needs: github-release
|
||||
|
||||
environment: PypiProd
|
||||
steps:
|
||||
steps:
|
||||
- uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: dist
|
||||
path: 'dist'
|
||||
|
||||
|
||||
- name: Publish distribution to PyPI
|
||||
uses: pypa/gh-action-pypi-publish@v1.4.2
|
||||
with:
|
||||
|
||||
14 .github/workflows/schema-check.yml vendored
@@ -1,5 +1,5 @@
|
||||
# **what?**
|
||||
# Compares the schema of the dbt version of the given ref vs
|
||||
# Compares the schema of the dbt version of the given ref vs
|
||||
# the latest official schema releases found in schemas.getdbt.com.
|
||||
# If there are differences, the workflow will fail and upload the
|
||||
# diff as an artifact. The metadata team should be alerted to the change.
|
||||
@@ -37,20 +37,20 @@ jobs:
|
||||
uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: 3.8
|
||||
|
||||
|
||||
- name: Checkout dbt repo
|
||||
uses: actions/checkout@v2.3.4
|
||||
with:
|
||||
path: ${{ env.DBT_REPO_DIRECTORY }}
|
||||
|
||||
|
||||
- name: Checkout schemas.getdbt.com repo
|
||||
uses: actions/checkout@v2.3.4
|
||||
with:
|
||||
uses: actions/checkout@v2.3.4
|
||||
with:
|
||||
repository: dbt-labs/schemas.getdbt.com
|
||||
ref: 'main'
|
||||
ssh-key: ${{ secrets.SCHEMA_SSH_PRIVATE_KEY }}
|
||||
path: ${{ env.SCHEMA_REPO_DIRECTORY }}
|
||||
|
||||
|
||||
- name: Generate current schema
|
||||
run: |
|
||||
cd ${{ env.DBT_REPO_DIRECTORY }}
|
||||
@@ -59,7 +59,7 @@ jobs:
|
||||
pip install --upgrade pip
|
||||
pip install -r dev-requirements.txt -r editable-requirements.txt
|
||||
python scripts/collect-artifact-schema.py --path ${{ env.LATEST_SCHEMA_PATH }}
|
||||
|
||||
|
||||
# Copy generated schema files into the schemas.getdbt.com repo
|
||||
# Do a git diff to find any changes
|
||||
# Ignore any date or version changes though
|
||||
|
||||
73 .github/workflows/structured-logging-schema-check.yml vendored Normal file
@@ -0,0 +1,73 @@
|
||||
# This Action makes a dbt run to sample json structured logs
|
||||
# and checks that they conform to the currently documented schema.
|
||||
#
|
||||
# If this action fails it either means we have unintentionally deviated
|
||||
# from our documented structured logging schema, or we need to bump the
|
||||
# version of our structured logging and add new documentation to
|
||||
# communicate these changes.
|
||||
|
||||
name: Structured Logging Schema Check
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- "main"
|
||||
- "*.latest"
|
||||
- "releases/*"
|
||||
pull_request:
|
||||
workflow_dispatch:
|
||||
|
||||
permissions: read-all
|
||||
|
||||
jobs:
|
||||
# run the performance measurements on the current or default branch
|
||||
test-schema:
|
||||
name: Test Log Schema
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
# turns warnings into errors
|
||||
RUSTFLAGS: "-D warnings"
|
||||
# points tests to the log file
|
||||
LOG_DIR: "/home/runner/work/dbt-core/dbt-core/logs"
|
||||
# tells integration tests to output into json format
|
||||
DBT_LOG_FORMAT: "json"
|
||||
steps:
|
||||
- name: checkout dev
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
- name: Setup Python
|
||||
uses: actions/setup-python@v2.2.2
|
||||
with:
|
||||
python-version: "3.8"
|
||||
|
||||
- uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
profile: minimal
|
||||
toolchain: stable
|
||||
override: true
|
||||
|
||||
- name: Install python dependencies
|
||||
run: |
|
||||
pip install --user --upgrade pip
|
||||
pip --version
|
||||
pip install tox
|
||||
tox --version
|
||||
|
||||
- name: Set up postgres
|
||||
uses: ./.github/actions/setup-postgres-linux
|
||||
|
||||
- name: ls
|
||||
run: ls
|
||||
|
||||
# integration tests generate a ton of logs in different files. the next step will find them all.
|
||||
# we actually care if these pass, because the normal test run doesn't usually include many json log outputs
|
||||
- name: Run integration tests
|
||||
run: tox -e integration -- -nauto
|
||||
|
||||
# apply our schema tests to every log event from the previous step
|
||||
# skips any output that isn't valid json
|
||||
- uses: actions-rs/cargo@v1
|
||||
with:
|
||||
command: run
|
||||
args: --manifest-path test/interop/log_parsing/Cargo.toml
|
||||
1 .github/workflows/test/.actrc vendored Normal file
@@ -0,0 +1 @@
-P ubuntu-latest=ghcr.io/catthehacker/ubuntu:act-latest
1 .github/workflows/test/.gitignore vendored Normal file
@@ -0,0 +1 @@
.secrets
1 .github/workflows/test/.secrets.EXAMPLE vendored Normal file
@@ -0,0 +1 @@
GITHUB_TOKEN=GH_PERSONAL_ACCESS_TOKEN_GOES_HERE
6 .github/workflows/test/inputs/release_docker.json vendored Normal file
@@ -0,0 +1,6 @@
{
  "inputs": {
    "version_number": "1.0.1",
    "package": "dbt-postgres"
  }
}
30 .github/workflows/version-bump.yml vendored
@@ -1,16 +1,16 @@
|
||||
# **what?**
|
||||
# This workflow will take a version number and a dry run flag. With that
|
||||
# it will run versionbump to update the version number everywhere in the
|
||||
# it will run versionbump to update the version number everywhere in the
|
||||
# code base and then generate an update Docker requirements file. If this
|
||||
# is a dry run, a draft PR will open with the changes. If this isn't a dry
|
||||
# run, the changes will be committed to the branch this is run on.
|
||||
|
||||
# **why?**
|
||||
# This is to aid in releasing dbt and making sure we have updated
|
||||
# This is to aid in releasing dbt and making sure we have updated
|
||||
# the versions and Docker requirements in all places.
|
||||
|
||||
# **when?**
|
||||
# This is triggered either manually OR
|
||||
# This is triggered either manually OR
|
||||
# from the repository_dispatch event "version-bump" which is sent from
|
||||
# the dbt-release repo Action
|
||||
|
||||
@@ -25,10 +25,10 @@ on:
|
||||
is_dry_run:
|
||||
description: 'Creates a draft PR to allow testing instead of committing to a branch'
|
||||
required: true
|
||||
default: 'true'
|
||||
default: 'true'
|
||||
repository_dispatch:
|
||||
types: [version-bump]
|
||||
|
||||
|
||||
jobs:
|
||||
bump:
|
||||
runs-on: ubuntu-latest
|
||||
@@ -57,26 +57,26 @@ jobs:
|
||||
run: |
|
||||
python3 -m venv env
|
||||
source env/bin/activate
|
||||
pip install --upgrade pip
|
||||
|
||||
pip install --upgrade pip
|
||||
|
||||
- name: Create PR branch
|
||||
if: ${{ steps.variables.outputs.IS_DRY_RUN == 'true' }}
|
||||
run: |
|
||||
git checkout -b bumping-version/${{steps.variables.outputs.VERSION_NUMBER}}_$GITHUB_RUN_ID
|
||||
git push origin bumping-version/${{steps.variables.outputs.VERSION_NUMBER}}_$GITHUB_RUN_ID
|
||||
git branch --set-upstream-to=origin/bumping-version/${{steps.variables.outputs.VERSION_NUMBER}}_$GITHUB_RUN_ID bumping-version/${{steps.variables.outputs.VERSION_NUMBER}}_$GITHUB_RUN_ID
|
||||
|
||||
- name: Generate Docker requirements
|
||||
run: |
|
||||
source env/bin/activate
|
||||
pip install -r requirements.txt
|
||||
pip freeze -l > docker/requirements/requirements.txt
|
||||
git status
|
||||
|
||||
# - name: Generate Docker requirements
|
||||
# run: |
|
||||
# source env/bin/activate
|
||||
# pip install -r requirements.txt
|
||||
# pip freeze -l > docker/requirements/requirements.txt
|
||||
# git status
|
||||
|
||||
- name: Bump version
|
||||
run: |
|
||||
source env/bin/activate
|
||||
pip install -r dev-requirements.txt
|
||||
pip install -r dev-requirements.txt
|
||||
env/bin/bumpversion --allow-dirty --new-version ${{steps.variables.outputs.VERSION_NUMBER}} major
|
||||
git status
|
||||
|
||||
|
||||
9 .gitignore vendored
@@ -49,9 +49,8 @@ coverage.xml
|
||||
*,cover
|
||||
.hypothesis/
|
||||
test.env
|
||||
*.pytest_cache/
|
||||
|
||||
# Mypy
|
||||
.mypy_cache/
|
||||
|
||||
# Translations
|
||||
*.mo
|
||||
@@ -66,10 +65,10 @@ docs/_build/
|
||||
# PyBuilder
|
||||
target/
|
||||
|
||||
#Ipython Notebook
|
||||
# Ipython Notebook
|
||||
.ipynb_checkpoints
|
||||
|
||||
#Emacs
|
||||
# Emacs
|
||||
*~
|
||||
|
||||
# Sublime Text
|
||||
@@ -78,6 +77,7 @@ target/
|
||||
# Vim
|
||||
*.sw*
|
||||
|
||||
# Pyenv
|
||||
.python-version
|
||||
|
||||
# Vim
|
||||
@@ -90,6 +90,7 @@ venv/
|
||||
# AWS credentials
|
||||
.aws/
|
||||
|
||||
# MacOS
|
||||
.DS_Store
|
||||
|
||||
# vscode
|
||||
|
||||
68 .pre-commit-config.yaml Normal file
@@ -0,0 +1,68 @@
|
||||
# Configuration for pre-commit hooks (see https://pre-commit.com/).
|
||||
# Eventually the hooks described here will be run as tests before merging each PR.
|
||||
|
||||
# TODO: remove global exclusion of tests when testing overhaul is complete
|
||||
exclude: ^test/
|
||||
|
||||
# Force all unspecified python hooks to run python 3.8
|
||||
default_language_version:
|
||||
python: python3.8
|
||||
|
||||
repos:
|
||||
- repo: https://github.com/pre-commit/pre-commit-hooks
|
||||
rev: v3.2.0
|
||||
hooks:
|
||||
- id: check-yaml
|
||||
args: [--unsafe]
|
||||
- id: check-json
|
||||
- id: end-of-file-fixer
|
||||
- id: trailing-whitespace
|
||||
exclude_types:
|
||||
- "markdown"
|
||||
- id: check-case-conflict
|
||||
- repo: https://github.com/psf/black
|
||||
rev: 21.12b0
|
||||
hooks:
|
||||
- id: black
|
||||
args:
|
||||
- "--line-length=99"
|
||||
- "--target-version=py38"
|
||||
- id: black
|
||||
alias: black-check
|
||||
stages: [manual]
|
||||
args:
|
||||
- "--line-length=99"
|
||||
- "--target-version=py38"
|
||||
- "--check"
|
||||
- "--diff"
|
||||
- repo: https://gitlab.com/pycqa/flake8
|
||||
rev: 4.0.1
|
||||
hooks:
|
||||
- id: flake8
|
||||
- id: flake8
|
||||
alias: flake8-check
|
||||
stages: [manual]
|
||||
- repo: https://github.com/pre-commit/mirrors-mypy
|
||||
rev: v0.782
|
||||
hooks:
|
||||
- id: mypy
|
||||
# N.B.: Mypy is... a bit fragile.
|
||||
#
|
||||
# By using `language: system` we run this hook in the local
|
||||
# environment instead of a pre-commit isolated one. This is needed
|
||||
# to ensure mypy correctly parses the project.
|
||||
|
||||
# It may cause trouble
|
||||
# in that it adds environmental variables out of our control to the
|
||||
# mix. Unfortunately, there's nothing we can do about it per pre-commit's
|
||||
# author.
|
||||
# See https://github.com/pre-commit/pre-commit/issues/730 for details.
|
||||
args: [--show-error-codes]
|
||||
files: ^core/dbt/
|
||||
language: system
|
||||
- id: mypy
|
||||
alias: mypy-check
|
||||
stages: [manual]
|
||||
args: [--show-error-codes, --pretty]
|
||||
files: ^core/dbt/
|
||||
language: system
|
||||
@@ -2,18 +2,25 @@ The core function of dbt is SQL compilation and execution. Users create projects
|
||||
|
||||
## dbt-core
|
||||
|
||||
Most of the python code in the repository is within the `core/dbt` directory. Currently the main subdirectories are:
|
||||
Most of the python code in the repository is within the `core/dbt` directory.
|
||||
- [`single python files`](core/dbt/README.md): A number of individual files, such as 'compilation.py' and 'exceptions.py'
|
||||
|
||||
- [`adapters`](core/dbt/adapters): Define base classes for behavior that is likely to differ across databases
|
||||
- [`clients`](core/dbt/clients): Interface with dependencies (agate, jinja) or across operating systems
|
||||
- [`config`](core/dbt/config): Reconcile user-supplied configuration from connection profiles, project files, and Jinja macros
|
||||
- [`context`](core/dbt/context): Build and expose dbt-specific Jinja functionality
|
||||
- [`contracts`](core/dbt/contracts): Define Python objects (dataclasses) that dbt expects to create and validate
|
||||
- [`deps`](core/dbt/deps): Package installation and dependency resolution
|
||||
- [`graph`](core/dbt/graph): Produce a `networkx` DAG of project resources, and selecting those resources given user-supplied criteria
|
||||
- [`include`](core/dbt/include): The dbt "global project," which defines default implementations of Jinja2 macros
|
||||
- [`parser`](core/dbt/parser): Read project files, validate, construct python objects
|
||||
- [`task`](core/dbt/task): Set forth the actions that dbt can perform when invoked
|
||||
The main subdirectories of core/dbt:
|
||||
- [`adapters`](core/dbt/adapters/README.md): Define base classes for behavior that is likely to differ across databases
|
||||
- [`clients`](core/dbt/clients/README.md): Interface with dependencies (agate, jinja) or across operating systems
|
||||
- [`config`](core/dbt/config/README.md): Reconcile user-supplied configuration from connection profiles, project files, and Jinja macros
|
||||
- [`context`](core/dbt/context/README.md): Build and expose dbt-specific Jinja functionality
|
||||
- [`contracts`](core/dbt/contracts/README.md): Define Python objects (dataclasses) that dbt expects to create and validate
|
||||
- [`deps`](core/dbt/deps/README.md): Package installation and dependency resolution
|
||||
- [`events`](core/dbt/events/README.md): Logging events
|
||||
- [`graph`](core/dbt/graph/README.md): Produce a `networkx` DAG of project resources, and select those resources given user-supplied criteria
|
||||
- [`include`](core/dbt/include/README.md): The dbt "global project," which defines default implementations of Jinja2 macros
|
||||
- [`parser`](core/dbt/parser/README.md): Read project files, validate, construct python objects
|
||||
- [`task`](core/dbt/task/README.md): Set forth the actions that dbt can perform when invoked
|
||||
|
||||
Legacy tests are found in the 'test' directory:
|
||||
- [`unit tests`](core/dbt/test/unit/README.md): Unit tests
|
||||
- [`integration tests`](core/dbt/test/integration/README.md): Integration tests
|
||||
|
||||
### Invoking dbt
|
||||
|
||||
@@ -44,4 +51,4 @@ The [`test/`](test/) subdirectory includes unit and integration tests that run a
|
||||
|
||||
- [docker](docker/): All dbt versions are published as Docker images on DockerHub. This subfolder contains the `Dockerfile` (constant) and `requirements.txt` (one for each version).
|
||||
- [etc](etc/): Images for README
|
||||
- [scripts](scripts/): Helper scripts for testing, releasing, and producing JSON schemas. These are not included in distributions of dbt, not are they rigorously tested—they're just handy tools for the dbt maintainers :)
|
||||
- [scripts](scripts/): Helper scripts for testing, releasing, and producing JSON schemas. These are not included in distributions of dbt, nor are they rigorously tested—they're just handy tools for the dbt maintainers :)
|
||||
|
||||
3404 CHANGELOG.md Normal file → Executable file
File diff suppressed because it is too large
120 CONTRIBUTING.md
@@ -10,7 +10,7 @@
|
||||
|
||||
## About this document
|
||||
|
||||
This document is a guide intended for folks interested in contributing to `dbt`. Below, we document the process by which members of the community should create issues and submit pull requests (PRs) in this repository. It is not intended as a guide for using `dbt`, and it assumes a certain level of familiarity with Python concepts such as virtualenvs, `pip`, python modules, filesystems, and so on. This guide assumes you are using macOS or Linux and are comfortable with the command line.
|
||||
This document is a guide intended for folks interested in contributing to `dbt-core`. Below, we document the process by which members of the community should create issues and submit pull requests (PRs) in this repository. It is not intended as a guide for using `dbt-core`, and it assumes a certain level of familiarity with Python concepts such as virtualenvs, `pip`, python modules, filesystems, and so on. This guide assumes you are using macOS or Linux and are comfortable with the command line.
|
||||
|
||||
If you're new to python development or contributing to open-source software, we encourage you to read this document from start to finish. If you get stuck, drop us a line in the `#dbt-core-development` channel on [slack](https://community.getdbt.com).
|
||||
|
||||
@@ -20,101 +20,103 @@ If you have an issue or code change suggestion related to a specific database [a
|
||||
|
||||
### Signing the CLA
|
||||
|
||||
Please note that all contributors to `dbt` must sign the [Contributor License Agreement](https://docs.getdbt.com/docs/contributor-license-agreements) to have their Pull Request merged into the `dbt` codebase. If you are unable to sign the CLA, then the `dbt` maintainers will unfortunately be unable to merge your Pull Request. You are, however, welcome to open issues and comment on existing ones.
|
||||
Please note that all contributors to `dbt-core` must sign the [Contributor License Agreement](https://docs.getdbt.com/docs/contributor-license-agreements) to have their Pull Request merged into the `dbt-core` codebase. If you are unable to sign the CLA, then the `dbt-core` maintainers will unfortunately be unable to merge your Pull Request. You are, however, welcome to open issues and comment on existing ones.
|
||||
|
||||
## Proposing a change
|
||||
|
||||
`dbt` is Apache 2.0-licensed open source software. `dbt` is what it is today because community members like you have opened issues, provided feedback, and contributed to the knowledge loop for the entire communtiy. Whether you are a seasoned open source contributor or a first-time committer, we welcome and encourage you to contribute code, documentation, ideas, or problem statements to this project.
|
||||
`dbt-core` is Apache 2.0-licensed open source software. `dbt-core` is what it is today because community members like you have opened issues, provided feedback, and contributed to the knowledge loop for the entire community. Whether you are a seasoned open source contributor or a first-time committer, we welcome and encourage you to contribute code, documentation, ideas, or problem statements to this project.
|
||||
|
||||
### Defining the problem
|
||||
|
||||
If you have an idea for a new feature or if you've discovered a bug in `dbt`, the first step is to open an issue. Please check the list of [open issues](https://github.com/dbt-labs/dbt-core/issues) before creating a new one. If you find a relevant issue, please add a comment to the open issue instead of creating a new one. There are hundreds of open issues in this repository and it can be hard to know where to look for a relevant open issue. **The `dbt` maintainers are always happy to point contributors in the right direction**, so please err on the side of documenting your idea in a new issue if you are unsure where a problem statement belongs.
|
||||
If you have an idea for a new feature or if you've discovered a bug in `dbt-core`, the first step is to open an issue. Please check the list of [open issues](https://github.com/dbt-labs/dbt-core/issues) before creating a new one. If you find a relevant issue, please add a comment to the open issue instead of creating a new one. There are hundreds of open issues in this repository and it can be hard to know where to look for a relevant open issue. **The `dbt-core` maintainers are always happy to point contributors in the right direction**, so please err on the side of documenting your idea in a new issue if you are unsure where a problem statement belongs.
|
||||
|
||||
> **Note:** All community-contributed Pull Requests _must_ be associated with an open issue. If you submit a Pull Request that does not pertain to an open issue, you will be asked to create an issue describing the problem before the Pull Request can be reviewed.
|
||||
|
||||
### Discussing the idea
|
||||
|
||||
After you open an issue, a `dbt` maintainer will follow up by commenting on your issue (usually within 1-3 days) to explore your idea further and advise on how to implement the suggested changes. In many cases, community members will chime in with their own thoughts on the problem statement. If you as the issue creator are interested in submitting a Pull Request to address the issue, you should indicate this in the body of the issue. The `dbt` maintainers are _always_ happy to help contributors with the implementation of fixes and features, so please also indicate if there's anything you're unsure about or could use guidance around in the issue.
|
||||
After you open an issue, a `dbt-core` maintainer will follow up by commenting on your issue (usually within 1-3 days) to explore your idea further and advise on how to implement the suggested changes. In many cases, community members will chime in with their own thoughts on the problem statement. If you as the issue creator are interested in submitting a Pull Request to address the issue, you should indicate this in the body of the issue. The `dbt-core` maintainers are _always_ happy to help contributors with the implementation of fixes and features, so please also indicate if there's anything you're unsure about or could use guidance around in the issue.
|
||||
|
||||
### Submitting a change
|
||||
|
||||
If an issue is appropriately well scoped and describes a beneficial change to the `dbt` codebase, then anyone may submit a Pull Request to implement the functionality described in the issue. See the sections below on how to do this.
|
||||
If an issue is appropriately well scoped and describes a beneficial change to the `dbt-core` codebase, then anyone may submit a Pull Request to implement the functionality described in the issue. See the sections below on how to do this.
|
||||
|
||||
The `dbt` maintainers will add a `good first issue` label if an issue is suitable for a first-time contributor. This label often means that the required code change is small, limited to one database adapter, or a net-new addition that does not impact existing functionality. You can see the list of currently open issues on the [Contribute](https://github.com/dbt-labs/dbt-core/contribute) page.
|
||||
The `dbt-core` maintainers will add a `good first issue` label if an issue is suitable for a first-time contributor. This label often means that the required code change is small, limited to one database adapter, or a net-new addition that does not impact existing functionality. You can see the list of currently open issues on the [Contribute](https://github.com/dbt-labs/dbt-core/contribute) page.
|
||||
|
||||
Here's a good workflow:
|
||||
- Comment on the open issue, expressing your interest in contributing the required code change
|
||||
- Outline your planned implementation. If you want help getting started, ask!
|
||||
- Follow the steps outlined below to develop locally. Once you have opened a PR, one of the `dbt` maintainers will work with you to review your code.
|
||||
- Add a test! Tests are crucial for both fixes and new features alike. We want to make sure that code works as intended, and that it avoids any bugs previously encountered. Currently, the best resource for understanding `dbt`'s [unit](test/unit) and [integration](test/integration) tests is the tests themselves. One of the maintainers can help by pointing out relevant examples.
|
||||
- Follow the steps outlined below to develop locally. Once you have opened a PR, one of the `dbt-core` maintainers will work with you to review your code.
|
||||
- Add a test! Tests are crucial for both fixes and new features alike. We want to make sure that code works as intended, and that it avoids any bugs previously encountered. Currently, the best resource for understanding `dbt-core`'s [unit](test/unit) and [integration](test/integration) tests is the tests themselves. One of the maintainers can help by pointing out relevant examples.
|
||||
- Check your formatting and linting with [Flake8](https://flake8.pycqa.org/en/latest/#), [Black](https://github.com/psf/black), and the rest of the hooks we have in our [pre-commit](https://pre-commit.com/) [config](https://github.com/dbt-labs/dbt-core/blob/75201be9db1cb2c6c01fa7e71a314f5e5beb060a/.pre-commit-config.yaml).
|
||||
|
||||
In some cases, the right resolution to an open issue might be tangential to the `dbt` codebase. The right path forward might be a documentation update or a change that can be made in user-space. In other cases, the issue might describe functionality that the `dbt` maintainers are unwilling or unable to incorporate into the `dbt` codebase. When it is determined that an open issue describes functionality that will not translate to a code change in the `dbt` repository, the issue will be tagged with the `wontfix` label (see below) and closed.
|
||||
In some cases, the right resolution to an open issue might be tangential to the `dbt-core` codebase. The right path forward might be a documentation update or a change that can be made in user-space. In other cases, the issue might describe functionality that the `dbt-core` maintainers are unwilling or unable to incorporate into the `dbt-core` codebase. When it is determined that an open issue describes functionality that will not translate to a code change in the `dbt-core` repository, the issue will be tagged with the `wontfix` label (see below) and closed.
|
||||
|
||||
### Using issue labels
|
||||
|
||||
The `dbt` maintainers use labels to categorize open issues. Some labels indicate the databases impacted by the issue, while others describe the domain in the `dbt` codebase germane to the discussion. While most of these labels are self-explanatory (eg. `snowflake` or `bigquery`), there are others that are worth describing.
|
||||
The `dbt-core` maintainers use labels to categorize open issues. Most labels describe the domain in the `dbt-core` codebase germane to the discussion.
|
||||
|
||||
| tag | description |
|
||||
| --- | ----------- |
|
||||
| [triage](https://github.com/dbt-labs/dbt-core/labels/triage) | This is a new issue which has not yet been reviewed by a `dbt` maintainer. This label is removed when a maintainer reviews and responds to the issue. |
|
||||
| [bug](https://github.com/dbt-labs/dbt-core/labels/bug) | This issue represents a defect or regression in `dbt` |
|
||||
| [enhancement](https://github.com/dbt-labs/dbt-core/labels/enhancement) | This issue represents net-new functionality in `dbt` |
|
||||
| [good first issue](https://github.com/dbt-labs/dbt-core/labels/good%20first%20issue) | This issue does not require deep knowledge of the `dbt` codebase to implement. This issue is appropriate for a first-time contributor. |
|
||||
| [triage](https://github.com/dbt-labs/dbt-core/labels/triage) | This is a new issue which has not yet been reviewed by a `dbt-core` maintainer. This label is removed when a maintainer reviews and responds to the issue. |
|
||||
| [bug](https://github.com/dbt-labs/dbt-core/labels/bug) | This issue represents a defect or regression in `dbt-core` |
|
||||
| [enhancement](https://github.com/dbt-labs/dbt-core/labels/enhancement) | This issue represents net-new functionality in `dbt-core` |
|
||||
| [good first issue](https://github.com/dbt-labs/dbt-core/labels/good%20first%20issue) | This issue does not require deep knowledge of the `dbt-core` codebase to implement. This issue is appropriate for a first-time contributor. |
|
||||
| [help wanted](https://github.com/dbt-labs/dbt-core/labels/help%20wanted) / [discussion](https://github.com/dbt-labs/dbt-core/labels/discussion) | Conversation around this issue is ongoing, and there isn't yet a clear path forward. Input from community members is most welcome. |
|
||||
| [duplicate](https://github.com/dbt-labs/dbt-core/issues/duplicate) | This issue is functionally identical to another open issue. The `dbt` maintainers will close this issue and encourage community members to focus conversation on the other one. |
|
||||
| [snoozed](https://github.com/dbt-labs/dbt-core/labels/snoozed) | This issue describes a good idea, but one which will probably not be addressed in a six-month time horizon. The `dbt` maintainers will revist these issues periodically and re-prioritize them accordingly. |
|
||||
| [stale](https://github.com/dbt-labs/dbt-core/labels/stale) | This is an old issue which has not recently been updated. Stale issues will periodically be closed by `dbt` maintainers, but they can be re-opened if the discussion is restarted. |
|
||||
| [wontfix](https://github.com/dbt-labs/dbt-core/labels/wontfix) | This issue does not require a code change in the `dbt` repository, or the maintainers are unwilling/unable to merge a Pull Request which implements the behavior described in the issue. |
|
||||
| [duplicate](https://github.com/dbt-labs/dbt-core/issues/duplicate) | This issue is functionally identical to another open issue. The `dbt-core` maintainers will close this issue and encourage community members to focus conversation on the other one. |
|
||||
| [snoozed](https://github.com/dbt-labs/dbt-core/labels/snoozed) | This issue describes a good idea, but one which will probably not be addressed in a six-month time horizon. The `dbt-core` maintainers will revisit these issues periodically and re-prioritize them accordingly. |
|
||||
| [stale](https://github.com/dbt-labs/dbt-core/labels/stale) | This is an old issue which has not recently been updated. Stale issues will periodically be closed by `dbt-core` maintainers, but they can be re-opened if the discussion is restarted. |
|
||||
| [wontfix](https://github.com/dbt-labs/dbt-core/labels/wontfix) | This issue does not require a code change in the `dbt-core` repository, or the maintainers are unwilling/unable to merge a Pull Request which implements the behavior described in the issue. |
|
||||
|
||||
#### Branching Strategy
|
||||
|
||||
`dbt` has three types of branches:
|
||||
`dbt-core` has three types of branches:
|
||||
|
||||
- **Trunks** are where active development of the next release takes place. There is one trunk named `develop` at the time of writing this, and will be the default branch of the repository.
|
||||
- **Release Branches** track a specific, not yet complete release of `dbt`. Each minor version release has a corresponding release branch. For example, the `0.11.x` series of releases has a branch called `0.11.latest`. This allows us to release new patch versions under `0.11` without necessarily needing to pull them into the latest version of `dbt`.
|
||||
- **Trunks** are where active development of the next release takes place. There is one trunk named `main` at the time of writing this, and will be the default branch of the repository.
|
||||
- **Release Branches** track a specific, not yet complete release of `dbt-core`. Each minor version release has a corresponding release branch. For example, the `0.11.x` series of releases has a branch called `0.11.latest`. This allows us to release new patch versions under `0.11` without necessarily needing to pull them into the latest version of `dbt-core`.
|
||||
- **Feature Branches** track individual features and fixes. On completion they should be merged into the trunk branch or a specific release branch.
|
||||
|
||||
## Getting the code
|
||||
|
||||
### Installing git
|
||||
|
||||
You will need `git` in order to download and modify the `dbt` source code. On macOS, the best way to download git is to just install [Xcode](https://developer.apple.com/support/xcode/).
|
||||
You will need `git` in order to download and modify the `dbt-core` source code. On macOS, the best way to download git is to just install [Xcode](https://developer.apple.com/support/xcode/).
|
||||
|
||||
### External contributors
|
||||
|
||||
If you are not a member of the `dbt-labs` GitHub organization, you can contribute to `dbt` by forking the `dbt` repository. For a detailed overview on forking, check out the [GitHub docs on forking](https://help.github.com/en/articles/fork-a-repo). In short, you will need to:
|
||||
If you are not a member of the `dbt-labs` GitHub organization, you can contribute to `dbt-core` by forking the `dbt-core` repository. For a detailed overview on forking, check out the [GitHub docs on forking](https://help.github.com/en/articles/fork-a-repo). In short, you will need to:
|
||||
|
||||
1. fork the `dbt` repository
|
||||
1. fork the `dbt-core` repository
|
||||
2. clone your fork locally
|
||||
3. check out a new branch for your proposed changes
|
||||
4. push changes to your fork
|
||||
5. open a pull request against `dbt-labs/dbt` from your forked repository
|
||||
|
||||
### Core contributors
|
||||
### dbt Labs contributors
|
||||
|
||||
If you are a member of the `dbt-labs` GitHub organization, you will have push access to the `dbt` repo. Rather than forking `dbt` to make your changes, just clone the repository, check out a new branch, and push directly to that branch.
|
||||
If you are a member of the `dbt-labs` GitHub organization, you will have push access to the `dbt-core` repo. Rather than forking `dbt-core` to make your changes, just clone the repository, check out a new branch, and push directly to that branch.
|
||||
|
||||
## Setting up an environment
|
||||
|
||||
There are some tools that will be helpful to you in developing locally. While this is the list relevant for `dbt` development, many of these tools are used commonly across open-source python projects.
|
||||
There are some tools that will be helpful to you in developing locally. While this is the list relevant for `dbt-core` development, many of these tools are used commonly across open-source python projects.
|
||||
|
||||
### Tools
|
||||
|
||||
A short list of tools used in `dbt` testing that will be helpful to your understanding:
|
||||
A short list of tools used in `dbt-core` testing that will be helpful to your understanding:
|
||||
|
||||
- [`tox`](https://tox.readthedocs.io/en/latest/) to manage virtualenvs across python versions. We currently target the latest patch releases for Python 3.6, Python 3.7, Python 3.8, and Python 3.9
|
||||
- [`tox`](https://tox.readthedocs.io/en/latest/) to manage virtualenvs across python versions. We currently target the latest patch releases for Python 3.7, Python 3.8, and Python 3.9
|
||||
- [`pytest`](https://docs.pytest.org/en/latest/) to discover/run tests
|
||||
- [`make`](https://users.cs.duke.edu/~ola/courses/programming/Makefiles/Makefiles.html) - but don't worry too much, nobody _really_ understands how make works and our Makefile is super simple
|
||||
- [`flake8`](https://flake8.pycqa.org/en/latest/) for code linting
|
||||
- [`black`](https://github.com/psf/black) for code formatting
|
||||
- [`mypy`](https://mypy.readthedocs.io/en/stable/) for static type checking
|
||||
- [Github Actions](https://github.com/features/actions)
|
||||
|
||||
A deep understanding of these tools in not required to effectively contribute to `dbt`, but we recommend checking out the attached documentation if you're interested in learning more about them.
|
||||
A deep understanding of these tools is not required to effectively contribute to `dbt-core`, but we recommend checking out the attached documentation if you're interested in learning more about them.
|
||||
|
||||
#### virtual environments
|
||||
|
||||
We strongly recommend using virtual environments when developing code in `dbt`. We recommend creating this virtualenv
|
||||
in the root of the `dbt` repository. To create a new virtualenv, run:
|
||||
We strongly recommend using virtual environments when developing code in `dbt-core`. We recommend creating this virtualenv
|
||||
in the root of the `dbt-core` repository. To create a new virtualenv, run:
|
||||
```sh
|
||||
python3 -m venv env
|
||||
source env/bin/activate
|
||||
@@ -135,11 +137,11 @@ For testing, and later in the examples in this document, you may want to have `p
|
||||
brew install postgresql
|
||||
```
|
||||
|
||||
## Running `dbt` in development
|
||||
## Running `dbt-core` in development
|
||||
|
||||
### Installation
|
||||
|
||||
First make sure that you set up your `virtualenv` as described in [Setting up an environment](#setting-up-an-environment). Also ensure you have the latest version of pip installed with `pip install --upgrade pip`. Next, install `dbt` (and its dependencies) with:
|
||||
First make sure that you set up your `virtualenv` as described in [Setting up an environment](#setting-up-an-environment). Also ensure you have the latest version of pip installed with `pip install --upgrade pip`. Next, install `dbt-core` (and its dependencies) with:
|
||||
|
||||
```sh
|
||||
make dev
|
||||
@@ -147,23 +149,24 @@ make dev
|
||||
pip install -r dev-requirements.txt -r editable-requirements.txt
|
||||
```
|
||||
|
||||
When `dbt` is installed this way, any changes you make to the `dbt` source code will be reflected immediately in your next `dbt` run.
|
||||
When `dbt-core` is installed this way, any changes you make to the `dbt-core` source code will be reflected immediately in your next `dbt-core` run.
|
||||
|
||||
### Running `dbt`
|
||||
|
||||
With your virtualenv activated, the `dbt` script should point back to the source code you've cloned on your machine. You can verify this by running `which dbt`. This command should show you a path to an executable in your virtualenv.
|
||||
### Running `dbt-core`
|
||||
|
||||
With your virtualenv activated, the `dbt-core` script should point back to the source code you've cloned on your machine. You can verify this by running `which dbt`. This command should show you a path to an executable in your virtualenv.
|
||||
|
||||
Configure your [profile](https://docs.getdbt.com/docs/configure-your-profile) as necessary to connect to your target databases. It may be a good idea to add a new profile pointing to a local postgres instance, or a specific test sandbox within your data warehouse if appropriate.
|
||||
|
||||
## Testing
|
||||
|
||||
Getting the `dbt` integration tests set up in your local environment will be very helpful as you start to make changes to your local version of `dbt`. The section that follows outlines some helpful tips for setting up the test environment.
|
||||
Getting the `dbt-core` integration tests set up in your local environment will be very helpful as you start to make changes to your local version of `dbt-core`. The section that follows outlines some helpful tips for setting up the test environment.
|
||||
|
||||
Although `dbt` works with a number of different databases, you won't need to supply credentials for every one of these databases in your test environment. Instead you can test all dbt-core code changes with Python and Postgres.
|
||||
Although `dbt-core` works with a number of different databases, you won't need to supply credentials for every one of these databases in your test environment. Instead you can test all dbt-core code changes with Python and Postgres.
|
||||
|
||||
### Initial setup
|
||||
|
||||
We recommend starting with `dbt`'s Postgres tests. These tests cover most of the functionality in `dbt`, are the fastest to run, and are the easiest to set up. To run the Postgres integration tests, you'll have to do one extra step of setting up the test database:
|
||||
We recommend starting with `dbt-core`'s Postgres tests. These tests cover most of the functionality in `dbt-core`, are the fastest to run, and are the easiest to set up. To run the Postgres integration tests, you'll have to do one extra step of setting up the test database:
|
||||
|
||||
```sh
|
||||
make setup-db
|
||||
@@ -174,15 +177,6 @@ docker-compose up -d database
|
||||
PGHOST=localhost PGUSER=root PGPASSWORD=password PGDATABASE=postgres bash test/setup_db.sh
|
||||
```
|
||||
|
||||
`dbt` uses test credentials specified in a `test.env` file in the root of the repository for non-Postgres databases. This `test.env` file is git-ignored, but please be _extra_ careful to never check in credentials or other sensitive information when developing against `dbt`. To create your `test.env` file, copy the provided sample file, then supply your relevant credentials. This step is only required to use non-Postgres databases.
|
||||
|
||||
```
|
||||
cp test.env.sample test.env
|
||||
$EDITOR test.env
|
||||
```
|
||||
|
||||
> In general, it's most important to have successful unit and Postgres tests. Once you open a PR, `dbt` will automatically run integration tests for the other three core database adapters. Of course, if you are a BigQuery user, contributing a BigQuery-only feature, it's important to run BigQuery tests as well.
|
||||
|
||||
### Test commands
|
||||
|
||||
There are a few methods for running tests locally.
|
||||
@@ -198,19 +192,18 @@ make test
|
||||
# Runs postgres integration tests with py38 in "fail fast" mode.
|
||||
make integration
|
||||
```
|
||||
> These make targets assume you have a recent version of [`tox`](https://tox.readthedocs.io/en/latest/) installed locally,
|
||||
> These make targets assume you have a local install of a recent version of [`tox`](https://tox.readthedocs.io/en/latest/) for unit/integration testing and pre-commit for code quality checks,
|
||||
> unless you choose a Docker container to run tests. Run `make help` for more info.
|
||||
|
||||
Check out the other targets in the Makefile to see other commonly used test
|
||||
suites.
|
||||
|
||||
#### `pre-commit`
|
||||
[`pre-commit`](https://pre-commit.com) takes care of running all code-checks for formatting and linting. Run `make dev` to install `pre-commit` in your local environment. Once this is done you can use any of the linter-based make targets as well as a git pre-commit hook that will ensure proper formatting and linting.
|
||||
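For instance, assuming the hooks have been installed via `make dev`, you can run them on demand; the `--all-files` flag is standard pre-commit usage, not specific to this repo:

```sh
# run every configured hook against the whole repo, not just staged files
pre-commit run --all-files
```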
|
||||
#### `tox`
|
||||
|
||||
[`tox`](https://tox.readthedocs.io/en/latest/) takes care of managing virtualenvs and install dependencies in order to run
|
||||
tests. You can also run tests in parallel, for example, you can run unit tests
|
||||
for Python 3.6, Python 3.7, Python 3.8, `flake8` checks, and `mypy` checks in
|
||||
parallel with `tox -p`. Also, you can run unit tests for specific python versions
|
||||
with `tox -e py36`. The configuration for these tests in located in `tox.ini`.
|
||||
[`tox`](https://tox.readthedocs.io/en/latest/) takes care of managing virtualenvs and installing dependencies in order to run tests. You can also run tests in parallel, for example, you can run unit tests for Python 3.7, Python 3.8, and Python 3.9 in parallel with `tox -p`. Also, you can run unit tests for specific python versions with `tox -e py37`. The configuration for these tests is located in `tox.ini`.
|
||||
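A minimal sketch of those invocations, assuming `tox` is installed in your local environment:

```sh
# unit tests for one interpreter
tox -e py38
# several environments in parallel
tox -p -e py37,py38,py39
```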
|
||||
#### `pytest`
|
||||
|
||||
@@ -226,10 +219,21 @@ python -m pytest test/unit/test_graph.py::GraphTest::test__dependency_list
|
||||
```
|
||||
> [Here](https://docs.pytest.org/en/reorganize-docs/new-docs/user/commandlineuseful.html)
|
||||
> is a list of useful command-line options for `pytest` to use while developing.
|
||||
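For example, while iterating on a change you might narrow the run and fail fast; these are standard pytest flags, nothing dbt-specific:

```sh
# verbose output, stop at the first failure, single test module
python -m pytest -x -v test/unit/test_graph.py
```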
|
||||
## Adding CHANGELOG Entry
|
||||
|
||||
We use [changie](https://changie.dev) to generate `CHANGELOG` entries. Do not edit the `CHANGELOG.md` directly. Your modifications will be lost.
|
||||
|
||||
Follow the steps to [install `changie`](https://changie.dev/guide/installation/) for your system.
|
||||
|
||||
Once changie is installed and your PR is created, simply run `changie new` and changie will walk you through the process of creating a changelog entry. Commit the file that's created and your changelog entry is complete!
|
||||
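A sketch of that flow, assuming `changie` is on your PATH and that this repo's changie config writes entries under `.changes/`:

```sh
changie new                           # answer the interactive prompts
git add .changes/                     # stage the generated entry file
git commit -m "Add changelog entry"
```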
|
||||
## Submitting a Pull Request
|
||||
|
||||
dbt Labs provides a CI environment to test changes to specific adapters, and periodic maintenance checks of `dbt-core` through Github Actions. For example, if you submit a pull request to the `dbt-redshift` repo, GitHub will trigger automated code checks and tests against Redshift.
|
||||
|
||||
A `dbt` maintainer will review your PR. They may suggest code revision for style or clarity, or request that you add unit or integration test(s). These are good things! We believe that, with a little bit of help, anyone can contribute high-quality code.
|
||||
A `dbt-core` maintainer will review your PR. They may suggest code revision for style or clarity, or request that you add unit or integration test(s). These are good things! We believe that, with a little bit of help, anyone can contribute high-quality code.
|
||||
- First time contributors should note code checks + unit tests require a maintainer to approve.
|
||||
|
||||
Once all tests are passing and your PR has been approved, a `dbt` maintainer will merge your changes into the active development branch. And that's it! Happy developing :tada:
|
||||
|
||||
Once all tests are passing and your PR has been approved, a `dbt-core` maintainer will merge your changes into the active development branch. And that's it! Happy developing :tada:
|
||||
|
||||
@@ -1,3 +1,8 @@
|
||||
##
|
||||
# This dockerfile is used for local development and adapter testing only.
|
||||
# See `/docker` for a generic and production-ready docker file
|
||||
##
|
||||
|
||||
FROM ubuntu:20.04
|
||||
|
||||
ENV DEBIAN_FRONTEND noninteractive
|
||||
|
||||
85 Makefile
@@ -8,45 +8,58 @@ endif
|
||||
|
||||
.PHONY: dev
|
||||
dev: ## Installs dbt-* packages in develop mode along with development dependencies.
|
||||
pip install -r dev-requirements.txt -r editable-requirements.txt
|
||||
@\
|
||||
pip install -r dev-requirements.txt -r editable-requirements.txt && \
|
||||
pre-commit install
|
||||
|
||||
.PHONY: mypy
|
||||
mypy: .env ## Runs mypy for static type checking.
|
||||
$(DOCKER_CMD) tox -e mypy
|
||||
mypy: .env ## Runs mypy against staged changes for static type checking.
|
||||
@\
|
||||
$(DOCKER_CMD) pre-commit run --hook-stage manual mypy-check | grep -v "INFO"
|
||||
|
||||
.PHONY: flake8
|
||||
flake8: .env ## Runs flake8 to enforce style guide.
|
||||
$(DOCKER_CMD) tox -e flake8
|
||||
flake8: .env ## Runs flake8 against staged changes to enforce style guide.
|
||||
@\
|
||||
$(DOCKER_CMD) pre-commit run --hook-stage manual flake8-check | grep -v "INFO"
|
||||
|
||||
.PHONY: black
|
||||
black: .env ## Runs black against staged changes to enforce style guide.
|
||||
@\
|
||||
$(DOCKER_CMD) pre-commit run --hook-stage manual black-check -v | grep -v "INFO"
|
||||
|
||||
.PHONY: lint
|
||||
lint: .env ## Runs all code checks in parallel.
|
||||
$(DOCKER_CMD) tox -p -e flake8,mypy
|
||||
lint: .env ## Runs flake8 and mypy code checks against staged changes.
|
||||
@\
|
||||
$(DOCKER_CMD) pre-commit run flake8-check --hook-stage manual | grep -v "INFO"; \
|
||||
$(DOCKER_CMD) pre-commit run mypy-check --hook-stage manual | grep -v "INFO"
|
||||
|
||||
.PHONY: unit
|
||||
unit: .env ## Runs unit tests with py38.
|
||||
@\
|
||||
$(DOCKER_CMD) tox -e py38
|
||||
|
||||
.PHONY: test
|
||||
test: .env ## Runs unit tests with py38 and code checks in parallel.
|
||||
$(DOCKER_CMD) tox -p -e py38,flake8,mypy
|
||||
test: .env ## Runs unit tests with py38 and code checks against staged changes.
|
||||
@\
|
||||
$(DOCKER_CMD) tox -e py38; \
|
||||
$(DOCKER_CMD) pre-commit run black-check --hook-stage manual | grep -v "INFO"; \
|
||||
$(DOCKER_CMD) pre-commit run flake8-check --hook-stage manual | grep -v "INFO"; \
|
||||
$(DOCKER_CMD) pre-commit run mypy-check --hook-stage manual | grep -v "INFO"
|
||||
|
||||
.PHONY: integration
|
||||
integration: .env integration-postgres ## Alias for integration-postgres.
|
||||
integration: .env ## Runs postgres integration tests with py38.
|
||||
@\
|
||||
$(DOCKER_CMD) tox -e py38-integration -- -nauto
|
||||
|
||||
.PHONY: integration-fail-fast
|
||||
integration-fail-fast: .env integration-postgres-fail-fast ## Alias for integration-postgres-fail-fast.
|
||||
|
||||
.PHONY: integration-postgres
|
||||
integration-postgres: .env ## Runs postgres integration tests with py38.
|
||||
$(DOCKER_CMD) tox -e py38-postgres -- -nauto
|
||||
|
||||
.PHONY: integration-postgres-fail-fast
|
||||
integration-postgres-fail-fast: .env ## Runs postgres integration tests with py38 in "fail fast" mode.
|
||||
$(DOCKER_CMD) tox -e py38-postgres -- -x -nauto
|
||||
integration-fail-fast: .env ## Runs postgres integration tests with py38 in "fail fast" mode.
|
||||
@\
|
||||
$(DOCKER_CMD) tox -e py38-integration -- -x -nauto
|
||||
|
||||
.PHONY: setup-db
|
||||
setup-db: ## Setup Postgres database with docker-compose for system testing.
|
||||
docker-compose up -d database
|
||||
@\
|
||||
docker-compose up -d database && \
|
||||
PGHOST=localhost PGUSER=root PGPASSWORD=password PGDATABASE=postgres bash test/setup_db.sh
|
||||
|
||||
# This rule creates a file named .env that is used by docker-compose for passing
|
||||
@@ -62,27 +75,29 @@ endif
|
||||
|
||||
.PHONY: clean
|
||||
clean: ## Resets development environment.
|
||||
rm -f .coverage
|
||||
rm -rf .eggs/
|
||||
rm -f .env
|
||||
rm -rf .tox/
|
||||
rm -rf build/
|
||||
rm -rf dbt.egg-info/
|
||||
rm -f dbt_project.yml
|
||||
rm -rf dist/
|
||||
rm -f htmlcov/*.{css,html,js,json,png}
|
||||
rm -rf logs/
|
||||
rm -rf target/
|
||||
find . -type f -name '*.pyc' -delete
|
||||
find . -type d -name '__pycache__' -depth -delete
|
||||
@echo 'cleaning repo...'
|
||||
@rm -f .coverage
|
||||
@rm -rf .eggs/
|
||||
@rm -f .env
|
||||
@rm -rf .tox/
|
||||
@rm -rf build/
|
||||
@rm -rf dbt.egg-info/
|
||||
@rm -f dbt_project.yml
|
||||
@rm -rf dist/
|
||||
@rm -f htmlcov/*.{css,html,js,json,png}
|
||||
@rm -rf logs/
|
||||
@rm -rf target/
|
||||
@find . -type f -name '*.pyc' -delete
|
||||
@find . -type d -name '__pycache__' -depth -delete
|
||||
@echo 'done.'
|
||||
|
||||
|
||||
.PHONY: help
|
||||
help: ## Show this help message.
|
||||
@echo 'usage: make [target] [USE_DOCKER=true]'
|
||||
@echo
|
||||
@echo 'targets:'
|
||||
@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
|
||||
@grep -E '^[8+a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
|
||||
@echo
|
||||
@echo 'options:'
|
||||
@echo 'use USE_DOCKER=true to run target in a docker container'
|
||||
|
||||
|
||||
@@ -3,10 +3,7 @@
|
||||
</p>
|
||||
<p align="center">
|
||||
<a href="https://github.com/dbt-labs/dbt-core/actions/workflows/main.yml">
|
||||
<img src="https://github.com/dbt-labs/dbt-core/actions/workflows/main.yml/badge.svg?event=push" alt="Unit Tests Badge"/>
|
||||
</a>
|
||||
<a href="https://github.com/dbt-labs/dbt-core/actions/workflows/integration.yml">
|
||||
<img src="https://github.com/dbt-labs/dbt-core/actions/workflows/integration.yml/badge.svg?event=push" alt="Integration Tests Badge"/>
|
||||
<img src="https://github.com/dbt-labs/dbt-core/actions/workflows/main.yml/badge.svg?event=push" alt="CI Badge"/>
|
||||
</a>
|
||||
</p>
|
||||
|
||||
|
||||
@@ -3,10 +3,7 @@
|
||||
</p>
|
||||
<p align="center">
|
||||
<a href="https://github.com/dbt-labs/dbt-core/actions/workflows/main.yml">
|
||||
<img src="https://github.com/dbt-labs/dbt-core/actions/workflows/main.yml/badge.svg?event=push" alt="Unit Tests Badge"/>
|
||||
</a>
|
||||
<a href="https://github.com/dbt-labs/dbt-core/actions/workflows/integration.yml">
|
||||
<img src="https://github.com/dbt-labs/dbt-core/actions/workflows/integration.yml/badge.svg?event=push" alt="Integration Tests Badge"/>
|
||||
<img src="https://github.com/dbt-labs/dbt-core/actions/workflows/main.yml/badge.svg?event=push" alt="CI Badge"/>
|
||||
</a>
|
||||
</p>
|
||||
|
||||
|
||||
51 core/dbt/README.md Normal file
@@ -0,0 +1,51 @@
|
||||
# core/dbt directory README
|
||||
|
||||
## The following are individual files in this directory.
|
||||
|
||||
### deprecations.py
|
||||
|
||||
### flags.py
|
||||
|
||||
### main.py
|
||||
|
||||
### tracking.py
|
||||
|
||||
### version.py
|
||||
|
||||
### lib.py
|
||||
|
||||
### node_types.py
|
||||
|
||||
### helper_types.py
|
||||
|
||||
### links.py
|
||||
|
||||
### semver.py
|
||||
|
||||
### ui.py
|
||||
|
||||
### compilation.py
|
||||
|
||||
### dataclass_schema.py
|
||||
|
||||
### exceptions.py
|
||||
|
||||
### hooks.py
|
||||
|
||||
### logger.py
|
||||
|
||||
### profiler.py
|
||||
|
||||
### utils.py
|
||||
|
||||
|
||||
## The subdirectories will be documented in a README in the subdirectory
|
||||
* config
|
||||
* include
|
||||
* adapters
|
||||
* context
|
||||
* deps
|
||||
* graph
|
||||
* task
|
||||
* clients
|
||||
* events
|
||||
1 core/dbt/adapters/README.md Normal file
@@ -0,0 +1 @@
# Adapters README
@@ -8,10 +8,10 @@ from dbt.exceptions import RuntimeException
|
||||
@dataclass
|
||||
class Column:
|
||||
TYPE_LABELS: ClassVar[Dict[str, str]] = {
|
||||
'STRING': 'TEXT',
|
||||
'TIMESTAMP': 'TIMESTAMP',
|
||||
'FLOAT': 'FLOAT',
|
||||
'INTEGER': 'INT'
|
||||
"STRING": "TEXT",
|
||||
"TIMESTAMP": "TIMESTAMP",
|
||||
"FLOAT": "FLOAT",
|
||||
"INTEGER": "INT",
|
||||
}
|
||||
column: str
|
||||
dtype: str
|
||||
@@ -24,7 +24,7 @@ class Column:
|
||||
return cls.TYPE_LABELS.get(dtype.upper(), dtype)
|
||||
|
||||
@classmethod
|
||||
def create(cls, name, label_or_dtype: str) -> 'Column':
|
||||
def create(cls, name, label_or_dtype: str) -> "Column":
|
||||
column_type = cls.translate_type(label_or_dtype)
|
||||
return cls(name, column_type)
|
||||
|
||||
@@ -39,16 +39,14 @@ class Column:
|
||||
@property
|
||||
def data_type(self) -> str:
|
||||
if self.is_string():
|
||||
return Column.string_type(self.string_size())
|
||||
return self.string_type(self.string_size())
|
||||
elif self.is_numeric():
|
||||
return Column.numeric_type(self.dtype, self.numeric_precision,
|
||||
self.numeric_scale)
|
||||
return self.numeric_type(self.dtype, self.numeric_precision, self.numeric_scale)
|
||||
else:
|
||||
return self.dtype
|
||||
|
||||
def is_string(self) -> bool:
|
||||
return self.dtype.lower() in ['text', 'character varying', 'character',
|
||||
'varchar']
|
||||
return self.dtype.lower() in ["text", "character varying", "character", "varchar"]
|
||||
|
||||
def is_number(self):
|
||||
return any([self.is_integer(), self.is_numeric(), self.is_float()])
|
||||
@@ -56,33 +54,45 @@ class Column:
|
||||
def is_float(self):
|
||||
return self.dtype.lower() in [
|
||||
# floats
|
||||
'real', 'float4', 'float', 'double precision', 'float8'
|
||||
"real",
|
||||
"float4",
|
||||
"float",
|
||||
"double precision",
|
||||
"float8",
|
||||
]
|
||||
|
||||
def is_integer(self) -> bool:
|
||||
return self.dtype.lower() in [
|
||||
# real types
|
||||
'smallint', 'integer', 'bigint',
|
||||
'smallserial', 'serial', 'bigserial',
|
||||
"smallint",
|
||||
"integer",
|
||||
"bigint",
|
||||
"smallserial",
|
||||
"serial",
|
||||
"bigserial",
|
||||
# aliases
|
||||
'int2', 'int4', 'int8',
|
||||
'serial2', 'serial4', 'serial8',
|
||||
"int2",
|
||||
"int4",
|
||||
"int8",
|
||||
"serial2",
|
||||
"serial4",
|
||||
"serial8",
|
||||
]
|
||||
|
||||
def is_numeric(self) -> bool:
|
||||
return self.dtype.lower() in ['numeric', 'decimal']
|
||||
return self.dtype.lower() in ["numeric", "decimal"]
|
||||
|
||||
def string_size(self) -> int:
|
||||
if not self.is_string():
|
||||
raise RuntimeException("Called string_size() on non-string field!")
|
||||
|
||||
if self.dtype == 'text' or self.char_size is None:
|
||||
if self.dtype == "text" or self.char_size is None:
|
||||
# char_size should never be None. Handle it reasonably just in case
|
||||
return 256
|
||||
else:
|
||||
return int(self.char_size)
|
||||
|
||||
def can_expand_to(self, other_column: 'Column') -> bool:
|
||||
def can_expand_to(self, other_column: "Column") -> bool:
|
||||
"""returns True if this column can be expanded to the size of the
|
||||
other column"""
|
||||
if not self.is_string() or not other_column.is_string():
|
||||
@@ -110,12 +120,10 @@ class Column:
|
||||
return "<Column {} ({})>".format(self.name, self.data_type)
|
||||
|
||||
@classmethod
|
||||
def from_description(cls, name: str, raw_data_type: str) -> 'Column':
|
||||
match = re.match(r'([^(]+)(\([^)]+\))?', raw_data_type)
|
||||
def from_description(cls, name: str, raw_data_type: str) -> "Column":
|
||||
match = re.match(r"([^(]+)(\([^)]+\))?", raw_data_type)
|
||||
if match is None:
|
||||
raise RuntimeException(
|
||||
f'Could not interpret data type "{raw_data_type}"'
|
||||
)
|
||||
raise RuntimeException(f'Could not interpret data type "{raw_data_type}"')
|
||||
data_type, size_info = match.groups()
|
||||
char_size = None
|
||||
numeric_precision = None
|
||||
@@ -123,7 +131,7 @@ class Column:
|
||||
if size_info is not None:
|
||||
# strip out the parentheses
|
||||
size_info = size_info[1:-1]
|
||||
parts = size_info.split(',')
|
||||
parts = size_info.split(",")
|
||||
if len(parts) == 1:
|
||||
try:
|
||||
char_size = int(parts[0])
|
||||
@@ -148,6 +156,4 @@ class Column:
|
||||
f'could not convert "{parts[1]}" to an integer'
|
||||
)
|
||||
|
||||
return cls(
|
||||
name, data_type, char_size, numeric_precision, numeric_scale
|
||||
)
|
||||
return cls(name, data_type, char_size, numeric_precision, numeric_scale)
|
||||
|
||||
@@ -1,18 +1,21 @@
|
||||
import abc
|
||||
import os
|
||||
|
||||
# multiprocessing.RLock is a function returning this type
|
||||
from multiprocessing.synchronize import RLock
|
||||
from threading import get_ident
|
||||
from typing import (
|
||||
Dict, Tuple, Hashable, Optional, ContextManager, List, Union
|
||||
)
|
||||
from typing import Dict, Tuple, Hashable, Optional, ContextManager, List, Union
|
||||
|
||||
import agate
|
||||
|
||||
import dbt.exceptions
|
||||
from dbt.contracts.connection import (
|
||||
Connection, Identifier, ConnectionState,
|
||||
AdapterRequiredConfig, LazyHandle, AdapterResponse
|
||||
Connection,
|
||||
Identifier,
|
||||
ConnectionState,
|
||||
AdapterRequiredConfig,
|
||||
LazyHandle,
|
||||
AdapterResponse,
|
||||
)
|
||||
from dbt.contracts.graph.manifest import Manifest
|
||||
from dbt.adapters.base.query_headers import (
|
||||
@@ -27,7 +30,7 @@ from dbt.events.types import (
|
||||
ConnectionClosed,
|
||||
ConnectionClosed2,
|
||||
Rollback,
|
||||
RollbackFailed
|
||||
RollbackFailed,
|
||||
)
|
||||
from dbt import flags
|
||||
|
||||
@@ -45,6 +48,7 @@ class BaseConnectionManager(metaclass=abc.ABCMeta):
|
||||
You must also set the 'TYPE' class attribute with a class-unique constant
|
||||
string.
|
||||
"""
|
||||
|
||||
TYPE: str = NotImplemented
|
||||
|
||||
def __init__(self, profile: AdapterRequiredConfig):
|
||||
@@ -66,16 +70,14 @@ class BaseConnectionManager(metaclass=abc.ABCMeta):
|
||||
key = self.get_thread_identifier()
|
||||
with self.lock:
|
||||
if key not in self.thread_connections:
|
||||
raise dbt.exceptions.InvalidConnectionException(
|
||||
key, list(self.thread_connections)
|
||||
)
|
||||
raise dbt.exceptions.InvalidConnectionException(key, list(self.thread_connections))
|
||||
return self.thread_connections[key]
|
||||
|
||||
def set_thread_connection(self, conn: Connection) -> None:
|
||||
key = self.get_thread_identifier()
|
||||
if key in self.thread_connections:
|
||||
raise dbt.exceptions.InternalException(
|
||||
'In set_thread_connection, existing connection exists for {}'
|
||||
"In set_thread_connection, existing connection exists for {}"
|
||||
)
|
||||
self.thread_connections[key] = conn
|
||||
|
||||
@@ -115,18 +117,19 @@ class BaseConnectionManager(metaclass=abc.ABCMeta):
|
||||
underlying database.
|
||||
"""
|
||||
raise dbt.exceptions.NotImplementedException(
|
||||
'`exception_handler` is not implemented for this adapter!')
|
||||
"`exception_handler` is not implemented for this adapter!"
|
||||
)
|
||||
|
||||
def set_connection_name(self, name: Optional[str] = None) -> Connection:
|
||||
conn_name: str
|
||||
if name is None:
|
||||
# if a name isn't specified, we'll re-use a single handle
|
||||
# named 'master'
|
||||
conn_name = 'master'
|
||||
conn_name = "master"
|
||||
else:
|
||||
if not isinstance(name, str):
|
||||
raise dbt.exceptions.CompilerException(
|
||||
f'For connection name, got {name} - not a string!'
|
||||
f"For connection name, got {name} - not a string!"
|
||||
)
|
||||
assert isinstance(name, str)
|
||||
conn_name = name
|
||||
@@ -139,16 +142,16 @@ class BaseConnectionManager(metaclass=abc.ABCMeta):
|
||||
state=ConnectionState.INIT,
|
||||
transaction_open=False,
|
||||
handle=None,
|
||||
credentials=self.profile.credentials
|
||||
credentials=self.profile.credentials,
|
||||
)
|
||||
self.set_thread_connection(conn)
|
||||
|
||||
if conn.name == conn_name and conn.state == 'open':
|
||||
if conn.name == conn_name and conn.state == "open":
|
||||
return conn
|
||||
|
||||
fire_event(NewConnection(conn_name=conn_name, conn_type=self.TYPE))
|
||||
|
||||
if conn.state == 'open':
|
||||
if conn.state == "open":
|
||||
fire_event(ConnectionReused(conn_name=conn_name))
|
||||
else:
|
||||
conn.handle = LazyHandle(self.open)
|
||||
@@ -160,7 +163,7 @@ class BaseConnectionManager(metaclass=abc.ABCMeta):
|
||||
def cancel_open(self) -> Optional[List[str]]:
|
||||
"""Cancel all open connections on the adapter. (passable)"""
|
||||
raise dbt.exceptions.NotImplementedException(
|
||||
'`cancel_open` is not implemented for this adapter!'
|
||||
"`cancel_open` is not implemented for this adapter!"
|
||||
)
|
||||
|
||||
@abc.abstractclassmethod
|
||||
@@ -173,9 +176,7 @@ class BaseConnectionManager(metaclass=abc.ABCMeta):
|
||||
This should be thread-safe, or hold the lock if necessary. The given
|
||||
connection should not be in either in_use or available.
|
||||
"""
|
||||
raise dbt.exceptions.NotImplementedException(
|
||||
'`open` is not implemented for this adapter!'
|
||||
)
|
||||
raise dbt.exceptions.NotImplementedException("`open` is not implemented for this adapter!")
|
||||
|
||||
def release(self) -> None:
|
||||
with self.lock:
|
||||
@@ -195,7 +196,7 @@ class BaseConnectionManager(metaclass=abc.ABCMeta):
|
||||
def cleanup_all(self) -> None:
|
||||
with self.lock:
|
||||
for connection in self.thread_connections.values():
|
||||
if connection.state not in {'closed', 'init'}:
|
||||
if connection.state not in {"closed", "init"}:
|
||||
fire_event(ConnectionLeftOpen(conn_name=connection.name))
|
||||
else:
|
||||
fire_event(ConnectionClosed(conn_name=connection.name))
|
||||
@@ -208,14 +209,14 @@ class BaseConnectionManager(metaclass=abc.ABCMeta):
|
||||
def begin(self) -> None:
|
||||
"""Begin a transaction. (passable)"""
|
||||
raise dbt.exceptions.NotImplementedException(
|
||||
'`begin` is not implemented for this adapter!'
|
||||
"`begin` is not implemented for this adapter!"
|
||||
)
|
||||
|
||||
@abc.abstractmethod
|
||||
def commit(self) -> None:
|
||||
"""Commit a transaction. (passable)"""
|
||||
raise dbt.exceptions.NotImplementedException(
|
||||
'`commit` is not implemented for this adapter!'
|
||||
"`commit` is not implemented for this adapter!"
|
||||
)
|
||||
|
||||
@classmethod
|
||||
@@ -230,7 +231,7 @@ class BaseConnectionManager(metaclass=abc.ABCMeta):
|
||||
def _close_handle(cls, connection: Connection) -> None:
|
||||
"""Perform the actual close operation."""
|
||||
# On windows, sometimes connection handles don't have a close() attr.
|
||||
if hasattr(connection.handle, 'close'):
|
||||
if hasattr(connection.handle, "close"):
|
||||
fire_event(ConnectionClosed2(conn_name=connection.name))
|
||||
connection.handle.close()
|
||||
else:
|
||||
@@ -241,7 +242,7 @@ class BaseConnectionManager(metaclass=abc.ABCMeta):
|
||||
"""Roll back the given connection."""
|
||||
if connection.transaction_open is False:
|
||||
raise dbt.exceptions.InternalException(
|
||||
f'Tried to rollback transaction on connection '
|
||||
f"Tried to rollback transaction on connection "
|
||||
f'"{connection.name}", but it does not have one open!'
|
||||
)
|
||||
|
||||
@@ -291,5 +292,5 @@ class BaseConnectionManager(metaclass=abc.ABCMeta):
|
||||
:rtype: Tuple[Union[str, AdapterResponse], agate.Table]
|
||||
"""
|
||||
raise dbt.exceptions.NotImplementedException(
|
||||
'`execute` is not implemented for this adapter!'
|
||||
"`execute` is not implemented for this adapter!"
|
||||
)
|
||||
|
||||
File diff suppressed because it is too large
@@ -30,9 +30,11 @@ class _Available:
|
||||
x.update(big_expensive_db_query())
|
||||
return x
|
||||
"""
|
||||
|
||||
def inner(func):
|
||||
func._parse_replacement_ = parse_replacement
|
||||
return self(func)
|
||||
|
||||
return inner
|
||||
|
||||
def deprecated(
|
||||
@@ -57,13 +59,14 @@ class _Available:
|
||||
The optional parse_replacement, if provided, will provide a parse-time
|
||||
replacement for the actual method (see `available.parse`).
|
||||
"""
|
||||
|
||||
def wrapper(func):
|
||||
func_name = func.__name__
|
||||
renamed_method(func_name, supported_name)
|
||||
|
||||
@wraps(func)
|
||||
def inner(*args, **kwargs):
|
||||
warn('adapter:{}'.format(func_name))
|
||||
warn("adapter:{}".format(func_name))
|
||||
return func(*args, **kwargs)
|
||||
|
||||
if parse_replacement:
|
||||
@@ -71,6 +74,7 @@ class _Available:
|
||||
else:
|
||||
available_function = self
|
||||
return available_function(inner)
|
||||
|
||||
return wrapper
|
||||
|
||||
def parse_none(self, func: Callable) -> Callable:
|
||||
@@ -95,9 +99,7 @@ class AdapterMeta(abc.ABCMeta):
|
||||
# I'm not sure there is any benefit to it after poking around a bit,
|
||||
# but having it doesn't hurt on the python side (and omitting it could
|
||||
# hurt for obscure metaclass reasons, for all I know)
|
||||
cls = abc.ABCMeta.__new__( # type: ignore
|
||||
mcls, name, bases, namespace, **kwargs
|
||||
)
|
||||
cls = abc.ABCMeta.__new__(mcls, name, bases, namespace, **kwargs) # type: ignore
|
||||
|
||||
# this is very much inspired by ABCMeta's own implementation
|
||||
|
||||
@@ -109,14 +111,14 @@ class AdapterMeta(abc.ABCMeta):
|
||||
|
||||
# collect base class data first
|
||||
for base in bases:
|
||||
available.update(getattr(base, '_available_', set()))
|
||||
replacements.update(getattr(base, '_parse_replacements_', set()))
|
||||
available.update(getattr(base, "_available_", set()))
|
||||
replacements.update(getattr(base, "_parse_replacements_", set()))
|
||||
|
||||
# override with local data if it exists
|
||||
for name, value in namespace.items():
|
||||
if getattr(value, '_is_available_', False):
|
||||
if getattr(value, "_is_available_", False):
|
||||
available.add(name)
|
||||
parse_replacement = getattr(value, '_parse_replacement_', None)
|
||||
parse_replacement = getattr(value, "_parse_replacement_", None)
|
||||
if parse_replacement is not None:
|
||||
replacements[name] = parse_replacement
|
||||
|
||||
|
||||
@@ -8,11 +8,10 @@ from dbt.adapters.protocol import AdapterProtocol
|
||||
def project_name_from_path(include_path: str) -> str:
|
||||
# avoid an import cycle
|
||||
from dbt.config.project import Project
|
||||
|
||||
partial = Project.partial_load(include_path)
|
||||
if partial.project_name is None:
|
||||
raise CompilationException(
|
||||
f'Invalid project at {include_path}: name not set!'
|
||||
)
|
||||
raise CompilationException(f"Invalid project at {include_path}: name not set!")
|
||||
return partial.project_name
|
||||
|
||||
|
||||
@@ -23,12 +22,13 @@ class AdapterPlugin:
|
||||
:param dependencies: A list of adapter names that this adapter depends
|
||||
upon.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
adapter: Type[AdapterProtocol],
|
||||
credentials: Type[Credentials],
|
||||
include_path: str,
|
||||
dependencies: Optional[List[str]] = None
|
||||
dependencies: Optional[List[str]] = None,
|
||||
):
|
||||
|
||||
self.adapter: Type[AdapterProtocol] = adapter
|
||||
|
||||
@@ -15,7 +15,7 @@ class NodeWrapper:
|
||||
self._inner_node = node
|
||||
|
||||
def __getattr__(self, name):
|
||||
return getattr(self._inner_node, name, '')
|
||||
return getattr(self._inner_node, name, "")
|
||||
|
||||
|
||||
class _QueryComment(local):
|
||||
@@ -24,6 +24,7 @@ class _QueryComment(local):
|
||||
- the current thread's query comment.
|
||||
- a source_name indicating what set the current thread's query comment
|
||||
"""
|
||||
|
||||
def __init__(self, initial):
|
||||
self.query_comment: Optional[str] = initial
|
||||
self.append = False
|
||||
@@ -35,21 +36,19 @@ class _QueryComment(local):
|
||||
if self.append:
|
||||
# replace last ';' with '<comment>;'
|
||||
sql = sql.rstrip()
|
||||
if sql[-1] == ';':
|
||||
if sql[-1] == ";":
|
||||
sql = sql[:-1]
|
||||
return '{}\n/* {} */;'.format(sql, self.query_comment.strip())
|
||||
return "{}\n/* {} */;".format(sql, self.query_comment.strip())
|
||||
|
||||
return '{}\n/* {} */'.format(sql, self.query_comment.strip())
|
||||
return "{}\n/* {} */".format(sql, self.query_comment.strip())
|
||||
|
||||
return '/* {} */\n{}'.format(self.query_comment.strip(), sql)
|
||||
return "/* {} */\n{}".format(self.query_comment.strip(), sql)
|
||||
|
||||
def set(self, comment: Optional[str], append: bool):
|
||||
if isinstance(comment, str) and '*/' in comment:
|
||||
if isinstance(comment, str) and "*/" in comment:
|
||||
# tell the user "no" so they don't hurt themselves by writing
|
||||
# garbage
|
||||
raise RuntimeException(
|
||||
f'query comment contains illegal value "*/": {comment}'
|
||||
)
|
||||
raise RuntimeException(f'query comment contains illegal value "*/": {comment}')
|
||||
self.query_comment = comment
|
||||
self.append = append
|
||||
|
||||
@@ -63,15 +62,17 @@ class MacroQueryStringSetter:
|
||||
self.config = config
|
||||
|
||||
comment_macro = self._get_comment_macro()
|
||||
self.generator: QueryStringFunc = lambda name, model: ''
|
||||
self.generator: QueryStringFunc = lambda name, model: ""
|
||||
# if the comment value was None or the empty string, just skip it
|
||||
if comment_macro:
|
||||
assert isinstance(comment_macro, str)
|
||||
macro = '\n'.join((
|
||||
'{%- macro query_comment_macro(connection_name, node) -%}',
|
||||
comment_macro,
|
||||
'{% endmacro %}'
|
||||
))
|
||||
macro = "\n".join(
|
||||
(
|
||||
"{%- macro query_comment_macro(connection_name, node) -%}",
|
||||
comment_macro,
|
||||
"{% endmacro %}",
|
||||
)
|
||||
)
|
||||
ctx = self._get_context()
|
||||
self.generator = QueryStringGenerator(macro, ctx)
|
||||
self.comment = _QueryComment(None)
|
||||
@@ -87,7 +88,7 @@ class MacroQueryStringSetter:
|
||||
return self.comment.add(sql)
|
||||
|
||||
def reset(self):
|
||||
self.set('master', None)
|
||||
self.set("master", None)
|
||||
|
||||
def set(self, name: str, node: Optional[CompileResultNode]):
|
||||
wrapped: Optional[NodeWrapper] = None
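A minimal sketch of how the query-comment wrapper shown above behaves, using the _QueryComment class from dbt.adapters.base.query_headers; the comment text below is a made-up example, not something dbt generates verbatim:

from dbt.adapters.base.query_headers import _QueryComment

qc = _QueryComment(None)

# default behaviour: the comment is prepended to the SQL
qc.set("run by dbt", append=False)
print(qc.add("select 1 from orders;"))
# /* run by dbt */
# select 1 from orders;

# append=True: a trailing ';' is stripped and the comment lands at the end
qc.set("run by dbt", append=True)
print(qc.add("select 1 from orders;"))
# select 1 from orders
# /* run by dbt */;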
|
||||
|
||||
@@ -1,13 +1,16 @@
|
||||
from collections.abc import Hashable
|
||||
from dataclasses import dataclass
|
||||
from typing import (
|
||||
Optional, TypeVar, Any, Type, Dict, Union, Iterator, Tuple, Set
|
||||
)
|
||||
from typing import Optional, TypeVar, Any, Type, Dict, Union, Iterator, Tuple, Set
|
||||
|
||||
from dbt.contracts.graph.compiled import CompiledNode
|
||||
from dbt.contracts.graph.parsed import ParsedSourceDefinition, ParsedNode
|
||||
from dbt.contracts.relation import (
|
||||
RelationType, ComponentName, HasQuoting, FakeAPIObject, Policy, Path
|
||||
RelationType,
|
||||
ComponentName,
|
||||
HasQuoting,
|
||||
FakeAPIObject,
|
||||
Policy,
|
||||
Path,
|
||||
)
|
||||
from dbt.exceptions import InternalException
|
||||
from dbt.node_types import NodeType
|
||||
@@ -16,7 +19,7 @@ from dbt.utils import filter_null_values, deep_merge, classproperty
|
||||
import dbt.exceptions
|
||||
|
||||
|
||||
Self = TypeVar('Self', bound='BaseRelation')
|
||||
Self = TypeVar("Self", bound="BaseRelation")
|
||||
|
||||
|
||||
@dataclass(frozen=True, eq=False, repr=False)
|
||||
@@ -40,7 +43,7 @@ class BaseRelation(FakeAPIObject, Hashable):
|
||||
if field.name == field_name:
|
||||
return field
|
||||
# this should be unreachable
|
||||
raise ValueError(f'BaseRelation has no {field_name} field!')
|
||||
raise ValueError(f"BaseRelation has no {field_name} field!")
|
||||
|
||||
def __eq__(self, other):
|
||||
if not isinstance(other, self.__class__):
|
||||
@@ -49,20 +52,18 @@ class BaseRelation(FakeAPIObject, Hashable):
|
||||
|
||||
@classmethod
|
||||
def get_default_quote_policy(cls) -> Policy:
|
||||
return cls._get_field_named('quote_policy').default
|
||||
return cls._get_field_named("quote_policy").default
|
||||
|
||||
@classmethod
|
||||
def get_default_include_policy(cls) -> Policy:
|
||||
return cls._get_field_named('include_policy').default
|
||||
return cls._get_field_named("include_policy").default
|
||||
|
||||
def get(self, key, default=None):
|
||||
"""Override `.get` to return a metadata object so we don't break
|
||||
dbt_utils.
|
||||
"""
|
||||
if key == 'metadata':
|
||||
return {
|
||||
'type': self.__class__.__name__
|
||||
}
|
||||
if key == "metadata":
|
||||
return {"type": self.__class__.__name__}
|
||||
return super().get(key, default)
|
||||
|
||||
def matches(
|
||||
@@ -71,16 +72,19 @@ class BaseRelation(FakeAPIObject, Hashable):
|
||||
schema: Optional[str] = None,
|
||||
identifier: Optional[str] = None,
|
||||
) -> bool:
|
||||
search = filter_null_values({
|
||||
ComponentName.Database: database,
|
||||
ComponentName.Schema: schema,
|
||||
ComponentName.Identifier: identifier
|
||||
})
|
||||
search = filter_null_values(
|
||||
{
|
||||
ComponentName.Database: database,
|
||||
ComponentName.Schema: schema,
|
||||
ComponentName.Identifier: identifier,
|
||||
}
|
||||
)
|
||||
|
||||
if not search:
|
||||
# nothing was passed in
|
||||
raise dbt.exceptions.RuntimeException(
|
||||
"Tried to match relation, but no search path was passed!")
|
||||
"Tried to match relation, but no search path was passed!"
|
||||
)
|
||||
|
||||
exact_match = True
|
||||
approximate_match = True
|
||||
@@ -88,17 +92,13 @@ class BaseRelation(FakeAPIObject, Hashable):
|
||||
for k, v in search.items():
|
||||
if not self._is_exactish_match(k, v):
|
||||
exact_match = False
|
||||
|
||||
if (
|
||||
self.path.get_lowered_part(k).strip(self.quote_character) !=
|
||||
v.lower().strip(self.quote_character)
|
||||
if str(self.path.get_lowered_part(k)).strip(self.quote_character) != v.lower().strip(
|
||||
self.quote_character
|
||||
):
|
||||
approximate_match = False
|
||||
approximate_match = False # type: ignore[union-attr]
|
||||
|
||||
if approximate_match and not exact_match:
|
||||
target = self.create(
|
||||
database=database, schema=schema, identifier=identifier
|
||||
)
|
||||
target = self.create(database=database, schema=schema, identifier=identifier)
|
||||
dbt.exceptions.approximate_relation_match(target, self)
|
||||
|
||||
return exact_match
|
||||
@@ -112,11 +112,13 @@ class BaseRelation(FakeAPIObject, Hashable):
|
||||
schema: Optional[bool] = None,
|
||||
identifier: Optional[bool] = None,
|
||||
) -> Self:
|
||||
policy = filter_null_values({
|
||||
ComponentName.Database: database,
|
||||
ComponentName.Schema: schema,
|
||||
ComponentName.Identifier: identifier
|
||||
})
|
||||
policy = filter_null_values(
|
||||
{
|
||||
ComponentName.Database: database,
|
||||
ComponentName.Schema: schema,
|
||||
ComponentName.Identifier: identifier,
|
||||
}
|
||||
)
|
||||
|
||||
new_quote_policy = self.quote_policy.replace_dict(policy)
|
||||
return self.replace(quote_policy=new_quote_policy)
|
||||
@@ -127,16 +129,18 @@ class BaseRelation(FakeAPIObject, Hashable):
|
||||
schema: Optional[bool] = None,
|
||||
identifier: Optional[bool] = None,
|
||||
) -> Self:
|
||||
policy = filter_null_values({
|
||||
ComponentName.Database: database,
|
||||
ComponentName.Schema: schema,
|
||||
ComponentName.Identifier: identifier
|
||||
})
|
||||
policy = filter_null_values(
|
||||
{
|
||||
ComponentName.Database: database,
|
||||
ComponentName.Schema: schema,
|
||||
ComponentName.Identifier: identifier,
|
||||
}
|
||||
)
|
||||
|
||||
new_include_policy = self.include_policy.replace_dict(policy)
|
||||
return self.replace(include_policy=new_include_policy)
|
||||
|
||||
def information_schema(self, view_name=None) -> 'InformationSchema':
|
||||
def information_schema(self, view_name=None) -> "InformationSchema":
|
||||
# some of our data comes from jinja, where things can be `Undefined`.
|
||||
if not isinstance(view_name, str):
|
||||
view_name = None
|
||||
@@ -146,10 +150,10 @@ class BaseRelation(FakeAPIObject, Hashable):
|
||||
info_schema = InformationSchema.from_relation(self, view_name)
|
||||
return info_schema.incorporate(path={"schema": None})
|
||||
|
||||
def information_schema_only(self) -> 'InformationSchema':
|
||||
def information_schema_only(self) -> "InformationSchema":
|
||||
return self.information_schema()
|
||||
|
||||
def without_identifier(self) -> 'BaseRelation':
|
||||
def without_identifier(self) -> "BaseRelation":
|
||||
"""Return a form of this relation that only has the database and schema
|
||||
set to included. To get the appropriately-quoted form the schema out of
|
||||
the result (for use as part of a query), use `.render()`. To get the
|
||||
@@ -159,9 +163,7 @@ class BaseRelation(FakeAPIObject, Hashable):
|
||||
"""
|
||||
return self.include(identifier=False).replace_path(identifier=None)
|
||||
|
||||
def _render_iterator(
|
||||
self
|
||||
) -> Iterator[Tuple[Optional[ComponentName], Optional[str]]]:
|
||||
def _render_iterator(self) -> Iterator[Tuple[Optional[ComponentName], Optional[str]]]:
|
||||
|
||||
for key in ComponentName:
|
||||
path_part: Optional[str] = None
|
||||
@@ -173,27 +175,22 @@ class BaseRelation(FakeAPIObject, Hashable):
|
||||
|
||||
def render(self) -> str:
|
||||
# if there is nothing set, this will return the empty string.
|
||||
return '.'.join(
|
||||
part for _, part in self._render_iterator()
|
||||
if part is not None
|
||||
)
|
||||
return ".".join(part for _, part in self._render_iterator() if part is not None)
|
||||
|
||||
def quoted(self, identifier):
|
||||
return '{quote_char}{identifier}{quote_char}'.format(
|
||||
return "{quote_char}{identifier}{quote_char}".format(
|
||||
quote_char=self.quote_character,
|
||||
identifier=identifier,
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def create_from_source(
|
||||
cls: Type[Self], source: ParsedSourceDefinition, **kwargs: Any
|
||||
) -> Self:
|
||||
def create_from_source(cls: Type[Self], source: ParsedSourceDefinition, **kwargs: Any) -> Self:
|
||||
source_quoting = source.quoting.to_dict(omit_none=True)
|
||||
source_quoting.pop('column', None)
|
||||
source_quoting.pop("column", None)
|
||||
quote_policy = deep_merge(
|
||||
cls.get_default_quote_policy().to_dict(omit_none=True),
|
||||
source_quoting,
|
||||
kwargs.get('quote_policy', {}),
|
||||
kwargs.get("quote_policy", {}),
|
||||
)
|
||||
|
||||
return cls.create(
|
||||
@@ -201,12 +198,12 @@ class BaseRelation(FakeAPIObject, Hashable):
|
||||
schema=source.schema,
|
||||
identifier=source.identifier,
|
||||
quote_policy=quote_policy,
|
||||
**kwargs
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def add_ephemeral_prefix(name: str):
|
||||
return f'__dbt__cte__{name}'
|
||||
return f"__dbt__cte__{name}"
|
||||
|
||||
@classmethod
|
||||
def create_ephemeral_from_node(
|
||||
@@ -239,7 +236,8 @@ class BaseRelation(FakeAPIObject, Hashable):
|
||||
schema=node.schema,
|
||||
identifier=node.alias,
|
||||
quote_policy=quote_policy,
|
||||
**kwargs)
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def create_from(
|
||||
@@ -251,15 +249,14 @@ class BaseRelation(FakeAPIObject, Hashable):
|
||||
if node.resource_type == NodeType.Source:
|
||||
if not isinstance(node, ParsedSourceDefinition):
|
||||
raise InternalException(
|
||||
'type mismatch, expected ParsedSourceDefinition but got {}'
|
||||
.format(type(node))
|
||||
"type mismatch, expected ParsedSourceDefinition but got {}".format(type(node))
|
||||
)
|
||||
return cls.create_from_source(node, **kwargs)
|
||||
else:
|
||||
if not isinstance(node, (ParsedNode, CompiledNode)):
|
||||
raise InternalException(
|
||||
'type mismatch, expected ParsedNode or CompiledNode but '
|
||||
'got {}'.format(type(node))
|
||||
"type mismatch, expected ParsedNode or CompiledNode but "
|
||||
"got {}".format(type(node))
|
||||
)
|
||||
return cls.create_from_node(config, node, **kwargs)
|
||||
|
||||
@@ -272,14 +269,16 @@ class BaseRelation(FakeAPIObject, Hashable):
|
||||
type: Optional[RelationType] = None,
|
||||
**kwargs,
|
||||
) -> Self:
|
||||
kwargs.update({
|
||||
'path': {
|
||||
'database': database,
|
||||
'schema': schema,
|
||||
'identifier': identifier,
|
||||
},
|
||||
'type': type,
|
||||
})
|
||||
kwargs.update(
|
||||
{
|
||||
"path": {
|
||||
"database": database,
|
||||
"schema": schema,
|
||||
"identifier": identifier,
|
||||
},
|
||||
"type": type,
|
||||
}
|
||||
)
|
||||
return cls.from_dict(kwargs)
|
||||
|
||||
def __repr__(self) -> str:
|
||||
@@ -345,7 +344,7 @@ class BaseRelation(FakeAPIObject, Hashable):
|
||||
return RelationType
|
||||
|
||||
|
||||
Info = TypeVar('Info', bound='InformationSchema')
|
||||
Info = TypeVar("Info", bound="InformationSchema")
|
||||
|
||||
|
||||
@dataclass(frozen=True, eq=False, repr=False)
|
||||
@@ -355,17 +354,15 @@ class InformationSchema(BaseRelation):
|
||||
def __post_init__(self):
|
||||
if not isinstance(self.information_schema_view, (type(None), str)):
|
||||
raise dbt.exceptions.CompilationException(
|
||||
'Got an invalid name: {}'.format(self.information_schema_view)
|
||||
"Got an invalid name: {}".format(self.information_schema_view)
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def get_path(
|
||||
cls, relation: BaseRelation, information_schema_view: Optional[str]
|
||||
) -> Path:
|
||||
def get_path(cls, relation: BaseRelation, information_schema_view: Optional[str]) -> Path:
|
||||
return Path(
|
||||
database=relation.database,
|
||||
schema=relation.schema,
|
||||
identifier='INFORMATION_SCHEMA',
|
||||
identifier="INFORMATION_SCHEMA",
|
||||
)
|
||||
|
||||
@classmethod
|
||||
@@ -396,9 +393,7 @@ class InformationSchema(BaseRelation):
|
||||
relation: BaseRelation,
|
||||
information_schema_view: Optional[str],
|
||||
) -> Info:
|
||||
include_policy = cls.get_include_policy(
|
||||
relation, information_schema_view
|
||||
)
|
||||
include_policy = cls.get_include_policy(relation, information_schema_view)
|
||||
quote_policy = cls.get_quote_policy(relation, information_schema_view)
|
||||
path = cls.get_path(relation, information_schema_view)
|
||||
return cls(
|
||||
@@ -420,6 +415,7 @@ class SchemaSearchMap(Dict[InformationSchema, Set[Optional[str]]]):
|
||||
search for what schemas. The schema values are all lowercased to avoid
|
||||
duplication.
|
||||
"""
|
||||
|
||||
def add(self, relation: BaseRelation):
|
||||
key = relation.information_schema_only()
|
||||
if key not in self:
|
||||
@@ -429,9 +425,7 @@ class SchemaSearchMap(Dict[InformationSchema, Set[Optional[str]]]):
|
||||
schema = relation.schema.lower()
|
||||
self[key].add(schema)
|
||||
|
||||
def search(
|
||||
self
|
||||
) -> Iterator[Tuple[InformationSchema, Optional[str]]]:
|
||||
def search(self) -> Iterator[Tuple[InformationSchema, Optional[str]]]:
|
||||
for information_schema_name, schemas in self.items():
|
||||
for schema in schemas:
|
||||
yield information_schema_name, schema
|
||||
@@ -446,14 +440,13 @@ class SchemaSearchMap(Dict[InformationSchema, Set[Optional[str]]]):
|
||||
dbt.exceptions.raise_compiler_error(str(seen))
|
||||
|
||||
for information_schema_name, schema in self.search():
|
||||
path = {
|
||||
'database': information_schema_name.database,
|
||||
'schema': schema
|
||||
}
|
||||
new.add(information_schema_name.incorporate(
|
||||
path=path,
|
||||
quote_policy={'database': False},
|
||||
include_policy={'database': False},
|
||||
))
|
||||
path = {"database": information_schema_name.database, "schema": schema}
|
||||
new.add(
|
||||
information_schema_name.incorporate(
|
||||
path=path,
|
||||
quote_policy={"database": False},
|
||||
include_policy={"database": False},
|
||||
)
|
||||
)
|
||||
|
||||
return new
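As a small illustration of the relation API touched above (a sketch, not part of the diff; the database, schema, and identifier values are invented), BaseRelation.create builds a relation from its path components and render joins whichever parts the include policy keeps:

from dbt.adapters.base.relation import BaseRelation

rel = BaseRelation.create(database="analytics", schema="staging", identifier="orders")
print(rel.render())                       # e.g. "analytics"."staging"."orders", depending on the quote policy
print(rel.without_identifier().render())  # the same relation rendered with only database and schema included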
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
import threading
|
||||
from collections import namedtuple
|
||||
from copy import deepcopy
|
||||
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple
|
||||
|
||||
from dbt.adapters.reference_keys import _make_key, _ReferenceKey
|
||||
import dbt.exceptions
|
||||
from dbt.events.functions import fire_event
|
||||
from dbt.events.types import (
|
||||
@@ -18,21 +18,10 @@ from dbt.events.types import (
|
||||
RenameSchema,
|
||||
TemporaryRelation,
|
||||
UncachedRelation,
|
||||
UpdateReference
|
||||
UpdateReference,
|
||||
)
|
||||
from dbt.utils import lowercase
|
||||
|
||||
_ReferenceKey = namedtuple('_ReferenceKey', 'database schema identifier')
|
||||
|
||||
|
||||
def _make_key(relation) -> _ReferenceKey:
|
||||
"""Make _ReferenceKeys with lowercase values for the cache so we don't have
|
||||
to keep track of quoting
|
||||
"""
|
||||
# databases and schemas can both be None
|
||||
return _ReferenceKey(lowercase(relation.database),
|
||||
lowercase(relation.schema),
|
||||
lowercase(relation.identifier))
|
||||
from dbt.helper_types import Lazy
|
||||
|
||||
|
||||
def dot_separated(key: _ReferenceKey) -> str:
|
||||
@@ -40,7 +29,7 @@ def dot_separated(key: _ReferenceKey) -> str:
|
||||
|
||||
:param _ReferenceKey key: The key to stringify.
|
||||
"""
|
||||
return '.'.join(map(str, key))
|
||||
return ".".join(map(str, key))
|
||||
|
||||
|
||||
class _CachedRelation:
|
||||
@@ -52,14 +41,15 @@ class _CachedRelation:
|
||||
that refer to this relation.
|
||||
:attr BaseRelation inner: The underlying dbt relation.
|
||||
"""
|
||||
|
||||
def __init__(self, inner):
|
||||
self.referenced_by = {}
|
||||
self.inner = inner
|
||||
|
||||
def __str__(self) -> str:
|
||||
return (
|
||||
'_CachedRelation(database={}, schema={}, identifier={}, inner={})'
|
||||
).format(self.database, self.schema, self.identifier, self.inner)
|
||||
return ("_CachedRelation(database={}, schema={}, identifier={}, inner={})").format(
|
||||
self.database, self.schema, self.identifier, self.inner
|
||||
)
|
||||
|
||||
@property
|
||||
def database(self) -> Optional[str]:
|
||||
@@ -93,7 +83,7 @@ class _CachedRelation:
|
||||
"""
|
||||
return _make_key(self)
|
||||
|
||||
def add_reference(self, referrer: '_CachedRelation'):
|
||||
def add_reference(self, referrer: "_CachedRelation"):
|
||||
"""Add a reference from referrer to self, indicating that if this node
|
||||
were drop...cascaded, the referrer would be dropped as well.
|
||||
|
||||
@@ -137,9 +127,9 @@ class _CachedRelation:
|
||||
# table_name is ever anything but the identifier (via .create())
|
||||
self.inner = self.inner.incorporate(
|
||||
path={
|
||||
'database': new_relation.inner.database,
|
||||
'schema': new_relation.inner.schema,
|
||||
'identifier': new_relation.inner.identifier
|
||||
"database": new_relation.inner.database,
|
||||
"schema": new_relation.inner.schema,
|
||||
"identifier": new_relation.inner.identifier,
|
||||
},
|
||||
)
|
||||
|
||||
@@ -155,8 +145,9 @@ class _CachedRelation:
|
||||
"""
|
||||
if new_key in self.referenced_by:
|
||||
dbt.exceptions.raise_cache_inconsistent(
|
||||
'in rename of "{}" -> "{}", new name is in the cache already'
|
||||
.format(old_key, new_key)
|
||||
'in rename of "{}" -> "{}", new name is in the cache already'.format(
|
||||
old_key, new_key
|
||||
)
|
||||
)
|
||||
|
||||
if old_key not in self.referenced_by:
|
||||
@@ -181,13 +172,16 @@ class RelationsCache:
|
||||
The adapters also hold this lock while filling the cache.
|
||||
:attr Set[str] schemas: The set of known/cached schemas, all lowercased.
|
||||
"""
|
||||
|
||||
def __init__(self) -> None:
|
||||
self.relations: Dict[_ReferenceKey, _CachedRelation] = {}
|
||||
self.lock = threading.RLock()
|
||||
self.schemas: Set[Tuple[Optional[str], Optional[str]]] = set()
|
||||
|
||||
def add_schema(
|
||||
self, database: Optional[str], schema: Optional[str],
|
||||
self,
|
||||
database: Optional[str],
|
||||
schema: Optional[str],
|
||||
) -> None:
|
||||
"""Add a schema to the set of known schemas (case-insensitive)
|
||||
|
||||
@@ -197,7 +191,9 @@ class RelationsCache:
|
||||
self.schemas.add((lowercase(database), lowercase(schema)))
|
||||
|
||||
def drop_schema(
|
||||
self, database: Optional[str], schema: Optional[str],
|
||||
self,
|
||||
database: Optional[str],
|
||||
schema: Optional[str],
|
||||
) -> None:
|
||||
"""Drop the given schema and remove it from the set of known schemas.
|
||||
|
||||
@@ -241,10 +237,7 @@ class RelationsCache:
|
||||
# self.relations or any cache entry's referenced_by during iteration
|
||||
# it's a runtime error!
|
||||
with self.lock:
|
||||
return {
|
||||
dot_separated(k): v.dump_graph_entry()
|
||||
for k, v in self.relations.items()
|
||||
}
|
||||
return {dot_separated(k): v.dump_graph_entry() for k, v in self.relations.items()}
|
||||
|
||||
def _setdefault(self, relation: _CachedRelation):
|
||||
"""Add a relation to the cache, or return it if it already exists.
|
||||
@@ -272,15 +265,13 @@ class RelationsCache:
|
||||
return
|
||||
if referenced is None:
|
||||
dbt.exceptions.raise_cache_inconsistent(
|
||||
'in add_link, referenced link key {} not in cache!'
|
||||
.format(referenced_key)
|
||||
"in add_link, referenced link key {} not in cache!".format(referenced_key)
|
||||
)
|
||||
|
||||
dependent = self.relations.get(dependent_key)
|
||||
if dependent is None:
|
||||
dbt.exceptions.raise_cache_inconsistent(
|
||||
'in add_link, dependent link key {} not in cache!'
|
||||
.format(dependent_key)
|
||||
"in add_link, dependent link key {} not in cache!".format(dependent_key)
|
||||
)
|
||||
|
||||
assert dependent is not None # we just raised!
|
||||
@@ -303,25 +294,20 @@ class RelationsCache:
|
||||
:raises InternalError: If either entry does not exist.
|
||||
"""
|
||||
ref_key = _make_key(referenced)
|
||||
dep_key = _make_key(dependent)
|
||||
if (ref_key.database, ref_key.schema) not in self:
|
||||
# if we have not cached the referenced schema at all, we must be
|
||||
# referring to a table outside our control. There's no need to make
|
||||
# a link - we will never drop the referenced relation during a run.
|
||||
fire_event(UncachedRelation(dep_key=dependent, ref_key=ref_key))
|
||||
fire_event(UncachedRelation(dep_key=dep_key, ref_key=ref_key))
|
||||
return
|
||||
if ref_key not in self.relations:
|
||||
# Insert a dummy "external" relation.
|
||||
referenced = referenced.replace(
|
||||
type=referenced.External
|
||||
)
|
||||
referenced = referenced.replace(type=referenced.External)
|
||||
self.add(referenced)
|
||||
|
||||
dep_key = _make_key(dependent)
|
||||
if dep_key not in self.relations:
|
||||
# Insert a dummy "external" relation.
|
||||
dependent = dependent.replace(
|
||||
type=referenced.External
|
||||
)
|
||||
dependent = dependent.replace(type=referenced.External)
|
||||
self.add(dependent)
|
||||
fire_event(AddLink(dep_key=dep_key, ref_key=ref_key))
|
||||
with self.lock:
|
||||
@@ -334,12 +320,12 @@ class RelationsCache:
|
||||
:param BaseRelation relation: The underlying relation.
|
||||
"""
|
||||
cached = _CachedRelation(relation)
|
||||
fire_event(AddRelation(relation=cached))
|
||||
fire_event(DumpBeforeAddGraph(dump=self.dump_graph()))
|
||||
fire_event(AddRelation(relation=_make_key(cached)))
|
||||
fire_event(DumpBeforeAddGraph(dump=Lazy.defer(lambda: self.dump_graph())))
|
||||
|
||||
with self.lock:
|
||||
self._setdefault(cached)
|
||||
fire_event(DumpAfterAddGraph(dump=self.dump_graph()))
|
||||
fire_event(DumpAfterAddGraph(dump=Lazy.defer(lambda: self.dump_graph())))
|
||||
|
||||
def _remove_refs(self, keys):
|
||||
"""Removes all references to all entries in keys. This does not
|
||||
@@ -354,17 +340,17 @@ class RelationsCache:
|
||||
for cached in self.relations.values():
|
||||
cached.release_references(keys)
|
||||
|
||||
def _drop_cascade_relation(self, dropped):
|
||||
def _drop_cascade_relation(self, dropped_key):
|
||||
"""Drop the given relation and cascade it appropriately to all
|
||||
dependent relations.
|
||||
|
||||
:param _CachedRelation dropped: An existing _CachedRelation to drop.
|
||||
"""
|
||||
if dropped not in self.relations:
|
||||
fire_event(DropMissingRelation(relation=dropped))
|
||||
if dropped_key not in self.relations:
|
||||
fire_event(DropMissingRelation(relation=dropped_key))
|
||||
return
|
||||
consequences = self.relations[dropped].collect_consequences()
|
||||
fire_event(DropCascade(dropped=dropped, consequences=consequences))
|
||||
consequences = self.relations[dropped_key].collect_consequences()
|
||||
fire_event(DropCascade(dropped=dropped_key, consequences=consequences))
|
||||
self._remove_refs(consequences)
|
||||
|
||||
def drop(self, relation):
|
||||
@@ -378,10 +364,10 @@ class RelationsCache:
|
||||
:param str schema: The schema of the relation to drop.
|
||||
:param str identifier: The identifier of the relation to drop.
|
||||
"""
|
||||
dropped = _make_key(relation)
|
||||
fire_event(DropRelation(dropped=dropped))
|
||||
dropped_key = _make_key(relation)
|
||||
fire_event(DropRelation(dropped=dropped_key))
|
||||
with self.lock:
|
||||
self._drop_cascade_relation(dropped)
|
||||
self._drop_cascade_relation(dropped_key)
|
||||
|
||||
def _rename_relation(self, old_key, new_relation):
|
||||
"""Rename a relation named old_key to new_key, updating references.
|
||||
@@ -428,8 +414,9 @@ class RelationsCache:
|
||||
"""
|
||||
if new_key in self.relations:
|
||||
dbt.exceptions.raise_cache_inconsistent(
|
||||
'in rename, new key {} already in cache: {}'
|
||||
.format(new_key, list(self.relations.keys()))
|
||||
"in rename, new key {} already in cache: {}".format(
|
||||
new_key, list(self.relations.keys())
|
||||
)
|
||||
)
|
||||
|
||||
if old_key not in self.relations:
|
||||
@@ -453,7 +440,7 @@ class RelationsCache:
|
||||
new_key = _make_key(new)
|
||||
fire_event(RenameSchema(old_key=old_key, new_key=new_key))
|
||||
|
||||
fire_event(DumpBeforeRenameSchema(dump=self.dump_graph()))
|
||||
fire_event(DumpBeforeRenameSchema(dump=Lazy.defer(lambda: self.dump_graph())))
|
||||
|
||||
with self.lock:
|
||||
if self._check_rename_constraints(old_key, new_key):
|
||||
@@ -461,11 +448,9 @@ class RelationsCache:
|
||||
else:
|
||||
self._setdefault(_CachedRelation(new))
|
||||
|
||||
fire_event(DumpAfterRenameSchema(dump=self.dump_graph()))
|
||||
fire_event(DumpAfterRenameSchema(dump=Lazy.defer(lambda: self.dump_graph())))
|
||||
|
||||
def get_relations(
|
||||
self, database: Optional[str], schema: Optional[str]
|
||||
) -> List[Any]:
|
||||
def get_relations(self, database: Optional[str], schema: Optional[str]) -> List[Any]:
|
||||
"""Case-insensitively yield all relations matching the given schema.
|
||||
|
||||
:param str schema: The case-insensitive schema name to list from.
|
||||
@@ -476,14 +461,14 @@ class RelationsCache:
|
||||
schema = lowercase(schema)
|
||||
with self.lock:
|
||||
results = [
|
||||
r.inner for r in self.relations.values()
|
||||
if (lowercase(r.schema) == schema and
|
||||
lowercase(r.database) == database)
|
||||
r.inner
|
||||
for r in self.relations.values()
|
||||
if (lowercase(r.schema) == schema and lowercase(r.database) == database)
|
||||
]
|
||||
|
||||
if None in results:
|
||||
dbt.exceptions.raise_cache_inconsistent(
|
||||
'in get_relations, a None relation was found in the cache!'
|
||||
"in get_relations, a None relation was found in the cache!"
|
||||
)
|
||||
return results
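The cache events above now wrap the expensive self.dump_graph() call in Lazy.defer(lambda: ...), presumably so the full graph dump is only computed if the event is actually consumed. A rough sketch of that deferral pattern, using a hypothetical stand-in rather than dbt's actual Lazy helper:

from typing import Callable, Generic, TypeVar

T = TypeVar("T")


class Deferred(Generic[T]):
    """Hypothetical stand-in for dbt.helper_types.Lazy: wrap a thunk and run it on demand."""

    def __init__(self, thunk: Callable[[], T]) -> None:
        self._thunk = thunk

    @classmethod
    def defer(cls, thunk: Callable[[], T]) -> "Deferred[T]":
        return cls(thunk)

    def force(self) -> T:
        # the expensive work (e.g. dumping the whole relation cache) only happens here
        return self._thunk()


dump = Deferred.defer(lambda: {"db.schema.table": []})  # nothing computed yet
print(dump.force())  # evaluated only when a consumer actually needs the value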
|
||||
|
||||
|
||||
@@ -49,9 +49,7 @@ class AdapterContainer:
|
||||
adapter = self.get_adapter_class_by_name(name)
|
||||
return adapter.Relation
|
||||
|
||||
def get_config_class_by_name(
|
||||
self, name: str
|
||||
) -> Type[AdapterConfig]:
|
||||
def get_config_class_by_name(self, name: str) -> Type[AdapterConfig]:
|
||||
adapter = self.get_adapter_class_by_name(name)
|
||||
return adapter.AdapterSpecificConfigs
|
||||
|
||||
@@ -61,13 +59,13 @@ class AdapterContainer:
|
||||
# singletons
|
||||
try:
|
||||
# mypy doesn't think modules have any attributes.
|
||||
mod: Any = import_module('.' + name, 'dbt.adapters')
|
||||
mod: Any = import_module("." + name, "dbt.adapters")
|
||||
except ModuleNotFoundError as exc:
|
||||
# if we failed to import the target module in particular, inform
|
||||
# the user about it via a runtime error
|
||||
if exc.name == 'dbt.adapters.' + name:
|
||||
if exc.name == "dbt.adapters." + name:
|
||||
fire_event(AdapterImportError(exc=exc))
|
||||
raise RuntimeException(f'Could not find adapter type {name}!')
|
||||
raise RuntimeException(f"Could not find adapter type {name}!")
|
||||
# otherwise, the error had to have come from some underlying
|
||||
# library. Log the stack trace.
|
||||
|
||||
@@ -78,8 +76,8 @@ class AdapterContainer:
|
||||
|
||||
if plugin_type != name:
|
||||
raise RuntimeException(
|
||||
f'Expected to find adapter with type named {name}, got '
|
||||
f'adapter with type {plugin_type}'
|
||||
f"Expected to find adapter with type named {name}, got "
|
||||
f"adapter with type {plugin_type}"
|
||||
)
|
||||
|
||||
with self.lock:
|
||||
@@ -109,8 +107,7 @@ class AdapterContainer:
|
||||
return self.adapters[adapter_name]
|
||||
|
||||
def reset_adapters(self):
|
||||
"""Clear the adapters. This is useful for tests, which change configs.
|
||||
"""
|
||||
"""Clear the adapters. This is useful for tests, which change configs."""
|
||||
with self.lock:
|
||||
for adapter in self.adapters.values():
|
||||
adapter.cleanup_connections()
|
||||
@@ -140,9 +137,7 @@ class AdapterContainer:
|
||||
try:
|
||||
plugin = self.plugins[plugin_name]
|
||||
except KeyError:
|
||||
raise InternalException(
|
||||
f'No plugin found for {plugin_name}'
|
||||
) from None
|
||||
raise InternalException(f"No plugin found for {plugin_name}") from None
|
||||
plugins.append(plugin)
|
||||
seen.add(plugin_name)
|
||||
if plugin.dependencies is None:
|
||||
@@ -153,9 +148,7 @@ class AdapterContainer:
|
||||
return plugins
|
||||
|
||||
def get_adapter_package_names(self, name: Optional[str]) -> List[str]:
|
||||
package_names: List[str] = [
|
||||
p.project_name for p in self.get_adapter_plugins(name)
|
||||
]
|
||||
package_names: List[str] = [p.project_name for p in self.get_adapter_plugins(name)]
|
||||
package_names.append(GLOBAL_PROJECT_NAME)
|
||||
return package_names
|
||||
|
||||
@@ -165,9 +158,7 @@ class AdapterContainer:
|
||||
try:
|
||||
path = self.packages[package_name]
|
||||
except KeyError:
|
||||
raise InternalException(
|
||||
f'No internal package listing found for {package_name}'
|
||||
)
|
||||
raise InternalException(f"No internal package listing found for {package_name}")
|
||||
paths.append(path)
|
||||
return paths
|
||||
|
||||
@@ -186,9 +177,12 @@ def get_adapter(config: AdapterRequiredConfig):
|
||||
return FACTORY.lookup_adapter(config.credentials.type)
|
||||
|
||||
|
||||
def get_adapter_by_type(adapter_type):
|
||||
return FACTORY.lookup_adapter(adapter_type)
|
||||
|
||||
|
||||
def reset_adapters():
|
||||
"""Clear the adapters. This is useful for tests, which change configs.
|
||||
"""
|
||||
"""Clear the adapters. This is useful for tests, which change configs."""
|
||||
FACTORY.reset_adapters()
|
||||
|
||||
|
||||
|
||||
@@ -1,18 +1,24 @@
|
||||
from dataclasses import dataclass
|
||||
from typing import (
|
||||
Type, Hashable, Optional, ContextManager, List, Generic, TypeVar, ClassVar,
|
||||
Tuple, Union, Dict, Any
|
||||
Type,
|
||||
Hashable,
|
||||
Optional,
|
||||
ContextManager,
|
||||
List,
|
||||
Generic,
|
||||
TypeVar,
|
||||
ClassVar,
|
||||
Tuple,
|
||||
Union,
|
||||
Dict,
|
||||
Any,
|
||||
)
|
||||
from typing_extensions import Protocol
|
||||
|
||||
import agate
|
||||
|
||||
from dbt.contracts.connection import (
|
||||
Connection, AdapterRequiredConfig, AdapterResponse
|
||||
)
|
||||
from dbt.contracts.graph.compiled import (
|
||||
CompiledNode, ManifestNode, NonSourceCompiledNode
|
||||
)
|
||||
from dbt.contracts.connection import Connection, AdapterRequiredConfig, AdapterResponse
|
||||
from dbt.contracts.graph.compiled import CompiledNode, ManifestNode, NonSourceCompiledNode
|
||||
from dbt.contracts.graph.parsed import ParsedNode, ParsedSourceDefinition
|
||||
from dbt.contracts.graph.model_config import BaseConfig
|
||||
from dbt.contracts.graph.manifest import Manifest
|
||||
@@ -34,7 +40,7 @@ class ColumnProtocol(Protocol):
|
||||
pass
|
||||
|
||||
|
||||
Self = TypeVar('Self', bound='RelationProtocol')
|
||||
Self = TypeVar("Self", bound="RelationProtocol")
|
||||
|
||||
|
||||
class RelationProtocol(Protocol):
|
||||
@@ -64,22 +70,15 @@ class CompilerProtocol(Protocol):
|
||||
...
|
||||
|
||||
|
||||
AdapterConfig_T = TypeVar(
|
||||
'AdapterConfig_T', bound=AdapterConfig
|
||||
)
|
||||
ConnectionManager_T = TypeVar(
|
||||
'ConnectionManager_T', bound=ConnectionManagerProtocol
|
||||
)
|
||||
Relation_T = TypeVar(
|
||||
'Relation_T', bound=RelationProtocol
|
||||
)
|
||||
Column_T = TypeVar(
|
||||
'Column_T', bound=ColumnProtocol
|
||||
)
|
||||
Compiler_T = TypeVar('Compiler_T', bound=CompilerProtocol)
|
||||
AdapterConfig_T = TypeVar("AdapterConfig_T", bound=AdapterConfig)
|
||||
ConnectionManager_T = TypeVar("ConnectionManager_T", bound=ConnectionManagerProtocol)
|
||||
Relation_T = TypeVar("Relation_T", bound=RelationProtocol)
|
||||
Column_T = TypeVar("Column_T", bound=ColumnProtocol)
|
||||
Compiler_T = TypeVar("Compiler_T", bound=CompilerProtocol)
|
||||
|
||||
|
||||
class AdapterProtocol(
|
||||
# TODO CT-211
|
||||
class AdapterProtocol( # type: ignore[misc]
|
||||
Protocol,
|
||||
Generic[
|
||||
AdapterConfig_T,
|
||||
@@ -87,7 +86,7 @@ class AdapterProtocol(
|
||||
Relation_T,
|
||||
Column_T,
|
||||
Compiler_T,
|
||||
]
|
||||
],
|
||||
):
|
||||
AdapterSpecificConfigs: ClassVar[Type[AdapterConfig_T]]
|
||||
Column: ClassVar[Type[Column_T]]
|
||||
|
||||
core/dbt/adapters/reference_keys.py (new file, 24 lines)
@@ -0,0 +1,24 @@
|
||||
# this module exists to resolve circular imports with the events module
|
||||
|
||||
from collections import namedtuple
|
||||
from typing import Optional
|
||||
|
||||
|
||||
_ReferenceKey = namedtuple("_ReferenceKey", "database schema identifier")
|
||||
|
||||
|
||||
def lowercase(value: Optional[str]) -> Optional[str]:
|
||||
if value is None:
|
||||
return None
|
||||
else:
|
||||
return value.lower()
|
||||
|
||||
|
||||
def _make_key(relation) -> _ReferenceKey:
|
||||
"""Make _ReferenceKeys with lowercase values for the cache so we don't have
|
||||
to keep track of quoting
|
||||
"""
|
||||
# databases and schemas can both be None
|
||||
return _ReferenceKey(
|
||||
lowercase(relation.database), lowercase(relation.schema), lowercase(relation.identifier)
|
||||
)
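As a minimal sketch (not part of the diff), the new helpers can be exercised with any object that exposes database, schema, and identifier attributes; the stand-in relation below is made up:

from collections import namedtuple

from dbt.adapters.reference_keys import _make_key, lowercase

FakeRelation = namedtuple("FakeRelation", "database schema identifier")  # hypothetical stand-in for a BaseRelation

key = _make_key(FakeRelation(database="Analytics", schema="PUBLIC", identifier="My_Table"))
print(key)              # _ReferenceKey(database='analytics', schema='public', identifier='my_table')
print(lowercase(None))  # None is passed through, since database and schema may be unset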
|
||||
@@ -7,9 +7,7 @@ import agate
|
||||
import dbt.clients.agate_helper
|
||||
import dbt.exceptions
|
||||
from dbt.adapters.base import BaseConnectionManager
|
||||
from dbt.contracts.connection import (
|
||||
Connection, ConnectionState, AdapterResponse
|
||||
)
|
||||
from dbt.contracts.connection import Connection, ConnectionState, AdapterResponse
|
||||
from dbt.events.functions import fire_event
|
||||
from dbt.events.types import ConnectionUsed, SQLQuery, SQLCommit, SQLQueryStatus
|
||||
|
||||
@@ -23,11 +21,12 @@ class SQLConnectionManager(BaseConnectionManager):
|
||||
- get_response
|
||||
- open
|
||||
"""
|
||||
|
||||
@abc.abstractmethod
|
||||
def cancel(self, connection: Connection):
|
||||
"""Cancel the given connection."""
|
||||
raise dbt.exceptions.NotImplementedException(
|
||||
'`cancel` is not implemented for this adapter!'
|
||||
"`cancel` is not implemented for this adapter!"
|
||||
)
|
||||
|
||||
def cancel_open(self) -> List[str]:
|
||||
@@ -40,10 +39,7 @@ class SQLConnectionManager(BaseConnectionManager):
|
||||
|
||||
# if the connection failed, the handle will be None so we have
|
||||
# nothing to cancel.
|
||||
if (
|
||||
connection.handle is not None and
|
||||
connection.state == ConnectionState.OPEN
|
||||
):
|
||||
if connection.handle is not None and connection.state == ConnectionState.OPEN:
|
||||
self.cancel(connection)
|
||||
if connection.name is not None:
|
||||
names.append(connection.name)
|
||||
@@ -54,7 +50,7 @@ class SQLConnectionManager(BaseConnectionManager):
|
||||
sql: str,
|
||||
auto_begin: bool = True,
|
||||
bindings: Optional[Any] = None,
|
||||
abridge_sql_log: bool = False
|
||||
abridge_sql_log: bool = False,
|
||||
) -> Tuple[Connection, Any]:
|
||||
connection = self.get_thread_connection()
|
||||
if auto_begin and connection.transaction_open is False:
|
||||
@@ -63,7 +59,7 @@ class SQLConnectionManager(BaseConnectionManager):
|
||||
|
||||
with self.exception_handler(sql):
|
||||
if abridge_sql_log:
|
||||
log_sql = '{}...'.format(sql[:512])
|
||||
log_sql = "{}...".format(sql[:512])
|
||||
else:
|
||||
log_sql = sql
|
||||
|
||||
@@ -85,23 +81,26 @@ class SQLConnectionManager(BaseConnectionManager):
|
||||
def get_response(cls, cursor: Any) -> Union[AdapterResponse, str]:
|
||||
"""Get the status of the cursor."""
|
||||
raise dbt.exceptions.NotImplementedException(
|
||||
'`get_response` is not implemented for this adapter!'
|
||||
"`get_response` is not implemented for this adapter!"
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def process_results(
|
||||
cls,
|
||||
column_names: Iterable[str],
|
||||
rows: Iterable[Any]
|
||||
cls, column_names: Iterable[str], rows: Iterable[Any]
|
||||
) -> List[Dict[str, Any]]:
|
||||
unique_col_names = dict()
|
||||
for idx in range(len(column_names)):
|
||||
col_name = column_names[idx]
|
||||
# TODO CT-211
|
||||
unique_col_names = dict() # type: ignore[var-annotated]
|
||||
# TODO CT-211
|
||||
for idx in range(len(column_names)): # type: ignore[arg-type]
|
||||
# TODO CT-211
|
||||
col_name = column_names[idx] # type: ignore[index]
|
||||
if col_name in unique_col_names:
|
||||
unique_col_names[col_name] += 1
|
||||
column_names[idx] = f'{col_name}_{unique_col_names[col_name]}'
|
||||
# TODO CT-211
|
||||
column_names[idx] = f"{col_name}_{unique_col_names[col_name]}" # type: ignore[index] # noqa
|
||||
else:
|
||||
unique_col_names[column_names[idx]] = 1
|
||||
# TODO CT-211
|
||||
unique_col_names[column_names[idx]] = 1 # type: ignore[index]
|
||||
return [dict(zip(column_names, row)) for row in rows]
|
||||
|
||||
@classmethod
|
||||
@@ -114,10 +113,7 @@ class SQLConnectionManager(BaseConnectionManager):
|
||||
rows = cursor.fetchall()
|
||||
data = cls.process_results(column_names, rows)
|
||||
|
||||
return dbt.clients.agate_helper.table_from_data_flat(
|
||||
data,
|
||||
column_names
|
||||
)
|
||||
return dbt.clients.agate_helper.table_from_data_flat(data, column_names)
|
||||
|
||||
def execute(
|
||||
self, sql: str, auto_begin: bool = False, fetch: bool = False
|
||||
@@ -132,17 +128,18 @@ class SQLConnectionManager(BaseConnectionManager):
|
||||
return response, table
|
||||
|
||||
def add_begin_query(self):
|
||||
return self.add_query('BEGIN', auto_begin=False)
|
||||
return self.add_query("BEGIN", auto_begin=False)
|
||||
|
||||
def add_commit_query(self):
|
||||
return self.add_query('COMMIT', auto_begin=False)
|
||||
return self.add_query("COMMIT", auto_begin=False)
|
||||
|
||||
def begin(self):
|
||||
connection = self.get_thread_connection()
|
||||
if connection.transaction_open is True:
|
||||
raise dbt.exceptions.InternalException(
|
||||
'Tried to begin a new transaction on connection "{}", but '
|
||||
'it already had one open!'.format(connection.name))
|
||||
"it already had one open!".format(connection.name)
|
||||
)
|
||||
|
||||
self.add_begin_query()
|
||||
|
||||
@@ -154,7 +151,8 @@ class SQLConnectionManager(BaseConnectionManager):
|
||||
if connection.transaction_open is False:
|
||||
raise dbt.exceptions.InternalException(
|
||||
'Tried to commit transaction on connection "{}", but '
|
||||
'it does not have one open!'.format(connection.name))
|
||||
"it does not have one open!".format(connection.name)
|
||||
)
|
||||
|
||||
fire_event(SQLCommit(conn_name=connection.name))
|
||||
self.add_commit_query()
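One behaviour worth calling out from the connection manager above: process_results de-duplicates repeated column names by suffixing a counter before zipping each row into a dict. A quick sketch (the column names and row values are invented):

from dbt.adapters.sql import SQLConnectionManager

rows = [(1, "a", "x"), (2, "b", "y")]
print(SQLConnectionManager.process_results(["id", "name", "name"], rows))
# [{'id': 1, 'name': 'a', 'name_2': 'x'}, {'id': 2, 'name': 'b', 'name_2': 'y'}]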
|
||||
|
||||
@@ -5,6 +5,7 @@ import dbt.clients.agate_helper
|
||||
from dbt.contracts.connection import Connection
|
||||
import dbt.exceptions
|
||||
from dbt.adapters.base import BaseAdapter, available
|
||||
from dbt.adapters.cache import _make_key
|
||||
from dbt.adapters.sql import SQLConnectionManager
|
||||
from dbt.events.functions import fire_event
|
||||
from dbt.events.types import ColTypeChange, SchemaCreation, SchemaDrop
|
||||
@@ -12,21 +13,21 @@ from dbt.events.types import ColTypeChange, SchemaCreation, SchemaDrop
|
||||
|
||||
from dbt.adapters.base.relation import BaseRelation
|
||||
|
||||
LIST_RELATIONS_MACRO_NAME = 'list_relations_without_caching'
|
||||
GET_COLUMNS_IN_RELATION_MACRO_NAME = 'get_columns_in_relation'
|
||||
LIST_SCHEMAS_MACRO_NAME = 'list_schemas'
|
||||
CHECK_SCHEMA_EXISTS_MACRO_NAME = 'check_schema_exists'
|
||||
CREATE_SCHEMA_MACRO_NAME = 'create_schema'
|
||||
DROP_SCHEMA_MACRO_NAME = 'drop_schema'
|
||||
RENAME_RELATION_MACRO_NAME = 'rename_relation'
|
||||
TRUNCATE_RELATION_MACRO_NAME = 'truncate_relation'
|
||||
DROP_RELATION_MACRO_NAME = 'drop_relation'
|
||||
ALTER_COLUMN_TYPE_MACRO_NAME = 'alter_column_type'
|
||||
LIST_RELATIONS_MACRO_NAME = "list_relations_without_caching"
|
||||
GET_COLUMNS_IN_RELATION_MACRO_NAME = "get_columns_in_relation"
|
||||
LIST_SCHEMAS_MACRO_NAME = "list_schemas"
|
||||
CHECK_SCHEMA_EXISTS_MACRO_NAME = "check_schema_exists"
|
||||
CREATE_SCHEMA_MACRO_NAME = "create_schema"
|
||||
DROP_SCHEMA_MACRO_NAME = "drop_schema"
|
||||
RENAME_RELATION_MACRO_NAME = "rename_relation"
|
||||
TRUNCATE_RELATION_MACRO_NAME = "truncate_relation"
|
||||
DROP_RELATION_MACRO_NAME = "drop_relation"
|
||||
ALTER_COLUMN_TYPE_MACRO_NAME = "alter_column_type"
|
||||
|
||||
|
||||
class SQLAdapter(BaseAdapter):
|
||||
"""The default adapter with the common agate conversions and some SQL
|
||||
methods implemented. This adapter has a different much shorter list of
|
||||
methods was implemented. This adapter has a different much shorter list of
|
||||
methods to implement, but some more macros that must be implemented.
|
||||
|
||||
To implement a macro, implement "${adapter_type}__${macro_name}". in the
|
||||
@@ -62,30 +63,24 @@ class SQLAdapter(BaseAdapter):
|
||||
:param abridge_sql_log: If set, limit the raw sql logged to 512
|
||||
characters
|
||||
"""
|
||||
return self.connections.add_query(sql, auto_begin, bindings,
|
||||
abridge_sql_log)
|
||||
return self.connections.add_query(sql, auto_begin, bindings, abridge_sql_log)
|
||||
|
||||
@classmethod
|
||||
def convert_text_type(cls, agate_table: agate.Table, col_idx: int) -> str:
|
||||
return "text"
|
||||
|
||||
@classmethod
|
||||
def convert_number_type(
|
||||
cls, agate_table: agate.Table, col_idx: int
|
||||
) -> str:
|
||||
decimals = agate_table.aggregate(agate.MaxPrecision(col_idx))
|
||||
def convert_number_type(cls, agate_table: agate.Table, col_idx: int) -> str:
|
||||
# TODO CT-211
|
||||
decimals = agate_table.aggregate(agate.MaxPrecision(col_idx)) # type: ignore[attr-defined]
|
||||
return "float8" if decimals else "integer"
|
||||
|
||||
@classmethod
|
||||
def convert_boolean_type(
|
||||
cls, agate_table: agate.Table, col_idx: int
|
||||
) -> str:
|
||||
def convert_boolean_type(cls, agate_table: agate.Table, col_idx: int) -> str:
|
||||
return "boolean"
|
||||
|
||||
@classmethod
|
||||
def convert_datetime_type(
|
||||
cls, agate_table: agate.Table, col_idx: int
|
||||
) -> str:
|
||||
def convert_datetime_type(cls, agate_table: agate.Table, col_idx: int) -> str:
|
||||
return "timestamp without time zone"
|
||||
|
||||
@classmethod
|
||||
@@ -101,36 +96,27 @@ class SQLAdapter(BaseAdapter):
|
||||
return True
|
||||
|
||||
def expand_column_types(self, goal, current):
|
||||
reference_columns = {
|
||||
c.name: c for c in
|
||||
self.get_columns_in_relation(goal)
|
||||
}
|
||||
reference_columns = {c.name: c for c in self.get_columns_in_relation(goal)}
|
||||
|
||||
target_columns = {
|
||||
c.name: c for c
|
||||
in self.get_columns_in_relation(current)
|
||||
}
|
||||
target_columns = {c.name: c for c in self.get_columns_in_relation(current)}
|
||||
|
||||
for column_name, reference_column in reference_columns.items():
|
||||
target_column = target_columns.get(column_name)
|
||||
|
||||
if target_column is not None and \
|
||||
target_column.can_expand_to(reference_column):
|
||||
if target_column is not None and target_column.can_expand_to(reference_column):
|
||||
col_string_size = reference_column.string_size()
|
||||
new_type = self.Column.string_type(col_string_size)
|
||||
fire_event(
|
||||
ColTypeChange(
|
||||
orig_type=target_column.data_type,
|
||||
new_type=new_type,
|
||||
table=current,
|
||||
table=_make_key(current),
|
||||
)
|
||||
)
|
||||
|
||||
self.alter_column_type(current, column_name, new_type)
|
||||
|
||||
def alter_column_type(
|
||||
self, relation, column_name, new_column_type
|
||||
) -> None:
|
||||
def alter_column_type(self, relation, column_name, new_column_type) -> None:
|
||||
"""
|
||||
1. Create a new column (w/ temp name and correct type)
|
||||
2. Copy data over to it
|
||||
@@ -138,53 +124,40 @@ class SQLAdapter(BaseAdapter):
4. Rename the new column to existing column
"""
kwargs = {
'relation': relation,
'column_name': column_name,
'new_column_type': new_column_type,
"relation": relation,
"column_name": column_name,
"new_column_type": new_column_type,
}
self.execute_macro(
ALTER_COLUMN_TYPE_MACRO_NAME,
kwargs=kwargs
)
self.execute_macro(ALTER_COLUMN_TYPE_MACRO_NAME, kwargs=kwargs)

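For readers unfamiliar with the macro this method delegates to, the four docstring steps amount to SQL along these lines, sketched here for a hypothetical Postgres-style relation and column (illustrative only, not taken from this diff):

```python
# Illustration only: the four steps described in the alter_column_type
# docstring, spelled out for a hypothetical relation "analytics"."orders"
# and column "status".
TMP = '"status__dbt_alter_column_type"'

steps = [
    f'alter table "analytics"."orders" add column {TMP} varchar(256)',   # 1. new column, correct type
    f'update "analytics"."orders" set {TMP} = "status"',                 # 2. copy data over
    'alter table "analytics"."orders" drop column "status" cascade',     # 3. drop the existing column
    f'alter table "analytics"."orders" rename column {TMP} to "status"', # 4. rename new column to old name
]

for sql in steps:
    print(sql)
```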
def drop_relation(self, relation):
|
||||
if relation.type is None:
|
||||
dbt.exceptions.raise_compiler_error(
|
||||
'Tried to drop relation {}, but its type is null.'
|
||||
.format(relation))
|
||||
"Tried to drop relation {}, but its type is null.".format(relation)
|
||||
)
|
||||
|
||||
self.cache_dropped(relation)
|
||||
self.execute_macro(
|
||||
DROP_RELATION_MACRO_NAME,
|
||||
kwargs={'relation': relation}
|
||||
)
|
||||
self.execute_macro(DROP_RELATION_MACRO_NAME, kwargs={"relation": relation})
|
||||
|
||||
def truncate_relation(self, relation):
|
||||
self.execute_macro(
|
||||
TRUNCATE_RELATION_MACRO_NAME,
|
||||
kwargs={'relation': relation}
|
||||
)
|
||||
self.execute_macro(TRUNCATE_RELATION_MACRO_NAME, kwargs={"relation": relation})
|
||||
|
||||
def rename_relation(self, from_relation, to_relation):
|
||||
self.cache_renamed(from_relation, to_relation)
|
||||
|
||||
kwargs = {'from_relation': from_relation, 'to_relation': to_relation}
|
||||
self.execute_macro(
|
||||
RENAME_RELATION_MACRO_NAME,
|
||||
kwargs=kwargs
|
||||
)
|
||||
kwargs = {"from_relation": from_relation, "to_relation": to_relation}
|
||||
self.execute_macro(RENAME_RELATION_MACRO_NAME, kwargs=kwargs)
|
||||
|
||||
def get_columns_in_relation(self, relation):
|
||||
return self.execute_macro(
|
||||
GET_COLUMNS_IN_RELATION_MACRO_NAME,
|
||||
kwargs={'relation': relation}
|
||||
GET_COLUMNS_IN_RELATION_MACRO_NAME, kwargs={"relation": relation}
|
||||
)
|
||||
|
||||
def create_schema(self, relation: BaseRelation) -> None:
|
||||
relation = relation.without_identifier()
|
||||
fire_event(SchemaCreation(relation=relation))
|
||||
fire_event(SchemaCreation(relation=_make_key(relation)))
|
||||
kwargs = {
|
||||
'relation': relation,
|
||||
"relation": relation,
|
||||
}
|
||||
self.execute_macro(CREATE_SCHEMA_MACRO_NAME, kwargs=kwargs)
|
||||
self.commit_if_has_connection()
|
||||
@@ -193,51 +166,44 @@ class SQLAdapter(BaseAdapter):
|
||||
|
||||
def drop_schema(self, relation: BaseRelation) -> None:
|
||||
relation = relation.without_identifier()
|
||||
fire_event(SchemaDrop(relation=relation))
|
||||
fire_event(SchemaDrop(relation=_make_key(relation)))
|
||||
kwargs = {
|
||||
'relation': relation,
|
||||
"relation": relation,
|
||||
}
|
||||
self.execute_macro(DROP_SCHEMA_MACRO_NAME, kwargs=kwargs)
|
||||
# we can update the cache here
|
||||
self.cache.drop_schema(relation.database, relation.schema)
|
||||
|
||||
def list_relations_without_caching(
|
||||
self, schema_relation: BaseRelation,
|
||||
self,
|
||||
schema_relation: BaseRelation,
|
||||
) -> List[BaseRelation]:
|
||||
kwargs = {'schema_relation': schema_relation}
|
||||
results = self.execute_macro(
|
||||
LIST_RELATIONS_MACRO_NAME,
|
||||
kwargs=kwargs
|
||||
)
|
||||
kwargs = {"schema_relation": schema_relation}
|
||||
results = self.execute_macro(LIST_RELATIONS_MACRO_NAME, kwargs=kwargs)
|
||||
|
||||
relations = []
|
||||
quote_policy = {
|
||||
'database': True,
|
||||
'schema': True,
|
||||
'identifier': True
|
||||
}
|
||||
quote_policy = {"database": True, "schema": True, "identifier": True}
|
||||
for _database, name, _schema, _type in results:
|
||||
try:
|
||||
_type = self.Relation.get_relation_type(_type)
|
||||
except ValueError:
|
||||
_type = self.Relation.External
|
||||
relations.append(self.Relation.create(
|
||||
database=_database,
|
||||
schema=_schema,
|
||||
identifier=name,
|
||||
quote_policy=quote_policy,
|
||||
type=_type
|
||||
))
|
||||
relations.append(
|
||||
self.Relation.create(
|
||||
database=_database,
|
||||
schema=_schema,
|
||||
identifier=name,
|
||||
quote_policy=quote_policy,
|
||||
type=_type,
|
||||
)
|
||||
)
|
||||
return relations
|
||||
|
||||
def quote(self, identifier):
|
||||
return '"{}"'.format(identifier)
|
||||
|
||||
def list_schemas(self, database: str) -> List[str]:
|
||||
results = self.execute_macro(
|
||||
LIST_SCHEMAS_MACRO_NAME,
|
||||
kwargs={'database': database}
|
||||
)
|
||||
results = self.execute_macro(LIST_SCHEMAS_MACRO_NAME, kwargs={"database": database})
|
||||
|
||||
return [row[0] for row in results]
|
||||
|
||||
@@ -245,13 +211,10 @@ class SQLAdapter(BaseAdapter):
|
||||
information_schema = self.Relation.create(
|
||||
database=database,
|
||||
schema=schema,
|
||||
identifier='INFORMATION_SCHEMA',
|
||||
quote_policy=self.config.quoting
|
||||
identifier="INFORMATION_SCHEMA",
|
||||
quote_policy=self.config.quoting,
|
||||
).information_schema()
|
||||
|
||||
kwargs = {'information_schema': information_schema, 'schema': schema}
|
||||
results = self.execute_macro(
|
||||
CHECK_SCHEMA_EXISTS_MACRO_NAME,
|
||||
kwargs=kwargs
|
||||
)
|
||||
kwargs = {"information_schema": information_schema, "schema": schema}
|
||||
results = self.execute_macro(CHECK_SCHEMA_EXISTS_MACRO_NAME, kwargs=kwargs)
|
||||
return results[0][0] > 0
|
||||
|
||||
core/dbt/clients/README.md (new file, 1 line)
@@ -0,0 +1 @@
# Clients README
@@ -10,79 +10,83 @@ def regex(pat):
|
||||
|
||||
class BlockData:
|
||||
"""raw plaintext data from the top level of the file."""
|
||||
|
||||
def __init__(self, contents):
|
||||
self.block_type_name = '__dbt__data'
|
||||
self.block_type_name = "__dbt__data"
|
||||
self.contents = contents
|
||||
self.full_block = contents
|
||||
|
||||
|
||||
class BlockTag:
|
||||
def __init__(self, block_type_name, block_name, contents=None,
|
||||
full_block=None, **kw):
|
||||
def __init__(self, block_type_name, block_name, contents=None, full_block=None, **kw):
|
||||
self.block_type_name = block_type_name
|
||||
self.block_name = block_name
|
||||
self.contents = contents
|
||||
self.full_block = full_block
|
||||
|
||||
def __str__(self):
|
||||
return 'BlockTag({!r}, {!r})'.format(self.block_type_name,
|
||||
self.block_name)
|
||||
return "BlockTag({!r}, {!r})".format(self.block_type_name, self.block_name)
|
||||
|
||||
def __repr__(self):
|
||||
return str(self)
|
||||
|
||||
@property
|
||||
def end_block_type_name(self):
|
||||
return 'end{}'.format(self.block_type_name)
|
||||
return "end{}".format(self.block_type_name)
|
||||
|
||||
def end_pat(self):
|
||||
# we don't want to use string formatting here because jinja uses most
|
||||
# of the string formatting operators in its syntax...
|
||||
pattern = ''.join((
|
||||
r'(?P<endblock>((?:\s*\{\%\-|\{\%)\s*',
|
||||
self.end_block_type_name,
|
||||
r'\s*(?:\-\%\}\s*|\%\})))',
|
||||
))
|
||||
pattern = "".join(
|
||||
(
|
||||
r"(?P<endblock>((?:\s*\{\%\-|\{\%)\s*",
|
||||
self.end_block_type_name,
|
||||
r"\s*(?:\-\%\}\s*|\%\})))",
|
||||
)
|
||||
)
|
||||
return regex(pattern)
|
||||
|
||||
|
||||
Tag = namedtuple('Tag', 'block_type_name block_name start end')
|
||||
Tag = namedtuple("Tag", "block_type_name block_name start end")
|
||||
|
||||
|
||||
_NAME_PATTERN = r'[A-Za-z_][A-Za-z_0-9]*'
|
||||
_NAME_PATTERN = r"[A-Za-z_][A-Za-z_0-9]*"
|
||||
|
||||
COMMENT_START_PATTERN = regex(r'(?:(?P<comment_start>(\s*\{\#)))')
|
||||
COMMENT_END_PATTERN = regex(r'(.*?)(\s*\#\})')
|
||||
RAW_START_PATTERN = regex(
|
||||
r'(?:\s*\{\%\-|\{\%)\s*(?P<raw_start>(raw))\s*(?:\-\%\}\s*|\%\})'
|
||||
COMMENT_START_PATTERN = regex(r"(?:(?P<comment_start>(\s*\{\#)))")
|
||||
COMMENT_END_PATTERN = regex(r"(.*?)(\s*\#\})")
|
||||
RAW_START_PATTERN = regex(r"(?:\s*\{\%\-|\{\%)\s*(?P<raw_start>(raw))\s*(?:\-\%\}\s*|\%\})")
|
||||
EXPR_START_PATTERN = regex(r"(?P<expr_start>(\{\{\s*))")
|
||||
EXPR_END_PATTERN = regex(r"(?P<expr_end>(\s*\}\}))")
|
||||
|
||||
BLOCK_START_PATTERN = regex(
|
||||
"".join(
|
||||
(
|
||||
r"(?:\s*\{\%\-|\{\%)\s*",
|
||||
r"(?P<block_type_name>({}))".format(_NAME_PATTERN),
|
||||
# some blocks have a 'block name'.
|
||||
r"(?:\s+(?P<block_name>({})))?".format(_NAME_PATTERN),
|
||||
)
|
||||
)
|
||||
)
|
||||
EXPR_START_PATTERN = regex(r'(?P<expr_start>(\{\{\s*))')
|
||||
EXPR_END_PATTERN = regex(r'(?P<expr_end>(\s*\}\}))')
|
||||
|
||||
BLOCK_START_PATTERN = regex(''.join((
|
||||
r'(?:\s*\{\%\-|\{\%)\s*',
|
||||
r'(?P<block_type_name>({}))'.format(_NAME_PATTERN),
|
||||
# some blocks have a 'block name'.
|
||||
r'(?:\s+(?P<block_name>({})))?'.format(_NAME_PATTERN),
|
||||
)))
|
||||
|
||||
|
||||
RAW_BLOCK_PATTERN = regex(''.join((
|
||||
r'(?:\s*\{\%\-|\{\%)\s*raw\s*(?:\-\%\}\s*|\%\})',
|
||||
r'(?:.*?)',
|
||||
r'(?:\s*\{\%\-|\{\%)\s*endraw\s*(?:\-\%\}\s*|\%\})',
|
||||
)))
|
||||
RAW_BLOCK_PATTERN = regex(
|
||||
"".join(
|
||||
(
|
||||
r"(?:\s*\{\%\-|\{\%)\s*raw\s*(?:\-\%\}\s*|\%\})",
|
||||
r"(?:.*?)",
|
||||
r"(?:\s*\{\%\-|\{\%)\s*endraw\s*(?:\-\%\}\s*|\%\})",
|
||||
)
|
||||
)
|
||||
)
|
||||
|
||||
TAG_CLOSE_PATTERN = regex(r'(?:(?P<tag_close>(\-\%\}\s*|\%\})))')
|
||||
TAG_CLOSE_PATTERN = regex(r"(?:(?P<tag_close>(\-\%\}\s*|\%\})))")
|
||||
|
||||
# stolen from jinja's lexer. Note that we've consumed all prefix whitespace by
|
||||
# the time we want to use this.
|
||||
STRING_PATTERN = regex(
|
||||
r"(?P<string>('([^'\\]*(?:\\.[^'\\]*)*)'|"
|
||||
r'"([^"\\]*(?:\\.[^"\\]*)*)"))'
|
||||
)
|
||||
STRING_PATTERN = regex(r"(?P<string>('([^'\\]*(?:\\.[^'\\]*)*)'|" r'"([^"\\]*(?:\\.[^"\\]*)*)"))')
|
||||
|
||||
QUOTE_START_PATTERN = regex(r'''(?P<quote>(['"]))''')
|
||||
QUOTE_START_PATTERN = regex(r"""(?P<quote>(['"]))""")
|
||||
|
||||
|
||||
class TagIterator:
|
||||
@@ -99,10 +103,10 @@ class TagIterator:
|
||||
end_val: int = self.pos if end is None else end
|
||||
data = self.data[:end_val]
|
||||
# if not found, rfind returns -1, and -1+1=0, which is perfect!
|
||||
last_line_start = data.rfind('\n') + 1
|
||||
last_line_start = data.rfind("\n") + 1
|
||||
# it's easy to forget this, but line numbers are 1-indexed
|
||||
line_number = data.count('\n') + 1
|
||||
return f'{line_number}:{end_val - last_line_start}'
|
||||
line_number = data.count("\n") + 1
|
||||
return f"{line_number}:{end_val - last_line_start}"
|
||||
|
||||
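The linepos hunk above relies on a small trick: str.rfind returns -1 when no newline precedes the position, and -1 + 1 gives 0, the start of the data, while line numbers stay 1-indexed. A standalone sketch with a hypothetical template string (not taken from the diff):

```python
# "line:column" arithmetic, as used by linepos() above.
data = "{% macro a() %}\nselect 1\n{% endmacro %}"
end_val = data.index("select")          # some absolute offset into the data

last_line_start = data[:end_val].rfind("\n") + 1  # -1 + 1 == 0 when there is no newline yet
line_number = data[:end_val].count("\n") + 1      # line numbers are 1-indexed

print(f"{line_number}:{end_val - last_line_start}")  # -> "2:0"
```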
def advance(self, new_position):
|
||||
self.pos = new_position
|
||||
@@ -120,7 +124,7 @@ class TagIterator:
|
||||
matches = []
|
||||
for pattern in patterns:
|
||||
# default to 'search', but sometimes we want to 'match'.
|
||||
if kwargs.get('method', 'search') == 'search':
|
||||
if kwargs.get("method", "search") == "search":
|
||||
match = self._search(pattern)
|
||||
else:
|
||||
match = self._match(pattern)
|
||||
@@ -136,7 +140,7 @@ class TagIterator:
|
||||
match = self._first_match(*patterns, **kwargs)
|
||||
if match is None:
|
||||
msg = 'unexpected EOF, expected {}, got "{}"'.format(
|
||||
expected_name, self.data[self.pos:]
|
||||
expected_name, self.data[self.pos :]
|
||||
)
|
||||
dbt.exceptions.raise_compiler_error(msg)
|
||||
return match
|
||||
@@ -156,22 +160,20 @@ class TagIterator:
|
||||
"""
|
||||
self.advance(match.end())
|
||||
while True:
|
||||
match = self._expect_match('}}',
|
||||
EXPR_END_PATTERN,
|
||||
QUOTE_START_PATTERN)
|
||||
if match.groupdict().get('expr_end') is not None:
|
||||
match = self._expect_match("}}", EXPR_END_PATTERN, QUOTE_START_PATTERN)
|
||||
if match.groupdict().get("expr_end") is not None:
|
||||
break
|
||||
else:
|
||||
# it's a quote. we haven't advanced for this match yet, so
|
||||
# just slurp up the whole string, no need to rewind.
|
||||
match = self._expect_match('string', STRING_PATTERN)
|
||||
match = self._expect_match("string", STRING_PATTERN)
|
||||
self.advance(match.end())
|
||||
|
||||
self.advance(match.end())
|
||||
|
||||
def handle_comment(self, match):
|
||||
self.advance(match.end())
|
||||
match = self._expect_match('#}', COMMENT_END_PATTERN)
|
||||
match = self._expect_match("#}", COMMENT_END_PATTERN)
|
||||
self.advance(match.end())
|
||||
|
||||
def _expect_block_close(self):
|
||||
@@ -188,22 +190,19 @@ class TagIterator:
|
||||
"""
|
||||
while True:
|
||||
end_match = self._expect_match(
|
||||
'tag close ("%}")',
|
||||
QUOTE_START_PATTERN,
|
||||
TAG_CLOSE_PATTERN
|
||||
'tag close ("%}")', QUOTE_START_PATTERN, TAG_CLOSE_PATTERN
|
||||
)
|
||||
self.advance(end_match.end())
|
||||
if end_match.groupdict().get('tag_close') is not None:
|
||||
if end_match.groupdict().get("tag_close") is not None:
|
||||
return
|
||||
# must be a string. Rewind to its start and advance past it.
|
||||
self.rewind()
|
||||
string_match = self._expect_match('string', STRING_PATTERN)
|
||||
string_match = self._expect_match("string", STRING_PATTERN)
|
||||
self.advance(string_match.end())
|
||||
|
||||
def handle_raw(self):
|
||||
# raw blocks are super special, they are a single complete regex
|
||||
match = self._expect_match('{% raw %}...{% endraw %}',
|
||||
RAW_BLOCK_PATTERN)
|
||||
match = self._expect_match("{% raw %}...{% endraw %}", RAW_BLOCK_PATTERN)
|
||||
self.advance(match.end())
|
||||
return match.end()
|
||||
|
||||
@@ -220,30 +219,24 @@ class TagIterator:
|
||||
"""
|
||||
groups = match.groupdict()
|
||||
# always a value
|
||||
block_type_name = groups['block_type_name']
|
||||
block_type_name = groups["block_type_name"]
|
||||
# might be None
|
||||
block_name = groups.get('block_name')
|
||||
block_name = groups.get("block_name")
|
||||
start_pos = self.pos
|
||||
if block_type_name == 'raw':
|
||||
match = self._expect_match('{% raw %}...{% endraw %}',
|
||||
RAW_BLOCK_PATTERN)
|
||||
if block_type_name == "raw":
|
||||
match = self._expect_match("{% raw %}...{% endraw %}", RAW_BLOCK_PATTERN)
|
||||
self.advance(match.end())
|
||||
else:
|
||||
self.advance(match.end())
|
||||
self._expect_block_close()
|
||||
return Tag(
|
||||
block_type_name=block_type_name,
|
||||
block_name=block_name,
|
||||
start=start_pos,
|
||||
end=self.pos
|
||||
block_type_name=block_type_name, block_name=block_name, start=start_pos, end=self.pos
|
||||
)
|
||||
|
||||
def find_tags(self):
|
||||
while True:
|
||||
match = self._first_match(
|
||||
BLOCK_START_PATTERN,
|
||||
COMMENT_START_PATTERN,
|
||||
EXPR_START_PATTERN
|
||||
BLOCK_START_PATTERN, COMMENT_START_PATTERN, EXPR_START_PATTERN
|
||||
)
|
||||
if match is None:
|
||||
break
|
||||
@@ -252,9 +245,9 @@ class TagIterator:
|
||||
# start = self.pos
|
||||
|
||||
groups = match.groupdict()
|
||||
comment_start = groups.get('comment_start')
|
||||
expr_start = groups.get('expr_start')
|
||||
block_type_name = groups.get('block_type_name')
|
||||
comment_start = groups.get("comment_start")
|
||||
expr_start = groups.get("expr_start")
|
||||
block_type_name = groups.get("block_type_name")
|
||||
|
||||
if comment_start is not None:
|
||||
self.handle_comment(match)
|
||||
@@ -264,8 +257,8 @@ class TagIterator:
|
||||
yield self.handle_tag(match)
|
||||
else:
|
||||
raise dbt.exceptions.InternalException(
|
||||
'Invalid regex match in next_block, expected block start, '
|
||||
'expr start, or comment start'
|
||||
"Invalid regex match in next_block, expected block start, "
|
||||
"expr start, or comment start"
|
||||
)
|
||||
|
||||
def __iter__(self):
|
||||
@@ -273,21 +266,18 @@ class TagIterator:
|
||||
|
||||
|
||||
duplicate_tags = (
|
||||
'Got nested tags: {outer.block_type_name} (started at {outer.start}) did '
|
||||
'not have a matching {{% end{outer.block_type_name} %}} before a '
|
||||
'subsequent {inner.block_type_name} was found (started at {inner.start})'
|
||||
"Got nested tags: {outer.block_type_name} (started at {outer.start}) did "
|
||||
"not have a matching {{% end{outer.block_type_name} %}} before a "
|
||||
"subsequent {inner.block_type_name} was found (started at {inner.start})"
|
||||
)
|
||||
|
||||
|
||||
_CONTROL_FLOW_TAGS = {
|
||||
'if': 'endif',
|
||||
'for': 'endfor',
|
||||
"if": "endif",
|
||||
"for": "endfor",
|
||||
}
|
||||
|
||||
_CONTROL_FLOW_END_TAGS = {
|
||||
v: k
|
||||
for k, v in _CONTROL_FLOW_TAGS.items()
|
||||
}
|
||||
_CONTROL_FLOW_END_TAGS = {v: k for k, v in _CONTROL_FLOW_TAGS.items()}
|
||||
|
||||
|
||||
class BlockIterator:
|
||||
@@ -310,15 +300,15 @@ class BlockIterator:
|
||||
|
||||
def is_current_end(self, tag):
|
||||
return (
|
||||
tag.block_type_name.startswith('end') and
|
||||
self.current is not None and
|
||||
tag.block_type_name[3:] == self.current.block_type_name
|
||||
tag.block_type_name.startswith("end")
|
||||
and self.current is not None
|
||||
and tag.block_type_name[3:] == self.current.block_type_name
|
||||
)
|
||||
|
||||
def find_blocks(self, allowed_blocks=None, collect_raw_data=True):
|
||||
"""Find all top-level blocks in the data."""
|
||||
if allowed_blocks is None:
|
||||
allowed_blocks = {'snapshot', 'macro', 'materialization', 'docs'}
|
||||
allowed_blocks = {"snapshot", "macro", "materialization", "docs"}
|
||||
|
||||
for tag in self.tag_parser.find_tags():
|
||||
if tag.block_type_name in _CONTROL_FLOW_TAGS:
|
||||
@@ -329,37 +319,35 @@ class BlockIterator:
|
||||
found = self.stack.pop()
|
||||
else:
|
||||
expected = _CONTROL_FLOW_END_TAGS[tag.block_type_name]
|
||||
dbt.exceptions.raise_compiler_error((
|
||||
'Got an unexpected control flow end tag, got {} but '
|
||||
'never saw a preceeding {} (@ {})'
|
||||
).format(
|
||||
tag.block_type_name,
|
||||
expected,
|
||||
self.tag_parser.linepos(tag.start)
|
||||
))
|
||||
dbt.exceptions.raise_compiler_error(
|
||||
(
|
||||
"Got an unexpected control flow end tag, got {} but "
|
||||
"never saw a preceeding {} (@ {})"
|
||||
).format(tag.block_type_name, expected, self.tag_parser.linepos(tag.start))
|
||||
)
|
||||
expected = _CONTROL_FLOW_TAGS[found]
|
||||
if expected != tag.block_type_name:
|
||||
dbt.exceptions.raise_compiler_error((
|
||||
'Got an unexpected control flow end tag, got {} but '
|
||||
'expected {} next (@ {})'
|
||||
).format(
|
||||
tag.block_type_name,
|
||||
expected,
|
||||
self.tag_parser.linepos(tag.start)
|
||||
))
|
||||
dbt.exceptions.raise_compiler_error(
|
||||
(
|
||||
"Got an unexpected control flow end tag, got {} but "
|
||||
"expected {} next (@ {})"
|
||||
).format(tag.block_type_name, expected, self.tag_parser.linepos(tag.start))
|
||||
)
|
||||
|
||||
if tag.block_type_name in allowed_blocks:
|
||||
if self.stack:
|
||||
dbt.exceptions.raise_compiler_error((
|
||||
'Got a block definition inside control flow at {}. '
|
||||
'All dbt block definitions must be at the top level'
|
||||
).format(self.tag_parser.linepos(tag.start)))
|
||||
dbt.exceptions.raise_compiler_error(
|
||||
(
|
||||
"Got a block definition inside control flow at {}. "
|
||||
"All dbt block definitions must be at the top level"
|
||||
).format(self.tag_parser.linepos(tag.start))
|
||||
)
|
||||
if self.current is not None:
|
||||
dbt.exceptions.raise_compiler_error(
|
||||
duplicate_tags.format(outer=self.current, inner=tag)
|
||||
)
|
||||
if collect_raw_data:
|
||||
raw_data = self.data[self.last_position:tag.start]
|
||||
raw_data = self.data[self.last_position : tag.start]
|
||||
self.last_position = tag.start
|
||||
if raw_data:
|
||||
yield BlockData(raw_data)
|
||||
@@ -371,23 +359,25 @@ class BlockIterator:
|
||||
yield BlockTag(
|
||||
block_type_name=self.current.block_type_name,
|
||||
block_name=self.current.block_name,
|
||||
contents=self.data[self.current.end:tag.start],
|
||||
full_block=self.data[self.current.start:tag.end]
|
||||
contents=self.data[self.current.end : tag.start],
|
||||
full_block=self.data[self.current.start : tag.end],
|
||||
)
|
||||
self.current = None
|
||||
|
||||
if self.current:
|
||||
linecount = self.data[:self.current.end].count('\n') + 1
|
||||
dbt.exceptions.raise_compiler_error((
|
||||
'Reached EOF without finding a close tag for '
|
||||
'{} (searched from line {})'
|
||||
).format(self.current.block_type_name, linecount))
|
||||
linecount = self.data[: self.current.end].count("\n") + 1
|
||||
dbt.exceptions.raise_compiler_error(
|
||||
(
|
||||
"Reached EOF without finding a close tag for " "{} (searched from line {})"
|
||||
).format(self.current.block_type_name, linecount)
|
||||
)
|
||||
|
||||
if collect_raw_data:
|
||||
raw_data = self.data[self.last_position:]
|
||||
raw_data = self.data[self.last_position :]
|
||||
if raw_data:
|
||||
yield BlockData(raw_data)
|
||||
|
||||
def lex_for_blocks(self, allowed_blocks=None, collect_raw_data=True):
|
||||
return list(self.find_blocks(allowed_blocks=allowed_blocks,
|
||||
collect_raw_data=collect_raw_data))
|
||||
return list(
|
||||
self.find_blocks(allowed_blocks=allowed_blocks, collect_raw_data=collect_raw_data)
|
||||
)
|
||||
|
||||
@@ -10,7 +10,17 @@ from typing import Iterable, List, Dict, Union, Optional, Any
|
||||
from dbt.exceptions import RuntimeException
|
||||
|
||||
|
||||
BOM = BOM_UTF8.decode('utf-8') # '\ufeff'
|
||||
BOM = BOM_UTF8.decode("utf-8") # '\ufeff'
|
||||
|
||||
|
||||
class Number(agate.data_types.Number):
|
||||
# undo the change in https://github.com/wireservice/agate/pull/733
|
||||
# i.e. do not cast True and False to numeric 1 and 0
|
||||
def cast(self, d):
|
||||
if type(d) == bool:
|
||||
raise agate.exceptions.CastError("Do not cast True to 1 or False to 0.")
|
||||
else:
|
||||
return super().cast(d)
|
||||
|
||||
|
||||
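The Number subclass above exists to keep Python booleans out of numeric columns so the TypeTester can fall through to the Boolean data type. A minimal sketch of the behaviour (mirroring the subclass rather than importing it from dbt; assumes agate is installed):

```python
import agate

class StrictNumber(agate.data_types.Number):
    # Reject booleans instead of casting them to 1/0.
    def cast(self, d):
        if type(d) == bool:
            raise agate.exceptions.CastError("Do not cast True to 1 or False to 0.")
        return super().cast(d)

number = StrictNumber(null_values=("null", ""))
print(number.cast("3.14"))   # Decimal('3.14')
try:
    number.cast(True)
except agate.exceptions.CastError as exc:
    print(exc)               # Do not cast True to 1 or False to 0.
```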
class ISODateTime(agate.data_types.DateTime):
|
||||
@@ -30,32 +40,24 @@ class ISODateTime(agate.data_types.DateTime):
|
||||
except: # noqa
|
||||
pass
|
||||
|
||||
raise agate.exceptions.CastError(
|
||||
'Can not parse value "%s" as datetime.' % d
|
||||
)
|
||||
raise agate.exceptions.CastError('Can not parse value "%s" as datetime.' % d)
|
||||
|
||||
|
||||
def build_type_tester(
|
||||
text_columns: Iterable[str],
|
||||
string_null_values: Optional[Iterable[str]] = ('null', '')
|
||||
text_columns: Iterable[str], string_null_values: Optional[Iterable[str]] = ("null", "")
|
||||
) -> agate.TypeTester:
|
||||
|
||||
types = [
|
||||
agate.data_types.Number(null_values=('null', '')),
|
||||
agate.data_types.Date(null_values=('null', ''),
|
||||
date_format='%Y-%m-%d'),
|
||||
agate.data_types.DateTime(null_values=('null', ''),
|
||||
datetime_format='%Y-%m-%d %H:%M:%S'),
|
||||
ISODateTime(null_values=('null', '')),
|
||||
agate.data_types.Boolean(true_values=('true',),
|
||||
false_values=('false',),
|
||||
null_values=('null', '')),
|
||||
agate.data_types.Text(null_values=string_null_values)
|
||||
Number(null_values=("null", "")),
|
||||
agate.data_types.Date(null_values=("null", ""), date_format="%Y-%m-%d"),
|
||||
agate.data_types.DateTime(null_values=("null", ""), datetime_format="%Y-%m-%d %H:%M:%S"),
|
||||
ISODateTime(null_values=("null", "")),
|
||||
agate.data_types.Boolean(
|
||||
true_values=("true",), false_values=("false",), null_values=("null", "")
|
||||
),
|
||||
agate.data_types.Text(null_values=string_null_values),
|
||||
]
|
||||
force = {
|
||||
k: agate.data_types.Text(null_values=string_null_values)
|
||||
for k in text_columns
|
||||
}
|
||||
force = {k: agate.data_types.Text(null_values=string_null_values) for k in text_columns}
|
||||
return agate.TypeTester(force=force, types=types)
|
||||
|
||||
|
||||
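A short usage sketch of the TypeTester pattern assembled by build_type_tester above (illustrative only, with made-up column names): the tester tries each data type in order and forces the listed columns to Text, which is how seed columns declared as text keep their leading zeros.

```python
import agate

types = [
    agate.data_types.Number(null_values=("null", "")),
    agate.data_types.Boolean(true_values=("true",), false_values=("false",), null_values=("null", "")),
    agate.data_types.Text(null_values=("null", "")),
]
tester = agate.TypeTester(force={"zip_code": agate.data_types.Text(null_values=())}, types=types)

rows = [("02134", "12.5"), ("07030", "3")]
table = agate.Table(rows, column_names=["zip_code", "amount"], column_types=tester)

print([type(c).__name__ for c in table.column_types])  # ['Text', 'Number']
print(table.rows[0]["zip_code"])                        # '02134' (leading zero preserved)
```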
@@ -72,16 +74,13 @@ def table_from_rows(
|
||||
else:
|
||||
# If text_only_columns are present, prevent coercing empty string or
|
||||
# literal 'null' strings to a None representation.
|
||||
column_types = build_type_tester(
|
||||
text_only_columns,
|
||||
string_null_values=()
|
||||
)
|
||||
column_types = build_type_tester(text_only_columns, string_null_values=())
|
||||
|
||||
return agate.Table(rows, column_names, column_types=column_types)
|
||||
|
||||
|
||||
def table_from_data(data, column_names: Iterable[str]) -> agate.Table:
|
||||
"Convert list of dictionaries into an Agate table"
|
||||
"Convert a list of dictionaries into an Agate table"
|
||||
|
||||
# The agate table is generated from a list of dicts, so the column order
|
||||
# from `data` is not preserved. We can use `select` to reorder the columns
|
||||
@@ -120,9 +119,7 @@ def table_from_data_flat(data, column_names: Iterable[str]) -> agate.Table:
|
||||
rows.append(row)
|
||||
|
||||
return table_from_rows(
|
||||
rows=rows,
|
||||
column_names=column_names,
|
||||
text_only_columns=text_only_columns
|
||||
rows=rows, column_names=column_names, text_only_columns=text_only_columns
|
||||
)
|
||||
|
||||
|
||||
@@ -140,7 +137,7 @@ def as_matrix(table):
|
||||
|
||||
def from_csv(abspath, text_columns):
|
||||
type_tester = build_type_tester(text_columns=text_columns)
|
||||
with open(abspath, encoding='utf-8') as fp:
|
||||
with open(abspath, encoding="utf-8") as fp:
|
||||
if fp.read(1) != BOM:
|
||||
fp.seek(0)
|
||||
return agate.Table.from_csv(fp, column_types=type_tester)
|
||||
@@ -172,8 +169,8 @@ class ColumnTypeBuilder(Dict[str, NullableAgateType]):
|
||||
elif not isinstance(value, type(existing_type)):
|
||||
# actual type mismatch!
|
||||
raise RuntimeException(
|
||||
f'Tables contain columns with the same names ({key}), '
|
||||
f'but different types ({value} vs {existing_type})'
|
||||
f"Tables contain columns with the same names ({key}), "
|
||||
f"but different types ({value} vs {existing_type})"
|
||||
)
|
||||
|
||||
def finalize(self) -> Dict[str, agate.data_types.DataType]:
|
||||
@@ -187,9 +184,7 @@ class ColumnTypeBuilder(Dict[str, NullableAgateType]):
|
||||
return result
|
||||
|
||||
|
||||
def _merged_column_types(
|
||||
tables: List[agate.Table]
|
||||
) -> Dict[str, agate.data_types.DataType]:
|
||||
def _merged_column_types(tables: List[agate.Table]) -> Dict[str, agate.data_types.DataType]:
|
||||
# this is a lot like agate.Table.merge, but with handling for all-null
|
||||
# rows being "any type".
|
||||
new_columns: ColumnTypeBuilder = ColumnTypeBuilder()
|
||||
@@ -215,10 +210,7 @@ def merge_tables(tables: List[agate.Table]) -> agate.Table:
|
||||
|
||||
rows: List[agate.Row] = []
|
||||
for table in tables:
|
||||
if (
|
||||
table.column_names == column_names and
|
||||
table.column_types == column_types
|
||||
):
|
||||
if table.column_names == column_names and table.column_types == column_types:
|
||||
rows.extend(table.rows)
|
||||
else:
|
||||
for row in table.rows:
|
||||
|
||||
@@ -4,11 +4,21 @@ import os.path
|
||||
from dbt.clients.system import run_cmd, rmdir
|
||||
from dbt.events.functions import fire_event
|
||||
from dbt.events.types import (
|
||||
GitSparseCheckoutSubdirectory, GitProgressCheckoutRevision,
|
||||
GitProgressUpdatingExistingDependency, GitProgressPullingNewDependency,
|
||||
GitNothingToDo, GitProgressUpdatedCheckoutRange, GitProgressCheckedOutAt
|
||||
GitSparseCheckoutSubdirectory,
|
||||
GitProgressCheckoutRevision,
|
||||
GitProgressUpdatingExistingDependency,
|
||||
GitProgressPullingNewDependency,
|
||||
GitNothingToDo,
|
||||
GitProgressUpdatedCheckoutRange,
|
||||
GitProgressCheckedOutAt,
|
||||
)
|
||||
from dbt.exceptions import (
|
||||
CommandResultError,
|
||||
RuntimeException,
|
||||
bad_package_spec,
|
||||
raise_git_cloning_error,
|
||||
raise_git_cloning_problem,
|
||||
)
|
||||
import dbt.exceptions
|
||||
from packaging import version
|
||||
|
||||
|
||||
@@ -18,23 +28,23 @@ def _is_commit(revision: str) -> bool:
|
||||
|
||||
|
||||
def _raise_git_cloning_error(repo, revision, error):
|
||||
stderr = error.stderr.decode('utf-8').strip()
|
||||
if 'usage: git' in stderr:
|
||||
stderr = stderr.split('\nusage: git')[0]
|
||||
stderr = error.stderr.decode("utf-8").strip()
|
||||
if "usage: git" in stderr:
|
||||
stderr = stderr.split("\nusage: git")[0]
|
||||
if re.match("fatal: destination path '(.+)' already exists", stderr):
|
||||
raise error
|
||||
raise_git_cloning_error(error)
|
||||
|
||||
dbt.exceptions.bad_package_spec(repo, revision, stderr)
|
||||
bad_package_spec(repo, revision, stderr)
|
||||
|
||||
|
||||
def clone(repo, cwd, dirname=None, remove_git_dir=False, revision=None, subdirectory=None):
|
||||
has_revision = revision is not None
|
||||
is_commit = _is_commit(revision or "")
|
||||
|
||||
clone_cmd = ['git', 'clone', '--depth', '1']
|
||||
clone_cmd = ["git", "clone", "--depth", "1"]
|
||||
if subdirectory:
|
||||
fire_event(GitSparseCheckoutSubdirectory(subdir=subdirectory))
|
||||
out, _ = run_cmd(cwd, ['git', '--version'], env={'LC_ALL': 'C'})
|
||||
out, _ = run_cmd(cwd, ["git", "--version"], env={"LC_ALL": "C"})
|
||||
git_version = version.parse(re.search(r"\d+\.\d+\.\d+", out.decode("utf-8")).group(0))
|
||||
if not git_version >= version.parse("2.25.0"):
|
||||
# 2.25.0 introduces --sparse
|
||||
@@ -42,37 +52,37 @@ def clone(repo, cwd, dirname=None, remove_git_dir=False, revision=None, subdirec
|
||||
"Please update your git version to pull a dbt package "
|
||||
"from a subdirectory: your version is {}, >= 2.25.0 needed".format(git_version)
|
||||
)
|
||||
clone_cmd.extend(['--filter=blob:none', '--sparse'])
|
||||
clone_cmd.extend(["--filter=blob:none", "--sparse"])
|
||||
|
||||
if has_revision and not is_commit:
|
||||
clone_cmd.extend(['--branch', revision])
|
||||
clone_cmd.extend(["--branch", revision])
|
||||
|
||||
clone_cmd.append(repo)
|
||||
|
||||
if dirname is not None:
|
||||
clone_cmd.append(dirname)
|
||||
try:
|
||||
result = run_cmd(cwd, clone_cmd, env={'LC_ALL': 'C'})
|
||||
except dbt.exceptions.CommandResultError as exc:
|
||||
result = run_cmd(cwd, clone_cmd, env={"LC_ALL": "C"})
|
||||
except CommandResultError as exc:
|
||||
_raise_git_cloning_error(repo, revision, exc)
|
||||
|
||||
if subdirectory:
|
||||
cwd_subdir = os.path.join(cwd, dirname or '')
|
||||
clone_cmd_subdir = ['git', 'sparse-checkout', 'set', subdirectory]
|
||||
cwd_subdir = os.path.join(cwd, dirname or "")
|
||||
clone_cmd_subdir = ["git", "sparse-checkout", "set", subdirectory]
|
||||
try:
|
||||
run_cmd(cwd_subdir, clone_cmd_subdir)
|
||||
except dbt.exceptions.CommandResultError as exc:
|
||||
except CommandResultError as exc:
|
||||
_raise_git_cloning_error(repo, revision, exc)
|
||||
|
||||
if remove_git_dir:
|
||||
rmdir(os.path.join(dirname, '.git'))
|
||||
rmdir(os.path.join(dirname, ".git"))
|
||||
|
||||
return result
|
||||
|
||||
|
||||
def list_tags(cwd):
|
||||
out, err = run_cmd(cwd, ['git', 'tag', '--list'], env={'LC_ALL': 'C'})
|
||||
tags = out.decode('utf-8').strip().split("\n")
|
||||
out, err = run_cmd(cwd, ["git", "tag", "--list"], env={"LC_ALL": "C"})
|
||||
tags = out.decode("utf-8").strip().split("\n")
|
||||
return tags
|
||||
|
||||
|
||||
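A sketch of the command list the clone() helper above assembles for a hypothetical package (illustrative only, not part of the diff; the --filter/--sparse pair is appended only when a subdirectory is requested and git is at least 2.25.0, and --branch is skipped when the revision is a commit SHA):

```python
repo = "https://github.com/dbt-labs/dbt-utils.git"   # hypothetical package
revision = "0.8.0"
subdirectory = None

clone_cmd = ["git", "clone", "--depth", "1"]
if subdirectory:
    clone_cmd.extend(["--filter=blob:none", "--sparse"])
clone_cmd.extend(["--branch", revision])
clone_cmd.append(repo)

print(" ".join(clone_cmd))
# git clone --depth 1 --branch 0.8.0 https://github.com/dbt-labs/dbt-utils.git
```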
@@ -84,44 +94,44 @@ def _checkout(cwd, repo, revision):
|
||||
if _is_commit(revision):
|
||||
run_cmd(cwd, fetch_cmd + [revision])
|
||||
else:
|
||||
run_cmd(cwd, ['git', 'remote', 'set-branches', 'origin', revision])
|
||||
run_cmd(cwd, ["git", "remote", "set-branches", "origin", revision])
|
||||
run_cmd(cwd, fetch_cmd + ["--tags", revision])
|
||||
|
||||
if _is_commit(revision):
|
||||
spec = revision
|
||||
# Prefer tags to branches if one exists
|
||||
elif revision in list_tags(cwd):
|
||||
spec = 'tags/{}'.format(revision)
|
||||
spec = "tags/{}".format(revision)
|
||||
else:
|
||||
spec = 'origin/{}'.format(revision)
|
||||
spec = "origin/{}".format(revision)
|
||||
|
||||
out, err = run_cmd(cwd, ['git', 'reset', '--hard', spec],
|
||||
env={'LC_ALL': 'C'})
|
||||
out, err = run_cmd(cwd, ["git", "reset", "--hard", spec], env={"LC_ALL": "C"})
|
||||
return out, err
|
||||
|
||||
|
||||
def checkout(cwd, repo, revision=None):
|
||||
if revision is None:
|
||||
revision = 'HEAD'
|
||||
revision = "HEAD"
|
||||
try:
|
||||
return _checkout(cwd, repo, revision)
|
||||
except dbt.exceptions.CommandResultError as exc:
|
||||
stderr = exc.stderr.decode('utf-8').strip()
|
||||
dbt.exceptions.bad_package_spec(repo, revision, stderr)
|
||||
except CommandResultError as exc:
|
||||
stderr = exc.stderr.decode("utf-8").strip()
|
||||
bad_package_spec(repo, revision, stderr)
|
||||
|
||||
|
||||
def get_current_sha(cwd):
|
||||
out, err = run_cmd(cwd, ['git', 'rev-parse', 'HEAD'], env={'LC_ALL': 'C'})
|
||||
out, err = run_cmd(cwd, ["git", "rev-parse", "HEAD"], env={"LC_ALL": "C"})
|
||||
|
||||
return out.decode('utf-8')
|
||||
return out.decode("utf-8")
|
||||
|
||||
|
||||
def remove_remote(cwd):
|
||||
return run_cmd(cwd, ['git', 'remote', 'rm', 'origin'], env={'LC_ALL': 'C'})
|
||||
return run_cmd(cwd, ["git", "remote", "rm", "origin"], env={"LC_ALL": "C"})
|
||||
|
||||
|
||||
def clone_and_checkout(repo, cwd, dirname=None, remove_git_dir=False,
|
||||
revision=None, subdirectory=None):
|
||||
def clone_and_checkout(
|
||||
repo, cwd, dirname=None, remove_git_dir=False, revision=None, subdirectory=None
|
||||
):
|
||||
exists = None
|
||||
try:
|
||||
_, err = clone(
|
||||
@@ -131,14 +141,11 @@ def clone_and_checkout(repo, cwd, dirname=None, remove_git_dir=False,
|
||||
remove_git_dir=remove_git_dir,
|
||||
subdirectory=subdirectory,
|
||||
)
|
||||
except dbt.exceptions.CommandResultError as exc:
|
||||
err = exc.stderr.decode('utf-8')
|
||||
except CommandResultError as exc:
|
||||
err = exc.stderr.decode("utf-8")
|
||||
exists = re.match("fatal: destination path '(.+)' already exists", err)
|
||||
if not exists:
|
||||
print(
|
||||
'\nSomething went wrong while cloning {}'.format(repo) +
|
||||
'\nCheck the debug logs for more information')
|
||||
raise
|
||||
raise_git_cloning_problem(repo)
|
||||
|
||||
directory = None
|
||||
start_sha = None
|
||||
@@ -146,11 +153,9 @@ def clone_and_checkout(repo, cwd, dirname=None, remove_git_dir=False,
|
||||
directory = exists.group(1)
|
||||
fire_event(GitProgressUpdatingExistingDependency(dir=directory))
|
||||
else:
|
||||
matches = re.match("Cloning into '(.+)'", err.decode('utf-8'))
|
||||
matches = re.match("Cloning into '(.+)'", err.decode("utf-8"))
|
||||
if matches is None:
|
||||
raise dbt.exceptions.RuntimeException(
|
||||
f'Error cloning {repo} - never saw "Cloning into ..." from git'
|
||||
)
|
||||
raise RuntimeException(f'Error cloning {repo} - never saw "Cloning into ..." from git')
|
||||
directory = matches.group(1)
|
||||
fire_event(GitProgressPullingNewDependency(dir=directory))
|
||||
full_path = os.path.join(cwd, directory)
|
||||
@@ -161,9 +166,9 @@ def clone_and_checkout(repo, cwd, dirname=None, remove_git_dir=False,
|
||||
if start_sha == end_sha:
|
||||
fire_event(GitNothingToDo(sha=start_sha[:7]))
|
||||
else:
|
||||
fire_event(GitProgressUpdatedCheckoutRange(
|
||||
start_sha=start_sha[:7], end_sha=end_sha[:7]
|
||||
))
|
||||
fire_event(
|
||||
GitProgressUpdatedCheckoutRange(start_sha=start_sha[:7], end_sha=end_sha[:7])
|
||||
)
|
||||
else:
|
||||
fire_event(GitProgressCheckedOutAt(end_sha=end_sha[:7]))
|
||||
return os.path.join(directory, subdirectory or '')
|
||||
return os.path.join(directory, subdirectory or "")
|
||||
|
||||
@@ -7,10 +7,7 @@ import threading
|
||||
from ast import literal_eval
|
||||
from contextlib import contextmanager
|
||||
from itertools import chain, islice
|
||||
from typing import (
|
||||
List, Union, Set, Optional, Dict, Any, Iterator, Type, NoReturn, Tuple,
|
||||
Callable
|
||||
)
|
||||
from typing import List, Union, Set, Optional, Dict, Any, Iterator, Type, NoReturn, Tuple, Callable
|
||||
|
||||
import jinja2
|
||||
import jinja2.ext
|
||||
@@ -20,17 +17,24 @@ import jinja2.parser
|
||||
import jinja2.sandbox
|
||||
|
||||
from dbt.utils import (
|
||||
get_dbt_macro_name, get_docs_macro_name, get_materialization_macro_name,
|
||||
get_test_macro_name, deep_map_render
|
||||
get_dbt_macro_name,
|
||||
get_docs_macro_name,
|
||||
get_materialization_macro_name,
|
||||
get_test_macro_name,
|
||||
deep_map_render,
|
||||
)
|
||||
|
||||
from dbt.clients._jinja_blocks import BlockIterator, BlockData, BlockTag
|
||||
from dbt.contracts.graph.compiled import CompiledGenericTestNode
|
||||
from dbt.contracts.graph.parsed import ParsedGenericTestNode
|
||||
from dbt.exceptions import (
|
||||
InternalException, raise_compiler_error, CompilationException,
|
||||
invalid_materialization_argument, MacroReturn, JinjaRenderingException,
|
||||
UndefinedMacroException
|
||||
InternalException,
|
||||
raise_compiler_error,
|
||||
CompilationException,
|
||||
invalid_materialization_argument,
|
||||
MacroReturn,
|
||||
JinjaRenderingException,
|
||||
UndefinedMacroException,
|
||||
)
|
||||
from dbt import flags
|
||||
|
||||
@@ -40,27 +44,22 @@ def _linecache_inject(source, write):
|
||||
# this is the only reliable way to accomplish this. Obviously, it's
|
||||
# really darn noisy and will fill your temporary directory
|
||||
tmp_file = tempfile.NamedTemporaryFile(
|
||||
prefix='dbt-macro-compiled-',
|
||||
suffix='.py',
|
||||
prefix="dbt-macro-compiled-",
|
||||
suffix=".py",
|
||||
delete=False,
|
||||
mode='w+',
|
||||
encoding='utf-8',
|
||||
mode="w+",
|
||||
encoding="utf-8",
|
||||
)
|
||||
tmp_file.write(source)
|
||||
filename = tmp_file.name
|
||||
else:
|
||||
# `codecs.encode` actually takes a `bytes` as the first argument if
|
||||
# the second argument is 'hex' - mypy does not know this.
|
||||
rnd = codecs.encode(os.urandom(12), 'hex') # type: ignore
|
||||
filename = rnd.decode('ascii')
|
||||
rnd = codecs.encode(os.urandom(12), "hex") # type: ignore
|
||||
filename = rnd.decode("ascii")
|
||||
|
||||
# put ourselves in the cache
|
||||
cache_entry = (
|
||||
len(source),
|
||||
None,
|
||||
[line + '\n' for line in source.splitlines()],
|
||||
filename
|
||||
)
|
||||
cache_entry = (len(source), None, [line + "\n" for line in source.splitlines()], filename)
|
||||
# linecache does in fact have an attribute `cache`, thanks
|
||||
linecache.cache[filename] = cache_entry # type: ignore
|
||||
return filename
|
||||
@@ -73,12 +72,10 @@ class MacroFuzzParser(jinja2.parser.Parser):
|
||||
# modified to fuzz macros defined in the same file. this way
|
||||
# dbt can understand the stack of macros being called.
|
||||
# - @cmcarthur
|
||||
node.name = get_dbt_macro_name(
|
||||
self.parse_assign_target(name_only=True).name)
|
||||
node.name = get_dbt_macro_name(self.parse_assign_target(name_only=True).name)
|
||||
|
||||
self.parse_signature(node)
|
||||
node.body = self.parse_statements(('name:endmacro',),
|
||||
drop_needle=True)
|
||||
node.body = self.parse_statements(("name:endmacro",), drop_needle=True)
|
||||
return node
|
||||
|
||||
|
||||
@@ -94,8 +91,8 @@ class MacroFuzzEnvironment(jinja2.sandbox.SandboxedEnvironment):
|
||||
If the value is 'write', also write the files to disk.
|
||||
WARNING: This can write a ton of data if you aren't careful.
|
||||
"""
|
||||
if filename == '<template>' and flags.MACRO_DEBUGGING:
|
||||
write = flags.MACRO_DEBUGGING == 'write'
|
||||
if filename == "<template>" and flags.MACRO_DEBUGGING:
|
||||
write = flags.MACRO_DEBUGGING == "write"
|
||||
filename = _linecache_inject(source, write)
|
||||
|
||||
return super()._compile(source, filename) # type: ignore
|
||||
@@ -106,7 +103,7 @@ class NativeSandboxEnvironment(MacroFuzzEnvironment):
|
||||
|
||||
|
||||
class TextMarker(str):
|
||||
"""A special native-env marker that indicates that a value is text and is
|
||||
"""A special native-env marker that indicates a value is text and is
|
||||
not to be evaluated. Use this to prevent your numbery-strings from becoming
|
||||
numbers!
|
||||
"""
|
||||
@@ -138,7 +135,7 @@ def quoted_native_concat(nodes):
|
||||
head = list(islice(nodes, 2))
|
||||
|
||||
if not head:
|
||||
return ''
|
||||
return ""
|
||||
|
||||
if len(head) == 1:
|
||||
raw = head[0]
|
||||
@@ -156,13 +153,9 @@ def quoted_native_concat(nodes):
|
||||
except (ValueError, SyntaxError, MemoryError):
|
||||
result = raw
|
||||
if isinstance(raw, BoolMarker) and not isinstance(result, bool):
|
||||
raise JinjaRenderingException(
|
||||
f"Could not convert value '{raw!s}' into type 'bool'"
|
||||
)
|
||||
raise JinjaRenderingException(f"Could not convert value '{raw!s}' into type 'bool'")
|
||||
if isinstance(raw, NumberMarker) and not _is_number(result):
|
||||
raise JinjaRenderingException(
|
||||
f"Could not convert value '{raw!s}' into type 'number'"
|
||||
)
|
||||
raise JinjaRenderingException(f"Could not convert value '{raw!s}' into type 'number'")
|
||||
|
||||
return result
|
||||
|
||||
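The markers and quoted_native_concat above exist because the native environment evaluates rendered strings back into Python values, which would silently mangle "numbery" strings. A standalone sketch of the idea (not dbt's implementation; the marker and concat function below are simplified stand-ins):

```python
from ast import literal_eval

class TextMarker(str):
    """Marks a rendered value as text that must not be evaluated."""

def concat(value):
    if isinstance(value, TextMarker):
        return str(value)            # keep "numbery" strings as strings
    try:
        return literal_eval(value)   # native behaviour: strings become Python values
    except (ValueError, SyntaxError, MemoryError):
        return value

print(concat("1.10"))              # 1.1  (trailing zero lost, now a float)
print(concat(TextMarker("1.10")))  # 1.10 (still a string)
```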
@@ -180,9 +173,7 @@ class NativeSandboxTemplate(jinja2.nativetypes.NativeTemplate): # mypy: ignore
|
||||
vars = dict(*args, **kwargs)
|
||||
|
||||
try:
|
||||
return quoted_native_concat(
|
||||
self.root_render_func(self.new_context(vars))
|
||||
)
|
||||
return quoted_native_concat(self.root_render_func(self.new_context(vars)))
|
||||
except Exception:
|
||||
return self.environment.handle_exception()
|
||||
|
||||
@@ -221,10 +212,10 @@ class BaseMacroGenerator:
|
||||
self.context: Optional[Dict[str, Any]] = context
|
||||
|
||||
def get_template(self):
|
||||
raise NotImplementedError('get_template not implemented!')
|
||||
raise NotImplementedError("get_template not implemented!")
|
||||
|
||||
def get_name(self) -> str:
|
||||
raise NotImplementedError('get_name not implemented!')
|
||||
raise NotImplementedError("get_name not implemented!")
|
||||
|
||||
def get_macro(self):
|
||||
name = self.get_name()
|
||||
@@ -247,9 +238,7 @@ class BaseMacroGenerator:
|
||||
def call_macro(self, *args, **kwargs):
|
||||
# called from __call__ methods
|
||||
if self.context is None:
|
||||
raise InternalException(
|
||||
'Context is still None in call_macro!'
|
||||
)
|
||||
raise InternalException("Context is still None in call_macro!")
|
||||
assert self.context is not None
|
||||
|
||||
macro = self.get_macro()
|
||||
@@ -276,7 +265,7 @@ class MacroStack(threading.local):
|
||||
def pop(self, name):
|
||||
got = self.call_stack.pop()
|
||||
if got != name:
|
||||
raise InternalException(f'popped {got}, expected {name}')
|
||||
raise InternalException(f"popped {got}, expected {name}")
|
||||
|
||||
|
||||
class MacroGenerator(BaseMacroGenerator):
|
||||
@@ -285,7 +274,7 @@ class MacroGenerator(BaseMacroGenerator):
|
||||
macro,
|
||||
context: Optional[Dict[str, Any]] = None,
|
||||
node: Optional[Any] = None,
|
||||
stack: Optional[MacroStack] = None
|
||||
stack: Optional[MacroStack] = None,
|
||||
) -> None:
|
||||
super().__init__(context)
|
||||
self.macro = macro
|
||||
@@ -333,9 +322,7 @@ class MacroGenerator(BaseMacroGenerator):
|
||||
|
||||
|
||||
class QueryStringGenerator(BaseMacroGenerator):
|
||||
def __init__(
|
||||
self, template_str: str, context: Dict[str, Any]
|
||||
) -> None:
|
||||
def __init__(self, template_str: str, context: Dict[str, Any]) -> None:
|
||||
super().__init__(context)
|
||||
self.template_str: str = template_str
|
||||
env = get_environment()
|
||||
@@ -345,7 +332,7 @@ class QueryStringGenerator(BaseMacroGenerator):
|
||||
)
|
||||
|
||||
def get_name(self) -> str:
|
||||
return 'query_comment_macro'
|
||||
return "query_comment_macro"
|
||||
|
||||
def get_template(self):
|
||||
"""Don't use the template cache, we don't have a node"""
|
||||
@@ -356,45 +343,39 @@ class QueryStringGenerator(BaseMacroGenerator):
|
||||
|
||||
|
||||
class MaterializationExtension(jinja2.ext.Extension):
|
||||
tags = ['materialization']
|
||||
tags = ["materialization"]
|
||||
|
||||
def parse(self, parser):
|
||||
node = jinja2.nodes.Macro(lineno=next(parser.stream).lineno)
|
||||
materialization_name = \
|
||||
parser.parse_assign_target(name_only=True).name
|
||||
materialization_name = parser.parse_assign_target(name_only=True).name
|
||||
|
||||
adapter_name = 'default'
|
||||
adapter_name = "default"
|
||||
node.args = []
|
||||
node.defaults = []
|
||||
|
||||
while parser.stream.skip_if('comma'):
|
||||
while parser.stream.skip_if("comma"):
|
||||
target = parser.parse_assign_target(name_only=True)
|
||||
|
||||
if target.name == 'default':
|
||||
if target.name == "default":
|
||||
pass
|
||||
|
||||
elif target.name == 'adapter':
|
||||
parser.stream.expect('assign')
|
||||
elif target.name == "adapter":
|
||||
parser.stream.expect("assign")
|
||||
value = parser.parse_expression()
|
||||
adapter_name = value.value
|
||||
|
||||
else:
|
||||
invalid_materialization_argument(
|
||||
materialization_name, target.name
|
||||
)
|
||||
invalid_materialization_argument(materialization_name, target.name)
|
||||
|
||||
node.name = get_materialization_macro_name(
|
||||
materialization_name, adapter_name
|
||||
)
|
||||
node.name = get_materialization_macro_name(materialization_name, adapter_name)
|
||||
|
||||
node.body = parser.parse_statements(('name:endmaterialization',),
|
||||
drop_needle=True)
|
||||
node.body = parser.parse_statements(("name:endmaterialization",), drop_needle=True)
|
||||
|
||||
return node
|
||||
|
||||
|
||||
class DocumentationExtension(jinja2.ext.Extension):
|
||||
tags = ['docs']
|
||||
tags = ["docs"]
|
||||
|
||||
def parse(self, parser):
|
||||
node = jinja2.nodes.Macro(lineno=next(parser.stream).lineno)
|
||||
@@ -403,13 +384,12 @@ class DocumentationExtension(jinja2.ext.Extension):
|
||||
node.args = []
|
||||
node.defaults = []
|
||||
node.name = get_docs_macro_name(docs_name)
|
||||
node.body = parser.parse_statements(('name:enddocs',),
|
||||
drop_needle=True)
|
||||
node.body = parser.parse_statements(("name:enddocs",), drop_needle=True)
|
||||
return node
|
||||
|
||||
|
||||
class TestExtension(jinja2.ext.Extension):
|
||||
tags = ['test']
|
||||
tags = ["test"]
|
||||
|
||||
def parse(self, parser):
|
||||
node = jinja2.nodes.Macro(lineno=next(parser.stream).lineno)
|
||||
@@ -417,13 +397,12 @@ class TestExtension(jinja2.ext.Extension):
|
||||
|
||||
parser.parse_signature(node)
|
||||
node.name = get_test_macro_name(test_name)
|
||||
node.body = parser.parse_statements(('name:endtest',),
|
||||
drop_needle=True)
|
||||
node.body = parser.parse_statements(("name:endtest",), drop_needle=True)
|
||||
return node
|
||||
|
||||
|
||||
def _is_dunder_name(name):
|
||||
return name.startswith('__') and name.endswith('__')
|
||||
return name.startswith("__") and name.endswith("__")
|
||||
|
||||
|
||||
def create_undefined(node=None):
|
||||
@@ -444,10 +423,9 @@ def create_undefined(node=None):
|
||||
return self
|
||||
|
||||
def __getattr__(self, name):
|
||||
if name == 'name' or _is_dunder_name(name):
|
||||
if name == "name" or _is_dunder_name(name):
|
||||
raise AttributeError(
|
||||
"'{}' object has no attribute '{}'"
|
||||
.format(type(self).__name__, name)
|
||||
"'{}' object has no attribute '{}'".format(type(self).__name__, name)
|
||||
)
|
||||
|
||||
self.name = name
|
||||
@@ -458,24 +436,24 @@ def create_undefined(node=None):
|
||||
return self
|
||||
|
||||
def __reduce__(self):
|
||||
raise_compiler_error(f'{self.name} is undefined', node=node)
|
||||
raise_compiler_error(f"{self.name} is undefined", node=node)
|
||||
|
||||
return Undefined
|
||||
|
||||
|
||||
NATIVE_FILTERS: Dict[str, Callable[[Any], Any]] = {
|
||||
'as_text': TextMarker,
|
||||
'as_bool': BoolMarker,
|
||||
'as_native': NativeMarker,
|
||||
'as_number': NumberMarker,
|
||||
"as_text": TextMarker,
|
||||
"as_bool": BoolMarker,
|
||||
"as_native": NativeMarker,
|
||||
"as_number": NumberMarker,
|
||||
}
|
||||
|
||||
|
||||
TEXT_FILTERS: Dict[str, Callable[[Any], Any]] = {
|
||||
'as_text': lambda x: x,
|
||||
'as_bool': lambda x: x,
|
||||
'as_native': lambda x: x,
|
||||
'as_number': lambda x: x,
|
||||
"as_text": lambda x: x,
|
||||
"as_bool": lambda x: x,
|
||||
"as_native": lambda x: x,
|
||||
"as_number": lambda x: x,
|
||||
}
|
||||
|
||||
|
||||
@@ -485,15 +463,15 @@ def get_environment(
|
||||
native: bool = False,
|
||||
) -> jinja2.Environment:
|
||||
args: Dict[str, List[Union[str, Type[jinja2.ext.Extension]]]] = {
|
||||
'extensions': ['jinja2.ext.do']
|
||||
"extensions": ["jinja2.ext.do"]
|
||||
}
|
||||
|
||||
if capture_macros:
|
||||
args['undefined'] = create_undefined(node)
|
||||
args["undefined"] = create_undefined(node)
|
||||
|
||||
args['extensions'].append(MaterializationExtension)
|
||||
args['extensions'].append(DocumentationExtension)
|
||||
args['extensions'].append(TestExtension)
|
||||
args["extensions"].append(MaterializationExtension)
|
||||
args["extensions"].append(DocumentationExtension)
|
||||
args["extensions"].append(TestExtension)
|
||||
|
||||
env_cls: Type[jinja2.Environment]
|
||||
text_filter: Type
|
||||
@@ -556,8 +534,8 @@ def _requote_result(raw_value: str, rendered: str) -> str:
|
||||
elif single_quoted:
|
||||
quote_char = "'"
|
||||
else:
|
||||
quote_char = ''
|
||||
return f'{quote_char}{rendered}{quote_char}'
|
||||
quote_char = ""
|
||||
return f"{quote_char}{rendered}{quote_char}"
|
||||
|
||||
|
||||
# performance note: Local benmcharking (so take it with a big grain of salt!)
|
||||
@@ -565,7 +543,7 @@ def _requote_result(raw_value: str, rendered: str) -> str:
|
||||
# checking two separate patterns, but the standard deviation is smaller with
|
||||
# one pattern. The time difference between the two was ~2 std deviations, which
|
||||
# is small enough that I've just chosen the more readable option.
|
||||
_HAS_RENDER_CHARS_PAT = re.compile(r'({[{%#]|[#}%]})')
|
||||
_HAS_RENDER_CHARS_PAT = re.compile(r"({[{%#]|[#}%]})")
|
||||
|
||||
|
||||
def get_rendered(
|
||||
@@ -581,11 +559,7 @@ def get_rendered(
|
||||
# If this is desirable in the native env as well, we could handle the
|
||||
# native=True case by passing the input string to ast.literal_eval, like
|
||||
# the native renderer does.
|
||||
if (
|
||||
not native and
|
||||
isinstance(string, str) and
|
||||
_HAS_RENDER_CHARS_PAT.search(string) is None
|
||||
):
|
||||
if not native and isinstance(string, str) and _HAS_RENDER_CHARS_PAT.search(string) is None:
|
||||
return string
|
||||
template = get_template(
|
||||
string,
|
||||
@@ -606,7 +580,7 @@ def extract_toplevel_blocks(
|
||||
allowed_blocks: Optional[Set[str]] = None,
|
||||
collect_raw_data: bool = True,
|
||||
) -> List[Union[BlockData, BlockTag]]:
|
||||
"""Extract the top level blocks with matching block types from a jinja
|
||||
"""Extract the top-level blocks with matching block types from a jinja
|
||||
file, with some special handling for block nesting.
|
||||
|
||||
:param data: The data to extract blocks from.
|
||||
@@ -621,12 +595,11 @@ def extract_toplevel_blocks(
|
||||
`collect_raw_data` is `True`) `BlockData` objects.
|
||||
"""
|
||||
return BlockIterator(data).lex_for_blocks(
|
||||
allowed_blocks=allowed_blocks,
|
||||
collect_raw_data=collect_raw_data
|
||||
allowed_blocks=allowed_blocks, collect_raw_data=collect_raw_data
|
||||
)
|
||||
|
||||
|
||||
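A usage sketch for the function documented above, assuming a dbt-core install where it is importable from dbt.clients.jinja (the module shown in this diff); the template string is made up:

```python
from dbt.clients.jinja import extract_toplevel_blocks

source = """
{% macro greet(name) %}
  select '{{ name }}' as greeting
{% endmacro %}
"""

# With collect_raw_data=False only BlockTag objects are returned.
blocks = extract_toplevel_blocks(source, allowed_blocks={"macro"}, collect_raw_data=False)
for block in blocks:
    print(block.block_type_name, block.block_name)  # macro greet
    print(block.contents)                           # the body between the tags
```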
GENERIC_TEST_KWARGS_NAME = '_dbt_generic_test_kwargs'
|
||||
GENERIC_TEST_KWARGS_NAME = "_dbt_generic_test_kwargs"
|
||||
|
||||
|
||||
def add_rendered_test_kwargs(
|
||||
@@ -638,25 +611,20 @@ def add_rendered_test_kwargs(
|
||||
renderer, then insert that value into the given context as the special test
|
||||
keyword arguments member.
|
||||
"""
|
||||
looks_like_func = r'^\s*(env_var|ref|var|source|doc)\s*\(.+\)\s*$'
|
||||
looks_like_func = r"^\s*(env_var|ref|var|source|doc)\s*\(.+\)\s*$"
|
||||
|
||||
def _convert_function(
|
||||
value: Any, keypath: Tuple[Union[str, int], ...]
|
||||
) -> Any:
|
||||
def _convert_function(value: Any, keypath: Tuple[Union[str, int], ...]) -> Any:
|
||||
if isinstance(value, str):
|
||||
if keypath == ('column_name',):
|
||||
if keypath == ("column_name",):
|
||||
# special case: Don't render column names as native, make them
|
||||
# be strings
|
||||
return value
|
||||
|
||||
if re.match(looks_like_func, value) is not None:
|
||||
# curly braces to make rendering happy
|
||||
value = f'{{{{ {value} }}}}'
|
||||
value = f"{{{{ {value} }}}}"
|
||||
|
||||
value = get_rendered(
|
||||
value, context, node, capture_macros=capture_macros,
|
||||
native=True
|
||||
)
|
||||
value = get_rendered(value, context, node, capture_macros=capture_macros, native=True)
|
||||
|
||||
return value
|
||||
|
||||
|
||||
@@ -8,11 +8,11 @@ def statically_extract_macro_calls(string, ctx, db_wrapper=None):
|
||||
env = get_environment(None, capture_macros=True)
|
||||
parsed = env.parse(string)
|
||||
|
||||
standard_calls = ['source', 'ref', 'config']
|
||||
standard_calls = ["source", "ref", "config"]
|
||||
possible_macro_calls = []
|
||||
for func_call in parsed.find_all(jinja2.nodes.Call):
|
||||
func_name = None
|
||||
if hasattr(func_call, 'node') and hasattr(func_call.node, 'name'):
|
||||
if hasattr(func_call, "node") and hasattr(func_call.node, "name"):
|
||||
func_name = func_call.node.name
|
||||
else:
|
||||
# func_call for dbt_utils.current_timestamp macro
|
||||
@@ -30,22 +30,25 @@ def statically_extract_macro_calls(string, ctx, db_wrapper=None):
|
||||
# dyn_args=None,
|
||||
# dyn_kwargs=None
|
||||
# )
|
||||
if (hasattr(func_call, 'node') and
|
||||
hasattr(func_call.node, 'node') and
|
||||
type(func_call.node.node).__name__ == 'Name' and
|
||||
hasattr(func_call.node, 'attr')):
|
||||
if (
|
||||
hasattr(func_call, "node")
|
||||
and hasattr(func_call.node, "node")
|
||||
and type(func_call.node.node).__name__ == "Name"
|
||||
and hasattr(func_call.node, "attr")
|
||||
):
|
||||
package_name = func_call.node.node.name
|
||||
macro_name = func_call.node.attr
|
||||
if package_name == 'adapter':
|
||||
if macro_name == 'dispatch':
|
||||
if package_name == "adapter":
|
||||
if macro_name == "dispatch":
|
||||
ad_macro_calls = statically_parse_adapter_dispatch(
|
||||
func_call, ctx, db_wrapper)
|
||||
func_call, ctx, db_wrapper
|
||||
)
|
||||
possible_macro_calls.extend(ad_macro_calls)
|
||||
else:
|
||||
# This skips calls such as adapter.parse_index
|
||||
continue
|
||||
else:
|
||||
func_name = f'{package_name}.{macro_name}'
|
||||
func_name = f"{package_name}.{macro_name}"
|
||||
else:
|
||||
continue
|
||||
if not func_name:
|
||||
@@ -108,40 +111,41 @@ def statically_parse_adapter_dispatch(func_call, ctx, db_wrapper):
|
||||
# keyword arguments
|
||||
if func_call.kwargs:
|
||||
for kwarg in func_call.kwargs:
|
||||
if kwarg.key == 'macro_name':
|
||||
if kwarg.key == "macro_name":
|
||||
# This will remain to enable static resolution
|
||||
if type(kwarg.value).__name__ == 'Const':
|
||||
if type(kwarg.value).__name__ == "Const":
|
||||
func_name = kwarg.value.value
|
||||
possible_macro_calls.append(func_name)
|
||||
else:
|
||||
raise_compiler_error(f"The macro_name parameter ({kwarg.value.value}) "
|
||||
"to adapter.dispatch was not a string")
|
||||
elif kwarg.key == 'macro_namespace':
|
||||
raise_compiler_error(
|
||||
f"The macro_name parameter ({kwarg.value.value}) "
|
||||
"to adapter.dispatch was not a string"
|
||||
)
|
||||
elif kwarg.key == "macro_namespace":
|
||||
# This will remain to enable static resolution
|
||||
kwarg_type = type(kwarg.value).__name__
|
||||
if kwarg_type == 'Const':
|
||||
if kwarg_type == "Const":
|
||||
macro_namespace = kwarg.value.value
|
||||
else:
|
||||
raise_compiler_error("The macro_namespace parameter to adapter.dispatch "
|
||||
f"is a {kwarg_type}, not a string")
|
||||
raise_compiler_error(
|
||||
"The macro_namespace parameter to adapter.dispatch "
|
||||
f"is a {kwarg_type}, not a string"
|
||||
)
|
||||
|
||||
# positional arguments
|
||||
if packages_arg:
|
||||
if packages_arg_type == 'List':
|
||||
if packages_arg_type == "List":
|
||||
# This will remain to enable static resolution
|
||||
packages = []
|
||||
for item in packages_arg.items:
|
||||
packages.append(item.value)
|
||||
elif packages_arg_type == 'Const':
|
||||
elif packages_arg_type == "Const":
|
||||
# This will remain to enable static resolution
|
||||
macro_namespace = packages_arg.value
|
||||
|
||||
if db_wrapper:
|
||||
macro = db_wrapper.dispatch(
|
||||
func_name,
|
||||
macro_namespace=macro_namespace
|
||||
).macro
|
||||
func_name = f'{macro.package_name}.{macro.name}'
|
||||
macro = db_wrapper.dispatch(func_name, macro_namespace=macro_namespace).macro
|
||||
func_name = f"{macro.package_name}.{macro.name}"
|
||||
possible_macro_calls.append(func_name)
|
||||
else: # this is only for test/unit/test_macro_calls.py
|
||||
if macro_namespace:
|
||||
@@ -149,6 +153,6 @@ def statically_parse_adapter_dispatch(func_call, ctx, db_wrapper):
|
||||
else:
|
||||
packages = []
|
||||
for package_name in packages:
|
||||
possible_macro_calls.append(f'{package_name}.{func_name}')
|
||||
possible_macro_calls.append(f"{package_name}.{func_name}")
|
||||
|
||||
return possible_macro_calls
|
||||
|
||||
@@ -1,25 +1,22 @@
|
||||
import functools
|
||||
import requests
|
||||
from dbt.events.functions import fire_event
|
||||
from dbt.events.types import (
|
||||
RegistryProgressMakingGETRequest,
|
||||
RegistryProgressGETResponse
|
||||
)
|
||||
from dbt.events.types import RegistryProgressMakingGETRequest, RegistryProgressGETResponse
|
||||
from dbt.utils import memoized, _connection_exception_retry as connection_exception_retry
|
||||
from dbt import deprecations
|
||||
import os
|
||||
|
||||
if os.getenv('DBT_PACKAGE_HUB_URL'):
|
||||
DEFAULT_REGISTRY_BASE_URL = os.getenv('DBT_PACKAGE_HUB_URL')
|
||||
if os.getenv("DBT_PACKAGE_HUB_URL"):
|
||||
DEFAULT_REGISTRY_BASE_URL = os.getenv("DBT_PACKAGE_HUB_URL")
|
||||
else:
|
||||
DEFAULT_REGISTRY_BASE_URL = 'https://hub.getdbt.com/'
|
||||
DEFAULT_REGISTRY_BASE_URL = "https://hub.getdbt.com/"
|
||||
|
||||
|
||||
def _get_url(url, registry_base_url=None):
|
||||
if registry_base_url is None:
|
||||
registry_base_url = DEFAULT_REGISTRY_BASE_URL
|
||||
|
||||
return '{}{}'.format(registry_base_url, url)
|
||||
return "{}{}".format(registry_base_url, url)
|
||||
|
||||
|
||||
def _get_with_retries(path, registry_base_url=None):
|
||||
@@ -33,54 +30,59 @@ def _get(path, registry_base_url=None):
|
||||
resp = requests.get(url, timeout=30)
|
||||
fire_event(RegistryProgressGETResponse(url=url, resp_code=resp.status_code))
|
||||
resp.raise_for_status()
|
||||
if resp is None:
|
||||
|
||||
# It is unexpected for the content of the response to be None so if it is, raising this error
|
||||
# will cause this function to retry (if called within _get_with_retries) and hopefully get
|
||||
# a response. This seems to happen when there's an issue with the Hub.
|
||||
# See https://github.com/dbt-labs/dbt-core/issues/4577
|
||||
if resp.json() is None:
|
||||
raise requests.exceptions.ContentDecodingError(
|
||||
'Request error: The response is None', response=resp
|
||||
"Request error: The response is None", response=resp
|
||||
)
|
||||
return resp.json()
|
||||
|
||||
|
||||
def index(registry_base_url=None):
|
||||
return _get_with_retries('api/v1/index.json', registry_base_url)
|
||||
return _get_with_retries("api/v1/index.json", registry_base_url)
|
||||
|
||||
|
||||
index_cached = memoized(index)
|
||||
|
||||
|
||||
def packages(registry_base_url=None):
|
||||
return _get_with_retries('api/v1/packages.json', registry_base_url)
|
||||
return _get_with_retries("api/v1/packages.json", registry_base_url)
|
||||
|
||||
|
||||
def package(name, registry_base_url=None):
|
||||
response = _get_with_retries('api/v1/{}.json'.format(name), registry_base_url)
|
||||
response = _get_with_retries("api/v1/{}.json".format(name), registry_base_url)
|
||||
|
||||
# Either redirectnamespace or redirectname in the JSON response indicate a redirect
|
||||
# redirectnamespace redirects based on package ownership
|
||||
# redirectname redirects based on package name
|
||||
# Both can be present at the same time, or neither. Fails gracefully to old name
|
||||
|
||||
if ('redirectnamespace' in response) or ('redirectname' in response):
|
||||
if ("redirectnamespace" in response) or ("redirectname" in response):
|
||||
|
||||
if ('redirectnamespace' in response) and response['redirectnamespace'] is not None:
|
||||
use_namespace = response['redirectnamespace']
|
||||
if ("redirectnamespace" in response) and response["redirectnamespace"] is not None:
|
||||
use_namespace = response["redirectnamespace"]
|
||||
else:
|
||||
use_namespace = response['namespace']
|
||||
use_namespace = response["namespace"]
|
||||
|
||||
if ('redirectname' in response) and response['redirectname'] is not None:
|
||||
use_name = response['redirectname']
|
||||
if ("redirectname" in response) and response["redirectname"] is not None:
|
||||
use_name = response["redirectname"]
|
||||
else:
|
||||
use_name = response['name']
|
||||
use_name = response["name"]
|
||||
|
||||
new_nwo = use_namespace + "/" + use_name
|
||||
deprecations.warn('package-redirect', old_name=name, new_name=new_nwo)
|
||||
deprecations.warn("package-redirect", old_name=name, new_name=new_nwo)
|
||||
|
||||
return response
|
||||
|
||||
|
||||
def package_version(name, version, registry_base_url=None):
|
||||
return _get_with_retries('api/v1/{}/{}.json'.format(name, version), registry_base_url)
|
||||
return _get_with_retries("api/v1/{}/{}.json".format(name, version), registry_base_url)
|
||||
|
||||
|
||||
def get_available_versions(name):
|
||||
response = package(name)
|
||||
return list(response['versions'])
|
||||
return list(response["versions"])
|
||||
|
||||
@@ -11,19 +11,21 @@ import sys
|
||||
import tarfile
|
||||
import requests
|
||||
import stat
|
||||
from typing import (
|
||||
Type, NoReturn, List, Optional, Dict, Any, Tuple, Callable, Union
|
||||
)
|
||||
from typing import Type, NoReturn, List, Optional, Dict, Any, Tuple, Callable, Union
|
||||
|
||||
from dbt.events.functions import fire_event
|
||||
from dbt.events.types import (
|
||||
SystemErrorRetrievingModTime, SystemCouldNotWrite, SystemExecutingCmd, SystemStdOutMsg,
|
||||
SystemStdErrMsg, SystemReportReturnCode
|
||||
SystemErrorRetrievingModTime,
|
||||
SystemCouldNotWrite,
|
||||
SystemExecutingCmd,
|
||||
SystemStdOutMsg,
|
||||
SystemStdErrMsg,
|
||||
SystemReportReturnCode,
|
||||
)
|
||||
import dbt.exceptions
|
||||
from dbt.utils import _connection_exception_retry as connection_exception_retry
|
||||
|
||||
if sys.platform == 'win32':
|
||||
if sys.platform == "win32":
|
||||
from ctypes import WinDLL, c_bool
|
||||
else:
|
||||
WinDLL = None
|
||||
@@ -55,36 +57,35 @@ def find_matching(
|
||||
reobj = re.compile(regex, re.IGNORECASE)
|
||||
|
||||
for relative_path_to_search in relative_paths_to_search:
|
||||
absolute_path_to_search = os.path.join(
|
||||
root_path, relative_path_to_search)
|
||||
absolute_path_to_search = os.path.join(root_path, relative_path_to_search)
|
||||
walk_results = os.walk(absolute_path_to_search)
|
||||
|
||||
for current_path, subdirectories, local_files in walk_results:
|
||||
for local_file in local_files:
|
||||
absolute_path = os.path.join(current_path, local_file)
|
||||
relative_path = os.path.relpath(
|
||||
absolute_path, absolute_path_to_search
|
||||
)
|
||||
relative_path = os.path.relpath(absolute_path, absolute_path_to_search)
|
||||
modification_time = 0.0
|
||||
try:
|
||||
modification_time = os.path.getmtime(absolute_path)
|
||||
except OSError:
|
||||
fire_event(SystemErrorRetrievingModTime(path=absolute_path))
|
||||
if reobj.match(local_file):
|
||||
matching.append({
|
||||
'searched_path': relative_path_to_search,
|
||||
'absolute_path': absolute_path,
|
||||
'relative_path': relative_path,
|
||||
'modification_time': modification_time,
|
||||
})
|
||||
matching.append(
|
||||
{
|
||||
"searched_path": relative_path_to_search,
|
||||
"absolute_path": absolute_path,
|
||||
"relative_path": relative_path,
|
||||
"modification_time": modification_time,
|
||||
}
|
||||
)
|
||||
|
||||
return matching
|
||||
|
||||
|
||||
def load_file_contents(path: str, strip: bool = True) -> str:
|
||||
path = convert_path(path)
|
||||
with open(path, 'rb') as handle:
|
||||
to_return = handle.read().decode('utf-8')
|
||||
with open(path, "rb") as handle:
|
||||
to_return = handle.read().decode("utf-8")
|
||||
|
||||
if strip:
|
||||
to_return = to_return.strip()
|
||||
@@ -111,14 +112,14 @@ def make_directory(path: str) -> None:
|
||||
raise e
|
||||
|
||||
|
||||
def make_file(path: str, contents: str = '', overwrite: bool = False) -> bool:
|
||||
def make_file(path: str, contents: str = "", overwrite: bool = False) -> bool:
|
||||
"""
|
||||
Make a file at `path` assuming that the directory it resides in already
|
||||
exists. The file is saved with contents `contents`
|
||||
"""
|
||||
if overwrite or not os.path.exists(path):
|
||||
path = convert_path(path)
|
||||
with open(path, 'w') as fh:
|
||||
with open(path, "w") as fh:
|
||||
fh.write(contents)
|
||||
return True
|
||||
|
||||
@@ -130,7 +131,7 @@ def make_symlink(source: str, link_path: str) -> None:
|
||||
Create a symlink at `link_path` referring to `source`.
|
||||
"""
|
||||
if not supports_symlinks():
|
||||
dbt.exceptions.system_error('create a symbolic link')
|
||||
dbt.exceptions.system_error("create a symbolic link")
|
||||
|
||||
os.symlink(source, link_path)
|
||||
|
||||
@@ -139,11 +140,11 @@ def supports_symlinks() -> bool:
|
||||
return getattr(os, "symlink", None) is not None
|
||||
|
||||
|
||||
def write_file(path: str, contents: str = '') -> bool:
|
||||
def write_file(path: str, contents: str = "") -> bool:
|
||||
path = convert_path(path)
|
||||
try:
|
||||
make_directory(os.path.dirname(path))
|
||||
with open(path, 'w', encoding='utf-8') as f:
|
||||
with open(path, "w", encoding="utf-8") as f:
|
||||
f.write(str(contents))
|
||||
except Exception as exc:
|
||||
# note that you can't just catch FileNotFound, because sometimes
|
||||
@@ -152,15 +153,15 @@ def write_file(path: str, contents: str = '') -> bool:
|
||||
# sometimes windows fails to write paths that are less than the length
|
||||
# limit. So on windows, suppress all errors that happen from writing
|
||||
# to disk.
|
||||
if os.name == 'nt':
|
||||
if os.name == "nt":
|
||||
# sometimes we get a winerror of 3 which means the path was
|
||||
# definitely too long, but other times we don't and it means the
|
||||
# path was just probably too long. This is probably based on the
|
||||
# windows/python version.
|
||||
if getattr(exc, 'winerror', 0) == 3:
|
||||
reason = 'Path was too long'
|
||||
if getattr(exc, "winerror", 0) == 3:
|
||||
reason = "Path was too long"
|
||||
else:
|
||||
reason = 'Path was possibly too long'
|
||||
reason = "Path was possibly too long"
|
||||
# all our hard work and the path was still too long. Log and
|
||||
# continue.
|
||||
fire_event(SystemCouldNotWrite(path=path, reason=reason, exc=exc))
|
||||
@@ -177,9 +178,7 @@ def write_json(path: str, data: Dict[str, Any]) -> bool:
|
||||
return write_file(path, json.dumps(data, cls=dbt.utils.JSONEncoder))
|
||||
|
||||
|
||||
def _windows_rmdir_readonly(
|
||||
func: Callable[[str], Any], path: str, exc: Tuple[Any, OSError, Any]
|
||||
):
|
||||
def _windows_rmdir_readonly(func: Callable[[str], Any], path: str, exc: Tuple[Any, OSError, Any]):
|
||||
exception_val = exc[1]
|
||||
if exception_val.errno == errno.EACCES:
|
||||
os.chmod(path, stat.S_IWUSR)
|
||||
@@ -196,10 +195,7 @@ def resolve_path_from_base(path_to_resolve: str, base_path: str) -> str:
|
||||
If path_to_resolve is an absolute path or a user path (~), just
|
||||
resolve it to an absolute path and return.
|
||||
"""
|
||||
return os.path.abspath(
|
||||
os.path.join(
|
||||
base_path,
|
||||
os.path.expanduser(path_to_resolve)))
|
||||
return os.path.abspath(os.path.join(base_path, os.path.expanduser(path_to_resolve)))
|
||||
|
||||
|
||||
def rmdir(path: str) -> None:
|
||||
@@ -209,7 +205,7 @@ def rmdir(path: str) -> None:
|
||||
cloned via git) can cause rmtree to throw a PermissionError exception
|
||||
"""
|
||||
path = convert_path(path)
|
||||
if sys.platform == 'win32':
|
||||
if sys.platform == "win32":
|
||||
onerror = _windows_rmdir_readonly
|
||||
else:
|
||||
onerror = None
|
||||
@@ -228,7 +224,7 @@ def _win_prepare_path(path: str) -> str:
|
||||
# letter back in.
|
||||
# Unless it starts with '\\'. In that case, the path is a UNC mount point
|
||||
# and splitdrive will be fine.
|
||||
if not path.startswith('\\\\') and path.startswith('\\'):
|
||||
if not path.startswith("\\\\") and path.startswith("\\"):
|
||||
curdrive = os.path.splitdrive(os.getcwd())[0]
|
||||
path = curdrive + path
|
||||
|
||||
@@ -243,7 +239,7 @@ def _win_prepare_path(path: str) -> str:
|
||||
|
||||
|
||||
def _supports_long_paths() -> bool:
|
||||
if sys.platform != 'win32':
|
||||
if sys.platform != "win32":
|
||||
return True
|
||||
# Eryk Sun says to use `WinDLL('ntdll')` instead of `windll.ntdll` because
|
||||
# of pointer caching in a comment here:
|
||||
@@ -251,11 +247,11 @@ def _supports_long_paths() -> bool:
|
||||
# I don't know exaclty what he means, but I am inclined to believe him as
|
||||
# he's pretty active on Python windows bugs!
|
||||
try:
|
||||
dll = WinDLL('ntdll')
|
||||
dll = WinDLL("ntdll")
|
||||
except OSError: # I don't think this happens? you need ntdll to run python
|
||||
return False
|
||||
# not all windows versions have it at all
|
||||
if not hasattr(dll, 'RtlAreLongPathsEnabled'):
|
||||
if not hasattr(dll, "RtlAreLongPathsEnabled"):
|
||||
return False
|
||||
# tell windows we want to get back a single unsigned byte (a bool).
|
||||
dll.RtlAreLongPathsEnabled.restype = c_bool
|
||||
@@ -275,7 +271,7 @@ def convert_path(path: str) -> str:
|
||||
if _supports_long_paths():
|
||||
return path
|
||||
|
||||
prefix = '\\\\?\\'
|
||||
prefix = "\\\\?\\"
|
||||
# Nothing to do
|
||||
if path.startswith(prefix):
|
||||
return path
|
||||
@@ -306,44 +302,40 @@ def path_is_symlink(path: str) -> bool:
|
||||
|
||||
def open_dir_cmd() -> str:
|
||||
# https://docs.python.org/2/library/sys.html#sys.platform
|
||||
if sys.platform == 'win32':
|
||||
return 'start'
|
||||
if sys.platform == "win32":
|
||||
return "start"
|
||||
|
||||
elif sys.platform == 'darwin':
|
||||
return 'open'
|
||||
elif sys.platform == "darwin":
|
||||
return "open"
|
||||
|
||||
else:
|
||||
return 'xdg-open'
|
||||
return "xdg-open"
|
||||
|
||||
|
||||
def _handle_posix_cwd_error(
|
||||
exc: OSError, cwd: str, cmd: List[str]
|
||||
) -> NoReturn:
|
||||
def _handle_posix_cwd_error(exc: OSError, cwd: str, cmd: List[str]) -> NoReturn:
|
||||
if exc.errno == errno.ENOENT:
|
||||
message = 'Directory does not exist'
|
||||
message = "Directory does not exist"
|
||||
elif exc.errno == errno.EACCES:
|
||||
message = 'Current user cannot access directory, check permissions'
|
||||
message = "Current user cannot access directory, check permissions"
|
||||
elif exc.errno == errno.ENOTDIR:
|
||||
message = 'Not a directory'
|
||||
message = "Not a directory"
|
||||
else:
|
||||
message = 'Unknown OSError: {} - cwd'.format(str(exc))
|
||||
message = "Unknown OSError: {} - cwd".format(str(exc))
|
||||
raise dbt.exceptions.WorkingDirectoryError(cwd, cmd, message)
|
||||
|
||||
|
||||
def _handle_posix_cmd_error(
|
||||
exc: OSError, cwd: str, cmd: List[str]
|
||||
) -> NoReturn:
|
||||
def _handle_posix_cmd_error(exc: OSError, cwd: str, cmd: List[str]) -> NoReturn:
|
||||
if exc.errno == errno.ENOENT:
|
||||
message = "Could not find command, ensure it is in the user's PATH"
|
||||
elif exc.errno == errno.EACCES:
|
||||
message = 'User does not have permissions for this command'
|
||||
message = "User does not have permissions for this command"
|
||||
else:
|
||||
message = 'Unknown OSError: {} - cmd'.format(str(exc))
|
||||
message = "Unknown OSError: {} - cmd".format(str(exc))
|
||||
raise dbt.exceptions.ExecutableError(cwd, cmd, message)
|
||||
|
||||
|
||||
def _handle_posix_error(exc: OSError, cwd: str, cmd: List[str]) -> NoReturn:
|
||||
"""OSError handling for posix systems.
|
||||
"""OSError handling for POSIX systems.
|
||||
|
||||
Some things that could happen to trigger an OSError:
|
||||
- cwd could not exist
|
||||
@@ -363,7 +355,7 @@ def _handle_posix_error(exc: OSError, cwd: str, cmd: List[str]) -> NoReturn:
|
||||
- exc.errno == EACCES
|
||||
- exc.filename == None(?)
|
||||
"""
|
||||
if getattr(exc, 'filename', None) == cwd:
|
||||
if getattr(exc, "filename", None) == cwd:
|
||||
_handle_posix_cwd_error(exc, cwd, cmd)
|
||||
else:
|
||||
_handle_posix_cmd_error(exc, cwd, cmd)
|
||||
@@ -372,45 +364,45 @@ def _handle_posix_error(exc: OSError, cwd: str, cmd: List[str]) -> NoReturn:
|
||||
def _handle_windows_error(exc: OSError, cwd: str, cmd: List[str]) -> NoReturn:
|
||||
cls: Type[dbt.exceptions.Exception] = dbt.exceptions.CommandError
|
||||
if exc.errno == errno.ENOENT:
|
||||
message = ("Could not find command, ensure it is in the user's PATH "
|
||||
"and that the user has permissions to run it")
|
||||
message = (
|
||||
"Could not find command, ensure it is in the user's PATH "
|
||||
"and that the user has permissions to run it"
|
||||
)
|
||||
cls = dbt.exceptions.ExecutableError
|
||||
elif exc.errno == errno.ENOEXEC:
|
||||
message = ('Command was not executable, ensure it is valid')
|
||||
message = "Command was not executable, ensure it is valid"
|
||||
cls = dbt.exceptions.ExecutableError
|
||||
elif exc.errno == errno.ENOTDIR:
|
||||
message = ('Unable to cd: path does not exist, user does not have'
|
||||
' permissions, or not a directory')
|
||||
message = (
|
||||
"Unable to cd: path does not exist, user does not have"
|
||||
" permissions, or not a directory"
|
||||
)
|
||||
cls = dbt.exceptions.WorkingDirectoryError
|
||||
else:
|
||||
message = 'Unknown error: {} (errno={}: "{}")'.format(
|
||||
str(exc), exc.errno, errno.errorcode.get(exc.errno, '<Unknown!>')
|
||||
str(exc), exc.errno, errno.errorcode.get(exc.errno, "<Unknown!>")
|
||||
)
|
||||
raise cls(cwd, cmd, message)
|
||||
|
||||
|
||||
def _interpret_oserror(exc: OSError, cwd: str, cmd: List[str]) -> NoReturn:
|
||||
"""Interpret an OSError exc and raise the appropriate dbt exception.
|
||||
|
||||
"""
|
||||
"""Interpret an OSError exception and raise the appropriate dbt exception."""
|
||||
if len(cmd) == 0:
|
||||
raise dbt.exceptions.CommandError(cwd, cmd)
|
||||
|
||||
# all of these functions raise unconditionally
|
||||
if os.name == 'nt':
|
||||
if os.name == "nt":
|
||||
_handle_windows_error(exc, cwd, cmd)
|
||||
else:
|
||||
_handle_posix_error(exc, cwd, cmd)
|
||||
|
||||
# this should not be reachable, raise _something_ at least!
|
||||
raise dbt.exceptions.InternalException(
|
||||
'Unhandled exception in _interpret_oserror: {}'.format(exc)
|
||||
"Unhandled exception in _interpret_oserror: {}".format(exc)
|
||||
)
|
||||
|
||||
|
||||
def run_cmd(
|
||||
cwd: str, cmd: List[str], env: Optional[Dict[str, Any]] = None
|
||||
) -> Tuple[bytes, bytes]:
|
||||
def run_cmd(cwd: str, cmd: List[str], env: Optional[Dict[str, Any]] = None) -> Tuple[bytes, bytes]:
|
||||
fire_event(SystemExecutingCmd(cmd=cmd))
|
||||
if len(cmd) == 0:
|
||||
raise dbt.exceptions.CommandError(cwd, cmd)
|
||||
@@ -427,11 +419,8 @@ def run_cmd(
|
||||
if exe_pth:
|
||||
cmd = [os.path.abspath(exe_pth)] + list(cmd[1:])
|
||||
proc = subprocess.Popen(
|
||||
cmd,
|
||||
cwd=cwd,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
env=full_env)
|
||||
cmd, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=full_env
|
||||
)
|
||||
|
||||
out, err = proc.communicate()
|
||||
except OSError as exc:
|
||||
@@ -442,8 +431,7 @@ def run_cmd(
|
||||
|
||||
if proc.returncode != 0:
|
||||
fire_event(SystemReportReturnCode(returncode=proc.returncode))
|
||||
raise dbt.exceptions.CommandResultError(cwd, cmd, proc.returncode,
|
||||
out, err)
|
||||
raise dbt.exceptions.CommandResultError(cwd, cmd, proc.returncode, out, err)
|
||||
|
||||
return out, err
|
||||
|
||||
@@ -455,13 +443,11 @@ def download_with_retries(
|
||||
connection_exception_retry(download_fn, 5)
|
||||
|
||||
|
||||
def download(
|
||||
url: str, path: str, timeout: Optional[Union[float, tuple]] = None
|
||||
) -> None:
|
||||
def download(url: str, path: str, timeout: Optional[Union[float, tuple]] = None) -> None:
|
||||
path = convert_path(path)
|
||||
connection_timeout = timeout or float(os.getenv('DBT_HTTP_TIMEOUT', 10))
|
||||
connection_timeout = timeout or float(os.getenv("DBT_HTTP_TIMEOUT", 10))
|
||||
response = requests.get(url, timeout=connection_timeout)
|
||||
with open(path, 'wb') as handle:
|
||||
with open(path, "wb") as handle:
|
||||
for block in response.iter_content(1024 * 64):
|
||||
handle.write(block)
|
||||
|
||||
@@ -480,12 +466,10 @@ def rename(from_path: str, to_path: str, force: bool = False) -> None:
|
||||
shutil.move(from_path, to_path)
|
||||
|
||||
|
||||
def untar_package(
|
||||
tar_path: str, dest_dir: str, rename_to: Optional[str] = None
|
||||
) -> None:
|
||||
def untar_package(tar_path: str, dest_dir: str, rename_to: Optional[str] = None) -> None:
|
||||
tar_path = convert_path(tar_path)
|
||||
tar_dir_name = None
|
||||
with tarfile.open(tar_path, 'r') as tarball:
|
||||
with tarfile.open(tar_path, "r:gz") as tarball:
|
||||
tarball.extractall(dest_dir)
|
||||
tar_dir_name = os.path.commonprefix(tarball.getnames())
|
||||
if rename_to:
|
||||
@@ -501,7 +485,7 @@ def chmod_and_retry(func, path, exc_info):
|
||||
We want to retry most operations here, but listdir is one that we know will
|
||||
be useless.
|
||||
"""
|
||||
if func is os.listdir or os.name != 'nt':
|
||||
if func is os.listdir or os.name != "nt":
|
||||
raise
|
||||
os.chmod(path, stat.S_IREAD | stat.S_IWRITE)
|
||||
# on error,this will raise.
|
||||
@@ -517,12 +501,12 @@ def move(src, dst):
|
||||
directory on windows when it has read-only files in it and the move is
|
||||
between two drives.
|
||||
|
||||
This is almost identical to the real shutil.move, except it uses our rmtree
|
||||
This is almost identical to the real shutil.move, except it, uses our rmtree
|
||||
and skips handling non-windows OSes since the existing one works ok there.
|
||||
"""
|
||||
src = convert_path(src)
|
||||
dst = convert_path(dst)
|
||||
if os.name != 'nt':
|
||||
if os.name != "nt":
|
||||
return shutil.move(src, dst)
|
||||
|
||||
if os.path.isdir(dst):
|
||||
@@ -530,7 +514,7 @@ def move(src, dst):
|
||||
os.rename(src, dst)
|
||||
return
|
||||
|
||||
dst = os.path.join(dst, os.path.basename(src.rstrip('/\\')))
|
||||
dst = os.path.join(dst, os.path.basename(src.rstrip("/\\")))
|
||||
if os.path.exists(dst):
|
||||
raise EnvironmentError("Path '{}' already exists".format(dst))
|
||||
|
||||
@@ -539,11 +523,10 @@ def move(src, dst):
|
||||
except OSError:
|
||||
# probably different drives
|
||||
if os.path.isdir(src):
|
||||
if _absnorm(dst + '\\').startswith(_absnorm(src + '\\')):
|
||||
if _absnorm(dst + "\\").startswith(_absnorm(src + "\\")):
|
||||
# dst is inside src
|
||||
raise EnvironmentError(
|
||||
"Cannot move a directory '{}' into itself '{}'"
|
||||
.format(src, dst)
|
||||
"Cannot move a directory '{}' into itself '{}'".format(src, dst)
|
||||
)
|
||||
shutil.copytree(src, dst, symlinks=True)
|
||||
rmtree(src)
|
||||
@@ -553,7 +536,7 @@ def move(src, dst):
|
||||
|
||||
|
||||
def rmtree(path):
|
||||
"""Recursively remove path. On permissions errors on windows, try to remove
|
||||
"""Recursively remove the path. On permissions errors on windows, try to remove
|
||||
the read-only flag and try again.
|
||||
"""
|
||||
path = convert_path(path)
|
||||
|
||||
@@ -4,15 +4,9 @@ import yaml
|
||||
|
||||
# the C version is faster, but it doesn't always exist
|
||||
try:
|
||||
from yaml import (
|
||||
CLoader as Loader,
|
||||
CSafeLoader as SafeLoader,
|
||||
CDumper as Dumper
|
||||
)
|
||||
from yaml import CLoader as Loader, CSafeLoader as SafeLoader, CDumper as Dumper
|
||||
except ImportError:
|
||||
from yaml import ( # type: ignore # noqa: F401
|
||||
Loader, SafeLoader, Dumper
|
||||
)
|
||||
from yaml import Loader, SafeLoader, Dumper # type: ignore # noqa: F401
|
||||
|
||||
|
||||
YAML_ERROR_MESSAGE = """
|
||||
@@ -32,14 +26,12 @@ def line_no(i, line, width=3):
|
||||
|
||||
|
||||
def prefix_with_line_numbers(string, no_start, no_end):
|
||||
line_list = string.split('\n')
|
||||
line_list = string.split("\n")
|
||||
|
||||
numbers = range(no_start, no_end)
|
||||
relevant_lines = line_list[no_start:no_end]
|
||||
|
||||
return "\n".join([
|
||||
line_no(i + 1, line) for (i, line) in zip(numbers, relevant_lines)
|
||||
])
|
||||
return "\n".join([line_no(i + 1, line) for (i, line) in zip(numbers, relevant_lines)])
|
||||
|
||||
|
||||
def contextualized_yaml_error(raw_contents, error):
|
||||
@@ -50,9 +42,9 @@ def contextualized_yaml_error(raw_contents, error):
|
||||
|
||||
nice_error = prefix_with_line_numbers(raw_contents, min_line, max_line)
|
||||
|
||||
return YAML_ERROR_MESSAGE.format(line_number=mark.line + 1,
|
||||
nice_error=nice_error,
|
||||
raw_error=error)
|
||||
return YAML_ERROR_MESSAGE.format(
|
||||
line_number=mark.line + 1, nice_error=nice_error, raw_error=error
|
||||
)
|
||||
|
||||
|
||||
def safe_load(contents) -> Optional[Dict[str, Any]]:
|
||||
@@ -63,7 +55,7 @@ def load_yaml_text(contents):
|
||||
try:
|
||||
return safe_load(contents)
|
||||
except (yaml.scanner.ScannerError, yaml.YAMLError) as e:
|
||||
if hasattr(e, 'problem_mark'):
|
||||
if hasattr(e, "problem_mark"):
|
||||
error = contextualized_yaml_error(contents, e)
|
||||
else:
|
||||
error = str(e)
|
||||
|
||||
@@ -3,13 +3,14 @@ from collections import defaultdict
|
||||
from typing import List, Dict, Any, Tuple, cast, Optional
|
||||
|
||||
import networkx as nx # type: ignore
|
||||
import pickle
|
||||
import sqlparse
|
||||
|
||||
from dbt import flags
|
||||
from dbt.adapters.factory import get_adapter
|
||||
from dbt.clients import jinja
|
||||
from dbt.clients.system import make_directory
|
||||
from dbt.context.providers import generate_runtime_model_context
|
||||
from dbt.context.providers import generate_runtime_model_context, generate_runtime_sql_operation_context
|
||||
from dbt.contracts.graph.manifest import Manifest, UniqueID
|
||||
from dbt.contracts.graph.compiled import (
|
||||
COMPILED_TYPES,
|
||||
@@ -18,6 +19,8 @@ from dbt.contracts.graph.compiled import (
|
||||
InjectedCTE,
|
||||
ManifestNode,
|
||||
NonSourceCompiledNode,
|
||||
CompiledSqlNode,
|
||||
CompiledRPCNode,
|
||||
)
|
||||
from dbt.contracts.graph.parsed import ParsedNode
|
||||
from dbt.exceptions import (
|
||||
@@ -32,29 +35,29 @@ from dbt.node_types import NodeType
|
||||
from dbt.events.format import pluralize
|
||||
import dbt.tracking
|
||||
|
||||
graph_file_name = 'graph.gpickle'
|
||||
graph_file_name = "graph.gpickle"
|
||||
|
||||
|
||||
def _compiled_type_for(model: ParsedNode):
|
||||
if type(model) not in COMPILED_TYPES:
|
||||
raise InternalException(
|
||||
f'Asked to compile {type(model)} node, but it has no compiled form'
|
||||
f"Asked to compile {type(model)} node, but it has no compiled form"
|
||||
)
|
||||
return COMPILED_TYPES[type(model)]
|
||||
|
||||
|
||||
def print_compile_stats(stats):
|
||||
names = {
|
||||
NodeType.Model: 'model',
|
||||
NodeType.Test: 'test',
|
||||
NodeType.Snapshot: 'snapshot',
|
||||
NodeType.Analysis: 'analysis',
|
||||
NodeType.Macro: 'macro',
|
||||
NodeType.Operation: 'operation',
|
||||
NodeType.Seed: 'seed file',
|
||||
NodeType.Source: 'source',
|
||||
NodeType.Exposure: 'exposure',
|
||||
NodeType.Metric: 'metric'
|
||||
NodeType.Model: "model",
|
||||
NodeType.Test: "test",
|
||||
NodeType.Snapshot: "snapshot",
|
||||
NodeType.Analysis: "analysis",
|
||||
NodeType.Macro: "macro",
|
||||
NodeType.Operation: "operation",
|
||||
NodeType.Seed: "seed file",
|
||||
NodeType.Source: "source",
|
||||
NodeType.Exposure: "exposure",
|
||||
NodeType.Metric: "metric",
|
||||
}
|
||||
|
||||
results = {k: 0 for k in names.keys()}
|
||||
@@ -65,10 +68,7 @@ def print_compile_stats(stats):
|
||||
resource_counts = {k.pluralize(): v for k, v in results.items()}
|
||||
dbt.tracking.track_resource_counts(resource_counts)
|
||||
|
||||
stat_line = ", ".join([
|
||||
pluralize(ct, names.get(t)) for t, ct in results.items()
|
||||
if t in names
|
||||
])
|
||||
stat_line = ", ".join([pluralize(ct, names.get(t)) for t, ct in results.items() if t in names])
|
||||
|
||||
fire_event(FoundStats(stat_line=stat_line))
|
||||
|
||||
@@ -112,13 +112,13 @@ def _extend_prepended_ctes(prepended_ctes, new_prepended_ctes):
|
||||
|
||||
|
||||
def _get_tests_for_node(manifest: Manifest, unique_id: UniqueID) -> List[UniqueID]:
|
||||
""" Get a list of tests that depend on the node with the
|
||||
provided unique id """
|
||||
"""Get a list of tests that depend on the node with the
|
||||
provided unique id"""
|
||||
|
||||
tests = []
|
||||
if unique_id in manifest.child_map:
|
||||
for child_unique_id in manifest.child_map[unique_id]:
|
||||
if child_unique_id.startswith('test.'):
|
||||
if child_unique_id.startswith("test."):
|
||||
tests.append(child_unique_id)
|
||||
|
||||
return tests
|
||||
@@ -162,7 +162,8 @@ class Linker:
|
||||
for node_id in self.graph:
|
||||
data = manifest.expect(node_id).to_dict(omit_none=True)
|
||||
out_graph.add_node(node_id, **data)
|
||||
nx.write_gpickle(out_graph, outfile)
|
||||
with open(outfile, "wb") as outfh:
|
||||
pickle.dump(out_graph, outfh, protocol=pickle.HIGHEST_PROTOCOL)
|
||||
|
||||
|
||||
class Compiler:
|
||||
@@ -181,10 +182,12 @@ class Compiler:
|
||||
manifest: Manifest,
|
||||
extra_context: Dict[str, Any],
|
||||
) -> Dict[str, Any]:
|
||||
|
||||
context = generate_runtime_model_context(
|
||||
node, self.config, manifest
|
||||
)
|
||||
|
||||
if isinstance(node, CompiledSqlNode) or isinstance(node, CompiledRPCNode):
|
||||
# or node.resource_type in ('SqlOperation', 'RPCCall'):
|
||||
context = generate_runtime_sql_operation_context(node, self.config, manifest)
|
||||
else:
|
||||
context = generate_runtime_model_context(node, self.config, manifest)
|
||||
context.update(extra_context)
|
||||
if isinstance(node, CompiledGenericTestNode):
|
||||
# for test nodes, add a special keyword args value to the context
|
||||
@@ -242,26 +245,21 @@ class Compiler:
|
||||
|
||||
with_stmt = None
|
||||
for token in parsed.tokens:
|
||||
if token.is_keyword and token.normalized == 'WITH':
|
||||
if token.is_keyword and token.normalized == "WITH":
|
||||
with_stmt = token
|
||||
break
|
||||
|
||||
if with_stmt is None:
|
||||
# no with stmt, add one, and inject CTEs right at the beginning
|
||||
first_token = parsed.token_first()
|
||||
with_stmt = sqlparse.sql.Token(sqlparse.tokens.Keyword, 'with')
|
||||
with_stmt = sqlparse.sql.Token(sqlparse.tokens.Keyword, "with")
|
||||
parsed.insert_before(first_token, with_stmt)
|
||||
else:
|
||||
# stmt exists, add a comma (which will come after injected CTEs)
|
||||
trailing_comma = sqlparse.sql.Token(
|
||||
sqlparse.tokens.Punctuation, ','
|
||||
)
|
||||
trailing_comma = sqlparse.sql.Token(sqlparse.tokens.Punctuation, ",")
|
||||
parsed.insert_after(with_stmt, trailing_comma)
|
||||
|
||||
token = sqlparse.sql.Token(
|
||||
sqlparse.tokens.Keyword,
|
||||
", ".join(c.sql for c in ctes)
|
||||
)
|
||||
token = sqlparse.sql.Token(sqlparse.tokens.Keyword, ", ".join(c.sql for c in ctes))
|
||||
parsed.insert_after(with_stmt, token)
|
||||
|
||||
return str(parsed)
|
||||
@@ -280,9 +278,7 @@ class Compiler:
|
||||
inserting CTEs into the SQL.
|
||||
"""
|
||||
if model.compiled_sql is None:
|
||||
raise RuntimeException(
|
||||
'Cannot inject ctes into an unparsed node', model
|
||||
)
|
||||
raise RuntimeException("Cannot inject ctes into an unparsed node", model)
|
||||
if model.extra_ctes_injected:
|
||||
return (model, model.extra_ctes)
|
||||
|
||||
@@ -303,17 +299,17 @@ class Compiler:
|
||||
for cte in model.extra_ctes:
|
||||
if cte.id not in manifest.nodes:
|
||||
raise InternalException(
|
||||
f'During compilation, found a cte reference that '
|
||||
f'could not be resolved: {cte.id}'
|
||||
f"During compilation, found a cte reference that "
|
||||
f"could not be resolved: {cte.id}"
|
||||
)
|
||||
cte_model = manifest.nodes[cte.id]
|
||||
|
||||
if not cte_model.is_ephemeral_model:
|
||||
raise InternalException(f'{cte.id} is not ephemeral')
|
||||
raise InternalException(f"{cte.id} is not ephemeral")
|
||||
|
||||
# This model has already been compiled, so it's been
|
||||
# through here before
|
||||
if getattr(cte_model, 'compiled', False):
|
||||
if getattr(cte_model, "compiled", False):
|
||||
assert isinstance(cte_model, tuple(COMPILED_TYPES.values()))
|
||||
cte_model = cast(NonSourceCompiledNode, cte_model)
|
||||
new_prepended_ctes = cte_model.extra_ctes
|
||||
@@ -322,13 +318,11 @@ class Compiler:
|
||||
else:
|
||||
# This is an ephemeral parsed model that we can compile.
|
||||
# Compile and update the node
|
||||
cte_model = self._compile_node(
|
||||
cte_model, manifest, extra_context)
|
||||
cte_model = self._compile_node(cte_model, manifest, extra_context)
|
||||
# recursively call this method
|
||||
cte_model, new_prepended_ctes = \
|
||||
self._recursively_prepend_ctes(
|
||||
cte_model, manifest, extra_context
|
||||
)
|
||||
cte_model, new_prepended_ctes = self._recursively_prepend_ctes(
|
||||
cte_model, manifest, extra_context
|
||||
)
|
||||
# Save compiled SQL file and sync manifest
|
||||
self._write_node(cte_model)
|
||||
manifest.sync_update_node(cte_model)
|
||||
@@ -336,10 +330,8 @@ class Compiler:
|
||||
_extend_prepended_ctes(prepended_ctes, new_prepended_ctes)
|
||||
|
||||
new_cte_name = self.add_ephemeral_prefix(cte_model.name)
|
||||
rendered_sql = (
|
||||
cte_model._pre_injected_sql or cte_model.compiled_sql
|
||||
)
|
||||
sql = f' {new_cte_name} as (\n{rendered_sql}\n)'
|
||||
rendered_sql = cte_model._pre_injected_sql or cte_model.compiled_sql
|
||||
sql = f" {new_cte_name} as (\n{rendered_sql}\n)"
|
||||
|
||||
_add_prepended_cte(prepended_ctes, InjectedCTE(id=cte.id, sql=sql))
|
||||
|
||||
@@ -373,17 +365,17 @@ class Compiler:
|
||||
fire_event(CompilingNode(unique_id=node.unique_id))
|
||||
|
||||
data = node.to_dict(omit_none=True)
|
||||
data.update({
|
||||
'compiled': False,
|
||||
'compiled_sql': None,
|
||||
'extra_ctes_injected': False,
|
||||
'extra_ctes': [],
|
||||
})
|
||||
data.update(
|
||||
{
|
||||
"compiled": False,
|
||||
"compiled_sql": None,
|
||||
"extra_ctes_injected": False,
|
||||
"extra_ctes": [],
|
||||
}
|
||||
)
|
||||
compiled_node = _compiled_type_for(node).from_dict(data)
|
||||
|
||||
context = self._create_node_context(
|
||||
compiled_node, manifest, extra_context
|
||||
)
|
||||
context = self._create_node_context(compiled_node, manifest, extra_context)
|
||||
|
||||
compiled_node.compiled_sql = jinja.get_rendered(
|
||||
node.raw_sql,
|
||||
@@ -403,22 +395,14 @@ class Compiler:
|
||||
if flags.WRITE_JSON:
|
||||
linker.write_graph(graph_path, manifest)
|
||||
|
||||
def link_node(
|
||||
self, linker: Linker, node: GraphMemberNode, manifest: Manifest
|
||||
):
|
||||
def link_node(self, linker: Linker, node: GraphMemberNode, manifest: Manifest):
|
||||
linker.add_node(node.unique_id)
|
||||
|
||||
for dependency in node.depends_on_nodes:
|
||||
if dependency in manifest.nodes:
|
||||
linker.dependency(
|
||||
node.unique_id,
|
||||
(manifest.nodes[dependency].unique_id)
|
||||
)
|
||||
linker.dependency(node.unique_id, (manifest.nodes[dependency].unique_id))
|
||||
elif dependency in manifest.sources:
|
||||
linker.dependency(
|
||||
node.unique_id,
|
||||
(manifest.sources[dependency].unique_id)
|
||||
)
|
||||
linker.dependency(node.unique_id, (manifest.sources[dependency].unique_id))
|
||||
else:
|
||||
dependency_not_found(node, dependency)
|
||||
|
||||
@@ -442,10 +426,10 @@ class Compiler:
|
||||
self.add_test_edges(linker, manifest)
|
||||
|
||||
def add_test_edges(self, linker: Linker, manifest: Manifest) -> None:
|
||||
""" This method adds additional edges to the DAG. For a given non-test
|
||||
"""This method adds additional edges to the DAG. For a given non-test
|
||||
executable node, add an edge from an upstream test to the given node if
|
||||
the set of nodes the test depends on is a subset of the upstream nodes
|
||||
for the given node. """
|
||||
for the given node."""
|
||||
|
||||
# Given a graph:
|
||||
# model1 --> model2 --> model3
|
||||
@@ -465,25 +449,18 @@ class Compiler:
|
||||
# If node is executable (in manifest.nodes) and does _not_
|
||||
# represent a test, continue.
|
||||
if (
|
||||
node_id in manifest.nodes and
|
||||
manifest.nodes[node_id].resource_type != NodeType.Test
|
||||
node_id in manifest.nodes
|
||||
and manifest.nodes[node_id].resource_type != NodeType.Test
|
||||
):
|
||||
# Get *everything* upstream of the node
|
||||
all_upstream_nodes = nx.traversal.bfs_tree(
|
||||
linker.graph, node_id, reverse=True
|
||||
)
|
||||
all_upstream_nodes = nx.traversal.bfs_tree(linker.graph, node_id, reverse=True)
|
||||
# Get the set of upstream nodes not including the current node.
|
||||
upstream_nodes = set([
|
||||
n for n in all_upstream_nodes if n != node_id
|
||||
])
|
||||
upstream_nodes = set([n for n in all_upstream_nodes if n != node_id])
|
||||
|
||||
# Get all tests that depend on any upstream nodes.
|
||||
upstream_tests = []
|
||||
for upstream_node in upstream_nodes:
|
||||
upstream_tests += _get_tests_for_node(
|
||||
manifest,
|
||||
upstream_node
|
||||
)
|
||||
upstream_tests += _get_tests_for_node(manifest, upstream_node)
|
||||
|
||||
for upstream_test in upstream_tests:
|
||||
# Get the set of all nodes that the test depends on
|
||||
@@ -492,18 +469,13 @@ class Compiler:
|
||||
# relationship tests). Test nodes do not distinguish
|
||||
# between what node the test is "testing" and what
|
||||
# node(s) it depends on.
|
||||
test_depends_on = set(
|
||||
manifest.nodes[upstream_test].depends_on_nodes
|
||||
)
|
||||
test_depends_on = set(manifest.nodes[upstream_test].depends_on_nodes)
|
||||
|
||||
# If the set of nodes that an upstream test depends on
|
||||
# is a subset of all upstream nodes of the current node,
|
||||
# add an edge from the upstream test to the current node.
|
||||
if (test_depends_on.issubset(upstream_nodes)):
|
||||
linker.graph.add_edge(
|
||||
upstream_test,
|
||||
node_id
|
||||
)
|
||||
if test_depends_on.issubset(upstream_nodes):
|
||||
linker.graph.add_edge(upstream_test, node_id)
|
||||
|
||||
def compile(self, manifest: Manifest, write=True, add_test_edges=False) -> Graph:
|
||||
self.initialize()
|
||||
@@ -521,16 +493,13 @@ class Compiler:
|
||||
|
||||
# writes the "compiled_sql" into the target/compiled directory
|
||||
def _write_node(self, node: NonSourceCompiledNode) -> ManifestNode:
|
||||
if (not node.extra_ctes_injected or
|
||||
node.resource_type == NodeType.Snapshot):
|
||||
if not node.extra_ctes_injected or node.resource_type == NodeType.Snapshot:
|
||||
return node
|
||||
fire_event(WritingInjectedSQLForNode(unique_id=node.unique_id))
|
||||
|
||||
if node.compiled_sql:
|
||||
node.compiled_path = node.write_node(
|
||||
self.config.target_path,
|
||||
'compiled',
|
||||
node.compiled_sql
|
||||
self.config.target_path, "compiled", node.compiled_sql
|
||||
)
|
||||
return node
|
||||
|
||||
@@ -549,9 +518,7 @@ class Compiler:
|
||||
"""
|
||||
node = self._compile_node(node, manifest, extra_context)
|
||||
|
||||
node, _ = self._recursively_prepend_ctes(
|
||||
node, manifest, extra_context
|
||||
)
|
||||
node, _ = self._recursively_prepend_ctes(node, manifest, extra_context)
|
||||
if write:
|
||||
self._write_node(node)
|
||||
return node
|
||||
|
||||
1
core/dbt/config/README.md
Normal file
1
core/dbt/config/README.md
Normal file
@@ -0,0 +1 @@
|
||||
# Config README
|
||||
@@ -23,7 +23,7 @@ from .renderer import ProfileRenderer
|
||||
|
||||
DEFAULT_THREADS = 1
|
||||
|
||||
DEFAULT_PROFILES_DIR = os.path.join(os.path.expanduser('~'), '.dbt')
|
||||
DEFAULT_PROFILES_DIR = os.path.join(os.path.expanduser("~"), ".dbt")
|
||||
|
||||
INVALID_PROFILE_MESSAGE = """
|
||||
dbt encountered an error while trying to read your profiles.yml file.
|
||||
@@ -43,11 +43,13 @@ Here, [profile name] should be replaced with a profile name
|
||||
defined in your profiles.yml file. You can find profiles.yml here:
|
||||
|
||||
{profiles_file}/profiles.yml
|
||||
""".format(profiles_file=DEFAULT_PROFILES_DIR)
|
||||
""".format(
|
||||
profiles_file=DEFAULT_PROFILES_DIR
|
||||
)
|
||||
|
||||
|
||||
def read_profile(profiles_dir: str) -> Dict[str, Any]:
|
||||
path = os.path.join(profiles_dir, 'profiles.yml')
|
||||
path = os.path.join(profiles_dir, "profiles.yml")
|
||||
|
||||
contents = None
|
||||
if os.path.isfile(path):
|
||||
@@ -55,12 +57,8 @@ def read_profile(profiles_dir: str) -> Dict[str, Any]:
|
||||
contents = load_file_contents(path, strip=False)
|
||||
yaml_content = load_yaml_text(contents)
|
||||
if not yaml_content:
|
||||
msg = f'The profiles.yml file at {path} is empty'
|
||||
raise DbtProfileError(
|
||||
INVALID_PROFILE_MESSAGE.format(
|
||||
error_string=msg
|
||||
)
|
||||
)
|
||||
msg = f"The profiles.yml file at {path} is empty"
|
||||
raise DbtProfileError(INVALID_PROFILE_MESSAGE.format(error_string=msg))
|
||||
return yaml_content
|
||||
except ValidationException as e:
|
||||
msg = INVALID_PROFILE_MESSAGE.format(error_string=e)
|
||||
@@ -73,7 +71,7 @@ def read_user_config(directory: str) -> UserConfig:
|
||||
try:
|
||||
profile = read_profile(directory)
|
||||
if profile:
|
||||
user_config = coerce_dict_str(profile.get('config', {}))
|
||||
user_config = coerce_dict_str(profile.get("config", {}))
|
||||
if user_config is not None:
|
||||
UserConfig.validate(user_config)
|
||||
return UserConfig.from_dict(user_config)
|
||||
@@ -100,7 +98,7 @@ class Profile(HasCredentials):
|
||||
target_name: str,
|
||||
user_config: UserConfig,
|
||||
threads: int,
|
||||
credentials: Credentials
|
||||
credentials: Credentials,
|
||||
):
|
||||
"""Explicitly defining `__init__` to work around bug in Python 3.9.7
|
||||
https://bugs.python.org/issue45081
|
||||
@@ -112,9 +110,7 @@ class Profile(HasCredentials):
|
||||
self.credentials = credentials
|
||||
self.profile_env_vars = {} # never available on init
|
||||
|
||||
def to_profile_info(
|
||||
self, serialize_credentials: bool = False
|
||||
) -> Dict[str, Any]:
|
||||
def to_profile_info(self, serialize_credentials: bool = False) -> Dict[str, Any]:
|
||||
"""Unlike to_project_config, this dict is not a mirror of any existing
|
||||
on-disk data structure. It's used when creating a new profile from an
|
||||
existing one.
|
||||
@@ -124,34 +120,33 @@ class Profile(HasCredentials):
|
||||
:returns dict: The serialized profile.
|
||||
"""
|
||||
result = {
|
||||
'profile_name': self.profile_name,
|
||||
'target_name': self.target_name,
|
||||
'user_config': self.user_config,
|
||||
'threads': self.threads,
|
||||
'credentials': self.credentials,
|
||||
"profile_name": self.profile_name,
|
||||
"target_name": self.target_name,
|
||||
"user_config": self.user_config,
|
||||
"threads": self.threads,
|
||||
"credentials": self.credentials,
|
||||
}
|
||||
if serialize_credentials:
|
||||
result['user_config'] = self.user_config.to_dict(omit_none=True)
|
||||
result['credentials'] = self.credentials.to_dict(omit_none=True)
|
||||
result["user_config"] = self.user_config.to_dict(omit_none=True)
|
||||
result["credentials"] = self.credentials.to_dict(omit_none=True)
|
||||
return result
|
||||
|
||||
def to_target_dict(self) -> Dict[str, Any]:
|
||||
target = dict(
|
||||
self.credentials.connection_info(with_aliases=True)
|
||||
target = dict(self.credentials.connection_info(with_aliases=True))
|
||||
target.update(
|
||||
{
|
||||
"type": self.credentials.type,
|
||||
"threads": self.threads,
|
||||
"name": self.target_name,
|
||||
"target_name": self.target_name,
|
||||
"profile_name": self.profile_name,
|
||||
"config": self.user_config.to_dict(omit_none=True),
|
||||
}
|
||||
)
|
||||
target.update({
|
||||
'type': self.credentials.type,
|
||||
'threads': self.threads,
|
||||
'name': self.target_name,
|
||||
'target_name': self.target_name,
|
||||
'profile_name': self.profile_name,
|
||||
'config': self.user_config.to_dict(omit_none=True),
|
||||
})
|
||||
return target
|
||||
|
||||
def __eq__(self, other: object) -> bool:
|
||||
if not (isinstance(other, self.__class__) and
|
||||
isinstance(self, other.__class__)):
|
||||
if not (isinstance(other, self.__class__) and isinstance(self, other.__class__)):
|
||||
return NotImplemented
|
||||
return self.to_profile_info() == other.to_profile_info()
|
||||
|
||||
@@ -171,14 +166,17 @@ class Profile(HasCredentials):
|
||||
) -> Credentials:
|
||||
# avoid an import cycle
|
||||
from dbt.adapters.factory import load_plugin
|
||||
|
||||
# credentials carry their 'type' in their actual type, not their
|
||||
# attributes. We do want this in order to pick our Credentials class.
|
||||
if 'type' not in profile:
|
||||
if "type" not in profile:
|
||||
raise DbtProfileError(
|
||||
'required field "type" not found in profile {} and target {}'
|
||||
.format(profile_name, target_name))
|
||||
'required field "type" not found in profile {} and target {}'.format(
|
||||
profile_name, target_name
|
||||
)
|
||||
)
|
||||
|
||||
typename = profile.pop('type')
|
||||
typename = profile.pop("type")
|
||||
try:
|
||||
cls = load_plugin(typename)
|
||||
data = cls.translate_aliases(profile)
|
||||
@@ -187,8 +185,9 @@ class Profile(HasCredentials):
|
||||
except (RuntimeException, ValidationError) as e:
|
||||
msg = str(e) if isinstance(e, RuntimeException) else e.message
|
||||
raise DbtProfileError(
|
||||
'Credentials in profile "{}", target "{}" invalid: {}'
|
||||
.format(profile_name, target_name, msg)
|
||||
'Credentials in profile "{}", target "{}" invalid: {}'.format(
|
||||
profile_name, target_name, msg
|
||||
)
|
||||
) from e
|
||||
|
||||
return credentials
|
||||
@@ -209,19 +208,19 @@ class Profile(HasCredentials):
|
||||
def _get_profile_data(
|
||||
profile: Dict[str, Any], profile_name: str, target_name: str
|
||||
) -> Dict[str, Any]:
|
||||
if 'outputs' not in profile:
|
||||
raise DbtProfileError(
|
||||
"outputs not specified in profile '{}'".format(profile_name)
|
||||
)
|
||||
outputs = profile['outputs']
|
||||
if "outputs" not in profile:
|
||||
raise DbtProfileError("outputs not specified in profile '{}'".format(profile_name))
|
||||
outputs = profile["outputs"]
|
||||
|
||||
if target_name not in outputs:
|
||||
outputs = '\n'.join(' - {}'.format(output)
|
||||
for output in outputs)
|
||||
msg = ("The profile '{}' does not have a target named '{}'. The "
|
||||
"valid target names for this profile are:\n{}"
|
||||
.format(profile_name, target_name, outputs))
|
||||
raise DbtProfileError(msg, result_type='invalid_target')
|
||||
outputs = "\n".join(" - {}".format(output) for output in outputs)
|
||||
msg = (
|
||||
"The profile '{}' does not have a target named '{}'. The "
|
||||
"valid target names for this profile are:\n{}".format(
|
||||
profile_name, target_name, outputs
|
||||
)
|
||||
)
|
||||
raise DbtProfileError(msg, result_type="invalid_target")
|
||||
profile_data = outputs[target_name]
|
||||
|
||||
if not isinstance(profile_data, dict):
|
||||
@@ -229,7 +228,7 @@ class Profile(HasCredentials):
|
||||
f"output '{target_name}' of profile '{profile_name}' is "
|
||||
f"misconfigured in profiles.yml"
|
||||
)
|
||||
raise DbtProfileError(msg, result_type='invalid_target')
|
||||
raise DbtProfileError(msg, result_type="invalid_target")
|
||||
|
||||
return profile_data
|
||||
|
||||
@@ -240,8 +239,8 @@ class Profile(HasCredentials):
|
||||
threads: int,
|
||||
profile_name: str,
|
||||
target_name: str,
|
||||
user_config: Optional[Dict[str, Any]] = None
|
||||
) -> 'Profile':
|
||||
user_config: Optional[Dict[str, Any]] = None,
|
||||
) -> "Profile":
|
||||
"""Create a profile from an existing set of Credentials and the
|
||||
remaining information.
|
||||
|
||||
@@ -264,7 +263,7 @@ class Profile(HasCredentials):
|
||||
target_name=target_name,
|
||||
user_config=user_config_obj,
|
||||
threads=threads,
|
||||
credentials=credentials
|
||||
credentials=credentials,
|
||||
)
|
||||
profile.validate()
|
||||
return profile
|
||||
@@ -289,16 +288,14 @@ class Profile(HasCredentials):
|
||||
# name to extract a profile that we can render.
|
||||
if target_override is not None:
|
||||
target_name = target_override
|
||||
elif 'target' in raw_profile:
|
||||
elif "target" in raw_profile:
|
||||
# render the target if it was parsed from yaml
|
||||
target_name = renderer.render_value(raw_profile['target'])
|
||||
target_name = renderer.render_value(raw_profile["target"])
|
||||
else:
|
||||
target_name = 'default'
|
||||
target_name = "default"
|
||||
fire_event(MissingProfileTarget(profile_name=profile_name, target_name=target_name))
|
||||
|
||||
raw_profile_data = cls._get_profile_data(
|
||||
raw_profile, profile_name, target_name
|
||||
)
|
||||
raw_profile_data = cls._get_profile_data(raw_profile, profile_name, target_name)
|
||||
|
||||
try:
|
||||
profile_data = renderer.render_data(raw_profile_data)
|
||||
@@ -315,7 +312,7 @@ class Profile(HasCredentials):
|
||||
user_config: Optional[Dict[str, Any]] = None,
|
||||
target_override: Optional[str] = None,
|
||||
threads_override: Optional[int] = None,
|
||||
) -> 'Profile':
|
||||
) -> "Profile":
|
||||
"""Create a profile from its raw profile information.
|
||||
|
||||
(this is an intermediate step, mostly useful for unit testing)
|
||||
@@ -336,7 +333,7 @@ class Profile(HasCredentials):
|
||||
"""
|
||||
# user_config is not rendered.
|
||||
if user_config is None:
|
||||
user_config = raw_profile.get('config')
|
||||
user_config = raw_profile.get("config")
|
||||
# TODO: should it be, and the values coerced to bool?
|
||||
target_name, profile_data = cls.render_profile(
|
||||
raw_profile, profile_name, target_override, renderer
|
||||
@@ -344,7 +341,7 @@ class Profile(HasCredentials):
|
||||
|
||||
# valid connections never include the number of threads, but it's
|
||||
# stored on a per-connection level in the raw configs
|
||||
threads = profile_data.pop('threads', DEFAULT_THREADS)
|
||||
threads = profile_data.pop("threads", DEFAULT_THREADS)
|
||||
if threads_override is not None:
|
||||
threads = threads_override
|
||||
|
||||
@@ -357,7 +354,7 @@ class Profile(HasCredentials):
|
||||
profile_name=profile_name,
|
||||
target_name=target_name,
|
||||
threads=threads,
|
||||
user_config=user_config
|
||||
user_config=user_config,
|
||||
)
|
||||
|
||||
@classmethod
|
||||
@@ -368,7 +365,7 @@ class Profile(HasCredentials):
|
||||
renderer: ProfileRenderer,
|
||||
target_override: Optional[str] = None,
|
||||
threads_override: Optional[int] = None,
|
||||
) -> 'Profile':
|
||||
) -> "Profile":
|
||||
"""
|
||||
:param raw_profiles: The profile data, from disk as yaml.
|
||||
:param profile_name: The profile name to use.
|
||||
@@ -384,23 +381,15 @@ class Profile(HasCredentials):
|
||||
:returns: The new Profile object.
|
||||
"""
|
||||
if profile_name not in raw_profiles:
|
||||
raise DbtProjectError(
|
||||
"Could not find profile named '{}'".format(profile_name)
|
||||
)
|
||||
raise DbtProjectError("Could not find profile named '{}'".format(profile_name))
|
||||
|
||||
# First, we've already got our final decision on profile name, and we
|
||||
# don't render keys, so we can pluck that out
|
||||
raw_profile = raw_profiles[profile_name]
|
||||
if not raw_profile:
|
||||
msg = (
|
||||
f'Profile {profile_name} in profiles.yml is empty'
|
||||
)
|
||||
raise DbtProfileError(
|
||||
INVALID_PROFILE_MESSAGE.format(
|
||||
error_string=msg
|
||||
)
|
||||
)
|
||||
user_config = raw_profiles.get('config')
|
||||
msg = f"Profile {profile_name} in profiles.yml is empty"
|
||||
raise DbtProfileError(INVALID_PROFILE_MESSAGE.format(error_string=msg))
|
||||
user_config = raw_profiles.get("config")
|
||||
|
||||
return cls.from_raw_profile_info(
|
||||
raw_profile=raw_profile,
|
||||
@@ -417,7 +406,7 @@ class Profile(HasCredentials):
|
||||
args: Any,
|
||||
renderer: ProfileRenderer,
|
||||
project_profile_name: Optional[str],
|
||||
) -> 'Profile':
|
||||
) -> "Profile":
|
||||
"""Given the raw profiles as read from disk and the name of the desired
|
||||
profile if specified, return the profile component of the runtime
|
||||
config.
|
||||
@@ -432,15 +421,14 @@ class Profile(HasCredentials):
|
||||
target could not be found.
|
||||
:returns Profile: The new Profile object.
|
||||
"""
|
||||
threads_override = getattr(args, 'threads', None)
|
||||
target_override = getattr(args, 'target', None)
|
||||
threads_override = getattr(args, "threads", None)
|
||||
target_override = getattr(args, "target", None)
|
||||
raw_profiles = read_profile(flags.PROFILES_DIR)
|
||||
profile_name = cls.pick_profile_name(getattr(args, 'profile', None),
|
||||
project_profile_name)
|
||||
profile_name = cls.pick_profile_name(getattr(args, "profile", None), project_profile_name)
|
||||
return cls.from_raw_profiles(
|
||||
raw_profiles=raw_profiles,
|
||||
profile_name=profile_name,
|
||||
renderer=renderer,
|
||||
target_override=target_override,
|
||||
threads_override=threads_override
|
||||
threads_override=threads_override,
|
||||
)
|
||||
|
||||
@@ -2,7 +2,13 @@ from copy import deepcopy
|
||||
from dataclasses import dataclass, field
|
||||
from itertools import chain
|
||||
from typing import (
|
||||
List, Dict, Any, Optional, TypeVar, Union, Mapping,
|
||||
List,
|
||||
Dict,
|
||||
Any,
|
||||
Optional,
|
||||
TypeVar,
|
||||
Union,
|
||||
Mapping,
|
||||
)
|
||||
from typing_extensions import Protocol, runtime_checkable
|
||||
|
||||
@@ -45,7 +51,7 @@ INVALID_VERSION_ERROR = """\
|
||||
This version of dbt is not supported with the '{package}' package.
|
||||
Installed version of dbt: {installed}
|
||||
Required version of dbt for '{package}': {version_spec}
|
||||
Check the requirements for the '{package}' package, or run dbt again with \
|
||||
Check for a different version of the '{package}' package, or run dbt again with \
|
||||
--no-version-check
|
||||
"""
|
||||
|
||||
@@ -54,7 +60,7 @@ IMPOSSIBLE_VERSION_ERROR = """\
|
||||
The package version requirement can never be satisfied for the '{package}
|
||||
package.
|
||||
Required versions of dbt for '{package}': {version_spec}
|
||||
Check the requirements for the '{package}' package, or run dbt again with \
|
||||
Check for a different version of the '{package}' package, or run dbt again with \
|
||||
--no-version-check
|
||||
"""
|
||||
|
||||
@@ -83,9 +89,7 @@ def _load_yaml(path):
|
||||
|
||||
|
||||
def package_data_from_root(project_root):
|
||||
package_filepath = resolve_path_from_base(
|
||||
'packages.yml', project_root
|
||||
)
|
||||
package_filepath = resolve_path_from_base("packages.yml", project_root)
|
||||
|
||||
if path_exists(package_filepath):
|
||||
packages_dict = _load_yaml(package_filepath)
|
||||
@@ -96,15 +100,13 @@ def package_data_from_root(project_root):
|
||||
|
||||
def package_config_from_data(packages_data: Dict[str, Any]):
|
||||
if not packages_data:
|
||||
packages_data = {'packages': []}
|
||||
packages_data = {"packages": []}
|
||||
|
||||
try:
|
||||
PackageConfig.validate(packages_data)
|
||||
packages = PackageConfig.from_dict(packages_data)
|
||||
except ValidationError as e:
|
||||
raise DbtProjectError(
|
||||
MALFORMED_PACKAGE_ERROR.format(error=str(e.message))
|
||||
) from e
|
||||
raise DbtProjectError(MALFORMED_PACKAGE_ERROR.format(error=str(e.message))) from e
|
||||
return packages
|
||||
|
||||
|
||||
@@ -119,7 +121,7 @@ def _parse_versions(versions: Union[List[str], str]) -> List[VersionSpecifier]:
|
||||
Regardless, this will return a list of VersionSpecifiers
|
||||
"""
|
||||
if isinstance(versions, str):
|
||||
versions = versions.split(',')
|
||||
versions = versions.split(",")
|
||||
return [VersionSpecifier.from_version_string(v) for v in versions]
|
||||
|
||||
|
||||
@@ -130,11 +132,10 @@ def _all_source_paths(
|
||||
analysis_paths: List[str],
|
||||
macro_paths: List[str],
|
||||
) -> List[str]:
|
||||
return list(chain(model_paths, seed_paths, snapshot_paths, analysis_paths,
|
||||
macro_paths))
|
||||
return list(chain(model_paths, seed_paths, snapshot_paths, analysis_paths, macro_paths))
|
||||
|
||||
|
||||
T = TypeVar('T')
|
||||
T = TypeVar("T")
|
||||
|
||||
|
||||
def value_or(value: Optional[T], default: T) -> T:
|
||||
@@ -147,30 +148,27 @@ def value_or(value: Optional[T], default: T) -> T:
|
||||
def _raw_project_from(project_root: str) -> Dict[str, Any]:
|
||||
|
||||
project_root = os.path.normpath(project_root)
|
||||
project_yaml_filepath = os.path.join(project_root, 'dbt_project.yml')
|
||||
project_yaml_filepath = os.path.join(project_root, "dbt_project.yml")
|
||||
|
||||
# get the project.yml contents
|
||||
if not path_exists(project_yaml_filepath):
|
||||
raise DbtProjectError(
|
||||
'no dbt_project.yml found at expected path {}'
|
||||
.format(project_yaml_filepath)
|
||||
"no dbt_project.yml found at expected path {}".format(project_yaml_filepath)
|
||||
)
|
||||
|
||||
project_dict = _load_yaml(project_yaml_filepath)
|
||||
|
||||
if not isinstance(project_dict, dict):
|
||||
raise DbtProjectError(
|
||||
'dbt_project.yml does not parse to a dictionary'
|
||||
)
|
||||
raise DbtProjectError("dbt_project.yml does not parse to a dictionary")
|
||||
|
||||
return project_dict
|
||||
|
||||
|
||||
def _query_comment_from_cfg(
|
||||
cfg_query_comment: Union[QueryComment, NoValue, str, None]
|
||||
cfg_query_comment: Union[QueryComment, NoValue, str, None]
|
||||
) -> QueryComment:
|
||||
if not cfg_query_comment:
|
||||
return QueryComment(comment='')
|
||||
return QueryComment(comment="")
|
||||
|
||||
if isinstance(cfg_query_comment, str):
|
||||
return QueryComment(comment=cfg_query_comment)
|
||||
@@ -186,10 +184,7 @@ def validate_version(dbt_version: List[VersionSpecifier], project_name: str):
|
||||
installed = get_installed_version()
|
||||
if not versions_compatible(*dbt_version):
|
||||
msg = IMPOSSIBLE_VERSION_ERROR.format(
|
||||
package=project_name,
|
||||
version_spec=[
|
||||
x.to_version_string() for x in dbt_version
|
||||
]
|
||||
package=project_name, version_spec=[x.to_version_string() for x in dbt_version]
|
||||
)
|
||||
raise DbtProjectError(msg)
|
||||
|
||||
@@ -197,9 +192,7 @@ def validate_version(dbt_version: List[VersionSpecifier], project_name: str):
|
||||
msg = INVALID_VERSION_ERROR.format(
|
||||
package=project_name,
|
||||
installed=installed.to_version_string(),
|
||||
version_spec=[
|
||||
x.to_version_string() for x in dbt_version
|
||||
]
|
||||
version_spec=[x.to_version_string() for x in dbt_version],
|
||||
)
|
||||
raise DbtProjectError(msg)
|
||||
|
||||
@@ -208,8 +201,8 @@ def _get_required_version(
|
||||
project_dict: Dict[str, Any],
|
||||
verify_version: bool,
|
||||
) -> List[VersionSpecifier]:
|
||||
dbt_raw_version: Union[List[str], str] = '>=0.0.0'
|
||||
required = project_dict.get('require-dbt-version')
|
||||
dbt_raw_version: Union[List[str], str] = ">=0.0.0"
|
||||
required = project_dict.get("require-dbt-version")
|
||||
if required is not None:
|
||||
dbt_raw_version = required
|
||||
|
||||
@@ -220,46 +213,39 @@ def _get_required_version(
|
||||
|
||||
if verify_version:
|
||||
# no name is also an error that we want to raise
|
||||
if 'name' not in project_dict:
|
||||
if "name" not in project_dict:
|
||||
raise DbtProjectError(
|
||||
'Required "name" field not present in project',
|
||||
)
|
||||
validate_version(dbt_version, project_dict['name'])
|
||||
validate_version(dbt_version, project_dict["name"])
|
||||
|
||||
return dbt_version
|
||||
|
||||
|
||||
@dataclass
|
||||
class RenderComponents:
|
||||
project_dict: Dict[str, Any] = field(
|
||||
metadata=dict(description='The project dictionary')
|
||||
)
|
||||
packages_dict: Dict[str, Any] = field(
|
||||
metadata=dict(description='The packages dictionary')
|
||||
)
|
||||
selectors_dict: Dict[str, Any] = field(
|
||||
metadata=dict(description='The selectors dictionary')
|
||||
)
|
||||
project_dict: Dict[str, Any] = field(metadata=dict(description="The project dictionary"))
|
||||
packages_dict: Dict[str, Any] = field(metadata=dict(description="The packages dictionary"))
|
||||
selectors_dict: Dict[str, Any] = field(metadata=dict(description="The selectors dictionary"))
|
||||
|
||||
|
||||
@dataclass
|
||||
class PartialProject(RenderComponents):
|
||||
profile_name: Optional[str] = field(metadata=dict(
|
||||
description='The unrendered profile name in the project, if set'
|
||||
))
|
||||
project_name: Optional[str] = field(metadata=dict(
|
||||
description=(
|
||||
'The name of the project. This should always be set and will not '
|
||||
'be rendered'
|
||||
profile_name: Optional[str] = field(
|
||||
metadata=dict(description="The unrendered profile name in the project, if set")
|
||||
)
|
||||
project_name: Optional[str] = field(
|
||||
metadata=dict(
|
||||
description=(
|
||||
"The name of the project. This should always be set and will not " "be rendered"
|
||||
)
|
||||
)
|
||||
))
|
||||
)
|
||||
project_root: str = field(
|
||||
metadata=dict(description='The root directory of the project'),
|
||||
metadata=dict(description="The root directory of the project"),
|
||||
)
|
||||
verify_version: bool = field(
|
||||
metadata=dict(description=(
|
||||
'If True, verify the dbt version matches the required version'
|
||||
))
|
||||
metadata=dict(description=("If True, verify the dbt version matches the required version"))
|
||||
)
|
||||
|
||||
def render_profile_name(self, renderer) -> Optional[str]:
|
||||
@@ -272,9 +258,7 @@ class PartialProject(RenderComponents):
|
||||
renderer: DbtProjectYamlRenderer,
|
||||
) -> RenderComponents:
|
||||
|
||||
rendered_project = renderer.render_project(
|
||||
self.project_dict, self.project_root
|
||||
)
|
||||
rendered_project = renderer.render_project(self.project_dict, self.project_root)
|
||||
rendered_packages = renderer.render_packages(self.packages_dict)
|
||||
rendered_selectors = renderer.render_selectors(self.selectors_dict)
|
||||
|
||||
@@ -285,31 +269,34 @@ class PartialProject(RenderComponents):
|
||||
)
|
||||
|
||||
# Called by 'collect_parts' in RuntimeConfig
|
||||
def render(self, renderer: DbtProjectYamlRenderer) -> 'Project':
|
||||
def render(self, renderer: DbtProjectYamlRenderer) -> "Project":
|
||||
try:
|
||||
rendered = self.get_rendered(renderer)
|
||||
return self.create_project(rendered)
|
||||
except DbtProjectError as exc:
|
||||
if exc.path is None:
|
||||
exc.path = os.path.join(self.project_root, 'dbt_project.yml')
|
||||
exc.path = os.path.join(self.project_root, "dbt_project.yml")
|
||||
raise
|
||||
|
||||
def check_config_path(self, project_dict, deprecated_path, exp_path):
|
||||
if deprecated_path in project_dict:
|
||||
if exp_path in project_dict:
|
||||
msg = (
|
||||
'{deprecated_path} and {exp_path} cannot both be defined. The '
|
||||
'`{deprecated_path}` config has been deprecated in favor of `{exp_path}`. '
|
||||
'Please update your `dbt_project.yml` configuration to reflect this '
|
||||
'change.'
|
||||
"{deprecated_path} and {exp_path} cannot both be defined. The "
|
||||
"`{deprecated_path}` config has been deprecated in favor of `{exp_path}`. "
|
||||
"Please update your `dbt_project.yml` configuration to reflect this "
|
||||
"change."
|
||||
)
|
||||
raise DbtProjectError(msg.format(deprecated_path=deprecated_path,
|
||||
exp_path=exp_path))
|
||||
deprecations.warn(f'project-config-{deprecated_path}',
|
||||
deprecated_path=deprecated_path,
|
||||
exp_path=exp_path)
|
||||
raise DbtProjectError(
|
||||
msg.format(deprecated_path=deprecated_path, exp_path=exp_path)
|
||||
)
|
||||
deprecations.warn(
|
||||
f"project-config-{deprecated_path}",
|
||||
deprecated_path=deprecated_path,
|
||||
exp_path=exp_path,
|
||||
)
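As an illustration of the branching above (a sketch, not part of the diff): with only the deprecated key present the method fires a deprecation warning, and with both the deprecated and the new key present it raises. The stand-in below reimplements just that logic with a plain exception and a print, since the real `deprecations.warn` and `DbtProjectError` live elsewhere in dbt.

```python
def check_config_path_sketch(project_dict, deprecated_path, exp_path):
    # Simplified stand-in for PartialProject.check_config_path.
    if deprecated_path in project_dict:
        if exp_path in project_dict:
            raise ValueError(
                f"{deprecated_path} and {exp_path} cannot both be defined."
            )
        print(f"DEPRECATION: use {exp_path} instead of {deprecated_path}")


# 'source-paths' alone only warns; together with 'model-paths' it would error.
check_config_path_sketch({"source-paths": ["models"]}, "source-paths", "model-paths")
```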
def create_project(self, rendered: RenderComponents) -> 'Project':
|
||||
def create_project(self, rendered: RenderComponents) -> "Project":
|
||||
unrendered = RenderComponents(
|
||||
project_dict=self.project_dict,
|
||||
packages_dict=self.packages_dict,
|
||||
@@ -320,14 +307,12 @@ class PartialProject(RenderComponents):
|
||||
verify_version=self.verify_version,
|
||||
)
|
||||
|
||||
self.check_config_path(rendered.project_dict, 'source-paths', 'model-paths')
|
||||
self.check_config_path(rendered.project_dict, 'data-paths', 'seed-paths')
|
||||
self.check_config_path(rendered.project_dict, "source-paths", "model-paths")
|
||||
self.check_config_path(rendered.project_dict, "data-paths", "seed-paths")
|
||||
|
||||
try:
|
||||
ProjectContract.validate(rendered.project_dict)
|
||||
cfg = ProjectContract.from_dict(
|
||||
rendered.project_dict
|
||||
)
|
||||
cfg = ProjectContract.from_dict(rendered.project_dict)
|
||||
except ValidationError as e:
|
||||
raise DbtProjectError(validator_error_message(e)) from e
|
||||
# name/version are required in the Project definition, so we can assume
|
||||
@@ -337,7 +322,7 @@ class PartialProject(RenderComponents):
|
||||
# this is added at project_dict parse time and should always be here
|
||||
# once we see it.
|
||||
if cfg.project_root is None:
|
||||
raise DbtProjectError('cfg must have a project root!')
|
||||
raise DbtProjectError("cfg must have a project root!")
|
||||
else:
|
||||
project_root = cfg.project_root
|
||||
# this is only optional in the sense that if it's not present, it needs
|
||||
@@ -347,30 +332,30 @@ class PartialProject(RenderComponents):
|
||||
|
||||
# `source_paths` is deprecated but still allowed. Copy it into
|
||||
# `model_paths` to simlify logic throughout the rest of the system.
|
||||
model_paths: List[str] = value_or(cfg.model_paths
|
||||
if 'model-paths' in rendered.project_dict
|
||||
else cfg.source_paths, ['models'])
|
||||
macro_paths: List[str] = value_or(cfg.macro_paths, ['macros'])
|
||||
model_paths: List[str] = value_or(
|
||||
cfg.model_paths if "model-paths" in rendered.project_dict else cfg.source_paths,
|
||||
["models"],
|
||||
)
|
||||
macro_paths: List[str] = value_or(cfg.macro_paths, ["macros"])
|
||||
# `data_paths` is deprecated but still allowed. Copy it into
|
||||
# `seed_paths` to simlify logic throughout the rest of the system.
|
||||
seed_paths: List[str] = value_or(cfg.seed_paths
|
||||
if 'seed-paths' in rendered.project_dict
|
||||
else cfg.data_paths, ['seeds'])
|
||||
test_paths: List[str] = value_or(cfg.test_paths, ['tests'])
|
||||
analysis_paths: List[str] = value_or(cfg.analysis_paths, ['analyses'])
|
||||
snapshot_paths: List[str] = value_or(cfg.snapshot_paths, ['snapshots'])
|
||||
seed_paths: List[str] = value_or(
|
||||
cfg.seed_paths if "seed-paths" in rendered.project_dict else cfg.data_paths, ["seeds"]
|
||||
)
|
||||
test_paths: List[str] = value_or(cfg.test_paths, ["tests"])
|
||||
analysis_paths: List[str] = value_or(cfg.analysis_paths, ["analyses"])
|
||||
snapshot_paths: List[str] = value_or(cfg.snapshot_paths, ["snapshots"])
|
||||
|
||||
all_source_paths: List[str] = _all_source_paths(
|
||||
model_paths, seed_paths, snapshot_paths, analysis_paths,
|
||||
macro_paths
|
||||
model_paths, seed_paths, snapshot_paths, analysis_paths, macro_paths
|
||||
)
|
||||
|
||||
docs_paths: List[str] = value_or(cfg.docs_paths, all_source_paths)
|
||||
asset_paths: List[str] = value_or(cfg.asset_paths, [])
|
||||
target_path: str = value_or(cfg.target_path, 'target')
|
||||
target_path: str = value_or(cfg.target_path, "target")
|
||||
clean_targets: List[str] = value_or(cfg.clean_targets, [target_path])
|
||||
log_path: str = value_or(cfg.log_path, 'logs')
|
||||
packages_install_path: str = value_or(cfg.packages_install_path, 'dbt_packages')
|
||||
log_path: str = value_or(cfg.log_path, "logs")
|
||||
packages_install_path: str = value_or(cfg.packages_install_path, "dbt_packages")
|
||||
# in the default case we'll populate this once we know the adapter type
|
||||
# It would be nice to just pass along a Quoting here, but that would
|
||||
# break many things
|
||||
@@ -408,11 +393,12 @@ class PartialProject(RenderComponents):
|
||||
packages = package_config_from_data(rendered.packages_dict)
|
||||
selectors = selector_config_from_data(rendered.selectors_dict)
|
||||
manifest_selectors: Dict[str, Any] = {}
|
||||
if rendered.selectors_dict and rendered.selectors_dict['selectors']:
|
||||
if rendered.selectors_dict and rendered.selectors_dict["selectors"]:
|
||||
# this is a dict with a single key 'selectors' pointing to a list
|
||||
# of dicts.
|
||||
manifest_selectors = SelectorDict.parse_from_selectors_list(
|
||||
rendered.selectors_dict['selectors'])
|
||||
rendered.selectors_dict["selectors"]
|
||||
)
|
||||
project = Project(
|
||||
project_name=name,
|
||||
version=version,
|
||||
@@ -463,10 +449,9 @@ class PartialProject(RenderComponents):
|
||||
*,
|
||||
verify_version: bool = False,
|
||||
):
|
||||
"""Construct a partial project from its constituent dicts.
|
||||
"""
|
||||
project_name = project_dict.get('name')
|
||||
profile_name = project_dict.get('profile')
|
||||
"""Construct a partial project from its constituent dicts."""
|
||||
project_name = project_dict.get("name")
|
||||
profile_name = project_dict.get("profile")
|
||||
|
||||
return cls(
|
||||
profile_name=profile_name,
|
||||
@@ -481,14 +466,14 @@ class PartialProject(RenderComponents):
|
||||
@classmethod
|
||||
def from_project_root(
|
||||
cls, project_root: str, *, verify_version: bool = False
|
||||
) -> 'PartialProject':
|
||||
) -> "PartialProject":
|
||||
project_root = os.path.normpath(project_root)
|
||||
project_dict = _raw_project_from(project_root)
|
||||
config_version = project_dict.get('config-version', 1)
|
||||
config_version = project_dict.get("config-version", 1)
|
||||
if config_version != 2:
|
||||
raise DbtProjectError(
|
||||
f'Invalid config version: {config_version}, expected 2',
|
||||
path=os.path.join(project_root, 'dbt_project.yml')
|
||||
f"Invalid config version: {config_version}, expected 2",
|
||||
path=os.path.join(project_root, "dbt_project.yml"),
|
||||
)
|
||||
|
||||
packages_dict = package_data_from_root(project_root)
|
||||
@@ -505,15 +490,10 @@ class PartialProject(RenderComponents):
|
||||
class VarProvider:
|
||||
"""Var providers are tied to a particular Project."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
vars: Dict[str, Dict[str, Any]]
|
||||
) -> None:
|
||||
def __init__(self, vars: Dict[str, Dict[str, Any]]) -> None:
|
||||
self.vars = vars
|
||||
|
||||
def vars_for(
|
||||
self, node: IsFQNResource, adapter_type: str
|
||||
) -> Mapping[str, Any]:
|
||||
def vars_for(self, node: IsFQNResource, adapter_type: str) -> Mapping[str, Any]:
|
||||
# in v2, vars are only either project or globally scoped
|
||||
merged = MultiDict([self.vars])
|
||||
merged.add(self.vars.get(node.package_name, {}))
|
||||
@@ -565,15 +545,18 @@ class Project:
|
||||
@property
|
||||
def all_source_paths(self) -> List[str]:
|
||||
return _all_source_paths(
|
||||
self.model_paths, self.seed_paths, self.snapshot_paths,
|
||||
self.analysis_paths, self.macro_paths
|
||||
self.model_paths,
|
||||
self.seed_paths,
|
||||
self.snapshot_paths,
|
||||
self.analysis_paths,
|
||||
self.macro_paths,
|
||||
)
|
||||
|
||||
@property
|
||||
def generic_test_paths(self):
|
||||
generic_test_paths = []
|
||||
for test_path in self.test_paths:
|
||||
generic_test_paths.append(os.path.join(test_path, 'generic'))
|
||||
generic_test_paths.append(os.path.join(test_path, "generic"))
|
||||
return generic_test_paths
|
||||
|
||||
def __str__(self):
|
||||
@@ -581,11 +564,11 @@ class Project:
|
||||
return str(cfg)
|
||||
|
||||
def __eq__(self, other):
|
||||
if not (isinstance(other, self.__class__) and
|
||||
isinstance(self, other.__class__)):
|
||||
if not (isinstance(other, self.__class__) and isinstance(self, other.__class__)):
|
||||
return False
|
||||
return self.to_project_config(with_packages=True) == \
|
||||
other.to_project_config(with_packages=True)
|
||||
return self.to_project_config(with_packages=True) == other.to_project_config(
|
||||
with_packages=True
|
||||
)
|
||||
|
||||
def to_project_config(self, with_packages=False):
|
||||
"""Return a dict representation of the config that could be written to
|
||||
@@ -595,40 +578,39 @@ class Project:
|
||||
file in the root.
|
||||
:returns dict: The serialized profile.
|
||||
"""
|
||||
result = deepcopy({
|
||||
'name': self.project_name,
|
||||
'version': self.version,
|
||||
'project-root': self.project_root,
|
||||
'profile': self.profile_name,
|
||||
'model-paths': self.model_paths,
|
||||
'macro-paths': self.macro_paths,
|
||||
'seed-paths': self.seed_paths,
|
||||
'test-paths': self.test_paths,
|
||||
'analysis-paths': self.analysis_paths,
|
||||
'docs-paths': self.docs_paths,
|
||||
'asset-paths': self.asset_paths,
|
||||
'target-path': self.target_path,
|
||||
'snapshot-paths': self.snapshot_paths,
|
||||
'clean-targets': self.clean_targets,
|
||||
'log-path': self.log_path,
|
||||
'quoting': self.quoting,
|
||||
'models': self.models,
|
||||
'on-run-start': self.on_run_start,
|
||||
'on-run-end': self.on_run_end,
|
||||
'dispatch': self.dispatch,
|
||||
'seeds': self.seeds,
|
||||
'snapshots': self.snapshots,
|
||||
'sources': self.sources,
|
||||
'tests': self.tests,
|
||||
'vars': self.vars.to_dict(),
|
||||
'require-dbt-version': [
|
||||
v.to_version_string() for v in self.dbt_version
|
||||
],
|
||||
'config-version': self.config_version,
|
||||
})
|
||||
result = deepcopy(
|
||||
{
|
||||
"name": self.project_name,
|
||||
"version": self.version,
|
||||
"project-root": self.project_root,
|
||||
"profile": self.profile_name,
|
||||
"model-paths": self.model_paths,
|
||||
"macro-paths": self.macro_paths,
|
||||
"seed-paths": self.seed_paths,
|
||||
"test-paths": self.test_paths,
|
||||
"analysis-paths": self.analysis_paths,
|
||||
"docs-paths": self.docs_paths,
|
||||
"asset-paths": self.asset_paths,
|
||||
"target-path": self.target_path,
|
||||
"snapshot-paths": self.snapshot_paths,
|
||||
"clean-targets": self.clean_targets,
|
||||
"log-path": self.log_path,
|
||||
"quoting": self.quoting,
|
||||
"models": self.models,
|
||||
"on-run-start": self.on_run_start,
|
||||
"on-run-end": self.on_run_end,
|
||||
"dispatch": self.dispatch,
|
||||
"seeds": self.seeds,
|
||||
"snapshots": self.snapshots,
|
||||
"sources": self.sources,
|
||||
"tests": self.tests,
|
||||
"vars": self.vars.to_dict(),
|
||||
"require-dbt-version": [v.to_version_string() for v in self.dbt_version],
|
||||
"config-version": self.config_version,
|
||||
}
|
||||
)
|
||||
if self.query_comment:
|
||||
result['query-comment'] = \
|
||||
self.query_comment.to_dict(omit_none=True)
|
||||
result["query-comment"] = self.query_comment.to_dict(omit_none=True)
|
||||
|
||||
if with_packages:
|
||||
result.update(self.packages.to_dict(omit_none=True))
|
||||
@@ -642,9 +624,7 @@ class Project:
|
||||
raise DbtProjectError(validator_error_message(e)) from e
|
||||
|
||||
@classmethod
|
||||
def partial_load(
|
||||
cls, project_root: str, *, verify_version: bool = False
|
||||
) -> PartialProject:
|
||||
def partial_load(cls, project_root: str, *, verify_version: bool = False) -> PartialProject:
|
||||
return PartialProject.from_project_root(
|
||||
project_root,
|
||||
verify_version=verify_version,
|
||||
@@ -657,18 +637,17 @@ class Project:
|
||||
renderer: DbtProjectYamlRenderer,
|
||||
*,
|
||||
verify_version: bool = False,
|
||||
) -> 'Project':
|
||||
) -> "Project":
|
||||
partial = cls.partial_load(project_root, verify_version=verify_version)
|
||||
return partial.render(renderer)
|
||||
|
||||
def hashed_name(self):
|
||||
return hashlib.md5(self.project_name.encode('utf-8')).hexdigest()
|
||||
return hashlib.md5(self.project_name.encode("utf-8")).hexdigest()
|
||||
|
||||
def get_selector(self, name: str) -> Union[SelectionSpec, bool]:
|
||||
if name not in self.selectors:
|
||||
raise RuntimeException(
|
||||
f'Could not find selector named {name}, expected one of '
|
||||
f'{list(self.selectors)}'
|
||||
f"Could not find selector named {name}, expected one of " f"{list(self.selectors)}"
|
||||
)
|
||||
return self.selectors[name]["definition"]
|
||||
|
||||
@@ -685,6 +664,6 @@ class Project:

    def get_macro_search_order(self, macro_namespace: str):
        for dispatch_entry in self.dispatch:
            if dispatch_entry['macro_namespace'] == macro_namespace:
                return dispatch_entry['search_order']
            if dispatch_entry["macro_namespace"] == macro_namespace:
                return dispatch_entry["search_order"]
        return None
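To make the lookup above concrete, a small sketch (assumed shapes, not part of the diff): `self.dispatch` is a list of dicts as configured under `dispatch:` in `dbt_project.yml`, each with a `macro_namespace` and a `search_order`, and the method returns the matching search order or `None`.

```python
from typing import Any, Dict, List, Optional

# Example shape of a rendered `dispatch:` config from dbt_project.yml.
dispatch: List[Dict[str, Any]] = [
    {"macro_namespace": "dbt_utils", "search_order": ["my_project", "dbt_utils"]},
]


def get_macro_search_order(macro_namespace: str) -> Optional[List[str]]:
    # Same loop as Project.get_macro_search_order, over a module-level list.
    for dispatch_entry in dispatch:
        if dispatch_entry["macro_namespace"] == macro_namespace:
            return dispatch_entry["search_order"]
    return None


print(get_macro_search_order("dbt_utils"))  # ['my_project', 'dbt_utils']
print(get_macro_search_order("other"))      # None
```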
@@ -5,9 +5,7 @@ from dbt.context.target import TargetContext
from dbt.context.secret import SecretContext
from dbt.context.base import BaseContext
from dbt.contracts.connection import HasCredentials
from dbt.exceptions import (
    DbtProjectError, CompilationException, RecursionException
)
from dbt.exceptions import DbtProjectError, CompilationException, RecursionException
from dbt.utils import deep_map_render

@@ -20,7 +18,7 @@ class BaseRenderer:

    @property
    def name(self):
        return 'Rendering'
        return "Rendering"

    def should_render_keypath(self, keypath: Keypath) -> bool:
        return True
@@ -31,9 +29,7 @@ class BaseRenderer:
|
||||
|
||||
return self.render_value(value, keypath)
|
||||
|
||||
def render_value(
|
||||
self, value: Any, keypath: Optional[Keypath] = None
|
||||
) -> Any:
|
||||
def render_value(self, value: Any, keypath: Optional[Keypath] = None) -> Any:
|
||||
# keypath is ignored.
|
||||
# if it wasn't read as a string, ignore it
|
||||
if not isinstance(value, str):
|
||||
@@ -42,18 +38,15 @@ class BaseRenderer:
|
||||
with catch_jinja():
|
||||
return get_rendered(value, self.context, native=True)
|
||||
except CompilationException as exc:
|
||||
msg = f'Could not render {value}: {exc.msg}'
|
||||
msg = f"Could not render {value}: {exc.msg}"
|
||||
raise CompilationException(msg) from exc
|
||||
|
||||
def render_data(
|
||||
self, data: Dict[str, Any]
|
||||
) -> Dict[str, Any]:
|
||||
def render_data(self, data: Dict[str, Any]) -> Dict[str, Any]:
|
||||
try:
|
||||
return deep_map_render(self.render_entry, data)
|
||||
except RecursionException:
|
||||
raise DbtProjectError(
|
||||
f'Cycle detected: {self.name} input has a reference to itself',
|
||||
project=data
|
||||
f"Cycle detected: {self.name} input has a reference to itself", project=data
|
||||
)
|
||||
|
||||
|
||||
@@ -80,15 +73,15 @@ class ProjectPostprocessor(Dict[Keypath, Callable[[Any], Any]]):
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
|
||||
self[('on-run-start',)] = _list_if_none_or_string
|
||||
self[('on-run-end',)] = _list_if_none_or_string
|
||||
self[("on-run-start",)] = _list_if_none_or_string
|
||||
self[("on-run-end",)] = _list_if_none_or_string
|
||||
|
||||
for k in ('models', 'seeds', 'snapshots'):
|
||||
for k in ("models", "seeds", "snapshots"):
|
||||
self[(k,)] = _dict_if_none
|
||||
self[(k, 'vars')] = _dict_if_none
|
||||
self[(k, 'pre-hook')] = _list_if_none_or_string
|
||||
self[(k, 'post-hook')] = _list_if_none_or_string
|
||||
self[('seeds', 'column_types')] = _dict_if_none
|
||||
self[(k, "vars")] = _dict_if_none
|
||||
self[(k, "pre-hook")] = _list_if_none_or_string
|
||||
self[(k, "post-hook")] = _list_if_none_or_string
|
||||
self[("seeds", "column_types")] = _dict_if_none
|
||||
|
||||
def postprocess(self, value: Any, key: Keypath) -> Any:
|
||||
if key in self:
|
||||
@@ -102,8 +95,7 @@ class DbtProjectYamlRenderer(BaseRenderer):
|
||||
_KEYPATH_HANDLERS = ProjectPostprocessor()
|
||||
|
||||
def __init__(
|
||||
self, profile: Optional[HasCredentials] = None,
|
||||
cli_vars: Optional[Dict[str, Any]] = None
|
||||
self, profile: Optional[HasCredentials] = None, cli_vars: Optional[Dict[str, Any]] = None
|
||||
) -> None:
|
||||
# Generate contexts here because we want to save the context
|
||||
# object in order to retrieve the env_vars. This is almost always
|
||||
@@ -120,7 +112,7 @@ class DbtProjectYamlRenderer(BaseRenderer):
|
||||
|
||||
@property
|
||||
def name(self):
|
||||
'Project config'
|
||||
"Project config"
|
||||
|
||||
def get_package_renderer(self) -> BaseRenderer:
|
||||
return PackageRenderer(self.context)
|
||||
@@ -135,7 +127,7 @@ class DbtProjectYamlRenderer(BaseRenderer):
|
||||
) -> Dict[str, Any]:
|
||||
"""Render the project and insert the project root after rendering."""
|
||||
rendered_project = self.render_data(project)
|
||||
rendered_project['project-root'] = project_root
|
||||
rendered_project["project-root"] = project_root
|
||||
return rendered_project
|
||||
|
||||
def render_packages(self, packages: Dict[str, Any]):
|
||||
@@ -157,20 +149,17 @@ class DbtProjectYamlRenderer(BaseRenderer):
|
||||
|
||||
first = keypath[0]
|
||||
# run hooks are not rendered
|
||||
if first in {'on-run-start', 'on-run-end', 'query-comment'}:
|
||||
if first in {"on-run-start", "on-run-end", "query-comment"}:
|
||||
return False
|
||||
|
||||
# don't render vars blocks until runtime
|
||||
if first == 'vars':
|
||||
if first == "vars":
|
||||
return False
|
||||
|
||||
if first in {'seeds', 'models', 'snapshots', 'tests'}:
|
||||
keypath_parts = {
|
||||
(k.lstrip('+ ') if isinstance(k, str) else k)
|
||||
for k in keypath
|
||||
}
|
||||
if first in {"seeds", "models", "snapshots", "tests"}:
|
||||
keypath_parts = {(k.lstrip("+ ") if isinstance(k, str) else k) for k in keypath}
|
||||
# model-level hooks
|
||||
if 'pre-hook' in keypath_parts or 'post-hook' in keypath_parts:
|
||||
if "pre-hook" in keypath_parts or "post-hook" in keypath_parts:
|
||||
return False
|
||||
|
||||
return True
|
||||
@@ -179,13 +168,11 @@ class DbtProjectYamlRenderer(BaseRenderer):
|
||||
class SelectorRenderer(BaseRenderer):
|
||||
@property
|
||||
def name(self):
|
||||
return 'Selector config'
|
||||
return "Selector config"
|
||||
|
||||
|
||||
class SecretRenderer(BaseRenderer):
|
||||
def __init__(
|
||||
self, cli_vars: Optional[Dict[str, Any]] = None
|
||||
) -> None:
|
||||
def __init__(self, cli_vars: Optional[Dict[str, Any]] = None) -> None:
|
||||
# Generate contexts here because we want to save the context
|
||||
# object in order to retrieve the env_vars.
|
||||
if cli_vars is None:
|
||||
@@ -196,16 +183,16 @@ class SecretRenderer(BaseRenderer):
|
||||
|
||||
@property
|
||||
def name(self):
|
||||
return 'Secret'
|
||||
return "Secret"
|
||||
|
||||
|
||||
class ProfileRenderer(SecretRenderer):
|
||||
@property
|
||||
def name(self):
|
||||
return 'Profile'
|
||||
return "Profile"
|
||||
|
||||
|
||||
class PackageRenderer(SecretRenderer):
|
||||
@property
|
||||
def name(self):
|
||||
return 'Packages config'
|
||||
return "Packages config"
|
||||
|
||||
@@ -1,44 +1,36 @@
|
||||
import itertools
|
||||
import os
|
||||
from copy import deepcopy
|
||||
from dataclasses import dataclass, fields
|
||||
from dataclasses import dataclass
|
||||
from pathlib import Path
|
||||
from typing import (
|
||||
Dict, Any, Optional, Mapping, Iterator, Iterable, Tuple, List, MutableSet,
|
||||
Type
|
||||
)
|
||||
from typing import Dict, Any, Optional, Mapping, Iterator, Iterable, Tuple, List, MutableSet, Type
|
||||
|
||||
from .profile import Profile
|
||||
from .project import Project
|
||||
from .renderer import DbtProjectYamlRenderer, ProfileRenderer
|
||||
from .utils import parse_cli_vars
|
||||
from dbt import flags
|
||||
from dbt import tracking
|
||||
from dbt.adapters.factory import get_relation_class_by_name, get_include_paths
|
||||
from dbt.helper_types import FQNPath, PathSet
|
||||
from dbt.helper_types import FQNPath, PathSet, DictDefaultEmptyStr
|
||||
from dbt.config.profile import read_user_config
|
||||
from dbt.contracts.connection import AdapterRequiredConfig, Credentials
|
||||
from dbt.contracts.graph.manifest import ManifestMetadata
|
||||
from dbt.contracts.relation import ComponentName
|
||||
from dbt.events.types import ProfileLoadError, ProfileNotFound
|
||||
from dbt.events.functions import fire_event
|
||||
from dbt.ui import warning_tag
|
||||
|
||||
from dbt.contracts.project import Configuration, UserConfig
|
||||
from dbt.exceptions import (
|
||||
RuntimeException,
|
||||
DbtProfileError,
|
||||
DbtProjectError,
|
||||
validator_error_message,
|
||||
warn_or_error,
|
||||
raise_compiler_error
|
||||
raise_compiler_error,
|
||||
)
|
||||
|
||||
from dbt.dataclass_schema import ValidationError
|
||||
|
||||
|
||||
def _project_quoting_dict(
|
||||
proj: Project, profile: Profile
|
||||
) -> Dict[ComponentName, bool]:
|
||||
def _project_quoting_dict(proj: Project, profile: Profile) -> Dict[ComponentName, bool]:
|
||||
src: Dict[str, Any] = profile.credentials.translate_aliases(proj.quoting)
|
||||
result: Dict[ComponentName, bool] = {}
|
||||
for key in ComponentName:
|
||||
@@ -54,7 +46,7 @@ class RuntimeConfig(Project, Profile, AdapterRequiredConfig):
|
||||
args: Any
|
||||
profile_name: str
|
||||
cli_vars: Dict[str, Any]
|
||||
dependencies: Optional[Mapping[str, 'RuntimeConfig']] = None
|
||||
dependencies: Optional[Mapping[str, "RuntimeConfig"]] = None
|
||||
|
||||
def __post_init__(self):
|
||||
self.validate()
|
||||
@@ -66,8 +58,8 @@ class RuntimeConfig(Project, Profile, AdapterRequiredConfig):
|
||||
project: Project,
|
||||
profile: Profile,
|
||||
args: Any,
|
||||
dependencies: Optional[Mapping[str, 'RuntimeConfig']] = None,
|
||||
) -> 'RuntimeConfig':
|
||||
dependencies: Optional[Mapping[str, "RuntimeConfig"]] = None,
|
||||
) -> "RuntimeConfig":
|
||||
"""Instantiate a RuntimeConfig from its components.
|
||||
|
||||
:param profile: A parsed dbt Profile.
|
||||
@@ -81,7 +73,7 @@ class RuntimeConfig(Project, Profile, AdapterRequiredConfig):
|
||||
.replace_dict(_project_quoting_dict(project, profile))
|
||||
).to_dict(omit_none=True)
|
||||
|
||||
cli_vars: Dict[str, Any] = parse_cli_vars(getattr(args, 'vars', '{}'))
|
||||
cli_vars: Dict[str, Any] = parse_cli_vars(getattr(args, "vars", "{}"))
|
||||
|
||||
return cls(
|
||||
project_name=project.project_name,
|
||||
@@ -129,7 +121,7 @@ class RuntimeConfig(Project, Profile, AdapterRequiredConfig):
|
||||
)
|
||||
|
||||
# Called by 'load_projects' in this class
|
||||
def new_project(self, project_root: str) -> 'RuntimeConfig':
|
||||
def new_project(self, project_root: str) -> "RuntimeConfig":
|
||||
"""Given a new project root, read in its project dictionary, supply the
|
||||
existing project's profile info, and create a new project file.
|
||||
|
||||
@@ -171,7 +163,7 @@ class RuntimeConfig(Project, Profile, AdapterRequiredConfig):
|
||||
"""
|
||||
result = self.to_project_config(with_packages=True)
|
||||
result.update(self.to_profile_info(serialize_credentials=True))
|
||||
result['cli_vars'] = deepcopy(self.cli_vars)
|
||||
result["cli_vars"] = deepcopy(self.cli_vars)
|
||||
return result
|
||||
|
||||
def validate(self):
|
||||
@@ -191,31 +183,23 @@ class RuntimeConfig(Project, Profile, AdapterRequiredConfig):
|
||||
profile_renderer: ProfileRenderer,
|
||||
profile_name: Optional[str],
|
||||
) -> Profile:
|
||||
return Profile.render_from_args(
|
||||
args, profile_renderer, profile_name
|
||||
)
|
||||
|
||||
return Profile.render_from_args(args, profile_renderer, profile_name)
|
||||
|
||||
@classmethod
|
||||
def collect_parts(
|
||||
cls: Type['RuntimeConfig'], args: Any
|
||||
) -> Tuple[Project, Profile]:
|
||||
def collect_parts(cls: Type["RuntimeConfig"], args: Any) -> Tuple[Project, Profile]:
|
||||
# profile_name from the project
|
||||
project_root = args.project_dir if args.project_dir else os.getcwd()
|
||||
version_check = bool(flags.VERSION_CHECK)
|
||||
partial = Project.partial_load(
|
||||
project_root,
|
||||
verify_version=version_check
|
||||
)
|
||||
partial = Project.partial_load(project_root, verify_version=version_check)
|
||||
|
||||
# build the profile using the base renderer and the one fact we know
|
||||
# Note: only the named profile section is rendered. The rest of the
|
||||
# profile is ignored.
|
||||
cli_vars: Dict[str, Any] = parse_cli_vars(getattr(args, 'vars', '{}'))
|
||||
cli_vars: Dict[str, Any] = parse_cli_vars(getattr(args, "vars", "{}"))
|
||||
profile_renderer = ProfileRenderer(cli_vars)
|
||||
profile_name = partial.render_profile_name(profile_renderer)
|
||||
profile = cls._get_rendered_profile(
|
||||
args, profile_renderer, profile_name
|
||||
)
|
||||
profile = cls._get_rendered_profile(args, profile_renderer, profile_name)
|
||||
# Save env_vars encountered in rendering for partial parsing
|
||||
profile.profile_env_vars = profile_renderer.ctx_obj.env_vars
|
||||
|
||||
@@ -229,7 +213,7 @@ class RuntimeConfig(Project, Profile, AdapterRequiredConfig):
|
||||
|
||||
# Called in main.py, lib.py, task/base.py
|
||||
@classmethod
|
||||
def from_args(cls, args: Any) -> 'RuntimeConfig':
|
||||
def from_args(cls, args: Any) -> "RuntimeConfig":
|
||||
"""Given arguments, read in dbt_project.yml from the current directory,
|
||||
read in packages.yml if it exists, and use them to find the profile to
|
||||
load.
|
||||
@@ -248,10 +232,7 @@ class RuntimeConfig(Project, Profile, AdapterRequiredConfig):
|
||||
)
|
||||
|
||||
def get_metadata(self) -> ManifestMetadata:
|
||||
return ManifestMetadata(
|
||||
project_id=self.hashed_name(),
|
||||
adapter_type=self.credentials.type
|
||||
)
|
||||
return ManifestMetadata(project_id=self.hashed_name(), adapter_type=self.credentials.type)
|
||||
|
||||
def _get_v2_config_paths(
|
||||
self,
|
||||
@@ -260,7 +241,7 @@ class RuntimeConfig(Project, Profile, AdapterRequiredConfig):
|
||||
paths: MutableSet[FQNPath],
|
||||
) -> PathSet:
|
||||
for key, value in config.items():
|
||||
if isinstance(value, dict) and not key.startswith('+'):
|
||||
if isinstance(value, dict) and not key.startswith("+"):
|
||||
self._get_config_paths(value, path + (key,), paths)
|
||||
else:
|
||||
paths.add(path)
|
||||
@@ -276,7 +257,7 @@ class RuntimeConfig(Project, Profile, AdapterRequiredConfig):
|
||||
paths = set()
|
||||
|
||||
for key, value in config.items():
|
||||
if isinstance(value, dict) and not key.startswith('+'):
|
||||
if isinstance(value, dict) and not key.startswith("+"):
|
||||
self._get_v2_config_paths(value, path + (key,), paths)
|
||||
else:
|
||||
paths.add(path)
|
||||
@@ -288,11 +269,11 @@ class RuntimeConfig(Project, Profile, AdapterRequiredConfig):
|
||||
a configured path in the resource.
|
||||
"""
|
||||
return {
|
||||
'models': self._get_config_paths(self.models),
|
||||
'seeds': self._get_config_paths(self.seeds),
|
||||
'snapshots': self._get_config_paths(self.snapshots),
|
||||
'sources': self._get_config_paths(self.sources),
|
||||
'tests': self._get_config_paths(self.tests),
|
||||
"models": self._get_config_paths(self.models),
|
||||
"seeds": self._get_config_paths(self.seeds),
|
||||
"snapshots": self._get_config_paths(self.snapshots),
|
||||
"sources": self._get_config_paths(self.sources),
|
||||
"tests": self._get_config_paths(self.tests),
|
||||
}
|
||||
|
||||
def get_unused_resource_config_paths(
|
||||
@@ -313,9 +294,7 @@ class RuntimeConfig(Project, Profile, AdapterRequiredConfig):
|
||||
|
||||
for config_path in config_paths:
|
||||
if not _is_config_used(config_path, fqns):
|
||||
unused_resource_config_paths.append(
|
||||
(resource_type,) + config_path
|
||||
)
|
||||
unused_resource_config_paths.append((resource_type,) + config_path)
|
||||
return unused_resource_config_paths
|
||||
|
||||
def warn_for_unused_resource_config_paths(
|
||||
@@ -328,13 +307,12 @@ class RuntimeConfig(Project, Profile, AdapterRequiredConfig):
|
||||
return
|
||||
|
||||
msg = UNUSED_RESOURCE_CONFIGURATION_PATH_MESSAGE.format(
|
||||
len(unused),
|
||||
'\n'.join('- {}'.format('.'.join(u)) for u in unused)
|
||||
len(unused), "\n".join("- {}".format(".".join(u)) for u in unused)
|
||||
)
|
||||
|
||||
warn_or_error(msg, log_fmt=warning_tag('{}'))
|
||||
warn_or_error(msg, log_fmt=warning_tag("{}"))
|
||||
|
||||
def load_dependencies(self) -> Mapping[str, 'RuntimeConfig']:
|
||||
def load_dependencies(self) -> Mapping[str, "RuntimeConfig"]:
|
||||
if self.dependencies is None:
|
||||
all_projects = {self.project_name: self}
|
||||
internal_packages = get_include_paths(self.credentials.type)
|
||||
@@ -343,23 +321,20 @@ class RuntimeConfig(Project, Profile, AdapterRequiredConfig):
|
||||
count_packages_installed = len(tuple(self._get_project_directories()))
|
||||
if count_packages_specified > count_packages_installed:
|
||||
raise_compiler_error(
|
||||
f'dbt found {count_packages_specified} package(s) '
|
||||
f'specified in packages.yml, but only '
|
||||
f'{count_packages_installed} package(s) installed '
|
||||
f"dbt found {count_packages_specified} package(s) "
|
||||
f"specified in packages.yml, but only "
|
||||
f"{count_packages_installed} package(s) installed "
|
||||
f'in {self.packages_install_path}. Run "dbt deps" to '
|
||||
f'install package dependencies.'
|
||||
f"install package dependencies."
|
||||
)
|
||||
project_paths = itertools.chain(
|
||||
internal_packages,
|
||||
self._get_project_directories()
|
||||
)
|
||||
project_paths = itertools.chain(internal_packages, self._get_project_directories())
|
||||
for project_name, project in self.load_projects(project_paths):
|
||||
if project_name in all_projects:
|
||||
raise_compiler_error(
|
||||
f'dbt found more than one package with the name '
|
||||
f"dbt found more than one package with the name "
|
||||
f'"{project_name}" included in this project. Package '
|
||||
f'names must be unique in a project. Please rename '
|
||||
f'one of these packages.'
|
||||
f"names must be unique in a project. Please rename "
|
||||
f"one of these packages."
|
||||
)
|
||||
all_projects[project_name] = project
|
||||
self.dependencies = all_projects
|
||||
@@ -369,16 +344,14 @@ class RuntimeConfig(Project, Profile, AdapterRequiredConfig):
|
||||
self.dependencies = None
|
||||
|
||||
# Called by 'load_dependencies' in this class
|
||||
def load_projects(
|
||||
self, paths: Iterable[Path]
|
||||
) -> Iterator[Tuple[str, 'RuntimeConfig']]:
|
||||
def load_projects(self, paths: Iterable[Path]) -> Iterator[Tuple[str, "RuntimeConfig"]]:
|
||||
for path in paths:
|
||||
try:
|
||||
project = self.new_project(str(path))
|
||||
except DbtProjectError as e:
|
||||
raise DbtProjectError(
|
||||
f'Failed to read package: {e}',
|
||||
result_type='invalid_project',
|
||||
f"Failed to read package: {e}",
|
||||
result_type="invalid_project",
|
||||
path=path,
|
||||
) from e
|
||||
else:
|
||||
@@ -389,13 +362,13 @@ class RuntimeConfig(Project, Profile, AdapterRequiredConfig):
|
||||
|
||||
if root.exists():
|
||||
for path in root.iterdir():
|
||||
if path.is_dir() and not path.name.startswith('__'):
|
||||
if path.is_dir() and not path.name.startswith("__"):
|
||||
yield path
|
||||
|
||||
|
||||
class UnsetCredentials(Credentials):
|
||||
def __init__(self):
|
||||
super().__init__('', '')
|
||||
super().__init__("", "")
|
||||
|
||||
@property
|
||||
def type(self):
|
||||
@@ -412,37 +385,28 @@ class UnsetCredentials(Credentials):
|
||||
return ()
|
||||
|
||||
|
||||
class UnsetConfig(UserConfig):
|
||||
def __getattribute__(self, name):
|
||||
if name in {f.name for f in fields(UserConfig)}:
|
||||
raise AttributeError(
|
||||
f"'UnsetConfig' object has no attribute {name}"
|
||||
)
|
||||
|
||||
def __post_serialize__(self, dct):
|
||||
return {}
|
||||
|
||||
|
||||
# This is used by UnsetProfileConfig, for commands which do
|
||||
# not require a profile, i.e. dbt deps and clean
|
||||
class UnsetProfile(Profile):
|
||||
def __init__(self):
|
||||
self.credentials = UnsetCredentials()
|
||||
self.user_config = UnsetConfig()
|
||||
self.profile_name = ''
|
||||
self.target_name = ''
|
||||
self.user_config = UserConfig() # This will be read in _get_rendered_profile
|
||||
self.profile_name = ""
|
||||
self.target_name = ""
|
||||
self.threads = -1
|
||||
|
||||
def to_target_dict(self):
|
||||
return {}
|
||||
return DictDefaultEmptyStr({})
|
||||
|
||||
def __getattribute__(self, name):
|
||||
if name in {'profile_name', 'target_name', 'threads'}:
|
||||
raise RuntimeException(
|
||||
f'Error: disallowed attribute "{name}" - no profile!'
|
||||
)
|
||||
if name in {"profile_name", "target_name", "threads"}:
|
||||
raise RuntimeException(f'Error: disallowed attribute "{name}" - no profile!')
|
||||
|
||||
return Profile.__getattribute__(self, name)
|
||||
|
||||
|
||||
# This class is used by the dbt deps and clean commands, because they don't
|
||||
# require a functioning profile.
|
||||
@dataclass
|
||||
class UnsetProfileConfig(RuntimeConfig):
|
||||
"""This class acts a lot _like_ a RuntimeConfig, except if your profile is
|
||||
@@ -459,17 +423,15 @@ class UnsetProfileConfig(RuntimeConfig):
|
||||
|
||||
def __getattribute__(self, name):
|
||||
# Override __getattribute__ to check that the attribute isn't 'banned'.
|
||||
if name in {'profile_name', 'target_name'}:
|
||||
raise RuntimeException(
|
||||
f'Error: disallowed attribute "{name}" - no profile!'
|
||||
)
|
||||
if name in {"profile_name", "target_name"}:
|
||||
raise RuntimeException(f'Error: disallowed attribute "{name}" - no profile!')
|
||||
|
||||
# avoid every attribute access triggering infinite recursion
|
||||
return RuntimeConfig.__getattribute__(self, name)
|
||||
|
||||
def to_target_dict(self):
|
||||
# re-override the poisoned profile behavior
|
||||
return {}
|
||||
return DictDefaultEmptyStr({})
|
||||
|
||||
@classmethod
|
||||
def from_parts(
|
||||
@@ -477,8 +439,8 @@ class UnsetProfileConfig(RuntimeConfig):
|
||||
project: Project,
|
||||
profile: Profile,
|
||||
args: Any,
|
||||
dependencies: Optional[Mapping[str, 'RuntimeConfig']] = None,
|
||||
) -> 'RuntimeConfig':
|
||||
dependencies: Optional[Mapping[str, "RuntimeConfig"]] = None,
|
||||
) -> "RuntimeConfig":
|
||||
"""Instantiate a RuntimeConfig from its components.
|
||||
|
||||
:param profile: Ignored.
|
||||
@@ -486,7 +448,7 @@ class UnsetProfileConfig(RuntimeConfig):
|
||||
:param args: The parsed command-line arguments.
|
||||
:returns RuntimeConfig: The new configuration.
|
||||
"""
|
||||
cli_vars: Dict[str, Any] = parse_cli_vars(getattr(args, 'vars', '{}'))
|
||||
cli_vars: Dict[str, Any] = parse_cli_vars(getattr(args, "vars", "{}"))
|
||||
|
||||
return cls(
|
||||
project_name=project.project_name,
|
||||
@@ -523,10 +485,10 @@ class UnsetProfileConfig(RuntimeConfig):
|
||||
unrendered=project.unrendered,
|
||||
project_env_vars=project.project_env_vars,
|
||||
profile_env_vars=profile.profile_env_vars,
|
||||
profile_name='',
|
||||
target_name='',
|
||||
user_config=UnsetConfig(),
|
||||
threads=getattr(args, 'threads', 1),
|
||||
profile_name="",
|
||||
target_name="",
|
||||
user_config=UserConfig(),
|
||||
threads=getattr(args, "threads", 1),
|
||||
credentials=UnsetCredentials(),
|
||||
args=args,
|
||||
cli_vars=cli_vars,
|
||||
@@ -540,21 +502,16 @@ class UnsetProfileConfig(RuntimeConfig):
|
||||
profile_renderer: ProfileRenderer,
|
||||
profile_name: Optional[str],
|
||||
) -> Profile:
|
||||
try:
|
||||
profile = Profile.render_from_args(
|
||||
args, profile_renderer, profile_name
|
||||
)
|
||||
except (DbtProjectError, DbtProfileError) as exc:
|
||||
fire_event(ProfileLoadError(exc=exc))
|
||||
fire_event(ProfileNotFound(profile_name=profile_name))
|
||||
# return the poisoned form
|
||||
profile = UnsetProfile()
|
||||
# disable anonymous usage statistics
|
||||
tracking.disable_tracking()
|
||||
|
||||
profile = UnsetProfile()
|
||||
# The profile (for warehouse connection) is not needed, but we want
|
||||
# to get the UserConfig, which is also in profiles.yml
|
||||
user_config = read_user_config(flags.PROFILES_DIR)
|
||||
profile.user_config = user_config
|
||||
return profile
|
||||
|
||||
@classmethod
|
||||
def from_args(cls: Type[RuntimeConfig], args: Any) -> 'RuntimeConfig':
|
||||
def from_args(cls: Type[RuntimeConfig], args: Any) -> "RuntimeConfig":
|
||||
"""Given arguments, read in dbt_project.yml from the current directory,
|
||||
read in packages.yml if it exists, and use them to find the profile to
|
||||
load.
|
||||
@@ -565,15 +522,8 @@ class UnsetProfileConfig(RuntimeConfig):
|
||||
:raises ValidationException: If the cli variables are invalid.
|
||||
"""
|
||||
project, profile = cls.collect_parts(args)
|
||||
if not isinstance(profile, UnsetProfile):
|
||||
# if it's a real profile, return a real config
|
||||
cls = RuntimeConfig
|
||||
|
||||
return cls.from_parts(
|
||||
project=project,
|
||||
profile=profile,
|
||||
args=args
|
||||
)
|
||||
return cls.from_parts(project=project, profile=profile, args=args)
|
||||
|
||||
|
||||
UNUSED_RESOURCE_CONFIGURATION_PATH_MESSAGE = """\
|
||||
@@ -587,6 +537,6 @@ There are {} unused configuration paths:
def _is_config_used(path, fqns):
    if fqns:
        for fqn in fqns:
            if len(path) <= len(fqn) and fqn[:len(path)] == path:
            if len(path) <= len(fqn) and fqn[: len(path)] == path:
                return True
    return False
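The prefix comparison above is easier to see with concrete values; a short, illustrative sketch: a configuration path such as `('my_project', 'staging')` counts as used when some node FQN starts with it.

```python
def is_config_used(path, fqns):
    # Same logic as _is_config_used: `path` must be a prefix of some fqn.
    if fqns:
        for fqn in fqns:
            if len(path) <= len(fqn) and fqn[: len(path)] == path:
                return True
    return False


fqns = [("my_project", "staging", "stg_orders")]
print(is_config_used(("my_project", "staging"), fqns))  # True
print(is_config_used(("my_project", "marts"), fqns))    # False
```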
@@ -1,8 +1,6 @@
from pathlib import Path
from typing import Dict, Any, Union
from dbt.clients.yaml_helper import (  # noqa: F401
    yaml, Loader, Dumper, load_yaml_text
)
from dbt.clients.yaml_helper import yaml, Loader, Dumper, load_yaml_text  # noqa: F401
from dbt.dataclass_schema import ValidationError

from .renderer import SelectorRenderer
@@ -30,9 +28,8 @@ Validator Error:
|
||||
|
||||
|
||||
class SelectorConfig(Dict[str, Dict[str, Union[SelectionSpec, bool]]]):
|
||||
|
||||
@classmethod
|
||||
def selectors_from_dict(cls, data: Dict[str, Any]) -> 'SelectorConfig':
|
||||
def selectors_from_dict(cls, data: Dict[str, Any]) -> "SelectorConfig":
|
||||
try:
|
||||
SelectorFile.validate(data)
|
||||
selector_file = SelectorFile.from_dict(data)
|
||||
@@ -46,12 +43,12 @@ class SelectorConfig(Dict[str, Dict[str, Union[SelectionSpec, bool]]]):
|
||||
f"union, intersection, string, dictionary. No lists. "
|
||||
f"\nhttps://docs.getdbt.com/reference/node-selection/"
|
||||
f"yaml-selectors",
|
||||
result_type='invalid_selector'
|
||||
result_type="invalid_selector",
|
||||
) from exc
|
||||
except RuntimeException as exc:
|
||||
raise DbtSelectorsError(
|
||||
f'Could not read selector file data: {exc}',
|
||||
result_type='invalid_selector',
|
||||
f"Could not read selector file data: {exc}",
|
||||
result_type="invalid_selector",
|
||||
) from exc
|
||||
|
||||
return cls(selectors)
|
||||
@@ -61,26 +58,28 @@ class SelectorConfig(Dict[str, Dict[str, Union[SelectionSpec, bool]]]):
|
||||
cls,
|
||||
data: Dict[str, Any],
|
||||
renderer: SelectorRenderer,
|
||||
) -> 'SelectorConfig':
|
||||
) -> "SelectorConfig":
|
||||
try:
|
||||
rendered = renderer.render_data(data)
|
||||
except (ValidationError, RuntimeException) as exc:
|
||||
raise DbtSelectorsError(
|
||||
f'Could not render selector data: {exc}',
|
||||
result_type='invalid_selector',
|
||||
f"Could not render selector data: {exc}",
|
||||
result_type="invalid_selector",
|
||||
) from exc
|
||||
return cls.selectors_from_dict(rendered)
|
||||
|
||||
@classmethod
|
||||
def from_path(
|
||||
cls, path: Path, renderer: SelectorRenderer,
|
||||
) -> 'SelectorConfig':
|
||||
cls,
|
||||
path: Path,
|
||||
renderer: SelectorRenderer,
|
||||
) -> "SelectorConfig":
|
||||
try:
|
||||
data = load_yaml_text(load_file_contents(str(path)))
|
||||
except (ValidationError, RuntimeException) as exc:
|
||||
raise DbtSelectorsError(
|
||||
f'Could not read selector file: {exc}',
|
||||
result_type='invalid_selector',
|
||||
f"Could not read selector file: {exc}",
|
||||
result_type="invalid_selector",
|
||||
path=path,
|
||||
) from exc
|
||||
|
||||
@@ -92,9 +91,7 @@ class SelectorConfig(Dict[str, Dict[str, Union[SelectionSpec, bool]]]):
|
||||
|
||||
|
||||
def selector_data_from_root(project_root: str) -> Dict[str, Any]:
|
||||
selector_filepath = resolve_path_from_base(
|
||||
'selectors.yml', project_root
|
||||
)
|
||||
selector_filepath = resolve_path_from_base("selectors.yml", project_root)
|
||||
|
||||
if path_exists(selector_filepath):
|
||||
selectors_dict = load_yaml_text(load_file_contents(selector_filepath))
|
||||
@@ -103,18 +100,16 @@ def selector_data_from_root(project_root: str) -> Dict[str, Any]:
|
||||
return selectors_dict
|
||||
|
||||
|
||||
def selector_config_from_data(
|
||||
selectors_data: Dict[str, Any]
|
||||
) -> SelectorConfig:
|
||||
def selector_config_from_data(selectors_data: Dict[str, Any]) -> SelectorConfig:
|
||||
if not selectors_data:
|
||||
selectors_data = {'selectors': []}
|
||||
selectors_data = {"selectors": []}
|
||||
|
||||
try:
|
||||
selectors = SelectorConfig.selectors_from_dict(selectors_data)
|
||||
except ValidationError as e:
|
||||
raise DbtSelectorsError(
|
||||
MALFORMED_SELECTOR_ERROR.format(error=str(e.message)),
|
||||
result_type='invalid_selector',
|
||||
result_type="invalid_selector",
|
||||
) from e
|
||||
return selectors
|
||||
|
||||
@@ -144,7 +139,6 @@ def validate_selector_default(selector_file: SelectorFile) -> None:
|
||||
# be necessary to make changes here. Ideally it would be
|
||||
# good to combine the two flows into one at some point.
|
||||
class SelectorDict:
|
||||
|
||||
@classmethod
|
||||
def parse_dict_definition(cls, definition):
|
||||
key = list(definition)[0]
|
||||
@@ -155,10 +149,10 @@ class SelectorDict:
|
||||
new_value = cls.parse_from_definition(sel_def)
|
||||
new_values.append(new_value)
|
||||
value = new_values
|
||||
if key == 'exclude':
|
||||
if key == "exclude":
|
||||
definition = {key: value}
|
||||
elif len(definition) == 1:
|
||||
definition = {'method': key, 'value': value}
|
||||
definition = {"method": key, "value": value}
|
||||
return definition
|
||||
|
||||
@classmethod
|
||||
@@ -180,10 +174,10 @@ class SelectorDict:
|
||||
def parse_from_definition(cls, definition):
|
||||
if isinstance(definition, str):
|
||||
definition = SelectionCriteria.dict_from_single_spec(definition)
|
||||
elif 'union' in definition:
|
||||
definition = cls.parse_a_definition('union', definition)
|
||||
elif 'intersection' in definition:
|
||||
definition = cls.parse_a_definition('intersection', definition)
|
||||
elif "union" in definition:
|
||||
definition = cls.parse_a_definition("union", definition)
|
||||
elif "intersection" in definition:
|
||||
definition = cls.parse_a_definition("intersection", definition)
|
||||
elif isinstance(definition, dict):
|
||||
definition = cls.parse_dict_definition(definition)
|
||||
return definition
|
||||
@@ -194,8 +188,8 @@ class SelectorDict:
|
||||
def parse_from_selectors_list(cls, selectors):
|
||||
selector_dict = {}
|
||||
for selector in selectors:
|
||||
sel_name = selector['name']
|
||||
sel_name = selector["name"]
|
||||
selector_dict[sel_name] = selector
|
||||
definition = cls.parse_from_definition(selector['definition'])
|
||||
selector_dict[sel_name]['definition'] = definition
|
||||
definition = cls.parse_from_definition(selector["definition"])
|
||||
selector_dict[sel_name]["definition"] = definition
|
||||
return selector_dict
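For context on what this produces, a hedged example (shapes assumed from the code above, not an excerpt from the diff): the selectors list read from `selectors.yml` is keyed by selector name, and each raw `definition` is replaced by its parsed form.

```python
# A minimal stand-in for SelectorDict.parse_from_selectors_list: key the
# entries by name and replace each raw definition with its parsed form.
def parse_from_selectors_list_sketch(selectors, parse_definition):
    selector_dict = {}
    for selector in selectors:
        sel_name = selector["name"]
        selector_dict[sel_name] = selector
        selector_dict[sel_name]["definition"] = parse_definition(selector["definition"])
    return selector_dict


raw = [{"name": "nightly", "definition": {"method": "tag", "value": "nightly"}}]
# With the identity function standing in for parse_from_definition:
print(parse_from_selectors_list_sketch(raw, lambda d: d))
```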
@@ -16,7 +16,8 @@ def parse_cli_vars(var_string: str) -> Dict[str, Any]:
            type_name = var_type.__name__
            raise_compiler_error(
                "The --vars argument must be a YAML dictionary, but was "
                "of type '{}'".format(type_name))
                "of type '{}'".format(type_name)
            )
    except ValidationException:
        fire_event(InvalidVarsYAML())
        raise
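A short sketch of the behaviour being reformatted here (simplified; the real function uses dbt's YAML helpers and exception types): the `--vars` input must parse to a mapping, and anything else is rejected with the error shown above.

```python
import json
from typing import Any, Dict


def parse_cli_vars_sketch(var_string: str) -> Dict[str, Any]:
    # JSON stands in for YAML here; dbt accepts YAML, of which JSON is a subset.
    cli_vars = json.loads(var_string)
    if isinstance(cli_vars, dict):
        return cli_vars
    type_name = type(cli_vars).__name__
    raise ValueError(
        "The --vars argument must be a YAML dictionary, but was "
        "of type '{}'".format(type_name)
    )


print(parse_cli_vars_sketch('{"start_date": "2021-01-01"}'))
```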
1
core/dbt/context/README.md
Normal file
1
core/dbt/context/README.md
Normal file
@@ -0,0 +1 @@
|
||||
# Contexts and Jinja rendering
|
||||
@@ -1,18 +1,17 @@
|
||||
import json
|
||||
import os
|
||||
from typing import (
|
||||
Any, Dict, NoReturn, Optional, Mapping
|
||||
)
|
||||
from typing import Any, Dict, NoReturn, Optional, Mapping
|
||||
|
||||
from dbt import flags
|
||||
from dbt import tracking
|
||||
from dbt.clients.jinja import get_rendered
|
||||
from dbt.clients.yaml_helper import ( # noqa: F401
|
||||
yaml, safe_load, SafeLoader, Loader, Dumper
|
||||
)
|
||||
from dbt.clients.yaml_helper import yaml, safe_load, SafeLoader, Loader, Dumper # noqa: F401
|
||||
from dbt.contracts.graph.compiled import CompiledResource
|
||||
from dbt.exceptions import (
|
||||
raise_compiler_error, MacroReturn, raise_parsing_error, disallow_secret_env_var
|
||||
raise_compiler_error,
|
||||
MacroReturn,
|
||||
raise_parsing_error,
|
||||
disallow_secret_env_var,
|
||||
)
|
||||
from dbt.logger import SECRET_ENV_PREFIX
|
||||
from dbt.events.functions import fire_event, get_invocation_id
|
||||
@@ -62,38 +61,27 @@ import re
|
||||
def get_pytz_module_context() -> Dict[str, Any]:
|
||||
context_exports = pytz.__all__ # type: ignore
|
||||
|
||||
return {
|
||||
name: getattr(pytz, name) for name in context_exports
|
||||
}
|
||||
return {name: getattr(pytz, name) for name in context_exports}
|
||||
|
||||
|
||||
def get_datetime_module_context() -> Dict[str, Any]:
|
||||
context_exports = [
|
||||
'date',
|
||||
'datetime',
|
||||
'time',
|
||||
'timedelta',
|
||||
'tzinfo'
|
||||
]
|
||||
context_exports = ["date", "datetime", "time", "timedelta", "tzinfo"]
|
||||
|
||||
return {
|
||||
name: getattr(datetime, name) for name in context_exports
|
||||
}
|
||||
return {name: getattr(datetime, name) for name in context_exports}
|
||||
|
||||
|
||||
def get_re_module_context() -> Dict[str, Any]:
|
||||
context_exports = re.__all__
|
||||
# TODO CT-211
|
||||
context_exports = re.__all__ # type: ignore[attr-defined]
|
||||
|
||||
return {
|
||||
name: getattr(re, name) for name in context_exports
|
||||
}
|
||||
return {name: getattr(re, name) for name in context_exports}
|
||||
|
||||
|
||||
def get_context_modules() -> Dict[str, Dict[str, Any]]:
    return {
        'pytz': get_pytz_module_context(),
        'datetime': get_datetime_module_context(),
        're': get_re_module_context(),
        "pytz": get_pytz_module_context(),
        "datetime": get_datetime_module_context(),
        "re": get_re_module_context(),
    }
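Put together, the helpers above expose a whitelisted slice of `pytz`, `datetime`, and `re` to Jinja under `modules`; a small sketch of equivalent behaviour (names assumed to mirror the code above, not taken from the diff):

```python
import datetime
import re

import pytz

# Equivalent of get_context_modules(): module name -> {export name -> object}.
modules = {
    "pytz": {name: getattr(pytz, name) for name in pytz.__all__},
    "datetime": {
        name: getattr(datetime, name)
        for name in ["date", "datetime", "time", "timedelta", "tzinfo"]
    },
    "re": {name: getattr(re, name) for name in re.__all__},
}

# Roughly what `{{ modules.datetime.date(2021, 1, 1) }}` resolves to in a macro.
print(modules["datetime"]["date"](2021, 1, 1))
```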
|
||||
@@ -127,8 +115,8 @@ class ContextMeta(type):
|
||||
new_dct = {}
|
||||
|
||||
for base in bases:
|
||||
context_members.update(getattr(base, '_context_members_', {}))
|
||||
context_attrs.update(getattr(base, '_context_attrs_', {}))
|
||||
context_members.update(getattr(base, "_context_members_", {}))
|
||||
context_attrs.update(getattr(base, "_context_attrs_", {}))
|
||||
|
||||
for key, value in dct.items():
|
||||
if isinstance(value, ContextMember):
|
||||
@@ -137,21 +125,20 @@ class ContextMeta(type):
|
||||
context_attrs[context_key] = key
|
||||
value = value.inner
|
||||
new_dct[key] = value
|
||||
new_dct['_context_members_'] = context_members
|
||||
new_dct['_context_attrs_'] = context_attrs
|
||||
new_dct["_context_members_"] = context_members
|
||||
new_dct["_context_attrs_"] = context_attrs
|
||||
return type.__new__(mcls, name, bases, new_dct)
|
||||
|
||||
|
||||
class Var:
|
||||
UndefinedVarError = "Required var '{}' not found in config:\nVars "\
|
||||
"supplied to {} = {}"
|
||||
UndefinedVarError = "Required var '{}' not found in config:\nVars " "supplied to {} = {}"
|
||||
_VAR_NOTSET = object()
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
context: Mapping[str, Any],
|
||||
cli_vars: Mapping[str, Any],
|
||||
node: Optional[CompiledResource] = None
|
||||
node: Optional[CompiledResource] = None,
|
||||
) -> None:
|
||||
self._context: Mapping[str, Any] = context
|
||||
self._cli_vars: Mapping[str, Any] = cli_vars
|
||||
@@ -166,14 +153,12 @@ class Var:
|
||||
if self._node is not None:
|
||||
return self._node.name
|
||||
else:
|
||||
return '<Configuration>'
|
||||
return "<Configuration>"
|
||||
|
||||
def get_missing_var(self, var_name):
|
||||
dct = {k: self._merged[k] for k in self._merged}
|
||||
pretty_vars = json.dumps(dct, sort_keys=True, indent=4)
|
||||
msg = self.UndefinedVarError.format(
|
||||
var_name, self.node_name, pretty_vars
|
||||
)
|
||||
msg = self.UndefinedVarError.format(var_name, self.node_name, pretty_vars)
|
||||
raise_compiler_error(msg, self._node)
|
||||
|
||||
def has_var(self, var_name: str):
|
||||
@@ -206,7 +191,7 @@ class BaseContext(metaclass=ContextMeta):
|
||||
def generate_builtins(self):
|
||||
builtins: Dict[str, Any] = {}
|
||||
for key, value in self._context_members_.items():
|
||||
if hasattr(value, '__get__'):
|
||||
if hasattr(value, "__get__"):
|
||||
# handle properties, bound methods, etc
|
||||
value = value.__get__(self)
|
||||
builtins[key] = value
|
||||
@@ -214,9 +199,9 @@ class BaseContext(metaclass=ContextMeta):
|
||||
|
||||
# no dbtClassMixin so this is not an actual override
|
||||
def to_dict(self):
|
||||
self._ctx['context'] = self._ctx
|
||||
self._ctx["context"] = self._ctx
|
||||
builtins = self.generate_builtins()
|
||||
self._ctx['builtins'] = builtins
|
||||
self._ctx["builtins"] = builtins
|
||||
self._ctx.update(builtins)
|
||||
return self._ctx
|
||||
|
||||
@@ -331,18 +316,20 @@ class BaseContext(metaclass=ContextMeta):
|
||||
msg = f"Env var required but not provided: '{var}'"
|
||||
raise_parsing_error(msg)
|
||||
|
||||
if os.environ.get('DBT_MACRO_DEBUGGING'):
|
||||
if os.environ.get("DBT_MACRO_DEBUGGING"):
|
||||
|
||||
@contextmember
|
||||
@staticmethod
|
||||
def debug():
|
||||
"""Enter a debugger at this line in the compiled jinja code."""
|
||||
import sys
|
||||
import ipdb # type: ignore
|
||||
|
||||
frame = sys._getframe(3)
|
||||
ipdb.set_trace(frame)
|
||||
return ''
|
||||
return ""
|
||||
|
||||
@contextmember('return')
|
||||
@contextmember("return")
|
||||
@staticmethod
|
||||
def _return(data: Any) -> NoReturn:
|
||||
"""The `return` function can be used in macros to return data to the
|
||||
@@ -393,9 +380,7 @@ class BaseContext(metaclass=ContextMeta):
|
||||
|
||||
@contextmember
|
||||
@staticmethod
|
||||
def tojson(
|
||||
value: Any, default: Any = None, sort_keys: bool = False
|
||||
) -> Any:
|
||||
def tojson(value: Any, default: Any = None, sort_keys: bool = False) -> Any:
|
||||
"""The `tojson` context method can be used to serialize a Python
|
||||
object primitive, eg. a `dict` or `list` to a json string.
|
||||
|
||||
@@ -488,10 +473,10 @@ class BaseContext(metaclass=ContextMeta):
|
||||
{% endmacro %}"
|
||||
"""
|
||||
if info:
|
||||
fire_event(MacroEventInfo(msg))
|
||||
fire_event(MacroEventInfo(msg=msg))
|
||||
else:
|
||||
fire_event(MacroEventDebug(msg))
|
||||
return ''
|
||||
fire_event(MacroEventDebug(msg=msg))
|
||||
return ""
|
||||
|
||||
@contextproperty
|
||||
def run_started_at(self) -> Optional[datetime.datetime]:
|
||||
@@ -571,6 +556,22 @@ class BaseContext(metaclass=ContextMeta):
|
||||
"""
|
||||
return flags
|
||||
|
||||
@contextmember
|
||||
@staticmethod
|
||||
def print(msg: str) -> str:
|
||||
"""Prints a line to stdout.
|
||||
|
||||
:param msg: The message to print
|
||||
|
||||
> macros/my_log_macro.sql
|
||||
|
||||
{% macro some_macro(arg1, arg2) %}
|
||||
{{ print("Running some_macro: " ~ arg1 ~ ", " ~ arg2) }}
|
||||
{% endmacro %}"
|
||||
"""
|
||||
print(msg)
|
||||
return ""
|
||||
|
||||
|
||||
def generate_base_context(cli_vars: Dict[str, Any]) -> Dict[str, Any]:
|
||||
ctx = BaseContext(cli_vars)
|
||||
|
||||
@@ -15,9 +15,7 @@ class ConfiguredContext(TargetContext):
|
||||
# subclasses are SchemaYamlContext, MacroResolvingContext, ManifestContext
|
||||
config: AdapterRequiredConfig
|
||||
|
||||
def __init__(
|
||||
self, config: AdapterRequiredConfig
|
||||
) -> None:
|
||||
def __init__(self, config: AdapterRequiredConfig) -> None:
|
||||
super().__init__(config, config.cli_vars)
|
||||
|
||||
@contextproperty
|
||||
@@ -67,7 +65,7 @@ class ConfiguredVar(Var):
|
||||
return self.get_missing_var(var_name)
|
||||
|
||||
|
||||
class SchemaYamlVars():
|
||||
class SchemaYamlVars:
|
||||
def __init__(self):
|
||||
self.env_vars = {}
|
||||
self.vars = {}
|
||||
@@ -82,9 +80,7 @@ class SchemaYamlContext(ConfiguredContext):
|
||||
|
||||
@contextproperty
|
||||
def var(self) -> ConfiguredVar:
|
||||
return ConfiguredVar(
|
||||
self._ctx, self.config, self._project_name
|
||||
)
|
||||
return ConfiguredVar(self._ctx, self.config, self._project_name)
|
||||
|
||||
@contextmember
|
||||
def env_var(self, var: str, default: Optional[str] = None) -> str:
|
||||
@@ -111,13 +107,11 @@ class MacroResolvingContext(ConfiguredContext):
|
||||
|
||||
@contextproperty
|
||||
def var(self) -> ConfiguredVar:
|
||||
return ConfiguredVar(
|
||||
self._ctx, self.config, self.config.project_name
|
||||
)
|
||||
return ConfiguredVar(self._ctx, self.config, self.config.project_name)
|
||||
|
||||
|
||||
def generate_schema_yml_context(
|
||||
config: AdapterRequiredConfig, project_name: str, schema_yaml_vars: SchemaYamlVars = None
|
||||
config: AdapterRequiredConfig, project_name: str, schema_yaml_vars: SchemaYamlVars = None
|
||||
) -> Dict[str, Any]:
|
||||
ctx = SchemaYamlContext(config, project_name, schema_yaml_vars)
|
||||
return ctx.to_dict()
|
||||
|
||||
@@ -17,8 +17,8 @@ class ModelParts(IsFQNResource):
|
||||
package_name: str
|
||||
|
||||
|
||||
T = TypeVar('T') # any old type
|
||||
C = TypeVar('C', bound=BaseConfig)
|
||||
T = TypeVar("T") # any old type
|
||||
C = TypeVar("C", bound=BaseConfig)
|
||||
|
||||
|
||||
class ConfigSource:
|
||||
@@ -36,15 +36,15 @@ class UnrenderedConfig(ConfigSource):
|
||||
def get_config_dict(self, resource_type: NodeType) -> Dict[str, Any]:
|
||||
unrendered = self.project.unrendered.project_dict
|
||||
if resource_type == NodeType.Seed:
|
||||
model_configs = unrendered.get('seeds')
|
||||
model_configs = unrendered.get("seeds")
|
||||
elif resource_type == NodeType.Snapshot:
|
||||
model_configs = unrendered.get('snapshots')
|
||||
model_configs = unrendered.get("snapshots")
|
||||
elif resource_type == NodeType.Source:
|
||||
model_configs = unrendered.get('sources')
|
||||
model_configs = unrendered.get("sources")
|
||||
elif resource_type == NodeType.Test:
|
||||
model_configs = unrendered.get('tests')
|
||||
model_configs = unrendered.get("tests")
|
||||
else:
|
||||
model_configs = unrendered.get('models')
|
||||
model_configs = unrendered.get("models")
|
||||
|
||||
if model_configs is None:
|
||||
return {}
|
||||
@@ -83,8 +83,8 @@ class BaseContextConfigGenerator(Generic[T]):
|
||||
dependencies = self._active_project.load_dependencies()
|
||||
if project_name not in dependencies:
|
||||
raise InternalException(
|
||||
f'Project name {project_name} not found in dependencies '
|
||||
f'(found {list(dependencies)})'
|
||||
f"Project name {project_name} not found in dependencies "
|
||||
f"(found {list(dependencies)})"
|
||||
)
|
||||
return dependencies[project_name]
|
||||
|
||||
@@ -96,7 +96,7 @@ class BaseContextConfigGenerator(Generic[T]):
|
||||
for level_config in fqn_search(model_configs, fqn):
|
||||
result = {}
|
||||
for key, value in level_config.items():
|
||||
if key.startswith('+'):
|
||||
if key.startswith("+"):
|
||||
result[key[1:].strip()] = deepcopy(value)
|
||||
elif not isinstance(value, dict):
|
||||
result[key] = deepcopy(value)
|
||||
@@ -109,9 +109,7 @@ class BaseContextConfigGenerator(Generic[T]):
|
||||
return self._project_configs(self._active_project, fqn, resource_type)
|
||||
|
||||
@abstractmethod
|
||||
def _update_from_config(
|
||||
self, result: T, partial: Dict[str, Any], validate: bool = False
|
||||
) -> T:
|
||||
def _update_from_config(self, result: T, partial: Dict[str, Any], validate: bool = False) -> T:
|
||||
...
|
||||
|
||||
@abstractmethod
|
||||
@@ -125,7 +123,7 @@ class BaseContextConfigGenerator(Generic[T]):
|
||||
resource_type: NodeType,
|
||||
project_name: str,
|
||||
base: bool,
|
||||
patch_config_dict: Dict[str, Any] = None
|
||||
patch_config_dict: Dict[str, Any] = None,
|
||||
) -> BaseConfig:
|
||||
own_config = self.get_node_project(project_name)
|
||||
|
||||
@@ -150,7 +148,8 @@ class BaseContextConfigGenerator(Generic[T]):
|
||||
result = self._update_from_config(result, fqn_config)
|
||||
|
||||
# this is mostly impactful in the snapshot config case
|
||||
return result
|
||||
# TODO CT-211
|
||||
return result # type: ignore[return-value]
|
||||
|
||||
@abstractmethod
|
||||
def calculate_node_config_dict(
|
||||
@@ -181,16 +180,10 @@ class ContextConfigGenerator(BaseContextConfigGenerator[C]):
|
||||
result = config_cls.from_dict({})
|
||||
return result
|
||||
|
||||
def _update_from_config(
|
||||
self, result: C, partial: Dict[str, Any], validate: bool = False
|
||||
) -> C:
|
||||
translated = self._active_project.credentials.translate_aliases(
|
||||
partial
|
||||
)
|
||||
def _update_from_config(self, result: C, partial: Dict[str, Any], validate: bool = False) -> C:
|
||||
translated = self._active_project.credentials.translate_aliases(partial)
|
||||
return result.update_from(
|
||||
translated,
|
||||
self._active_project.credentials.type,
|
||||
validate=validate
|
||||
translated, self._active_project.credentials.type, validate=validate
|
||||
)
|
||||
|
||||
def calculate_node_config_dict(
|
||||
@@ -200,7 +193,7 @@ class ContextConfigGenerator(BaseContextConfigGenerator[C]):
|
||||
resource_type: NodeType,
|
||||
project_name: str,
|
||||
base: bool,
|
||||
patch_config_dict: dict = None
|
||||
patch_config_dict: dict = None,
|
||||
) -> Dict[str, Any]:
|
||||
config = self.calculate_node_config(
|
||||
config_call_dict=config_call_dict,
|
||||
@@ -208,7 +201,7 @@ class ContextConfigGenerator(BaseContextConfigGenerator[C]):
|
||||
resource_type=resource_type,
|
||||
project_name=project_name,
|
||||
base=base,
|
||||
patch_config_dict=patch_config_dict
|
||||
patch_config_dict=patch_config_dict,
|
||||
)
|
||||
finalized = config.finalize_and_validate()
|
||||
return finalized.to_dict(omit_none=True)
|
||||
@@ -225,22 +218,19 @@ class UnrenderedConfigGenerator(BaseContextConfigGenerator[Dict[str, Any]]):
|
||||
resource_type: NodeType,
|
||||
project_name: str,
|
||||
base: bool,
|
||||
patch_config_dict: dict = None
|
||||
patch_config_dict: dict = None,
|
||||
) -> Dict[str, Any]:
|
||||
# TODO CT-211
|
||||
return self.calculate_node_config(
|
||||
config_call_dict=config_call_dict,
|
||||
fqn=fqn,
|
||||
resource_type=resource_type,
|
||||
project_name=project_name,
|
||||
base=base,
|
||||
patch_config_dict=patch_config_dict
|
||||
)
|
||||
patch_config_dict=patch_config_dict,
|
||||
) # type: ignore[return-value]
|
||||
|
||||
def initial_result(
|
||||
self,
|
||||
resource_type: NodeType,
|
||||
base: bool
|
||||
) -> Dict[str, Any]:
|
||||
def initial_result(self, resource_type: NodeType, base: bool) -> Dict[str, Any]:
|
||||
return {}
|
||||
|
||||
def _update_from_config(
|
||||
@@ -249,9 +239,7 @@ class UnrenderedConfigGenerator(BaseContextConfigGenerator[Dict[str, Any]]):
|
||||
partial: Dict[str, Any],
|
||||
validate: bool = False,
|
||||
) -> Dict[str, Any]:
|
||||
translated = self._active_project.credentials.translate_aliases(
|
||||
partial
|
||||
)
|
||||
translated = self._active_project.credentials.translate_aliases(partial)
|
||||
result.update(translated)
|
||||
return result
|
||||
|
||||
@@ -279,11 +267,11 @@ class ContextConfig:
|
||||
for k, v in opts.items():
|
||||
# MergeBehavior for post-hook and pre-hook is to collect all
|
||||
# values, instead of overwriting
|
||||
if k in BaseConfig.mergebehavior['append']:
|
||||
if k in BaseConfig.mergebehavior["append"]:
|
||||
if not isinstance(v, list):
|
||||
v = [v]
|
||||
if k in BaseConfig.mergebehavior['update'] and not isinstance(v, dict):
|
||||
raise InternalException(f'expected dict, got {v}')
|
||||
if k in BaseConfig.mergebehavior["update"] and not isinstance(v, dict):
|
||||
raise InternalException(f"expected dict, got {v}")
|
||||
if k in config_call_dict and isinstance(config_call_dict[k], list):
|
||||
config_call_dict[k].extend(v)
|
||||
elif k in config_call_dict and isinstance(config_call_dict[k], dict):
|
||||
@@ -292,16 +280,14 @@ class ContextConfig:
|
||||
config_call_dict[k] = v
|
||||
|
||||
def build_config_dict(
|
||||
self,
|
||||
base: bool = False,
|
||||
*,
|
||||
rendered: bool = True,
|
||||
patch_config_dict: dict = None
|
||||
self, base: bool = False, *, rendered: bool = True, patch_config_dict: dict = None
|
||||
) -> Dict[str, Any]:
|
||||
if rendered:
|
||||
src = ContextConfigGenerator(self._active_project)
|
||||
# TODO CT-211
|
||||
src = ContextConfigGenerator(self._active_project) # type: ignore[var-annotated]
|
||||
else:
|
||||
src = UnrenderedConfigGenerator(self._active_project)
|
||||
# TODO CT-211
|
||||
src = UnrenderedConfigGenerator(self._active_project) # type: ignore[assignment]
|
||||
|
||||
return src.calculate_node_config_dict(
|
||||
config_call_dict=self._config_call_dict,
|
||||
@@ -309,5 +295,5 @@ class ContextConfig:
|
||||
resource_type=self._resource_type,
|
||||
project_name=self._project_name,
|
||||
base=base,
|
||||
patch_config_dict=patch_config_dict
|
||||
patch_config_dict=patch_config_dict,
|
||||
)
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
from typing import (
|
||||
Any, Dict, Union
|
||||
)
|
||||
from typing import Any, Dict, Union
|
||||
|
||||
from dbt.exceptions import (
|
||||
doc_invalid_args,
|
||||
@@ -68,7 +66,8 @@ class DocsRuntimeContext(SchemaYamlContext):
|
||||
file_id = target_doc.file_id
|
||||
if file_id in self.manifest.files:
|
||||
source_file = self.manifest.files[file_id]
|
||||
source_file.add_node(self.node.unique_id)
|
||||
# TODO CT-211
|
||||
source_file.add_node(self.node.unique_id) # type: ignore[union-attr]
|
||||
else:
|
||||
doc_target_not_found(self.node, doc_name, doc_package_name)
|
||||
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
from typing import (
|
||||
Dict, MutableMapping, Optional
|
||||
)
|
||||
from typing import Dict, MutableMapping, Optional
|
||||
from dbt.contracts.graph.parsed import ParsedMacro
|
||||
from dbt.exceptions import raise_duplicate_macro_name, raise_compiler_error
|
||||
from dbt.include.global_project import PROJECT_NAME as GLOBAL_PROJECT_NAME
|
||||
@@ -49,8 +47,7 @@ class MacroResolver:
|
||||
for pkg in reversed(self.internal_package_names):
|
||||
if pkg in self.internal_packages:
|
||||
# Turn the internal packages into a flat namespace
|
||||
self.internal_packages_namespace.update(
|
||||
self.internal_packages[pkg])
|
||||
self.internal_packages_namespace.update(self.internal_packages[pkg])
|
||||
|
||||
# search order:
|
||||
# local_namespace (package of particular node), not including
|
||||
@@ -89,9 +86,7 @@ class MacroResolver:
|
||||
package_namespaces[macro.package_name] = namespace
|
||||
|
||||
if macro.name in namespace:
|
||||
raise_duplicate_macro_name(
|
||||
macro, macro, macro.package_name
|
||||
)
|
||||
raise_duplicate_macro_name(macro, macro, macro.package_name)
|
||||
package_namespaces[macro.package_name][macro.name] = macro
|
||||
|
||||
def add_macro(self, macro: ParsedMacro):
|
||||
@@ -114,8 +109,7 @@ class MacroResolver:
|
||||
|
||||
def get_macro(self, local_package, macro_name):
|
||||
local_package_macros = {}
|
||||
if (local_package not in self.internal_package_names and
|
||||
local_package in self.packages):
|
||||
if local_package not in self.internal_package_names and local_package in self.packages:
|
||||
local_package_macros = self.packages[local_package]
|
||||
# First: search the local packages for this macro
|
||||
if macro_name in local_package_macros:
|
||||
@@ -140,9 +134,7 @@ class MacroResolver:
|
||||
# is that you can limit the number of macros provided to the
|
||||
# context dictionary in the 'to_dict' manifest method.
|
||||
class TestMacroNamespace:
|
||||
def __init__(
|
||||
self, macro_resolver, ctx, node, thread_ctx, depends_on_macros
|
||||
):
|
||||
def __init__(self, macro_resolver, ctx, node, thread_ctx, depends_on_macros):
|
||||
self.macro_resolver = macro_resolver
|
||||
self.ctx = ctx
|
||||
self.node = node # can be none
|
||||
@@ -155,11 +147,14 @@ class TestMacroNamespace:
|
||||
for macro_unique_id in dep_macros:
|
||||
if macro_unique_id in self.macro_resolver.macros:
|
||||
# Split up the macro unique_id to get the project_name
|
||||
(_, project_name, macro_name) = macro_unique_id.split('.')
|
||||
(_, project_name, macro_name) = macro_unique_id.split(".")
|
||||
# Save the plain macro_name in the local_namespace
|
||||
macro = self.macro_resolver.macros[macro_unique_id]
|
||||
macro_gen = MacroGenerator(
|
||||
macro, self.ctx, self.node, self.thread_ctx,
|
||||
macro,
|
||||
self.ctx,
|
||||
self.node,
|
||||
self.thread_ctx,
|
||||
)
|
||||
self.local_namespace[macro_name] = macro_gen
|
||||
# We also need the two part macro name
|
||||
@@ -177,9 +172,7 @@ class TestMacroNamespace:
|
||||
if macro.depends_on.macros:
|
||||
self.recursively_get_depends_on_macros(macro.depends_on.macros, dep_macros)
|
||||
|
||||
def get_from_package(
|
||||
self, package_name: Optional[str], name: str
|
||||
) -> Optional[MacroGenerator]:
|
||||
def get_from_package(self, package_name: Optional[str], name: str) -> Optional[MacroGenerator]:
|
||||
macro = None
|
||||
if package_name is None:
|
||||
macro = self.macro_resolver.macros_by_name.get(name)
|
||||
@@ -188,12 +181,8 @@ class TestMacroNamespace:
|
||||
elif package_name in self.macro_resolver.packages:
|
||||
macro = self.macro_resolver.packages[package_name].get(name)
|
||||
else:
|
||||
raise_compiler_error(
|
||||
f"Could not find package '{package_name}'"
|
||||
)
|
||||
raise_compiler_error(f"Could not find package '{package_name}'")
|
||||
if not macro:
|
||||
return None
|
||||
macro_func = MacroGenerator(
|
||||
macro, self.ctx, self.node, self.thread_ctx
|
||||
)
|
||||
macro_func = MacroGenerator(macro, self.ctx, self.node, self.thread_ctx)
|
||||
return macro_func
|
||||
|
||||
@@ -1,13 +1,9 @@
|
||||
from typing import (
|
||||
Any, Dict, Iterable, Union, Optional, List, Iterator, Mapping, Set
|
||||
)
|
||||
from typing import Any, Dict, Iterable, Union, Optional, List, Iterator, Mapping, Set
|
||||
|
||||
from dbt.clients.jinja import MacroGenerator, MacroStack
|
||||
from dbt.contracts.graph.parsed import ParsedMacro
|
||||
from dbt.include.global_project import PROJECT_NAME as GLOBAL_PROJECT_NAME
|
||||
from dbt.exceptions import (
|
||||
raise_duplicate_macro_name, raise_compiler_error
|
||||
)
|
||||
from dbt.exceptions import raise_duplicate_macro_name, raise_compiler_error
|
||||
|
||||
|
||||
FlatNamespace = Dict[str, MacroGenerator]
|
||||
@@ -27,7 +23,7 @@ class MacroNamespace(Mapping):
|
||||
def __init__(
|
||||
self,
|
||||
global_namespace: FlatNamespace, # root package macros
|
||||
local_namespace: FlatNamespace, # packages for *this* node
|
||||
local_namespace: FlatNamespace, # packages for *this* node
|
||||
global_project_namespace: FlatNamespace, # internal packages
|
||||
packages: Dict[str, FlatNamespace], # non-internal packages
|
||||
):
|
||||
@@ -37,11 +33,13 @@ class MacroNamespace(Mapping):
|
||||
self.global_project_namespace: FlatNamespace = global_project_namespace
|
||||
|
||||
def _search_order(self) -> Iterable[Union[FullNamespace, FlatNamespace]]:
|
||||
yield self.local_namespace # local package
|
||||
yield self.local_namespace # local package
|
||||
yield self.global_namespace # root package
|
||||
yield self.packages # non-internal packages
|
||||
# TODO CT-211
|
||||
yield self.packages # type: ignore[misc] # non-internal packages
|
||||
yield {
|
||||
GLOBAL_PROJECT_NAME: self.global_project_namespace, # dbt
|
||||
# TODO CT-211
|
||||
GLOBAL_PROJECT_NAME: self.global_project_namespace, # type: ignore[misc] # dbt
|
||||
}
|
||||
yield self.global_project_namespace # other internal project besides dbt
|
||||
|
||||
@@ -68,9 +66,7 @@ class MacroNamespace(Mapping):
|
||||
return dct[key]
|
||||
raise KeyError(key)
|
||||
|
||||
def get_from_package(
|
||||
self, package_name: Optional[str], name: str
|
||||
) -> Optional[MacroGenerator]:
|
||||
def get_from_package(self, package_name: Optional[str], name: str) -> Optional[MacroGenerator]:
|
||||
pkg: FlatNamespace
|
||||
if package_name is None:
|
||||
return self.get(name)
|
||||
@@ -79,9 +75,7 @@ class MacroNamespace(Mapping):
|
||||
elif package_name in self.packages:
|
||||
return self.packages[package_name].get(name)
|
||||
else:
|
||||
raise_compiler_error(
|
||||
f"Could not find package '{package_name}'"
|
||||
)
|
||||
raise_compiler_error(f"Could not find package '{package_name}'")
|
||||
|
||||
|
||||
# This class builds the MacroNamespace by adding macros to
|
||||
@@ -128,9 +122,7 @@ class MacroNamespaceBuilder:
|
||||
hierarchy[macro.package_name] = namespace
|
||||
|
||||
if macro.name in namespace:
|
||||
raise_duplicate_macro_name(
|
||||
macro_func.macro, macro, macro.package_name
|
||||
)
|
||||
raise_duplicate_macro_name(macro_func.macro, macro, macro.package_name)
|
||||
hierarchy[macro.package_name][macro.name] = macro_func
|
||||
|
||||
def add_macro(self, macro: ParsedMacro, ctx: Dict[str, Any]):
|
||||
@@ -139,9 +131,7 @@ class MacroNamespaceBuilder:
|
||||
# MacroGenerator is in clients/jinja.py
|
||||
# a MacroGenerator object is a callable object that will
|
||||
# execute the MacroGenerator.__call__ function
|
||||
macro_func: MacroGenerator = MacroGenerator(
|
||||
macro, ctx, self.node, self.thread_ctx
|
||||
)
|
||||
macro_func: MacroGenerator = MacroGenerator(macro, ctx, self.node, self.thread_ctx)
|
||||
|
||||
# internal macros (from plugins) will be processed separately from
|
||||
# project macros, so store them in a different place
|
||||
|
||||
@@ -17,6 +17,7 @@ class ManifestContext(ConfiguredContext):
|
||||
The given macros can override any previous context values, which will be
|
||||
available as if they were accessed relative to the package name.
|
||||
"""
|
||||
|
||||
# subclasses are QueryHeaderContext and ProviderContext
|
||||
def __init__(
|
||||
self,
|
||||
@@ -38,16 +39,13 @@ class ManifestContext(ConfiguredContext):
|
||||
# this takes all the macros in the manifest and adds them
|
||||
# to the MacroNamespaceBuilder stored in self.namespace
|
||||
builder = self._get_namespace_builder()
|
||||
return builder.build_namespace(
|
||||
self.manifest.macros.values(), self._ctx
|
||||
)
|
||||
return builder.build_namespace(self.manifest.macros.values(), self._ctx)
|
||||
|
||||
def _get_namespace_builder(self) -> MacroNamespaceBuilder:
|
||||
# avoid an import loop
|
||||
from dbt.adapters.factory import get_adapter_package_names
|
||||
internal_packages: List[str] = get_adapter_package_names(
|
||||
self.config.credentials.type
|
||||
)
|
||||
|
||||
internal_packages: List[str] = get_adapter_package_names(self.config.credentials.type)
|
||||
return MacroNamespaceBuilder(
|
||||
self.config.project_name,
|
||||
self.search_package,
|
||||
@@ -70,14 +68,10 @@ class ManifestContext(ConfiguredContext):
|
||||
|
||||
|
||||
class QueryHeaderContext(ManifestContext):
|
||||
def __init__(
|
||||
self, config: AdapterRequiredConfig, manifest: Manifest
|
||||
) -> None:
|
||||
def __init__(self, config: AdapterRequiredConfig, manifest: Manifest) -> None:
|
||||
super().__init__(config, manifest, config.project_name)
|
||||
|
||||
|
||||
def generate_query_header_context(
|
||||
config: AdapterRequiredConfig, manifest: Manifest
|
||||
):
|
||||
def generate_query_header_context(config: AdapterRequiredConfig, manifest: Manifest):
|
||||
ctx = QueryHeaderContext(config, manifest)
|
||||
return ctx.to_dict()
|
||||
|
||||
@@ -1,15 +1,21 @@
|
||||
import abc
|
||||
import os
|
||||
from typing import (
|
||||
Callable, Any, Dict, Optional, Union, List, TypeVar, Type, Iterable,
|
||||
Callable,
|
||||
Any,
|
||||
Dict,
|
||||
Optional,
|
||||
Union,
|
||||
List,
|
||||
TypeVar,
|
||||
Type,
|
||||
Iterable,
|
||||
Mapping,
|
||||
)
|
||||
from typing_extensions import Protocol
|
||||
|
||||
from dbt.adapters.base.column import Column
|
||||
from dbt.adapters.factory import (
|
||||
get_adapter, get_adapter_package_names, get_adapter_type_names
|
||||
)
|
||||
from dbt.adapters.factory import get_adapter, get_adapter_package_names, get_adapter_type_names
|
||||
from dbt.clients import agate_helper
|
||||
from dbt.clients.jinja import get_rendered, MacroGenerator, MacroStack
|
||||
from dbt.config import RuntimeConfig, Project
|
||||
@@ -21,13 +27,13 @@ from dbt.context.macro_resolver import MacroResolver, TestMacroNamespace
|
||||
from .macros import MacroNamespaceBuilder, MacroNamespace
|
||||
from .manifest import ManifestContext
|
||||
from dbt.contracts.connection import AdapterResponse
|
||||
from dbt.contracts.graph.manifest import (
|
||||
Manifest, Disabled
|
||||
)
|
||||
from dbt.contracts.graph.manifest import Manifest, Disabled
|
||||
from dbt.contracts.graph.compiled import (
|
||||
CompiledResource,
|
||||
CompiledSeedNode,
|
||||
ManifestNode,
|
||||
CompiledSqlNode,
|
||||
CompiledRPCNode,
|
||||
)
|
||||
from dbt.contracts.graph.parsed import (
|
||||
ParsedMacro,
|
||||
@@ -56,9 +62,7 @@ from dbt.exceptions import (
|
||||
from dbt.config import IsFQNResource
|
||||
from dbt.node_types import NodeType
|
||||
|
||||
from dbt.utils import (
|
||||
merge, AttrDict, MultiDict
|
||||
)
|
||||
from dbt.utils import merge, AttrDict, MultiDict
|
||||
|
||||
import agate
|
||||
|
||||
@@ -81,10 +85,7 @@ class RelationProxy:
|
||||
return self._relation_type.create_from_source(*args, **kwargs)
|
||||
|
||||
def create(self, *args, **kwargs):
|
||||
kwargs['quote_policy'] = merge(
|
||||
self._quoting_config,
|
||||
kwargs.pop('quote_policy', {})
|
||||
)
|
||||
kwargs["quote_policy"] = merge(self._quoting_config, kwargs.pop("quote_policy", {}))
|
||||
return self._relation_type.create(*args, **kwargs)
|
||||
|
||||
|
||||
@@ -100,7 +101,7 @@ class BaseDatabaseWrapper:
|
||||
self._namespace = namespace
|
||||
|
||||
def __getattr__(self, name):
|
||||
raise NotImplementedError('subclasses need to implement this')
|
||||
raise NotImplementedError("subclasses need to implement this")
|
||||
|
||||
@property
|
||||
def config(self):
|
||||
@@ -117,7 +118,7 @@ class BaseDatabaseWrapper:
|
||||
# 1. current adapter
|
||||
# 2. any parent adapters (dependencies)
|
||||
# 3. 'default'
|
||||
search_prefixes = get_adapter_type_names(self._adapter.type()) + ['default']
|
||||
search_prefixes = get_adapter_type_names(self._adapter.type()) + ["default"]
|
||||
return search_prefixes
|
||||
|
||||
def dispatch(
|
||||
@@ -128,8 +129,8 @@ class BaseDatabaseWrapper:
|
||||
) -> MacroGenerator:
|
||||
search_packages: List[Optional[str]]
|
||||
|
||||
if '.' in macro_name:
|
||||
suggest_macro_namespace, suggest_macro_name = macro_name.split('.', 1)
|
||||
if "." in macro_name:
|
||||
suggest_macro_namespace, suggest_macro_name = macro_name.split(".", 1)
|
||||
msg = (
|
||||
f'In adapter.dispatch, got a macro name of "{macro_name}", '
|
||||
f'but "." is not a valid macro name component. Did you mean '
|
||||
@@ -152,7 +153,7 @@ class BaseDatabaseWrapper:
|
||||
else:
|
||||
# Not a string and not None so must be a list
|
||||
raise CompilationException(
|
||||
f'In adapter.dispatch, got a list macro_namespace argument '
|
||||
f"In adapter.dispatch, got a list macro_namespace argument "
|
||||
f'("{macro_namespace}"), but macro_namespace should be None or a string.'
|
||||
)
|
||||
|
||||
@@ -160,12 +161,10 @@ class BaseDatabaseWrapper:
|
||||
|
||||
for package_name in search_packages:
|
||||
for prefix in self._get_adapter_macro_prefixes():
|
||||
search_name = f'{prefix}__{macro_name}'
|
||||
search_name = f"{prefix}__{macro_name}"
|
||||
try:
|
||||
# this uses the namespace from the context
|
||||
macro = self._namespace.get_from_package(
|
||||
package_name, search_name
|
||||
)
|
||||
macro = self._namespace.get_from_package(package_name, search_name)
|
||||
except CompilationException:
|
||||
# Only raise CompilationException if macro is not found in
|
||||
# any package
|
||||
@@ -174,16 +173,13 @@ class BaseDatabaseWrapper:
|
||||
if package_name is None:
|
||||
attempts.append(search_name)
|
||||
else:
|
||||
attempts.append(f'{package_name}.{search_name}')
|
||||
attempts.append(f"{package_name}.{search_name}")
|
||||
|
||||
if macro is not None:
|
||||
return macro
|
||||
|
||||
searched = ', '.join(repr(a) for a in attempts)
|
||||
msg = (
|
||||
f"In dispatch: No macro named '{macro_name}' found\n"
|
||||
f" Searched for: {searched}"
|
||||
)
|
||||
searched = ", ".join(repr(a) for a in attempts)
|
||||
msg = f"In dispatch: No macro named '{macro_name}' found\n" f" Searched for: {searched}"
|
||||
raise CompilationException(msg)
|
||||
|
||||
|
||||
@@ -209,14 +205,10 @@ class BaseResolver(metaclass=abc.ABCMeta):
|
||||
|
||||
class BaseRefResolver(BaseResolver):
|
||||
@abc.abstractmethod
|
||||
def resolve(
|
||||
self, name: str, package: Optional[str] = None
|
||||
) -> RelationProxy:
|
||||
def resolve(self, name: str, package: Optional[str] = None) -> RelationProxy:
|
||||
...
|
||||
|
||||
def _repack_args(
|
||||
self, name: str, package: Optional[str]
|
||||
) -> List[str]:
|
||||
def _repack_args(self, name: str, package: Optional[str]) -> List[str]:
|
||||
if package is None:
|
||||
return [name]
|
||||
else:
|
||||
@@ -225,14 +217,12 @@ class BaseRefResolver(BaseResolver):
|
||||
def validate_args(self, name: str, package: Optional[str]):
|
||||
if not isinstance(name, str):
|
||||
raise CompilationException(
|
||||
f'The name argument to ref() must be a string, got '
|
||||
f'{type(name)}'
|
||||
f"The name argument to ref() must be a string, got " f"{type(name)}"
|
||||
)
|
||||
|
||||
if package is not None and not isinstance(package, str):
|
||||
raise CompilationException(
|
||||
f'The package argument to ref() must be a string or None, got '
|
||||
f'{type(package)}'
|
||||
f"The package argument to ref() must be a string or None, got " f"{type(package)}"
|
||||
)
|
||||
|
||||
def __call__(self, *args: str) -> RelationProxy:
|
||||
@@ -257,20 +247,19 @@ class BaseSourceResolver(BaseResolver):
|
||||
def validate_args(self, source_name: str, table_name: str):
|
||||
if not isinstance(source_name, str):
|
||||
raise CompilationException(
|
||||
f'The source name (first) argument to source() must be a '
|
||||
f'string, got {type(source_name)}'
|
||||
f"The source name (first) argument to source() must be a "
|
||||
f"string, got {type(source_name)}"
|
||||
)
|
||||
if not isinstance(table_name, str):
|
||||
raise CompilationException(
|
||||
f'The table name (second) argument to source() must be a '
|
||||
f'string, got {type(table_name)}'
|
||||
f"The table name (second) argument to source() must be a "
|
||||
f"string, got {type(table_name)}"
|
||||
)
|
||||
|
||||
def __call__(self, *args: str) -> RelationProxy:
|
||||
if len(args) != 2:
|
||||
raise_compiler_error(
|
||||
f"source() takes exactly two arguments ({len(args)} given)",
|
||||
self.model
|
||||
f"source() takes exactly two arguments ({len(args)} given)", self.model
|
||||
)
|
||||
self.validate_args(args[0], args[1])
|
||||
return self.resolve(args[0], args[1])
|
||||
@@ -288,14 +277,15 @@ class ParseConfigObject(Config):
|
||||
self.context_config = context_config
|
||||
|
||||
def _transform_config(self, config):
|
||||
for oldkey in ('pre_hook', 'post_hook'):
|
||||
for oldkey in ("pre_hook", "post_hook"):
|
||||
if oldkey in config:
|
||||
newkey = oldkey.replace('_', '-')
|
||||
newkey = oldkey.replace("_", "-")
|
||||
if newkey in config:
|
||||
raise_compiler_error(
|
||||
'Invalid config, has conflicting keys "{}" and "{}"'
|
||||
.format(oldkey, newkey),
|
||||
self.model
|
||||
'Invalid config, has conflicting keys "{}" and "{}"'.format(
|
||||
oldkey, newkey
|
||||
),
|
||||
self.model,
|
||||
)
|
||||
config[newkey] = config.pop(oldkey)
|
||||
return config
|
||||
@@ -306,29 +296,25 @@ class ParseConfigObject(Config):
|
||||
elif len(args) == 0 and len(kwargs) > 0:
|
||||
opts = kwargs
|
||||
else:
|
||||
raise_compiler_error(
|
||||
"Invalid inline model config",
|
||||
self.model)
|
||||
raise_compiler_error("Invalid inline model config", self.model)
|
||||
|
||||
opts = self._transform_config(opts)
|
||||
|
||||
# it's ok to have a parse context with no context config, but you must
|
||||
# not call it!
|
||||
if self.context_config is None:
|
||||
raise RuntimeException(
|
||||
'At parse time, did not receive a context config'
|
||||
)
|
||||
raise RuntimeException("At parse time, did not receive a context config")
|
||||
self.context_config.add_config_call(opts)
|
||||
return ''
|
||||
return ""
|
||||
|
||||
def set(self, name, value):
|
||||
return self.__call__({name: value})
|
||||
|
||||
def require(self, name, validator=None):
|
||||
return ''
|
||||
return ""
|
||||
|
||||
def get(self, name, default=None, validator=None):
|
||||
return ''
|
||||
return ""
|
||||
|
||||
def persist_relation_docs(self) -> bool:
|
||||
return False
|
||||
@@ -338,14 +324,12 @@ class ParseConfigObject(Config):
|
||||
|
||||
|
||||
class RuntimeConfigObject(Config):
|
||||
def __init__(
|
||||
self, model, context_config: Optional[ContextConfig] = None
|
||||
):
|
||||
def __init__(self, model, context_config: Optional[ContextConfig] = None):
|
||||
self.model = model
|
||||
# we never use or get a config, only the parser cares
|
||||
|
||||
def __call__(self, *args, **kwargs):
|
||||
return ''
|
||||
return ""
|
||||
|
||||
def set(self, name, value):
|
||||
return self.__call__({name: value})
|
||||
@@ -355,7 +339,7 @@ class RuntimeConfigObject(Config):
|
||||
|
||||
def _lookup(self, name, default=_MISSING):
|
||||
# if this is a macro, there might be no `model.config`.
|
||||
if not hasattr(self.model, 'config'):
|
||||
if not hasattr(self.model, "config"):
|
||||
result = default
|
||||
else:
|
||||
result = self.model.config.get(name, default)
|
||||
@@ -380,22 +364,24 @@ class RuntimeConfigObject(Config):
|
||||
return to_return
|
||||
|
||||
def persist_relation_docs(self) -> bool:
|
||||
persist_docs = self.get('persist_docs', default={})
|
||||
persist_docs = self.get("persist_docs", default={})
|
||||
if not isinstance(persist_docs, dict):
|
||||
raise_compiler_error(
|
||||
f"Invalid value provided for 'persist_docs'. Expected dict "
|
||||
f"but received {type(persist_docs)}")
|
||||
f"but received {type(persist_docs)}"
|
||||
)
|
||||
|
||||
return persist_docs.get('relation', False)
|
||||
return persist_docs.get("relation", False)
|
||||
|
||||
def persist_column_docs(self) -> bool:
|
||||
persist_docs = self.get('persist_docs', default={})
|
||||
persist_docs = self.get("persist_docs", default={})
|
||||
if not isinstance(persist_docs, dict):
|
||||
raise_compiler_error(
|
||||
f"Invalid value provided for 'persist_docs'. Expected dict "
|
||||
f"but received {type(persist_docs)}")
|
||||
f"but received {type(persist_docs)}"
|
||||
)
|
||||
|
||||
return persist_docs.get('columns', False)
|
||||
return persist_docs.get("columns", False)
|
||||
|
||||
|
||||
# `adapter` implementations
|
||||
@@ -405,8 +391,7 @@ class ParseDatabaseWrapper(BaseDatabaseWrapper):
|
||||
"""
|
||||
|
||||
def __getattr__(self, name):
|
||||
override = (name in self._adapter._available_ and
|
||||
name in self._adapter._parse_replacements_)
|
||||
override = name in self._adapter._available_ and name in self._adapter._parse_replacements_
|
||||
|
||||
if override:
|
||||
return self._adapter._parse_replacements_[name]
|
||||
@@ -414,9 +399,7 @@ class ParseDatabaseWrapper(BaseDatabaseWrapper):
|
||||
return getattr(self._adapter, name)
|
||||
else:
|
||||
raise AttributeError(
|
||||
"'{}' object has no attribute '{}'".format(
|
||||
self.__class__.__name__, name
|
||||
)
|
||||
"'{}' object has no attribute '{}'".format(self.__class__.__name__, name)
|
||||
)
|
||||
|
||||
|
||||
@@ -430,17 +413,13 @@ class RuntimeDatabaseWrapper(BaseDatabaseWrapper):
|
||||
return getattr(self._adapter, name)
|
||||
else:
|
||||
raise AttributeError(
|
||||
"'{}' object has no attribute '{}'".format(
|
||||
self.__class__.__name__, name
|
||||
)
|
||||
"'{}' object has no attribute '{}'".format(self.__class__.__name__, name)
|
||||
)
|
||||
|
||||
|
||||
# `ref` implementations
|
||||
class ParseRefResolver(BaseRefResolver):
|
||||
def resolve(
|
||||
self, name: str, package: Optional[str] = None
|
||||
) -> RelationProxy:
|
||||
def resolve(self, name: str, package: Optional[str] = None) -> RelationProxy:
|
||||
self.model.refs.append(self._repack_args(name, package))
|
||||
|
||||
return self.Relation.create_from(self.config, self.model)
|
||||
@@ -450,9 +429,7 @@ ResolveRef = Union[Disabled, ManifestNode]
|
||||
|
||||
|
||||
class RuntimeRefResolver(BaseRefResolver):
|
||||
def resolve(
|
||||
self, target_name: str, target_package: Optional[str] = None
|
||||
) -> RelationProxy:
|
||||
def resolve(self, target_name: str, target_package: Optional[str] = None) -> RelationProxy:
|
||||
target_model = self.manifest.resolve_ref(
|
||||
target_name,
|
||||
target_package,
|
||||
@@ -470,22 +447,15 @@ class RuntimeRefResolver(BaseRefResolver):
|
||||
self.validate(target_model, target_name, target_package)
|
||||
return self.create_relation(target_model, target_name)
|
||||
|
||||
def create_relation(
|
||||
self, target_model: ManifestNode, name: str
|
||||
) -> RelationProxy:
|
||||
def create_relation(self, target_model: ManifestNode, name: str) -> RelationProxy:
|
||||
if target_model.is_ephemeral_model:
|
||||
self.model.set_cte(target_model.unique_id, None)
|
||||
return self.Relation.create_ephemeral_from_node(
|
||||
self.config, target_model
|
||||
)
|
||||
return self.Relation.create_ephemeral_from_node(self.config, target_model)
|
||||
else:
|
||||
return self.Relation.create_from(self.config, target_model)
|
||||
|
||||
def validate(
|
||||
self,
|
||||
resolved: ManifestNode,
|
||||
target_name: str,
|
||||
target_package: Optional[str]
|
||||
self, resolved: ManifestNode, target_name: str, target_package: Optional[str]
|
||||
) -> None:
|
||||
if resolved.unique_id not in self.model.depends_on.nodes:
|
||||
args = self._repack_args(target_name, target_package)
|
||||
@@ -501,16 +471,15 @@ class OperationRefResolver(RuntimeRefResolver):
|
||||
) -> None:
|
||||
pass
|
||||
|
||||
def create_relation(
|
||||
self, target_model: ManifestNode, name: str
|
||||
) -> RelationProxy:
|
||||
if target_model.is_ephemeral_model:
|
||||
def create_relation(self, target_model: ManifestNode, name: str) -> RelationProxy:
|
||||
if not hasattr(self.model, "set_cte") and target_model.is_ephemeral_model:
|
||||
# In operations, we can't ref() ephemeral nodes, because
|
||||
# ParsedMacros do not support set_cte
|
||||
raise_compiler_error(
|
||||
'Operations can not ref() ephemeral nodes, but {} is ephemeral'
|
||||
.format(target_model.name),
|
||||
self.model
|
||||
"Macros run as operations cannot ref() ephemeral nodes, but {} is ephemeral".format(
|
||||
target_model.name
|
||||
),
|
||||
self.model,
|
||||
)
|
||||
else:
|
||||
return super().create_relation(target_model, name)
|
||||
@@ -561,10 +530,7 @@ class ModelConfiguredVar(Var):
|
||||
if package_name != self._config.project_name:
|
||||
if package_name not in dependencies:
|
||||
# I don't think this is actually reachable
|
||||
raise_compiler_error(
|
||||
f'Node package named {package_name} not found!',
|
||||
self._node
|
||||
)
|
||||
raise_compiler_error(f"Node package named {package_name} not found!", self._node)
|
||||
yield dependencies[package_name]
|
||||
yield self._config
|
||||
|
||||
@@ -635,7 +601,7 @@ class OperationProvider(RuntimeProvider):
|
||||
ref = OperationRefResolver
|
||||
|
||||
|
||||
T = TypeVar('T')
|
||||
T = TypeVar("T")
|
||||
|
||||
|
||||
# Base context collection, used for parsing configs.
|
||||
@@ -650,9 +616,7 @@ class ProviderContext(ManifestContext):
|
||||
context_config: Optional[ContextConfig],
|
||||
) -> None:
|
||||
if provider is None:
|
||||
raise InternalException(
|
||||
f"Invalid provider given to context: {provider}"
|
||||
)
|
||||
raise InternalException(f"Invalid provider given to context: {provider}")
|
||||
# mypy appeasement - we know it'll be a RuntimeConfig
|
||||
self.config: RuntimeConfig
|
||||
self.model: Union[ParsedMacro, ManifestNode] = model
|
||||
@@ -662,16 +626,12 @@ class ProviderContext(ManifestContext):
|
||||
self.provider: Provider = provider
|
||||
self.adapter = get_adapter(self.config)
|
||||
# The macro namespace is used in creating the DatabaseWrapper
|
||||
self.db_wrapper = self.provider.DatabaseWrapper(
|
||||
self.adapter, self.namespace
|
||||
)
|
||||
self.db_wrapper = self.provider.DatabaseWrapper(self.adapter, self.namespace)
|
||||
|
||||
# This overrides the method in ManifestContext, and provides
|
||||
# a model, which the ManifestContext builder does not
|
||||
def _get_namespace_builder(self):
|
||||
internal_packages = get_adapter_package_names(
|
||||
self.config.credentials.type
|
||||
)
|
||||
internal_packages = get_adapter_package_names(self.config.credentials.type)
|
||||
return MacroNamespaceBuilder(
|
||||
self.config.project_name,
|
||||
self.search_package,
|
||||
@@ -690,19 +650,19 @@ class ProviderContext(ManifestContext):
|
||||
|
||||
@contextmember
|
||||
def store_result(
|
||||
self, name: str,
|
||||
response: Any,
|
||||
agate_table: Optional[agate.Table] = None
|
||||
self, name: str, response: Any, agate_table: Optional[agate.Table] = None
|
||||
) -> str:
|
||||
if agate_table is None:
|
||||
agate_table = agate_helper.empty_table()
|
||||
|
||||
self.sql_results[name] = AttrDict({
|
||||
'response': response,
|
||||
'data': agate_helper.as_matrix(agate_table),
|
||||
'table': agate_table
|
||||
})
|
||||
return ''
|
||||
self.sql_results[name] = AttrDict(
|
||||
{
|
||||
"response": response,
|
||||
"data": agate_helper.as_matrix(agate_table),
|
||||
"table": agate_table,
|
||||
}
|
||||
)
|
||||
return ""
|
||||
|
||||
@contextmember
|
||||
def store_raw_result(
|
||||
@@ -711,10 +671,9 @@ class ProviderContext(ManifestContext):
|
||||
message=Optional[str],
|
||||
code=Optional[str],
|
||||
rows_affected=Optional[str],
|
||||
agate_table: Optional[agate.Table] = None
|
||||
agate_table: Optional[agate.Table] = None,
|
||||
) -> str:
|
||||
response = AdapterResponse(
|
||||
_message=message, code=code, rows_affected=rows_affected)
|
||||
response = AdapterResponse(_message=message, code=code, rows_affected=rows_affected)
|
||||
return self.store_result(name, response, agate_table)
|
||||
|
||||
@contextproperty
|
||||
@@ -727,25 +686,24 @@ class ProviderContext(ManifestContext):
|
||||
elif value == arg:
|
||||
return
|
||||
raise ValidationException(
|
||||
'Expected value "{}" to be one of {}'
|
||||
.format(value, ','.join(map(str, args))))
|
||||
'Expected value "{}" to be one of {}'.format(value, ",".join(map(str, args)))
|
||||
)
|
||||
|
||||
return inner
|
||||
|
||||
return AttrDict({
|
||||
'any': validate_any,
|
||||
})
|
||||
return AttrDict(
|
||||
{
|
||||
"any": validate_any,
|
||||
}
|
||||
)
|
||||
|
||||
@contextmember
|
||||
def write(self, payload: str) -> str:
|
||||
# macros/source defs aren't 'writeable'.
|
||||
if isinstance(self.model, (ParsedMacro, ParsedSourceDefinition)):
|
||||
raise_compiler_error(
|
||||
'cannot "write" macros or sources'
|
||||
)
|
||||
self.model.build_path = self.model.write_node(
|
||||
self.config.target_path, 'run', payload
|
||||
)
|
||||
return ''
|
||||
raise_compiler_error('cannot "write" macros or sources')
|
||||
self.model.build_path = self.model.write_node(self.config.target_path, "run", payload)
|
||||
return ""
|
||||
|
||||
@contextmember
|
||||
def render(self, string: str) -> str:
|
||||
@@ -758,20 +716,15 @@ class ProviderContext(ManifestContext):
|
||||
try:
|
||||
return func(*args, **kwargs)
|
||||
except Exception:
|
||||
raise_compiler_error(
|
||||
message_if_exception, self.model
|
||||
)
|
||||
raise_compiler_error(message_if_exception, self.model)
|
||||
|
||||
@contextmember
|
||||
def load_agate_table(self) -> agate.Table:
|
||||
if not isinstance(self.model, (ParsedSeedNode, CompiledSeedNode)):
|
||||
raise_compiler_error(
|
||||
'can only load_agate_table for seeds (got a {})'
|
||||
.format(self.model.resource_type)
|
||||
"can only load_agate_table for seeds (got a {})".format(self.model.resource_type)
|
||||
)
|
||||
path = os.path.join(
|
||||
self.model.root_path, self.model.original_file_path
|
||||
)
|
||||
path = os.path.join(self.model.root_path, self.model.original_file_path)
|
||||
column_types = self.model.config.column_types
|
||||
try:
|
||||
table = agate_helper.from_csv(path, text_columns=column_types)
|
||||
@@ -819,17 +772,13 @@ class ProviderContext(ManifestContext):
|
||||
|
||||
select * from {{ ref('package_name', 'model_name') }}"
|
||||
"""
|
||||
return self.provider.ref(
|
||||
self.db_wrapper, self.model, self.config, self.manifest
|
||||
)
|
||||
return self.provider.ref(self.db_wrapper, self.model, self.config, self.manifest)
|
||||
|
||||
@contextproperty
|
||||
def source(self) -> Callable:
|
||||
return self.provider.source(
|
||||
self.db_wrapper, self.model, self.config, self.manifest
|
||||
)
|
||||
return self.provider.source(self.db_wrapper, self.model, self.config, self.manifest)
|
||||
|
||||
@contextproperty('config')
|
||||
@contextproperty("config")
|
||||
def ctx_config(self) -> Config:
|
||||
"""The `config` variable exists to handle end-user configuration for
|
||||
custom materializations. Configs like `unique_key` can be implemented
|
||||
@@ -1001,7 +950,7 @@ class ProviderContext(ManifestContext):
|
||||
node=self.model,
|
||||
)
|
||||
|
||||
@contextproperty('adapter')
|
||||
@contextproperty("adapter")
|
||||
def ctx_adapter(self) -> BaseDatabaseWrapper:
|
||||
"""`adapter` is a wrapper around the internal database adapter used by
|
||||
dbt. It allows users to make calls to the database in their dbt models.
|
||||
@@ -1013,8 +962,8 @@ class ProviderContext(ManifestContext):
|
||||
@contextproperty
|
||||
def api(self) -> Dict[str, Any]:
|
||||
return {
|
||||
'Relation': self.db_wrapper.Relation,
|
||||
'Column': self.adapter.Column,
|
||||
"Relation": self.db_wrapper.Relation,
|
||||
"Column": self.adapter.Column,
|
||||
}
|
||||
|
||||
@contextproperty
|
||||
@@ -1132,7 +1081,7 @@ class ProviderContext(ManifestContext):
|
||||
""" # noqa
|
||||
return self.manifest.flat_graph
|
||||
|
||||
@contextproperty('model')
|
||||
@contextproperty("model")
|
||||
def ctx_model(self) -> Dict[str, Any]:
|
||||
return self.model.to_dict(omit_none=True)
|
||||
|
||||
@@ -1154,15 +1103,13 @@ class ProviderContext(ManifestContext):
|
||||
|
||||
@contextmember
|
||||
def adapter_macro(self, name: str, *args, **kwargs):
|
||||
"""This was deprecated in v0.18 in favor of adapter.dispatch
|
||||
"""
|
||||
"""This was deprecated in v0.18 in favor of adapter.dispatch"""
|
||||
msg = (
|
||||
'The "adapter_macro" macro has been deprecated. Instead, use '
|
||||
'the `adapter.dispatch` method to find a macro and call the '
|
||||
'result. For more information, see: '
|
||||
'https://docs.getdbt.com/reference/dbt-jinja-functions/dispatch)'
|
||||
' adapter_macro was called for: {macro_name}'
|
||||
.format(macro_name=name)
|
||||
"the `adapter.dispatch` method to find a macro and call the "
|
||||
"result. For more information, see: "
|
||||
"https://docs.getdbt.com/reference/dbt-jinja-functions/dispatch)"
|
||||
" adapter_macro was called for: {macro_name}".format(macro_name=name)
|
||||
)
|
||||
raise CompilationException(msg)
|
||||
|
||||
@@ -1184,12 +1131,15 @@ class ProviderContext(ManifestContext):
|
||||
if return_value is not None:
|
||||
# Save the env_var value in the manifest and the var name in the source_file.
|
||||
# If this is compiling, do not save because it's irrelevant to parsing.
|
||||
if self.model and not hasattr(self.model, 'compiled'):
|
||||
if self.model and not hasattr(self.model, "compiled"):
|
||||
self.manifest.env_vars[var] = return_value
|
||||
source_file = self.manifest.files[self.model.file_id]
|
||||
# Schema files should never get here
|
||||
if source_file.parse_file_type != 'schema':
|
||||
source_file.env_vars.append(var)
|
||||
# hooks come from dbt_project.yml which doesn't have a real file_id
|
||||
if self.model.file_id in self.manifest.files:
|
||||
source_file = self.manifest.files[self.model.file_id]
|
||||
# Schema files should never get here
|
||||
if source_file.parse_file_type != "schema":
|
||||
# TODO CT-211
|
||||
source_file.env_vars.append(var) # type: ignore[union-attr]
|
||||
return return_value
|
||||
else:
|
||||
msg = f"Env var required but not provided: '{var}'"
|
||||
@@ -1199,10 +1149,10 @@ class ProviderContext(ManifestContext):
|
||||
class MacroContext(ProviderContext):
|
||||
"""Internally, macros can be executed like nodes, with some restrictions:
|
||||
|
||||
- they don't have have all values available that nodes do:
|
||||
- 'this', 'pre_hooks', 'post_hooks', and 'sql' are missing
|
||||
- 'schema' does not use any 'model' information
|
||||
- they can't be configured with config() directives
|
||||
- they don't have all values available that nodes do:
|
||||
- 'this', 'pre_hooks', 'post_hooks', and 'sql' are missing
|
||||
- 'schema' does not use any 'model' information
|
||||
- they can't be configured with config() directives
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
@@ -1229,35 +1179,34 @@ class ModelContext(ProviderContext):
|
||||
def pre_hooks(self) -> List[Dict[str, Any]]:
|
||||
if self.model.resource_type in [NodeType.Source, NodeType.Test]:
|
||||
return []
|
||||
# TODO CT-211
|
||||
return [
|
||||
h.to_dict(omit_none=True) for h in self.model.config.pre_hook
|
||||
h.to_dict(omit_none=True) for h in self.model.config.pre_hook # type: ignore[union-attr] # noqa
|
||||
]
|
||||
|
||||
@contextproperty
|
||||
def post_hooks(self) -> List[Dict[str, Any]]:
|
||||
if self.model.resource_type in [NodeType.Source, NodeType.Test]:
|
||||
return []
|
||||
# TODO CT-211
|
||||
return [
|
||||
h.to_dict(omit_none=True) for h in self.model.config.post_hook
|
||||
h.to_dict(omit_none=True) for h in self.model.config.post_hook # type: ignore[union-attr] # noqa
|
||||
]
|
||||
|
||||
@contextproperty
|
||||
def sql(self) -> Optional[str]:
|
||||
if getattr(self.model, 'extra_ctes_injected', None):
|
||||
return self.model.compiled_sql
|
||||
if getattr(self.model, "extra_ctes_injected", None):
|
||||
# TODO CT-211
|
||||
return self.model.compiled_sql # type: ignore[union-attr]
|
||||
return None
|
||||
|
||||
@contextproperty
|
||||
def database(self) -> str:
|
||||
return getattr(
|
||||
self.model, 'database', self.config.credentials.database
|
||||
)
|
||||
return getattr(self.model, "database", self.config.credentials.database)
|
||||
|
||||
@contextproperty
|
||||
def schema(self) -> str:
|
||||
return getattr(
|
||||
self.model, 'schema', self.config.credentials.schema
|
||||
)
|
||||
return getattr(self.model, "schema", self.config.credentials.schema)
|
||||
|
||||
@contextproperty
|
||||
def this(self) -> Optional[RelationProxy]:
|
||||
@@ -1305,9 +1254,7 @@ def generate_parser_model_context(
|
||||
# The __init__ method of ModelContext also initializes
|
||||
# a ManifestContext object which creates a MacroNamespaceBuilder
|
||||
# which adds every macro in the Manifest.
|
||||
ctx = ModelContext(
|
||||
model, config, manifest, ParseProvider(), context_config
|
||||
)
|
||||
ctx = ModelContext(model, config, manifest, ParseProvider(), context_config)
|
||||
# The 'to_dict' method in ManifestContext moves all of the macro names
|
||||
# in the macro 'namespace' up to top level keys
|
||||
return ctx.to_dict()
|
||||
@@ -1318,9 +1265,7 @@ def generate_generate_name_macro_context(
|
||||
config: RuntimeConfig,
|
||||
manifest: Manifest,
|
||||
) -> Dict[str, Any]:
|
||||
ctx = MacroContext(
|
||||
macro, config, manifest, GenerateNameProvider(), None
|
||||
)
|
||||
ctx = MacroContext(macro, config, manifest, GenerateNameProvider(), None)
|
||||
return ctx.to_dict()
|
||||
|
||||
|
||||
@@ -1329,9 +1274,7 @@ def generate_runtime_model_context(
|
||||
config: RuntimeConfig,
|
||||
manifest: Manifest,
|
||||
) -> Dict[str, Any]:
|
||||
ctx = ModelContext(
|
||||
model, config, manifest, RuntimeProvider(), None
|
||||
)
|
||||
ctx = ModelContext(model, config, manifest, RuntimeProvider(), None)
|
||||
return ctx.to_dict()
|
||||
|
||||
|
||||
@@ -1341,9 +1284,16 @@ def generate_runtime_macro_context(
|
||||
manifest: Manifest,
|
||||
package_name: Optional[str],
|
||||
) -> Dict[str, Any]:
|
||||
ctx = MacroContext(
|
||||
macro, config, manifest, OperationProvider(), package_name
|
||||
)
|
||||
ctx = MacroContext(macro, config, manifest, OperationProvider(), package_name)
|
||||
return ctx.to_dict()
|
||||
|
||||
|
||||
def generate_runtime_sql_operation_context(
|
||||
model: Union[CompiledSqlNode, CompiledRPCNode],
|
||||
config: RuntimeConfig,
|
||||
manifest: Manifest,
|
||||
) -> Dict[str, Any]:
|
||||
ctx = ModelContext(model, config, manifest, OperationProvider(), None)
|
||||
return ctx.to_dict()
|
||||
|
||||
|
||||
@@ -1352,18 +1302,17 @@ class ExposureRefResolver(BaseResolver):
|
||||
if len(args) not in (1, 2):
|
||||
ref_invalid_args(self.model, args)
|
||||
self.model.refs.append(list(args))
|
||||
return ''
|
||||
return ""
|
||||
|
||||
|
||||
class ExposureSourceResolver(BaseResolver):
|
||||
def __call__(self, *args) -> str:
|
||||
if len(args) != 2:
|
||||
raise_compiler_error(
|
||||
f"source() takes exactly two arguments ({len(args)} given)",
|
||||
self.model
|
||||
f"source() takes exactly two arguments ({len(args)} given)", self.model
|
||||
)
|
||||
self.model.sources.append(list(args))
|
||||
return ''
|
||||
return ""
|
||||
|
||||
|
||||
def generate_parse_exposure(
|
||||
@@ -1374,18 +1323,18 @@ def generate_parse_exposure(
|
||||
) -> Dict[str, Any]:
|
||||
project = config.load_dependencies()[package_name]
|
||||
return {
|
||||
'ref': ExposureRefResolver(
|
||||
"ref": ExposureRefResolver(
|
||||
None,
|
||||
exposure,
|
||||
project,
|
||||
manifest,
|
||||
),
|
||||
'source': ExposureSourceResolver(
|
||||
"source": ExposureSourceResolver(
|
||||
None,
|
||||
exposure,
|
||||
project,
|
||||
manifest,
|
||||
)
|
||||
),
|
||||
}
|
||||
|
||||
|
||||
@@ -1400,13 +1349,13 @@ class MetricRefResolver(BaseResolver):
|
||||
ref_invalid_args(self.model, args)
|
||||
self.validate_args(name, package)
|
||||
self.model.refs.append(list(args))
|
||||
return ''
|
||||
return ""
|
||||
|
||||
def validate_args(self, name, package):
|
||||
if not isinstance(name, str):
|
||||
raise ParsingException(
|
||||
f'In a metrics section in {self.model.original_file_path} '
|
||||
f'the name argument to ref() must be a string'
|
||||
f"In a metrics section in {self.model.original_file_path} "
|
||||
f"the name argument to ref() must be a string"
|
||||
)
|
||||
|
||||
|
||||
@@ -1418,7 +1367,7 @@ def generate_parse_metrics(
|
||||
) -> Dict[str, Any]:
|
||||
project = config.load_dependencies()[package_name]
|
||||
return {
|
||||
'ref': MetricRefResolver(
|
||||
"ref": MetricRefResolver(
|
||||
None,
|
||||
metric,
|
||||
project,
|
||||
@@ -1448,9 +1397,7 @@ class TestContext(ProviderContext):
|
||||
self._build_test_namespace()
|
||||
# We need to rebuild this because it's already been built by
|
||||
# the ProviderContext with the wrong namespace.
|
||||
self.db_wrapper = self.provider.DatabaseWrapper(
|
||||
self.adapter, self.namespace
|
||||
)
|
||||
self.db_wrapper = self.provider.DatabaseWrapper(self.adapter, self.namespace)
|
||||
|
||||
def _build_namespace(self):
|
||||
return {}
|
||||
@@ -1463,7 +1410,7 @@ class TestContext(ProviderContext):
|
||||
depends_on_macros = []
|
||||
# all generic tests use a macro named 'get_where_subquery' to wrap 'model' arg
|
||||
# see generic_test_builders.build_model_str
|
||||
get_where_subquery = self.macro_resolver.macros_by_name.get('get_where_subquery')
|
||||
get_where_subquery = self.macro_resolver.macros_by_name.get("get_where_subquery")
|
||||
if get_where_subquery:
|
||||
depends_on_macros.append(get_where_subquery.unique_id)
|
||||
if self.model.depends_on and self.model.depends_on.macros:
|
||||
@@ -1475,8 +1422,7 @@ class TestContext(ProviderContext):
|
||||
depends_on_macros.extend(lookup_macro.depends_on.macros)
|
||||
|
||||
macro_namespace = TestMacroNamespace(
|
||||
self.macro_resolver, self._ctx, self.model, self.thread_ctx,
|
||||
depends_on_macros
|
||||
self.macro_resolver, self._ctx, self.model, self.thread_ctx, depends_on_macros
|
||||
)
|
||||
self.namespace = macro_namespace
|
||||
|
||||
@@ -1495,10 +1441,13 @@ class TestContext(ProviderContext):
|
||||
if self.model:
|
||||
self.manifest.env_vars[var] = return_value
|
||||
# the "model" should only be test nodes, but just in case, check
|
||||
if self.model.resource_type == NodeType.Test and self.model.file_key_name:
|
||||
# TODO CT-211
|
||||
if self.model.resource_type == NodeType.Test and self.model.file_key_name: # type: ignore[union-attr] # noqa
|
||||
source_file = self.manifest.files[self.model.file_id]
|
||||
(yaml_key, name) = self.model.file_key_name.split('.')
|
||||
source_file.add_env_var(var, yaml_key, name)
|
||||
# TODO CT-211
|
||||
(yaml_key, name) = self.model.file_key_name.split(".") # type: ignore[union-attr] # noqa
|
||||
# TODO CT-211
|
||||
source_file.add_env_var(var, yaml_key, name) # type: ignore[union-attr]
|
||||
return return_value
|
||||
else:
|
||||
msg = f"Env var required but not provided: '{var}'"
|
||||
@@ -1510,12 +1459,9 @@ def generate_test_context(
|
||||
config: RuntimeConfig,
|
||||
manifest: Manifest,
|
||||
context_config: ContextConfig,
|
||||
macro_resolver: MacroResolver
|
||||
macro_resolver: MacroResolver,
|
||||
) -> Dict[str, Any]:
|
||||
ctx = TestContext(
|
||||
model, config, manifest, ParseProvider(), context_config,
|
||||
macro_resolver
|
||||
)
|
||||
ctx = TestContext(model, config, manifest, ParseProvider(), context_config, macro_resolver)
|
||||
# The 'to_dict' method in ManifestContext moves all of the macro names
|
||||
# in the macro 'namespace' up to top level keys
|
||||
return ctx.to_dict()
|
||||
|
||||
@@ -4,6 +4,7 @@ from typing import Any, Dict, Optional
from .base import BaseContext, contextmember

from dbt.exceptions import raise_parsing_error
from dbt.logger import SECRET_ENV_PREFIX


class SecretContext(BaseContext):
@@ -27,7 +28,11 @@ class SecretContext(BaseContext):
return_value = default

if return_value is not None:
self.env_vars[var] = return_value
# do not save secret environment variables
if not var.startswith(SECRET_ENV_PREFIX):
self.env_vars[var] = return_value

# return the value even if its a secret
return return_value
else:
msg = f"Env var required but not provided: '{var}'"

@@ -2,9 +2,7 @@ from typing import Any, Dict

from dbt.contracts.connection import HasCredentials

from dbt.context.base import (
BaseContext, contextproperty
)
from dbt.context.base import BaseContext, contextproperty


class TargetContext(BaseContext):
@@ -78,8 +76,6 @@ class TargetContext(BaseContext):
return self.config.to_target_dict()


def generate_target_context(
config: HasCredentials, cli_vars: Dict[str, Any]
) -> Dict[str, Any]:
def generate_target_context(config: HasCredentials, cli_vars: Dict[str, Any]) -> Dict[str, Any]:
ctx = TargetContext(config, cli_vars)
return ctx.to_dict()

core/dbt/contracts/README.md (new file)
@@ -0,0 +1 @@
# Contracts README
@@ -3,7 +3,14 @@ import itertools
import hashlib
from dataclasses import dataclass, field
from typing import (
Any, ClassVar, Dict, Tuple, Iterable, Optional, List, Callable,
Any,
ClassVar,
Dict,
Tuple,
Iterable,
Optional,
List,
Callable,
)
from dbt.exceptions import InternalException
from dbt.utils import translate_aliases
@@ -11,18 +18,22 @@ from dbt.events.functions import fire_event
from dbt.events.types import NewConnectionOpening
from typing_extensions import Protocol
from dbt.dataclass_schema import (
dbtClassMixin, StrEnum, ExtensibleDbtClassMixin, HyphenatedDbtClassMixin,
ValidatedStringMixin, register_pattern
dbtClassMixin,
StrEnum,
ExtensibleDbtClassMixin,
HyphenatedDbtClassMixin,
ValidatedStringMixin,
register_pattern,
)
from dbt.contracts.util import Replaceable


class Identifier(ValidatedStringMixin):
ValidationRegex = r'^[A-Za-z_][A-Za-z0-9_]+$'
ValidationRegex = r"^[A-Za-z_][A-Za-z0-9_]+$"


# we need register_pattern for jsonschema validation
register_pattern(Identifier, r'^[A-Za-z_][A-Za-z0-9_]+$')
register_pattern(Identifier, r"^[A-Za-z_][A-Za-z0-9_]+$")


@dataclass
@@ -36,10 +47,10 @@ class AdapterResponse(dbtClassMixin):
|
||||
|
||||
|
||||
class ConnectionState(StrEnum):
|
||||
INIT = 'init'
|
||||
OPEN = 'open'
|
||||
CLOSED = 'closed'
|
||||
FAIL = 'fail'
|
||||
INIT = "init"
|
||||
OPEN = "open"
|
||||
CLOSED = "closed"
|
||||
FAIL = "fail"
|
||||
|
||||
|
||||
@dataclass(init=False)
|
||||
@@ -83,8 +94,7 @@ class Connection(ExtensibleDbtClassMixin, Replaceable):
|
||||
self._handle.resolve(self)
|
||||
except RecursionError as exc:
|
||||
raise InternalException(
|
||||
"A connection's open() method attempted to read the "
|
||||
"handle value"
|
||||
"A connection's open() method attempted to read the " "handle value"
|
||||
) from exc
|
||||
return self._handle
|
||||
|
||||
@@ -94,7 +104,7 @@ class Connection(ExtensibleDbtClassMixin, Replaceable):
|
||||
|
||||
|
||||
class LazyHandle:
|
||||
"""Opener must be a callable that takes a Connection object and opens the
|
||||
"""The opener must be a callable that takes a Connection object and opens the
|
||||
connection, updating the handle on the Connection.
|
||||
"""
|
||||
|
||||
@@ -111,20 +121,14 @@ class LazyHandle:
|
||||
# for why we have type: ignore. Maybe someday dataclasses + abstract classes
|
||||
# will work.
|
||||
@dataclass # type: ignore
|
||||
class Credentials(
|
||||
ExtensibleDbtClassMixin,
|
||||
Replaceable,
|
||||
metaclass=abc.ABCMeta
|
||||
):
|
||||
class Credentials(ExtensibleDbtClassMixin, Replaceable, metaclass=abc.ABCMeta):
|
||||
database: str
|
||||
schema: str
|
||||
_ALIASES: ClassVar[Dict[str, str]] = field(default={}, init=False)
|
||||
|
||||
@abc.abstractproperty
|
||||
def type(self) -> str:
|
||||
raise NotImplementedError(
|
||||
'type not implemented for base credentials class'
|
||||
)
|
||||
raise NotImplementedError("type not implemented for base credentials class")
|
||||
|
||||
@property
|
||||
def unique_field(self) -> str:
|
||||
@@ -132,25 +136,18 @@ class Credentials(
|
||||
Return the field from Credentials that can uniquely identify
|
||||
one team/organization using this adapter
|
||||
"""
|
||||
raise NotImplementedError(
|
||||
'unique_field not implemented for base credentials class'
|
||||
)
|
||||
raise NotImplementedError("unique_field not implemented for base credentials class")
|
||||
|
||||
def hashed_unique_field(self) -> str:
|
||||
return hashlib.md5(self.unique_field.encode('utf-8')).hexdigest()
|
||||
return hashlib.md5(self.unique_field.encode("utf-8")).hexdigest()
|
||||
|
||||
def connection_info(
|
||||
self, *, with_aliases: bool = False
|
||||
) -> Iterable[Tuple[str, Any]]:
|
||||
"""Return an ordered iterator of key/value pairs for pretty-printing.
|
||||
"""
|
||||
def connection_info(self, *, with_aliases: bool = False) -> Iterable[Tuple[str, Any]]:
|
||||
"""Return an ordered iterator of key/value pairs for pretty-printing."""
|
||||
as_dict = self.to_dict(omit_none=False)
|
||||
connection_keys = set(self._connection_keys())
|
||||
aliases: List[str] = []
|
||||
if with_aliases:
|
||||
aliases = [
|
||||
k for k, v in self._ALIASES.items() if v in connection_keys
|
||||
]
|
||||
aliases = [k for k, v in self._ALIASES.items() if v in connection_keys]
|
||||
for key in itertools.chain(self._connection_keys(), aliases):
|
||||
if key in as_dict:
|
||||
yield key, as_dict[key]
|
||||
@@ -166,19 +163,19 @@ class Credentials(
|
||||
return data
|
||||
|
||||
@classmethod
|
||||
def translate_aliases(
|
||||
cls, kwargs: Dict[str, Any], recurse: bool = False
|
||||
) -> Dict[str, Any]:
|
||||
def translate_aliases(cls, kwargs: Dict[str, Any], recurse: bool = False) -> Dict[str, Any]:
|
||||
return translate_aliases(kwargs, cls._ALIASES, recurse)
|
||||
|
||||
def __post_serialize__(self, dct):
|
||||
# no super() -- do we need it?
|
||||
if self._ALIASES:
|
||||
dct.update({
|
||||
new_name: dct[canonical_name]
|
||||
for new_name, canonical_name in self._ALIASES.items()
|
||||
if canonical_name in dct
|
||||
})
|
||||
dct.update(
|
||||
{
|
||||
new_name: dct[canonical_name]
|
||||
for new_name, canonical_name in self._ALIASES.items()
|
||||
if canonical_name in dct
|
||||
}
|
||||
)
|
||||
return dct
|
||||
|
||||
|
||||
@@ -197,10 +194,10 @@ class HasCredentials(Protocol):
|
||||
threads: int
|
||||
|
||||
def to_target_dict(self):
|
||||
raise NotImplementedError('to_target_dict not implemented')
|
||||
raise NotImplementedError("to_target_dict not implemented")
|
||||
|
||||
|
||||
DEFAULT_QUERY_COMMENT = '''
|
||||
DEFAULT_QUERY_COMMENT = """
|
||||
{%- set comment_dict = {} -%}
|
||||
{%- do comment_dict.update(
|
||||
app='dbt',
|
||||
@@ -217,7 +214,7 @@ DEFAULT_QUERY_COMMENT = '''
|
||||
{%- do comment_dict.update(connection_name=connection_name) -%}
|
||||
{%- endif -%}
|
||||
{{ return(tojson(comment_dict)) }}
|
||||
'''
|
||||
"""
|
||||
|
||||
|
||||
@dataclass
|
||||
|
||||
@@ -10,33 +10,33 @@ from .util import SourceKey
|
||||
|
||||
|
||||
MAXIMUM_SEED_SIZE = 1 * 1024 * 1024
|
||||
MAXIMUM_SEED_SIZE_NAME = '1MB'
|
||||
MAXIMUM_SEED_SIZE_NAME = "1MB"
|
||||
|
||||
|
||||
class ParseFileType(StrEnum):
|
||||
Macro = 'macro'
|
||||
Model = 'model'
|
||||
Snapshot = 'snapshot'
|
||||
Analysis = 'analysis'
|
||||
SingularTest = 'singular_test'
|
||||
GenericTest = 'generic_test'
|
||||
Seed = 'seed'
|
||||
Documentation = 'docs'
|
||||
Schema = 'schema'
|
||||
Hook = 'hook' # not a real filetype, from dbt_project.yml
|
||||
Macro = "macro"
|
||||
Model = "model"
|
||||
Snapshot = "snapshot"
|
||||
Analysis = "analysis"
|
||||
SingularTest = "singular_test"
|
||||
GenericTest = "generic_test"
|
||||
Seed = "seed"
|
||||
Documentation = "docs"
|
||||
Schema = "schema"
|
||||
Hook = "hook" # not a real filetype, from dbt_project.yml
|
||||
|
||||
|
||||
parse_file_type_to_parser = {
|
||||
ParseFileType.Macro: 'MacroParser',
|
||||
ParseFileType.Model: 'ModelParser',
|
||||
ParseFileType.Snapshot: 'SnapshotParser',
|
||||
ParseFileType.Analysis: 'AnalysisParser',
|
||||
ParseFileType.SingularTest: 'SingularTestParser',
|
||||
ParseFileType.GenericTest: 'GenericTestParser',
|
||||
ParseFileType.Seed: 'SeedParser',
|
||||
ParseFileType.Documentation: 'DocumentationParser',
|
||||
ParseFileType.Schema: 'SchemaParser',
|
||||
ParseFileType.Hook: 'HookParser',
|
||||
ParseFileType.Macro: "MacroParser",
|
||||
ParseFileType.Model: "ModelParser",
|
||||
ParseFileType.Snapshot: "SnapshotParser",
|
||||
ParseFileType.Analysis: "AnalysisParser",
|
||||
ParseFileType.SingularTest: "SingularTestParser",
|
||||
ParseFileType.GenericTest: "GenericTestParser",
|
||||
ParseFileType.Seed: "SeedParser",
|
||||
ParseFileType.Documentation: "DocumentationParser",
|
||||
ParseFileType.Schema: "SchemaParser",
|
||||
ParseFileType.Hook: "HookParser",
|
||||
}
|
||||
|
||||
|
||||
@@ -55,9 +55,7 @@ class FilePath(dbtClassMixin):
@property
def full_path(self) -> str:
# useful for symlink preservation
return os.path.join(
self.project_root, self.searched_path, self.relative_path
)
return os.path.join(self.project_root, self.searched_path, self.relative_path)

@property
def absolute_path(self) -> str:
@@ -67,13 +65,10 @@ class FilePath(dbtClassMixin):
def original_file_path(self) -> str:
# this is mostly used for reporting errors. It doesn't show the project
# name, should it?
return os.path.join(
self.searched_path, self.relative_path
)
return os.path.join(self.searched_path, self.relative_path)

def seed_too_large(self) -> bool:
"""Return whether the file this represents is over the seed size limit
"""
"""Return whether the file this represents is over the seed size limit"""
return os.stat(self.full_path).st_size > MAXIMUM_SEED_SIZE


@@ -84,35 +79,35 @@ class FileHash(dbtClassMixin):
|
||||
|
||||
@classmethod
|
||||
def empty(cls):
|
||||
return FileHash(name='none', checksum='')
|
||||
return FileHash(name="none", checksum="")
|
||||
|
||||
@classmethod
|
||||
def path(cls, path: str):
|
||||
return FileHash(name='path', checksum=path)
|
||||
return FileHash(name="path", checksum=path)
|
||||
|
||||
def __eq__(self, other):
|
||||
if not isinstance(other, FileHash):
|
||||
return NotImplemented
|
||||
|
||||
if self.name == 'none' or self.name != other.name:
|
||||
if self.name == "none" or self.name != other.name:
|
||||
return False
|
||||
|
||||
return self.checksum == other.checksum
|
||||
|
||||
def compare(self, contents: str) -> bool:
|
||||
"""Compare the file contents with the given hash"""
|
||||
if self.name == 'none':
|
||||
if self.name == "none":
|
||||
return False
|
||||
|
||||
return self.from_contents(contents, name=self.name) == self.checksum
|
||||
|
||||
@classmethod
|
||||
def from_contents(cls, contents: str, name='sha256') -> 'FileHash':
|
||||
def from_contents(cls, contents: str, name="sha256") -> "FileHash":
|
||||
"""Create a file hash from the given file contents. The hash is always
|
||||
the utf-8 encoding of the contents given, because dbt only reads files
|
||||
as utf-8.
|
||||
"""
|
||||
data = contents.encode('utf-8')
|
||||
data = contents.encode("utf-8")
|
||||
checksum = hashlib.new(name, data).hexdigest()
|
||||
return cls(name=name, checksum=checksum)
|
||||
|
||||
@@ -121,28 +116,29 @@ class FileHash(dbtClassMixin):
|
||||
class RemoteFile(dbtClassMixin):
|
||||
@property
|
||||
def searched_path(self) -> str:
|
||||
return 'from remote system'
|
||||
return "from remote system"
|
||||
|
||||
@property
|
||||
def relative_path(self) -> str:
|
||||
return 'from remote system'
|
||||
return "from remote system"
|
||||
|
||||
@property
|
||||
def absolute_path(self) -> str:
|
||||
return 'from remote system'
|
||||
return "from remote system"
|
||||
|
||||
@property
|
||||
def original_file_path(self):
|
||||
return 'from remote system'
|
||||
return "from remote system"
|
||||
|
||||
@property
|
||||
def modification_time(self):
|
||||
return 'from remote system'
|
||||
return "from remote system"
|
||||
|
||||
|
||||
@dataclass
|
||||
class BaseSourceFile(dbtClassMixin, SerializableType):
|
||||
"""Define a source file in dbt"""
|
||||
|
||||
path: Union[FilePath, RemoteFile] # the path information
|
||||
checksum: FileHash
|
||||
# Seems like knowing which project the file came from would be useful
|
||||
@@ -157,7 +153,7 @@ class BaseSourceFile(dbtClassMixin, SerializableType):
|
||||
def file_id(self):
|
||||
if isinstance(self.path, RemoteFile):
|
||||
return None
|
||||
return f'{self.project_name}://{self.path.original_file_path}'
|
||||
return f"{self.project_name}://{self.path.original_file_path}"
|
||||
|
||||
def _serialize(self):
|
||||
dct = self.to_dict()
|
||||
@@ -165,7 +161,7 @@ class BaseSourceFile(dbtClassMixin, SerializableType):
|
||||
|
||||
@classmethod
|
||||
def _deserialize(cls, dct: Dict[str, int]):
|
||||
if dct['parse_file_type'] == 'schema':
|
||||
if dct["parse_file_type"] == "schema":
|
||||
sf = SchemaSourceFile.from_dict(dct)
|
||||
else:
|
||||
sf = SourceFile.from_dict(dct)
|
||||
@@ -180,8 +176,8 @@ class BaseSourceFile(dbtClassMixin, SerializableType):
|
||||
del dct[key]
|
||||
# remove contents. Schema files will still have 'dict_from_yaml'
|
||||
# from the contents
|
||||
if 'contents' in dct:
|
||||
del dct['contents']
|
||||
if "contents" in dct:
|
||||
del dct["contents"]
|
||||
return dct
|
||||
|
||||
|
||||
@@ -193,10 +189,10 @@ class SourceFile(BaseSourceFile):
|
||||
env_vars: List[str] = field(default_factory=list)
|
||||
|
||||
@classmethod
|
||||
def big_seed(cls, path: FilePath) -> 'SourceFile':
|
||||
def big_seed(cls, path: FilePath) -> "SourceFile":
|
||||
"""Parse seeds over the size limit with just the path"""
|
||||
self = cls(path=path, checksum=FileHash.path(path.original_file_path))
|
||||
self.contents = ''
|
||||
self.contents = ""
|
||||
return self
|
||||
|
||||
def add_node(self, value):
|
||||
@@ -206,7 +202,7 @@ class SourceFile(BaseSourceFile):
|
||||
# TODO: do this a different way. This remote file kludge isn't going
|
||||
# to work long term
|
||||
@classmethod
|
||||
def remote(cls, contents: str, project_name: str) -> 'SourceFile':
|
||||
def remote(cls, contents: str, project_name: str) -> "SourceFile":
|
||||
self = cls(
|
||||
path=RemoteFile(),
|
||||
checksum=FileHash.from_contents(contents),
|
||||
@@ -255,7 +251,7 @@ class SchemaSourceFile(BaseSourceFile):
|
||||
def __post_serialize__(self, dct):
|
||||
dct = super().__post_serialize__(dct)
|
||||
# Remove partial parsing specific data
|
||||
for key in ('pp_test_index', 'pp_dict'):
|
||||
for key in ("pp_test_index", "pp_dict"):
|
||||
if key in dct:
|
||||
del dct[key]
|
||||
return dct
|
||||
@@ -264,8 +260,8 @@ class SchemaSourceFile(BaseSourceFile):
|
||||
self.node_patches.append(unique_id)
|
||||
|
||||
def add_test(self, node_unique_id, test_from):
|
||||
name = test_from['name']
|
||||
key = test_from['key']
|
||||
name = test_from["name"]
|
||||
key = test_from["key"]
|
||||
if key not in self.tests:
|
||||
self.tests[key] = {}
|
||||
if name not in self.tests[key]:
|
||||
|
||||
@@ -60,49 +60,47 @@ class CompiledNode(ParsedNode, CompiledNodeMixin):
|
||||
|
||||
def __post_serialize__(self, dct):
|
||||
dct = super().__post_serialize__(dct)
|
||||
if '_pre_injected_sql' in dct:
|
||||
del dct['_pre_injected_sql']
|
||||
if "_pre_injected_sql" in dct:
|
||||
del dct["_pre_injected_sql"]
|
||||
return dct
|
||||
|
||||
|
||||
@dataclass
|
||||
class CompiledAnalysisNode(CompiledNode):
|
||||
resource_type: NodeType = field(metadata={'restrict': [NodeType.Analysis]})
|
||||
resource_type: NodeType = field(metadata={"restrict": [NodeType.Analysis]})
|
||||
|
||||
|
||||
@dataclass
|
||||
class CompiledHookNode(CompiledNode):
|
||||
resource_type: NodeType = field(
|
||||
metadata={'restrict': [NodeType.Operation]}
|
||||
)
|
||||
resource_type: NodeType = field(metadata={"restrict": [NodeType.Operation]})
|
||||
index: Optional[int] = None
|
||||
|
||||
|
||||
@dataclass
|
||||
class CompiledModelNode(CompiledNode):
|
||||
resource_type: NodeType = field(metadata={'restrict': [NodeType.Model]})
|
||||
resource_type: NodeType = field(metadata={"restrict": [NodeType.Model]})
|
||||
|
||||
|
||||
# TODO: rm?
|
||||
@dataclass
|
||||
class CompiledRPCNode(CompiledNode):
|
||||
resource_type: NodeType = field(metadata={'restrict': [NodeType.RPCCall]})
|
||||
resource_type: NodeType = field(metadata={"restrict": [NodeType.RPCCall]})
|
||||
|
||||
|
||||
@dataclass
|
||||
class CompiledSqlNode(CompiledNode):
|
||||
resource_type: NodeType = field(metadata={'restrict': [NodeType.SqlOperation]})
|
||||
resource_type: NodeType = field(metadata={"restrict": [NodeType.SqlOperation]})
|
||||
|
||||
|
||||
@dataclass
|
||||
class CompiledSeedNode(CompiledNode):
|
||||
# keep this in sync with ParsedSeedNode!
|
||||
resource_type: NodeType = field(metadata={'restrict': [NodeType.Seed]})
|
||||
resource_type: NodeType = field(metadata={"restrict": [NodeType.Seed]})
|
||||
config: SeedConfig = field(default_factory=SeedConfig)
|
||||
|
||||
@property
|
||||
def empty(self):
|
||||
""" Seeds are never empty"""
|
||||
"""Seeds are never empty"""
|
||||
return False
|
||||
|
||||
def same_body(self, other) -> bool:
|
||||
@@ -111,12 +109,12 @@ class CompiledSeedNode(CompiledNode):
|
||||
|
||||
@dataclass
|
||||
class CompiledSnapshotNode(CompiledNode):
|
||||
resource_type: NodeType = field(metadata={'restrict': [NodeType.Snapshot]})
|
||||
resource_type: NodeType = field(metadata={"restrict": [NodeType.Snapshot]})
|
||||
|
||||
|
||||
@dataclass
|
||||
class CompiledSingularTestNode(CompiledNode):
|
||||
resource_type: NodeType = field(metadata={'restrict': [NodeType.Test]})
|
||||
resource_type: NodeType = field(metadata={"restrict": [NodeType.Test]})
|
||||
# Was not able to make mypy happy and keep the code working. We need to
|
||||
# refactor the various configs.
|
||||
config: TestConfig = field(default_factory=TestConfig) # type:ignore
|
||||
@@ -125,7 +123,7 @@ class CompiledSingularTestNode(CompiledNode):
|
||||
@dataclass
|
||||
class CompiledGenericTestNode(CompiledNode, HasTestMetadata):
|
||||
# keep this in sync with ParsedGenericTestNode!
|
||||
resource_type: NodeType = field(metadata={'restrict': [NodeType.Test]})
|
||||
resource_type: NodeType = field(metadata={"restrict": [NodeType.Test]})
|
||||
column_name: Optional[str] = None
|
||||
file_key_name: Optional[str] = None
|
||||
# Was not able to make mypy happy and keep the code working. We need to
|
||||
@@ -136,11 +134,7 @@ class CompiledGenericTestNode(CompiledNode, HasTestMetadata):
|
||||
if other is None:
|
||||
return False
|
||||
|
||||
return (
|
||||
self.same_config(other) and
|
||||
self.same_fqn(other) and
|
||||
True
|
||||
)
|
||||
return self.same_config(other) and self.same_fqn(other) and True
|
||||
|
||||
|
||||
CompiledTestNode = Union[CompiledSingularTestNode, CompiledGenericTestNode]
|
||||
@@ -188,8 +182,7 @@ def parsed_instance_for(compiled: CompiledNode) -> ParsedResource:
|
||||
cls = PARSED_TYPES.get(type(compiled))
|
||||
if cls is None:
|
||||
# how???
|
||||
raise ValueError('invalid resource_type: {}'
|
||||
.format(compiled.resource_type))
|
||||
raise ValueError("invalid resource_type: {}".format(compiled.resource_type))
|
||||
|
||||
return cls.from_dict(compiled.to_dict(omit_none=True))
|
||||
|
||||
|
||||
@@ -4,29 +4,49 @@ from itertools import chain, islice
|
||||
from mashumaro import DataClassMessagePackMixin
|
||||
from multiprocessing.synchronize import Lock
|
||||
from typing import (
|
||||
Dict, List, Optional, Union, Mapping, MutableMapping, Any, Set, Tuple,
|
||||
TypeVar, Callable, Generic, cast, AbstractSet, ClassVar
|
||||
Dict,
|
||||
List,
|
||||
Optional,
|
||||
Union,
|
||||
Mapping,
|
||||
MutableMapping,
|
||||
Any,
|
||||
Set,
|
||||
Tuple,
|
||||
TypeVar,
|
||||
Callable,
|
||||
Generic,
|
||||
cast,
|
||||
AbstractSet,
|
||||
ClassVar,
|
||||
)
|
||||
from typing_extensions import Protocol
|
||||
from uuid import UUID
|
||||
|
||||
from dbt.contracts.graph.compiled import (
|
||||
CompileResultNode, ManifestNode, NonSourceCompiledNode, GraphMemberNode
|
||||
CompileResultNode,
|
||||
ManifestNode,
|
||||
NonSourceCompiledNode,
|
||||
GraphMemberNode,
|
||||
)
|
||||
from dbt.contracts.graph.parsed import (
|
||||
ParsedMacro, ParsedDocumentation,
|
||||
ParsedSourceDefinition, ParsedExposure, ParsedMetric,
|
||||
HasUniqueID, UnpatchedSourceDefinition, ManifestNodes
|
||||
ParsedMacro,
|
||||
ParsedDocumentation,
|
||||
ParsedSourceDefinition,
|
||||
ParsedExposure,
|
||||
ParsedMetric,
|
||||
HasUniqueID,
|
||||
UnpatchedSourceDefinition,
|
||||
ManifestNodes,
|
||||
)
|
||||
from dbt.contracts.graph.unparsed import SourcePatch
|
||||
from dbt.contracts.files import SourceFile, SchemaSourceFile, FileHash, AnySourceFile
|
||||
from dbt.contracts.util import (
|
||||
BaseArtifactMetadata, SourceKey, ArtifactMixin, schema_version
|
||||
)
|
||||
from dbt.contracts.util import BaseArtifactMetadata, SourceKey, ArtifactMixin, schema_version
|
||||
from dbt.dataclass_schema import dbtClassMixin
|
||||
from dbt.exceptions import (
|
||||
CompilationException,
|
||||
raise_duplicate_resource_name, raise_compiler_error,
|
||||
raise_duplicate_resource_name,
|
||||
raise_compiler_error,
|
||||
)
|
||||
from dbt.helper_types import PathSet
|
||||
from dbt.events.functions import fire_event
|
||||
@@ -62,14 +82,14 @@ def find_unique_id_for_package(storage, key, package: Optional[PackageName]):
|
||||
|
||||
|
||||
class DocLookup(dbtClassMixin):
|
||||
def __init__(self, manifest: 'Manifest'):
|
||||
def __init__(self, manifest: "Manifest"):
|
||||
self.storage: Dict[str, Dict[PackageName, UniqueID]] = {}
|
||||
self.populate(manifest)
|
||||
|
||||
def get_unique_id(self, key, package: Optional[PackageName]):
|
||||
return find_unique_id_for_package(self.storage, key, package)
|
||||
|
||||
def find(self, key, package: Optional[PackageName], manifest: 'Manifest'):
|
||||
def find(self, key, package: Optional[PackageName], manifest: "Manifest"):
|
||||
unique_id = self.get_unique_id(key, package)
|
||||
if unique_id is not None:
|
||||
return self.perform_lookup(unique_id, manifest)
|
||||
@@ -84,25 +104,23 @@ class DocLookup(dbtClassMixin):
|
||||
for doc in manifest.docs.values():
|
||||
self.add_doc(doc)
|
||||
|
||||
def perform_lookup(
|
||||
self, unique_id: UniqueID, manifest
|
||||
) -> ParsedDocumentation:
|
||||
def perform_lookup(self, unique_id: UniqueID, manifest) -> ParsedDocumentation:
|
||||
if unique_id not in manifest.docs:
|
||||
raise dbt.exceptions.InternalException(
|
||||
f'Doc {unique_id} found in cache but not found in manifest'
|
||||
f"Doc {unique_id} found in cache but not found in manifest"
|
||||
)
|
||||
return manifest.docs[unique_id]
|
||||
|
||||
|
||||
class SourceLookup(dbtClassMixin):
|
||||
def __init__(self, manifest: 'Manifest'):
|
||||
def __init__(self, manifest: "Manifest"):
|
||||
self.storage: Dict[str, Dict[PackageName, UniqueID]] = {}
|
||||
self.populate(manifest)
|
||||
|
||||
def get_unique_id(self, search_name, package: Optional[PackageName]):
|
||||
return find_unique_id_for_package(self.storage, search_name, package)
|
||||
|
||||
def find(self, search_name, package: Optional[PackageName], manifest: 'Manifest'):
|
||||
def find(self, search_name, package: Optional[PackageName], manifest: "Manifest"):
|
||||
unique_id = self.get_unique_id(search_name, package)
|
||||
if unique_id is not None:
|
||||
return self.perform_lookup(unique_id, manifest)
|
||||
@@ -116,15 +134,13 @@ class SourceLookup(dbtClassMixin):
|
||||
|
||||
def populate(self, manifest):
|
||||
for source in manifest.sources.values():
|
||||
if hasattr(source, 'source_name'):
|
||||
if hasattr(source, "source_name"):
|
||||
self.add_source(source)
|
||||
|
||||
def perform_lookup(
|
||||
self, unique_id: UniqueID, manifest: 'Manifest'
|
||||
) -> ParsedSourceDefinition:
|
||||
def perform_lookup(self, unique_id: UniqueID, manifest: "Manifest") -> ParsedSourceDefinition:
|
||||
if unique_id not in manifest.sources:
|
||||
raise dbt.exceptions.InternalException(
|
||||
f'Source {unique_id} found in cache but not found in manifest'
|
||||
f"Source {unique_id} found in cache but not found in manifest"
|
||||
)
|
||||
return manifest.sources[unique_id]
|
||||
|
||||
@@ -136,14 +152,14 @@ class RefableLookup(dbtClassMixin):
|
||||
# refables are actually unique, so the Dict[PackageName, UniqueID] will
|
||||
# only ever have exactly one value, but doing 3 dict lookups instead of 1
|
||||
# is not a big deal at all and retains consistency
|
||||
def __init__(self, manifest: 'Manifest'):
|
||||
def __init__(self, manifest: "Manifest"):
|
||||
self.storage: Dict[str, Dict[PackageName, UniqueID]] = {}
|
||||
self.populate(manifest)
|
||||
|
||||
def get_unique_id(self, key, package: Optional[PackageName]):
|
||||
return find_unique_id_for_package(self.storage, key, package)
|
||||
|
||||
def find(self, key, package: Optional[PackageName], manifest: 'Manifest'):
|
||||
def find(self, key, package: Optional[PackageName], manifest: "Manifest"):
|
||||
unique_id = self.get_unique_id(key, package)
|
||||
if unique_id is not None:
|
||||
return self.perform_lookup(unique_id, manifest)
|
||||
@@ -159,20 +175,17 @@ class RefableLookup(dbtClassMixin):
|
||||
for node in manifest.nodes.values():
|
||||
self.add_node(node)
|
||||
|
||||
def perform_lookup(
|
||||
self, unique_id: UniqueID, manifest
|
||||
) -> ManifestNode:
|
||||
def perform_lookup(self, unique_id: UniqueID, manifest) -> ManifestNode:
|
||||
if unique_id not in manifest.nodes:
|
||||
raise dbt.exceptions.InternalException(
|
||||
f'Node {unique_id} found in cache but not found in manifest'
|
||||
f"Node {unique_id} found in cache but not found in manifest"
|
||||
)
|
||||
return manifest.nodes[unique_id]
|
||||
|
||||
|
||||
# This handles both models/seeds/snapshots and sources
|
||||
class DisabledLookup(dbtClassMixin):
|
||||
|
||||
def __init__(self, manifest: 'Manifest'):
|
||||
def __init__(self, manifest: "Manifest"):
|
||||
self.storage: Dict[str, Dict[PackageName, List[Any]]] = {}
|
||||
self.populate(manifest)
|
||||
|
||||
@@ -226,30 +239,31 @@ def _search_packages(
|
||||
@dataclass
|
||||
class ManifestMetadata(BaseArtifactMetadata):
|
||||
"""Metadata for the manifest."""
|
||||
|
||||
dbt_schema_version: str = field(
|
||||
default_factory=lambda: str(WritableManifest.dbt_schema_version)
|
||||
)
|
||||
project_id: Optional[str] = field(
|
||||
default=None,
|
||||
metadata={
|
||||
'description': 'A unique identifier for the project',
|
||||
"description": "A unique identifier for the project",
|
||||
},
|
||||
)
|
||||
user_id: Optional[UUID] = field(
|
||||
default=None,
|
||||
metadata={
|
||||
'description': 'A unique identifier for the user',
|
||||
"description": "A unique identifier for the user",
|
||||
},
|
||||
)
|
||||
send_anonymous_usage_stats: Optional[bool] = field(
|
||||
default=None,
|
||||
metadata=dict(description=(
|
||||
'Whether dbt is configured to send anonymous usage statistics'
|
||||
)),
|
||||
metadata=dict(
|
||||
description=("Whether dbt is configured to send anonymous usage statistics")
|
||||
),
|
||||
)
|
||||
adapter_type: Optional[str] = field(
|
||||
default=None,
|
||||
metadata=dict(description='The type name of the adapter'),
|
||||
metadata=dict(description="The type name of the adapter"),
|
||||
)
|
||||
|
||||
def __post_init__(self):
|
||||
@@ -295,7 +309,7 @@ def build_node_edges(nodes: List[ManifestNode]):
|
||||
# Build a map of children of macros and generic tests
|
||||
def build_macro_edges(nodes: List[Any]):
|
||||
forward_edges: Dict[str, List[str]] = {
|
||||
n.unique_id: [] for n in nodes if n.unique_id.startswith('macro') or n.depends_on.macros
|
||||
n.unique_id: [] for n in nodes if n.unique_id.startswith("macro") or n.depends_on.macros
|
||||
}
|
||||
for node in nodes:
|
||||
for unique_id in node.depends_on.macros:
|
||||
@@ -346,7 +360,7 @@ class MaterializationCandidate(MacroCandidate):
|
||||
@classmethod
|
||||
def from_macro(
|
||||
cls, candidate: MacroCandidate, specificity: Specificity
|
||||
) -> 'MaterializationCandidate':
|
||||
) -> "MaterializationCandidate":
|
||||
return cls(
|
||||
locality=candidate.locality,
|
||||
macro=candidate.macro,
|
||||
@@ -356,16 +370,13 @@ class MaterializationCandidate(MacroCandidate):
|
||||
def __eq__(self, other: object) -> bool:
|
||||
if not isinstance(other, MaterializationCandidate):
|
||||
return NotImplemented
|
||||
equal = (
|
||||
self.specificity == other.specificity and
|
||||
self.locality == other.locality
|
||||
)
|
||||
equal = self.specificity == other.specificity and self.locality == other.locality
|
||||
if equal:
|
||||
raise_compiler_error(
|
||||
'Found two materializations with the name {} (packages {} and '
|
||||
'{}). dbt cannot resolve this ambiguity'
|
||||
.format(self.macro.name, self.macro.package_name,
|
||||
other.macro.package_name)
|
||||
"Found two materializations with the name {} (packages {} and "
|
||||
"{}). dbt cannot resolve this ambiguity".format(
|
||||
self.macro.name, self.macro.package_name, other.macro.package_name
|
||||
)
|
||||
)
|
||||
|
||||
return equal
|
||||
@@ -384,7 +395,7 @@ class MaterializationCandidate(MacroCandidate):
return False


M = TypeVar('M', bound=MacroCandidate)
M = TypeVar("M", bound=MacroCandidate)


class CandidateList(List[M]):
@@ -412,10 +423,10 @@ class Searchable(Protocol):

@property
def search_name(self) -> str:
raise NotImplementedError('search_name not implemented')
raise NotImplementedError("search_name not implemented")


D = TypeVar('D')
D = TypeVar("D")


@dataclass
@@ -426,37 +437,35 @@ class Disabled(Generic[D]):
|
||||
MaybeDocumentation = Optional[ParsedDocumentation]
|
||||
|
||||
|
||||
MaybeParsedSource = Optional[Union[
|
||||
ParsedSourceDefinition,
|
||||
Disabled[ParsedSourceDefinition],
|
||||
]]
|
||||
MaybeParsedSource = Optional[
|
||||
Union[
|
||||
ParsedSourceDefinition,
|
||||
Disabled[ParsedSourceDefinition],
|
||||
]
|
||||
]
|
||||
|
||||
|
||||
MaybeNonSource = Optional[Union[
|
||||
ManifestNode,
|
||||
Disabled[ManifestNode]
|
||||
]]
|
||||
MaybeNonSource = Optional[Union[ManifestNode, Disabled[ManifestNode]]]
|
||||
|
||||
|
||||
T = TypeVar('T', bound=GraphMemberNode)
|
||||
T = TypeVar("T", bound=GraphMemberNode)
|
||||
|
||||
|
||||
def _update_into(dest: MutableMapping[str, T], new_item: T):
|
||||
"""Update dest to overwrite whatever is at dest[new_item.unique_id] with
|
||||
new_itme. There must be an existing value to overwrite, and they two nodes
|
||||
new_itme. There must be an existing value to overwrite, and the two nodes
|
||||
must have the same original file path.
|
||||
"""
|
||||
unique_id = new_item.unique_id
|
||||
if unique_id not in dest:
|
||||
raise dbt.exceptions.RuntimeException(
|
||||
f'got an update_{new_item.resource_type} call with an '
|
||||
f'unrecognized {new_item.resource_type}: {new_item.unique_id}'
|
||||
f"got an update_{new_item.resource_type} call with an "
|
||||
f"unrecognized {new_item.resource_type}: {new_item.unique_id}"
|
||||
)
|
||||
existing = dest[unique_id]
|
||||
if new_item.original_file_path != existing.original_file_path:
|
||||
raise dbt.exceptions.RuntimeException(
|
||||
f'cannot update a {new_item.resource_type} to have a new file '
|
||||
f'path!'
|
||||
f"cannot update a {new_item.resource_type} to have a new file " f"path!"
|
||||
)
|
||||
dest[unique_id] = new_item
|
||||
|
||||
@@ -480,6 +489,7 @@ class MacroMethods:
|
||||
"""
|
||||
filter: Optional[Callable[[MacroCandidate], bool]] = None
|
||||
if package is not None:
|
||||
|
||||
def filter(candidate: MacroCandidate) -> bool:
|
||||
return package == candidate.macro.package_name
|
||||
|
||||
@@ -502,11 +512,12 @@ class MacroMethods:
|
||||
- return the `generate_{component}_name` macro from the 'dbt'
|
||||
internal project
|
||||
"""
|
||||
|
||||
def filter(candidate: MacroCandidate) -> bool:
|
||||
return candidate.locality != Locality.Imported
|
||||
|
||||
candidates: CandidateList = self._find_macros_by_name(
|
||||
name=f'generate_{component}_name',
|
||||
name=f"generate_{component}_name",
|
||||
root_project_name=root_project_name,
|
||||
# filter out imported packages
|
||||
filter=filter,
|
||||
@@ -517,12 +528,12 @@ class MacroMethods:
|
||||
self,
|
||||
name: str,
|
||||
root_project_name: str,
|
||||
filter: Optional[Callable[[MacroCandidate], bool]] = None
|
||||
filter: Optional[Callable[[MacroCandidate], bool]] = None,
|
||||
) -> CandidateList:
|
||||
"""Find macros by their name.
|
||||
"""
|
||||
"""Find macros by their name."""
|
||||
# avoid an import cycle
|
||||
from dbt.adapters.factory import get_adapter_package_names
|
||||
|
||||
candidates: CandidateList = CandidateList()
|
||||
packages = set(get_adapter_package_names(self.metadata.adapter_type))
|
||||
for unique_id, macro in self.macros.items():
|
||||
@@ -555,8 +566,8 @@ class ManifestStateCheck(dbtClassMixin):
|
||||
|
||||
@dataclass
|
||||
class Manifest(MacroMethods, DataClassMessagePackMixin, dbtClassMixin):
|
||||
"""The manifest for the full graph, after parsing and during compilation.
|
||||
"""
|
||||
"""The manifest for the full graph, after parsing and during compilation."""
|
||||
|
||||
# These attributes are both positional and by keyword. If an attribute
|
||||
# is added it must all be added in the __reduce_ex__ method in the
|
||||
# args tuple in the right position.
|
||||
@@ -576,27 +587,27 @@ class Manifest(MacroMethods, DataClassMessagePackMixin, dbtClassMixin):
|
||||
env_vars: MutableMapping[str, str] = field(default_factory=dict)
|
||||
|
||||
_doc_lookup: Optional[DocLookup] = field(
|
||||
default=None, metadata={'serialize': lambda x: None, 'deserialize': lambda x: None}
|
||||
default=None, metadata={"serialize": lambda x: None, "deserialize": lambda x: None}
|
||||
)
|
||||
_source_lookup: Optional[SourceLookup] = field(
|
||||
default=None, metadata={'serialize': lambda x: None, 'deserialize': lambda x: None}
|
||||
default=None, metadata={"serialize": lambda x: None, "deserialize": lambda x: None}
|
||||
)
|
||||
_ref_lookup: Optional[RefableLookup] = field(
|
||||
default=None, metadata={'serialize': lambda x: None, 'deserialize': lambda x: None}
|
||||
default=None, metadata={"serialize": lambda x: None, "deserialize": lambda x: None}
|
||||
)
|
||||
_disabled_lookup: Optional[DisabledLookup] = field(
|
||||
default=None, metadata={'serialize': lambda x: None, 'deserialize': lambda x: None}
|
||||
default=None, metadata={"serialize": lambda x: None, "deserialize": lambda x: None}
|
||||
)
|
||||
_analysis_lookup: Optional[AnalysisLookup] = field(
|
||||
default=None, metadata={'serialize': lambda x: None, 'deserialize': lambda x: None}
|
||||
default=None, metadata={"serialize": lambda x: None, "deserialize": lambda x: None}
|
||||
)
|
||||
_parsing_info: ParsingInfo = field(
|
||||
default_factory=ParsingInfo,
|
||||
metadata={'serialize': lambda x: None, 'deserialize': lambda x: None}
|
||||
metadata={"serialize": lambda x: None, "deserialize": lambda x: None},
|
||||
)
|
||||
_lock: Lock = field(
|
||||
default_factory=flags.MP_CONTEXT.Lock,
|
||||
metadata={'serialize': lambda x: None, 'deserialize': lambda x: None}
|
||||
metadata={"serialize": lambda x: None, "deserialize": lambda x: None},
|
||||
)
|
||||
|
||||
def __pre_serialize__(self):
|
||||
@@ -610,9 +621,7 @@ class Manifest(MacroMethods, DataClassMessagePackMixin, dbtClassMixin):
|
||||
obj._lock = flags.MP_CONTEXT.Lock()
|
||||
return obj
|
||||
|
||||
def sync_update_node(
|
||||
self, new_node: NonSourceCompiledNode
|
||||
) -> NonSourceCompiledNode:
|
||||
def sync_update_node(self, new_node: NonSourceCompiledNode) -> NonSourceCompiledNode:
|
||||
"""update the node with a lock. The only time we should want to lock is
|
||||
when compiling an ephemeral ancestor of a node at runtime, because
|
||||
multiple threads could be just-in-time compiling the same ephemeral
|
||||
@@ -624,7 +633,7 @@ class Manifest(MacroMethods, DataClassMessagePackMixin, dbtClassMixin):
|
||||
"""
|
||||
with self._lock:
|
||||
existing = self.nodes[new_node.unique_id]
|
||||
if getattr(existing, 'compiled', False):
|
||||
if getattr(existing, "compiled", False):
|
||||
# already compiled -> must be a NonSourceCompiledNode
|
||||
return cast(NonSourceCompiledNode, existing)
|
||||
_update_into(self.nodes, new_node)
|
||||
@@ -649,22 +658,10 @@ class Manifest(MacroMethods, DataClassMessagePackMixin, dbtClassMixin):
|
||||
manifest!
|
||||
"""
|
||||
self.flat_graph = {
|
||||
'exposures': {
|
||||
k: v.to_dict(omit_none=False)
|
||||
for k, v in self.exposures.items()
|
||||
},
|
||||
'metrics': {
|
||||
k: v.to_dict(omit_none=False)
|
||||
for k, v in self.metrics.items()
|
||||
},
|
||||
'nodes': {
|
||||
k: v.to_dict(omit_none=False)
|
||||
for k, v in self.nodes.items()
|
||||
},
|
||||
'sources': {
|
||||
k: v.to_dict(omit_none=False)
|
||||
for k, v in self.sources.items()
|
||||
}
|
||||
"exposures": {k: v.to_dict(omit_none=False) for k, v in self.exposures.items()},
|
||||
"metrics": {k: v.to_dict(omit_none=False) for k, v in self.metrics.items()},
|
||||
"nodes": {k: v.to_dict(omit_none=False) for k, v in self.nodes.items()},
|
||||
"sources": {k: v.to_dict(omit_none=False) for k, v in self.sources.items()},
|
||||
}
|
||||
|
||||
def build_disabled_by_file_id(self):
|
||||
@@ -675,7 +672,8 @@ class Manifest(MacroMethods, DataClassMessagePackMixin, dbtClassMixin):
|
||||
return disabled_by_file_id
|
||||
|
||||
def _materialization_candidates_for(
|
||||
self, project_name: str,
|
||||
self,
|
||||
project_name: str,
|
||||
materialization_name: str,
|
||||
adapter_type: Optional[str],
|
||||
) -> CandidateList:
|
||||
@@ -698,13 +696,16 @@ class Manifest(MacroMethods, DataClassMessagePackMixin, dbtClassMixin):
|
||||
def find_materialization_macro_by_name(
|
||||
self, project_name: str, materialization_name: str, adapter_type: str
|
||||
) -> Optional[ParsedMacro]:
|
||||
candidates: CandidateList = CandidateList(chain.from_iterable(
|
||||
self._materialization_candidates_for(
|
||||
project_name=project_name,
|
||||
materialization_name=materialization_name,
|
||||
adapter_type=atype,
|
||||
) for atype in (adapter_type, None)
|
||||
))
|
||||
candidates: CandidateList = CandidateList(
|
||||
chain.from_iterable(
|
||||
self._materialization_candidates_for(
|
||||
project_name=project_name,
|
||||
materialization_name=materialization_name,
|
||||
adapter_type=atype,
|
||||
)
|
||||
for atype in (adapter_type, None)
|
||||
)
|
||||
)
|
||||
return candidates.last()
|
||||
|
||||
def get_resource_fqns(self) -> Mapping[str, PathSet]:
|
||||
@@ -713,7 +714,7 @@ class Manifest(MacroMethods, DataClassMessagePackMixin, dbtClassMixin):
|
||||
self.exposures.values(),
|
||||
self.nodes.values(),
|
||||
self.sources.values(),
|
||||
self.metrics.values()
|
||||
self.metrics.values(),
|
||||
)
|
||||
for resource in all_resources:
|
||||
resource_type_plural = resource.resource_type.pluralize()
|
||||
@@ -723,17 +724,16 @@ class Manifest(MacroMethods, DataClassMessagePackMixin, dbtClassMixin):
|
||||
return resource_fqns
|
||||
|
||||
def get_used_schemas(self, resource_types=None):
|
||||
return frozenset({
|
||||
(node.database, node.schema) for node in
|
||||
chain(self.nodes.values(), self.sources.values())
|
||||
if not resource_types or node.resource_type in resource_types
|
||||
})
|
||||
return frozenset(
|
||||
{
|
||||
(node.database, node.schema)
|
||||
for node in chain(self.nodes.values(), self.sources.values())
|
||||
if not resource_types or node.resource_type in resource_types
|
||||
}
|
||||
)
|
||||
|
||||
def get_used_databases(self):
|
||||
return frozenset(
|
||||
x.database for x in
|
||||
chain(self.nodes.values(), self.sources.values())
|
||||
)
|
||||
return frozenset(x.database for x in chain(self.nodes.values(), self.sources.values()))
|
||||
|
||||
def deepcopy(self):
|
||||
return Manifest(
|
||||
@@ -751,21 +751,25 @@ class Manifest(MacroMethods, DataClassMessagePackMixin, dbtClassMixin):
|
||||
)
|
||||
|
||||
def build_parent_and_child_maps(self):
|
||||
edge_members = list(chain(
|
||||
self.nodes.values(),
|
||||
self.sources.values(),
|
||||
self.exposures.values(),
|
||||
self.metrics.values(),
|
||||
))
|
||||
edge_members = list(
|
||||
chain(
|
||||
self.nodes.values(),
|
||||
self.sources.values(),
|
||||
self.exposures.values(),
|
||||
self.metrics.values(),
|
||||
)
|
||||
)
|
||||
forward_edges, backward_edges = build_node_edges(edge_members)
|
||||
self.child_map = forward_edges
|
||||
self.parent_map = backward_edges
|
||||
|
||||
def build_macro_child_map(self):
|
||||
edge_members = list(chain(
|
||||
self.nodes.values(),
|
||||
self.macros.values(),
|
||||
))
|
||||
edge_members = list(
|
||||
chain(
|
||||
self.nodes.values(),
|
||||
self.macros.values(),
|
||||
)
|
||||
)
|
||||
forward_edges = build_macro_edges(edge_members)
|
||||
return forward_edges
|
||||
|
||||
@@ -802,7 +806,7 @@ class Manifest(MacroMethods, DataClassMessagePackMixin, dbtClassMixin):
|
||||
else:
|
||||
# something terrible has happened
|
||||
raise dbt.exceptions.InternalException(
|
||||
'Expected node {} not found in manifest'.format(unique_id)
|
||||
"Expected node {} not found in manifest".format(unique_id)
|
||||
)
|
||||
|
||||
@property
|
||||
@@ -860,9 +864,7 @@ class Manifest(MacroMethods, DataClassMessagePackMixin, dbtClassMixin):
|
||||
node: Optional[ManifestNode] = None
|
||||
disabled: Optional[List[ManifestNode]] = None
|
||||
|
||||
candidates = _search_packages(
|
||||
current_project, node_package, target_model_package
|
||||
)
|
||||
candidates = _search_packages(current_project, node_package, target_model_package)
|
||||
for pkg in candidates:
|
||||
node = self.ref_lookup.find(target_model_name, pkg, self)
|
||||
|
||||
@@ -871,9 +873,7 @@ class Manifest(MacroMethods, DataClassMessagePackMixin, dbtClassMixin):
|
||||
|
||||
# it's possible that the node is disabled
|
||||
if disabled is None:
|
||||
disabled = self.disabled_lookup.find(
|
||||
target_model_name, pkg
|
||||
)
|
||||
disabled = self.disabled_lookup.find(target_model_name, pkg)
|
||||
|
||||
if disabled:
|
||||
return Disabled(disabled[0])
|
||||
@@ -886,9 +886,9 @@ class Manifest(MacroMethods, DataClassMessagePackMixin, dbtClassMixin):
|
||||
target_source_name: str,
|
||||
target_table_name: str,
|
||||
current_project: str,
|
||||
node_package: str
|
||||
node_package: str,
|
||||
) -> MaybeParsedSource:
|
||||
search_name = f'{target_source_name}.{target_table_name}'
|
||||
search_name = f"{target_source_name}.{target_table_name}"
|
||||
candidates = _search_packages(current_project, node_package)
|
||||
|
||||
source: Optional[ParsedSourceDefinition] = None
|
||||
@@ -901,7 +901,7 @@ class Manifest(MacroMethods, DataClassMessagePackMixin, dbtClassMixin):
|
||||
|
||||
if disabled is None:
|
||||
disabled = self.disabled_lookup.find(
|
||||
f'{target_source_name}.{target_table_name}', pkg
|
||||
f"{target_source_name}.{target_table_name}", pkg
|
||||
)
|
||||
|
||||
if disabled:
|
||||
@@ -920,9 +920,7 @@ class Manifest(MacroMethods, DataClassMessagePackMixin, dbtClassMixin):
|
||||
resolve_ref except the is_enabled checks are unnecessary as docs are
|
||||
always enabled.
|
||||
"""
|
||||
candidates = _search_packages(
|
||||
current_project, node_package, package
|
||||
)
|
||||
candidates = _search_packages(current_project, node_package, package)
|
||||
|
||||
for pkg in candidates:
|
||||
result = self.doc_lookup.find(name, pkg, self)
|
||||
@@ -934,7 +932,7 @@ class Manifest(MacroMethods, DataClassMessagePackMixin, dbtClassMixin):
|
||||
def merge_from_artifact(
|
||||
self,
|
||||
adapter,
|
||||
other: 'WritableManifest',
|
||||
other: "WritableManifest",
|
||||
selected: AbstractSet[UniqueID],
|
||||
) -> None:
|
||||
"""Given the selected unique IDs and a writable manifest, update this
|
||||
@@ -947,12 +945,10 @@ class Manifest(MacroMethods, DataClassMessagePackMixin, dbtClassMixin):
|
||||
for unique_id, node in other.nodes.items():
|
||||
current = self.nodes.get(unique_id)
|
||||
if current and (
|
||||
node.resource_type in refables and
|
||||
not node.is_ephemeral and
|
||||
unique_id not in selected and
|
||||
not adapter.get_relation(
|
||||
current.database, current.schema, current.identifier
|
||||
)
|
||||
node.resource_type in refables
|
||||
and not node.is_ephemeral
|
||||
and unique_id not in selected
|
||||
and not adapter.get_relation(current.database, current.schema, current.identifier)
|
||||
):
|
||||
merged.add(unique_id)
|
||||
self.nodes[unique_id] = node.replace(deferred=True)
|
||||
@@ -971,7 +967,7 @@ class Manifest(MacroMethods, DataClassMessagePackMixin, dbtClassMixin):
|
||||
# note that the line wrap eats newlines, so if you want newlines,
|
||||
# this is the result :(
|
||||
msg = line_wrap_message(
|
||||
f'''\
|
||||
f"""\
|
||||
dbt found two macros named "{macro.name}" in the project
|
||||
"{macro.package_name}".
|
||||
|
||||
@@ -982,8 +978,8 @@ class Manifest(MacroMethods, DataClassMessagePackMixin, dbtClassMixin):
|
||||
- {macro.original_file_path}
|
||||
|
||||
- {other_path}
|
||||
''',
|
||||
subtract=2
|
||||
""",
|
||||
subtract=2,
|
||||
)
|
||||
raise_compiler_error(msg)
|
||||
|
||||
@@ -999,9 +995,7 @@ class Manifest(MacroMethods, DataClassMessagePackMixin, dbtClassMixin):
|
||||
my_checksum = self.files[key].checksum
|
||||
return my_checksum == source_file.checksum
|
||||
|
||||
def add_source(
|
||||
self, source_file: SchemaSourceFile, source: UnpatchedSourceDefinition
|
||||
):
|
||||
def add_source(self, source_file: SchemaSourceFile, source: UnpatchedSourceDefinition):
|
||||
# sources can't be overwritten!
|
||||
_check_duplicates(source, self.sources)
|
||||
self.sources[source.unique_id] = source # type: ignore
|
||||
@@ -1097,75 +1091,64 @@ AnyManifest = Union[Manifest, MacroManifest]
|
||||
|
||||
|
||||
@dataclass
|
||||
@schema_version('manifest', 4)
|
||||
@schema_version("manifest", 4)
|
||||
class WritableManifest(ArtifactMixin):
|
||||
nodes: Mapping[UniqueID, ManifestNode] = field(
|
||||
metadata=dict(description=(
|
||||
'The nodes defined in the dbt project and its dependencies'
|
||||
))
|
||||
metadata=dict(description=("The nodes defined in the dbt project and its dependencies"))
|
||||
)
|
||||
sources: Mapping[UniqueID, ParsedSourceDefinition] = field(
|
||||
metadata=dict(description=(
|
||||
'The sources defined in the dbt project and its dependencies'
|
||||
))
|
||||
metadata=dict(description=("The sources defined in the dbt project and its dependencies"))
|
||||
)
|
||||
macros: Mapping[UniqueID, ParsedMacro] = field(
|
||||
metadata=dict(description=(
|
||||
'The macros defined in the dbt project and its dependencies'
|
||||
))
|
||||
metadata=dict(description=("The macros defined in the dbt project and its dependencies"))
|
||||
)
|
||||
docs: Mapping[UniqueID, ParsedDocumentation] = field(
|
||||
metadata=dict(description=(
|
||||
'The docs defined in the dbt project and its dependencies'
|
||||
))
|
||||
metadata=dict(description=("The docs defined in the dbt project and its dependencies"))
|
||||
)
|
||||
exposures: Mapping[UniqueID, ParsedExposure] = field(
|
||||
metadata=dict(description=(
|
||||
'The exposures defined in the dbt project and its dependencies'
|
||||
))
|
||||
metadata=dict(
|
||||
description=("The exposures defined in the dbt project and its dependencies")
|
||||
)
|
||||
)
|
||||
metrics: Mapping[UniqueID, ParsedMetric] = field(
|
||||
metadata=dict(description=(
|
||||
'The metrics defined in the dbt project and its dependencies'
|
||||
))
|
||||
metadata=dict(description=("The metrics defined in the dbt project and its dependencies"))
|
||||
)
|
||||
selectors: Mapping[UniqueID, Any] = field(
|
||||
metadata=dict(description=(
|
||||
'The selectors defined in selectors.yml'
|
||||
))
|
||||
metadata=dict(description=("The selectors defined in selectors.yml"))
|
||||
)
|
||||
disabled: Optional[Mapping[UniqueID, List[CompileResultNode]]] = field(
|
||||
metadata=dict(description="A mapping of the disabled nodes in the target")
|
||||
)
|
||||
parent_map: Optional[NodeEdgeMap] = field(
|
||||
metadata=dict(
|
||||
description="A mapping from child nodes to their dependencies",
|
||||
)
|
||||
)
|
||||
child_map: Optional[NodeEdgeMap] = field(
|
||||
metadata=dict(
|
||||
description="A mapping from parent nodes to their dependents",
|
||||
)
|
||||
)
|
||||
metadata: ManifestMetadata = field(
|
||||
metadata=dict(
|
||||
description="Metadata about the manifest",
|
||||
)
|
||||
)
|
||||
disabled: Optional[Mapping[UniqueID, List[CompileResultNode]]] = field(metadata=dict(
|
||||
description='A mapping of the disabled nodes in the target'
|
||||
))
|
||||
parent_map: Optional[NodeEdgeMap] = field(metadata=dict(
|
||||
description='A mapping from child nodes to their dependencies',
|
||||
))
|
||||
child_map: Optional[NodeEdgeMap] = field(metadata=dict(
|
||||
description='A mapping from parent nodes to their dependents',
|
||||
))
|
||||
metadata: ManifestMetadata = field(metadata=dict(
|
||||
description='Metadata about the manifest',
|
||||
))
|
||||
|
||||
|
||||
def _check_duplicates(
value: HasUniqueID, src: Mapping[str, HasUniqueID]
):
def _check_duplicates(value: HasUniqueID, src: Mapping[str, HasUniqueID]):
if value.unique_id in src:
raise_duplicate_resource_name(value, src[value.unique_id])


K_T = TypeVar('K_T')
V_T = TypeVar('V_T')
K_T = TypeVar("K_T")
V_T = TypeVar("V_T")


def _expect_value(
key: K_T, src: Mapping[K_T, V_T], old_file: SourceFile, name: str
) -> V_T:
def _expect_value(key: K_T, src: Mapping[K_T, V_T], old_file: SourceFile, name: str) -> V_T:
if key not in src:
raise CompilationException(
'Expected to find "{}" in cached "result.{}" based '
'on cached file information: {}!'
.format(key, name, old_file)
"on cached file information: {}!".format(key, name, old_file)
)
return src[key]

@@ -1,11 +1,11 @@
|
||||
from dataclasses import field, Field, dataclass
|
||||
from enum import Enum
|
||||
from itertools import chain
|
||||
from typing import (
|
||||
Any, List, Optional, Dict, Union, Type, TypeVar, Callable
|
||||
)
|
||||
from typing import Any, List, Optional, Dict, Union, Type, TypeVar, Callable
|
||||
from dbt.dataclass_schema import (
|
||||
dbtClassMixin, ValidationError, register_pattern,
|
||||
dbtClassMixin,
|
||||
ValidationError,
|
||||
register_pattern,
|
||||
)
|
||||
from dbt.contracts.graph.unparsed import AdditionalPropertiesAllowed
|
||||
from dbt.exceptions import InternalException, CompilationException
|
||||
@@ -14,7 +14,7 @@ from dbt import hooks
|
||||
from dbt.node_types import NodeType
|
||||
|
||||
|
||||
M = TypeVar('M', bound='Metadata')
|
||||
M = TypeVar("M", bound="Metadata")
|
||||
|
||||
|
||||
def _get_meta_value(cls: Type[M], fld: Field, key: str, default: Any) -> M:
|
||||
@@ -29,14 +29,10 @@ def _get_meta_value(cls: Type[M], fld: Field, key: str, default: Any) -> M:
|
||||
try:
|
||||
return cls(value)
|
||||
except ValueError as exc:
|
||||
raise InternalException(
|
||||
f'Invalid {cls} value: {value}'
|
||||
) from exc
|
||||
raise InternalException(f"Invalid {cls} value: {value}") from exc
|
||||
|
||||
|
||||
def _set_meta_value(
|
||||
obj: M, key: str, existing: Optional[Dict[str, Any]] = None
|
||||
) -> Dict[str, Any]:
|
||||
def _set_meta_value(obj: M, key: str, existing: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
|
||||
if existing is None:
|
||||
result = {}
|
||||
else:
|
||||
@@ -53,19 +49,17 @@ class Metadata(Enum):
|
||||
|
||||
return _get_meta_value(cls, fld, key, default)
|
||||
|
||||
def meta(
|
||||
self, existing: Optional[Dict[str, Any]] = None
|
||||
) -> Dict[str, Any]:
|
||||
def meta(self, existing: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
|
||||
key = self.metadata_key()
|
||||
return _set_meta_value(self, key, existing)
|
||||
|
||||
@classmethod
|
||||
def default_field(cls) -> 'Metadata':
|
||||
raise NotImplementedError('Not implemented')
|
||||
def default_field(cls) -> "Metadata":
|
||||
raise NotImplementedError("Not implemented")
|
||||
|
||||
@classmethod
|
||||
def metadata_key(cls) -> str:
|
||||
raise NotImplementedError('Not implemented')
|
||||
raise NotImplementedError("Not implemented")
|
||||
|
||||
|
||||
class MergeBehavior(Metadata):
|
||||
@@ -74,12 +68,12 @@ class MergeBehavior(Metadata):
Clobber = 3

@classmethod
def default_field(cls) -> 'MergeBehavior':
def default_field(cls) -> "MergeBehavior":
return cls.Clobber

@classmethod
def metadata_key(cls) -> str:
return 'merge'
return "merge"


class ShowBehavior(Metadata):
|
||||
@@ -87,12 +81,12 @@ class ShowBehavior(Metadata):
|
||||
Hide = 2
|
||||
|
||||
@classmethod
|
||||
def default_field(cls) -> 'ShowBehavior':
|
||||
def default_field(cls) -> "ShowBehavior":
|
||||
return cls.Show
|
||||
|
||||
@classmethod
|
||||
def metadata_key(cls) -> str:
|
||||
return 'show_hide'
|
||||
return "show_hide"
|
||||
|
||||
@classmethod
|
||||
def should_show(cls, fld: Field) -> bool:
|
||||
@@ -104,12 +98,12 @@ class CompareBehavior(Metadata):
|
||||
Exclude = 2
|
||||
|
||||
@classmethod
|
||||
def default_field(cls) -> 'CompareBehavior':
|
||||
def default_field(cls) -> "CompareBehavior":
|
||||
return cls.Include
|
||||
|
||||
@classmethod
|
||||
def metadata_key(cls) -> str:
|
||||
return 'compare'
|
||||
return "compare"
|
||||
|
||||
@classmethod
|
||||
def should_include(cls, fld: Field) -> bool:
|
||||
@@ -141,32 +135,28 @@ def _merge_field_value(
|
||||
return _listify(self_value) + _listify(other_value)
|
||||
elif merge_behavior == MergeBehavior.Update:
|
||||
if not isinstance(self_value, dict):
|
||||
raise InternalException(f'expected dict, got {self_value}')
|
||||
raise InternalException(f"expected dict, got {self_value}")
|
||||
if not isinstance(other_value, dict):
|
||||
raise InternalException(f'expected dict, got {other_value}')
|
||||
raise InternalException(f"expected dict, got {other_value}")
|
||||
value = self_value.copy()
|
||||
value.update(other_value)
|
||||
return value
|
||||
else:
|
||||
raise InternalException(
|
||||
f'Got an invalid merge_behavior: {merge_behavior}'
|
||||
)
|
||||
raise InternalException(f"Got an invalid merge_behavior: {merge_behavior}")
|
||||
|
||||
|
||||
def insensitive_patterns(*patterns: str):
|
||||
lowercased = []
|
||||
for pattern in patterns:
|
||||
lowercased.append(
|
||||
''.join('[{}{}]'.format(s.upper(), s.lower()) for s in pattern)
|
||||
)
|
||||
return '^({})$'.format('|'.join(lowercased))
|
||||
lowercased.append("".join("[{}{}]".format(s.upper(), s.lower()) for s in pattern))
|
||||
return "^({})$".format("|".join(lowercased))
|
||||
|
||||
|
||||
class Severity(str):
|
||||
pass
|
||||
|
||||
|
||||
register_pattern(Severity, insensitive_patterns('warn', 'error'))
|
||||
register_pattern(Severity, insensitive_patterns("warn", "error"))
|
||||
|
||||
|
||||
@dataclass
|
||||
@@ -176,13 +166,11 @@ class Hook(dbtClassMixin, Replaceable):
|
||||
index: Optional[int] = None
|
||||
|
||||
|
||||
T = TypeVar('T', bound='BaseConfig')
|
||||
T = TypeVar("T", bound="BaseConfig")
|
||||
|
||||
|
||||
@dataclass
|
||||
class BaseConfig(
|
||||
AdditionalPropertiesAllowed, Replaceable
|
||||
):
|
||||
class BaseConfig(AdditionalPropertiesAllowed, Replaceable):
|
||||
|
||||
# enable syntax like: config['key']
|
||||
def __getitem__(self, key):
|
||||
@@ -207,8 +195,7 @@ class BaseConfig(
|
||||
def __delitem__(self, key):
|
||||
if hasattr(self, key):
|
||||
msg = (
|
||||
'Error, tried to delete config key "{}": Cannot delete '
|
||||
'built-in keys'
|
||||
'Error, tried to delete config key "{}": Cannot delete ' "built-in keys"
|
||||
).format(key)
|
||||
raise CompilationException(msg)
|
||||
else:
|
||||
@@ -248,9 +235,7 @@ class BaseConfig(
|
||||
return unrendered[key] == other[key]
|
||||
|
||||
@classmethod
|
||||
def same_contents(
|
||||
cls, unrendered: Dict[str, Any], other: Dict[str, Any]
|
||||
) -> bool:
|
||||
def same_contents(cls, unrendered: Dict[str, Any], other: Dict[str, Any]) -> bool:
|
||||
"""This is like __eq__, except it ignores some fields."""
|
||||
seen = set()
|
||||
for fld, target_name in cls._get_fields():
|
||||
@@ -270,14 +255,12 @@ class BaseConfig(
    # This is used in 'add_config_call' to created the combined config_call_dict.
    # 'meta' moved here from node
    mergebehavior = {
        "append": ['pre-hook', 'pre_hook', 'post-hook', 'post_hook', 'tags'],
        "update": ['quoting', 'column_types', 'meta'],
        "append": ["pre-hook", "pre_hook", "post-hook", "post_hook", "tags"],
        "update": ["quoting", "column_types", "meta"],
    }
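The mapping above drives how duplicate config keys are combined when configs cascade: "append" keys concatenate lists, "update" keys dict-merge, and everything else is simply overwritten by the more specific value. A minimal standalone sketch of those semantics on plain dicts (not dbt's actual merge code):

    def combine(key, project_value, node_value):
        # mirrors MergeBehavior: hooks/tags append, quoting/column_types/meta update
        append_keys = {"pre-hook", "pre_hook", "post-hook", "post_hook", "tags"}
        update_keys = {"quoting", "column_types", "meta"}
        if key in append_keys:
            return list(project_value) + list(node_value)
        if key in update_keys:
            merged = dict(project_value)
            merged.update(node_value)
            return merged
        return node_value  # default: the more specific value wins

    assert combine("tags", ["nightly"], ["pii"]) == ["nightly", "pii"]
    assert combine("meta", {"owner": "core"}, {"tier": 1}) == {"owner": "core", "tier": 1}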
||||
|
||||
@classmethod
|
||||
def _merge_dicts(
|
||||
cls, src: Dict[str, Any], data: Dict[str, Any]
|
||||
) -> Dict[str, Any]:
|
||||
def _merge_dicts(cls, src: Dict[str, Any], data: Dict[str, Any]) -> Dict[str, Any]:
|
||||
"""Find all the items in data that match a target_field on this class,
|
||||
and merge them with the data found in `src` for target_field, using the
|
||||
field's specified merge behavior. Matching items will be removed from
|
||||
@@ -309,14 +292,13 @@ class BaseConfig(
|
||||
)
|
||||
return result
|
||||
|
||||
def update_from(
|
||||
self: T, data: Dict[str, Any], adapter_type: str, validate: bool = True
|
||||
) -> T:
|
||||
def update_from(self: T, data: Dict[str, Any], adapter_type: str, validate: bool = True) -> T:
|
||||
"""Given a dict of keys, update the current config from them, validate
|
||||
it, and return a new config with the updated values
|
||||
"""
|
||||
# sadly, this is a circular import
|
||||
from dbt.adapters.factory import get_config_class_by_name
|
||||
|
||||
dct = self.to_dict(omit_none=False)
|
||||
|
||||
adapter_config_cls = get_config_class_by_name(adapter_type)
|
||||
@@ -374,9 +356,7 @@ class NodeAndTestConfig(BaseConfig):
|
||||
)
|
||||
tags: Union[List[str], str] = field(
|
||||
default_factory=list_str,
|
||||
metadata=metas(ShowBehavior.Hide,
|
||||
MergeBehavior.Append,
|
||||
CompareBehavior.Exclude),
|
||||
metadata=metas(ShowBehavior.Hide, MergeBehavior.Append, CompareBehavior.Exclude),
|
||||
)
|
||||
meta: Dict[str, Any] = field(
|
||||
default_factory=dict,
|
||||
@@ -388,7 +368,7 @@ class NodeAndTestConfig(BaseConfig):
|
||||
class NodeConfig(NodeAndTestConfig):
|
||||
# Note: if any new fields are added with MergeBehavior, also update the
|
||||
# 'mergebehavior' dictionary
|
||||
materialized: str = 'view'
|
||||
materialized: str = "view"
|
||||
persist_docs: Dict[str, Any] = field(default_factory=dict)
|
||||
post_hook: List[Hook] = field(
|
||||
default_factory=list,
|
||||
@@ -409,12 +389,15 @@ class NodeConfig(NodeAndTestConfig):
|
||||
metadata=MergeBehavior.Update.meta(),
|
||||
)
|
||||
full_refresh: Optional[bool] = None
|
||||
on_schema_change: Optional[str] = 'ignore'
|
||||
# 'unique_key' doesn't use 'Optional' because typing.get_type_hints was
|
||||
# sometimes getting the Union order wrong, causing serialization failures.
|
||||
unique_key: Union[str, List[str], None] = None
|
||||
on_schema_change: Optional[str] = "ignore"
|
||||
|
||||
@classmethod
|
||||
def __pre_deserialize__(cls, data):
|
||||
data = super().__pre_deserialize__(data)
|
||||
field_map = {'post-hook': 'post_hook', 'pre-hook': 'pre_hook'}
|
||||
field_map = {"post-hook": "post_hook", "pre-hook": "pre_hook"}
|
||||
# create a new dict because otherwise it gets overwritten in
|
||||
# tests
|
||||
new_dict = {}
|
||||
@@ -432,7 +415,7 @@ class NodeConfig(NodeAndTestConfig):
|
||||
|
||||
def __post_serialize__(self, dct):
|
||||
dct = super().__post_serialize__(dct)
|
||||
field_map = {'post_hook': 'post-hook', 'pre_hook': 'pre-hook'}
|
||||
field_map = {"post_hook": "post-hook", "pre_hook": "pre-hook"}
|
||||
for field_name in field_map:
|
||||
if field_name in dct:
|
||||
dct[field_map[field_name]] = dct.pop(field_name)
|
||||
@@ -441,12 +424,12 @@ class NodeConfig(NodeAndTestConfig):
|
||||
# this is still used by jsonschema validation
|
||||
@classmethod
|
||||
def field_mapping(cls):
|
||||
return {'post_hook': 'post-hook', 'pre_hook': 'pre-hook'}
|
||||
return {"post_hook": "post-hook", "pre_hook": "pre-hook"}
|
||||
|
||||
|
||||
@dataclass
|
||||
class SeedConfig(NodeConfig):
|
||||
materialized: str = 'seed'
|
||||
materialized: str = "seed"
|
||||
quote_columns: Optional[bool] = None
|
||||
|
||||
|
||||
@@ -454,31 +437,29 @@ class SeedConfig(NodeConfig):
|
||||
class TestConfig(NodeAndTestConfig):
|
||||
# this is repeated because of a different default
|
||||
schema: Optional[str] = field(
|
||||
default='dbt_test__audit',
|
||||
default="dbt_test__audit",
|
||||
metadata=CompareBehavior.Exclude.meta(),
|
||||
)
|
||||
materialized: str = 'test'
|
||||
severity: Severity = Severity('ERROR')
|
||||
materialized: str = "test"
|
||||
severity: Severity = Severity("ERROR")
|
||||
store_failures: Optional[bool] = None
|
||||
where: Optional[str] = None
|
||||
limit: Optional[int] = None
|
||||
fail_calc: str = 'count(*)'
|
||||
warn_if: str = '!= 0'
|
||||
error_if: str = '!= 0'
|
||||
fail_calc: str = "count(*)"
|
||||
warn_if: str = "!= 0"
|
||||
error_if: str = "!= 0"
|
||||
|
||||
@classmethod
|
||||
def same_contents(
|
||||
cls, unrendered: Dict[str, Any], other: Dict[str, Any]
|
||||
) -> bool:
|
||||
def same_contents(cls, unrendered: Dict[str, Any], other: Dict[str, Any]) -> bool:
|
||||
"""This is like __eq__, except it explicitly checks certain fields."""
|
||||
modifiers = [
|
||||
'severity',
|
||||
'where',
|
||||
'limit',
|
||||
'fail_calc',
|
||||
'warn_if',
|
||||
'error_if',
|
||||
'store_failures'
|
||||
"severity",
|
||||
"where",
|
||||
"limit",
|
||||
"fail_calc",
|
||||
"warn_if",
|
||||
"error_if",
|
||||
"store_failures",
|
||||
]
|
||||
|
||||
seen = set()
|
||||
@@ -493,7 +474,8 @@ class TestConfig(NodeAndTestConfig):
|
||||
|
||||
@dataclass
|
||||
class EmptySnapshotConfig(NodeConfig):
|
||||
materialized: str = 'snapshot'
|
||||
materialized: str = "snapshot"
|
||||
unique_key: Optional[str] = None # override NodeConfig unique_key definition
|
||||
|
||||
|
||||
@dataclass
|
||||
@@ -503,35 +485,37 @@ class SnapshotConfig(EmptySnapshotConfig):
|
||||
target_schema: Optional[str] = None
|
||||
target_database: Optional[str] = None
|
||||
updated_at: Optional[str] = None
|
||||
check_cols: Optional[Union[str, List[str]]] = None
|
||||
# Not using Optional because of serialization issues with a Union of str and List[str]
|
||||
check_cols: Union[str, List[str], None] = None
|
||||
|
||||
@classmethod
|
||||
def validate(cls, data):
|
||||
super().validate(data)
|
||||
if not data.get('strategy') or not data.get('unique_key') or not \
|
||||
data.get('target_schema'):
|
||||
if not data.get("strategy") or not data.get("unique_key") or not data.get("target_schema"):
|
||||
raise ValidationError(
|
||||
"Snapshots must be configured with a 'strategy', 'unique_key', "
|
||||
"and 'target_schema'.")
|
||||
if data.get('strategy') == 'check':
|
||||
if not data.get('check_cols'):
|
||||
"and 'target_schema'."
|
||||
)
|
||||
if data.get("strategy") == "check":
|
||||
if not data.get("check_cols"):
|
||||
raise ValidationError(
|
||||
"A snapshot configured with the check strategy must "
|
||||
"specify a check_cols configuration.")
|
||||
if (isinstance(data['check_cols'], str) and
|
||||
data['check_cols'] != 'all'):
|
||||
"specify a check_cols configuration."
|
||||
)
|
||||
if isinstance(data["check_cols"], str) and data["check_cols"] != "all":
|
||||
raise ValidationError(
|
||||
f"Invalid value for 'check_cols': {data['check_cols']}. "
|
||||
"Expected 'all' or a list of strings.")
|
||||
"Expected 'all' or a list of strings."
|
||||
)
|
||||
|
||||
elif data.get('strategy') == 'timestamp':
|
||||
if not data.get('updated_at'):
|
||||
elif data.get("strategy") == "timestamp":
|
||||
if not data.get("updated_at"):
|
||||
raise ValidationError(
|
||||
"A snapshot configured with the timestamp strategy "
|
||||
"must specify an updated_at configuration.")
|
||||
if data.get('check_cols'):
|
||||
raise ValidationError(
|
||||
"A 'timestamp' snapshot should not have 'check_cols'")
|
||||
"must specify an updated_at configuration."
|
||||
)
|
||||
if data.get("check_cols"):
|
||||
raise ValidationError("A 'timestamp' snapshot should not have 'check_cols'")
|
||||
# If the strategy is not 'check' or 'timestamp' it's a custom strategy,
|
||||
# formerly supported with GenericSnapshotConfig
|
||||
|
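The validation above enforces that every snapshot declares 'strategy', 'unique_key', and 'target_schema', plus the strategy-specific key. Hedged examples of config dicts that would satisfy it (field values are illustrative):

    check_snapshot = {
        "strategy": "check",
        "unique_key": "id",
        "target_schema": "snapshots",
        "check_cols": ["status", "amount"],   # or the string "all"
    }
    timestamp_snapshot = {
        "strategy": "timestamp",
        "unique_key": "id",
        "target_schema": "snapshots",
        "updated_at": "updated_at",           # required; check_cols is rejected here
    }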
||||
@@ -553,9 +537,7 @@ RESOURCE_TYPES: Dict[NodeType, Type[BaseConfig]] = {
|
||||
# base resource types are like resource types, except nothing has mandatory
|
||||
# configs.
|
||||
BASE_RESOURCE_TYPES: Dict[NodeType, Type[BaseConfig]] = RESOURCE_TYPES.copy()
|
||||
BASE_RESOURCE_TYPES.update({
|
||||
NodeType.Snapshot: EmptySnapshotConfig
|
||||
})
|
||||
BASE_RESOURCE_TYPES.update({NodeType.Snapshot: EmptySnapshotConfig})
|
||||
|
||||
|
||||
def get_config_for(resource_type: NodeType, base=False) -> Type[BaseConfig]:
|
||||
|
||||
@@ -15,18 +15,28 @@ from typing import (
|
||||
TypeVar,
|
||||
)
|
||||
|
||||
from dbt.dataclass_schema import (
|
||||
dbtClassMixin, ExtensibleDbtClassMixin
|
||||
)
|
||||
from dbt.dataclass_schema import dbtClassMixin, ExtensibleDbtClassMixin
|
||||
|
||||
from dbt.clients.system import write_file
|
||||
from dbt.contracts.files import FileHash, MAXIMUM_SEED_SIZE_NAME
|
||||
from dbt.contracts.graph.unparsed import (
|
||||
UnparsedNode, UnparsedDocumentation, Quoting, Docs,
|
||||
UnparsedBaseNode, FreshnessThreshold, ExternalTable,
|
||||
HasYamlMetadata, MacroArgument, UnparsedSourceDefinition,
|
||||
UnparsedSourceTableDefinition, UnparsedColumn, TestDef,
|
||||
ExposureOwner, ExposureType, MaturityType, MetricFilter
|
||||
UnparsedNode,
|
||||
UnparsedDocumentation,
|
||||
Quoting,
|
||||
Docs,
|
||||
UnparsedBaseNode,
|
||||
FreshnessThreshold,
|
||||
ExternalTable,
|
||||
HasYamlMetadata,
|
||||
MacroArgument,
|
||||
UnparsedSourceDefinition,
|
||||
UnparsedSourceTableDefinition,
|
||||
UnparsedColumn,
|
||||
TestDef,
|
||||
ExposureOwner,
|
||||
ExposureType,
|
||||
MaturityType,
|
||||
MetricFilter,
|
||||
)
|
||||
from dbt.contracts.util import Replaceable, AdditionalPropertiesMixin
|
||||
from dbt.exceptions import warn_or_error
|
||||
@@ -45,13 +55,9 @@ from .model_config import (
|
||||
|
||||
|
||||
@dataclass
|
||||
class ColumnInfo(
|
||||
AdditionalPropertiesMixin,
|
||||
ExtensibleDbtClassMixin,
|
||||
Replaceable
|
||||
):
|
||||
class ColumnInfo(AdditionalPropertiesMixin, ExtensibleDbtClassMixin, Replaceable):
|
||||
name: str
|
||||
description: str = ''
|
||||
description: str = ""
|
||||
meta: Dict[str, Any] = field(default_factory=dict)
|
||||
data_type: Optional[str] = None
|
||||
quote: Optional[bool] = None
|
||||
@@ -63,7 +69,7 @@ class ColumnInfo(
|
||||
class HasFqn(dbtClassMixin, Replaceable):
|
||||
fqn: List[str]
|
||||
|
||||
def same_fqn(self, other: 'HasFqn') -> bool:
|
||||
def same_fqn(self, other: "HasFqn") -> bool:
|
||||
return self.fqn == other.fqn
|
||||
|
||||
|
||||
@@ -102,8 +108,8 @@ class HasRelationMetadata(dbtClassMixin, Replaceable):
|
||||
@classmethod
|
||||
def __pre_deserialize__(cls, data):
|
||||
data = super().__pre_deserialize__(data)
|
||||
if 'database' not in data:
|
||||
data['database'] = None
|
||||
if "database" not in data:
|
||||
data["database"] = None
|
||||
return data
|
||||
|
||||
|
||||
@@ -119,21 +125,19 @@ class ParsedNodeMixins(dbtClassMixin):
|
||||
@property
|
||||
def should_store_failures(self):
|
||||
return self.resource_type == NodeType.Test and (
|
||||
self.config.store_failures if self.config.store_failures is not None
|
||||
self.config.store_failures
|
||||
if self.config.store_failures is not None
|
||||
else flags.STORE_FAILURES
|
||||
)
|
||||
|
||||
# will this node map to an object in the database?
|
||||
@property
|
||||
def is_relational(self):
|
||||
return (
|
||||
self.resource_type in NodeType.refable() or
|
||||
self.should_store_failures
|
||||
)
|
||||
return self.resource_type in NodeType.refable() or self.should_store_failures
|
||||
|
||||
@property
|
||||
def is_ephemeral(self):
|
||||
return self.config.materialized == 'ephemeral'
|
||||
return self.config.materialized == "ephemeral"
|
||||
|
||||
@property
|
||||
def is_ephemeral_model(self):
|
||||
@@ -143,7 +147,7 @@ class ParsedNodeMixins(dbtClassMixin):
|
||||
def depends_on_nodes(self):
|
||||
return self.depends_on.nodes
|
||||
|
||||
def patch(self, patch: 'ParsedNodePatch'):
|
||||
def patch(self, patch: "ParsedNodePatch"):
|
||||
"""Given a ParsedNodePatch, add the new information to the node."""
|
||||
# explicitly pick out the parts to update so we don't inadvertently
|
||||
# step on the model name or anything
|
||||
@@ -153,7 +157,6 @@ class ParsedNodeMixins(dbtClassMixin):
|
||||
self.created_at = time.time()
|
||||
self.description = patch.description
|
||||
self.columns = patch.columns
|
||||
self.meta = patch.meta
|
||||
self.docs = patch.docs
|
||||
|
||||
def get_materialization(self):
|
||||
@@ -161,13 +164,7 @@ class ParsedNodeMixins(dbtClassMixin):
|
||||
|
||||
|
||||
@dataclass
|
||||
class ParsedNodeMandatory(
|
||||
UnparsedNode,
|
||||
HasUniqueID,
|
||||
HasFqn,
|
||||
HasRelationMetadata,
|
||||
Replaceable
|
||||
):
|
||||
class ParsedNodeMandatory(UnparsedNode, HasUniqueID, HasFqn, HasRelationMetadata, Replaceable):
|
||||
alias: str
|
||||
checksum: FileHash
|
||||
config: NodeConfig = field(default_factory=NodeConfig)
|
||||
@@ -178,12 +175,31 @@ class ParsedNodeMandatory(
|
||||
|
||||
|
||||
@dataclass
class ParsedNodeDefaults(ParsedNodeMandatory):
class NodeInfoMixin:
    _event_status: Dict[str, Any] = field(default_factory=dict)

    @property
    def node_info(self):
        node_info = {
            "node_path": getattr(self, "path", None),
            "node_name": getattr(self, "name", None),
            "unique_id": getattr(self, "unique_id", None),
            "resource_type": str(getattr(self, "resource_type", "")),
            "materialized": self.config.get("materialized"),
            "node_status": str(self._event_status.get("node_status")),
            "node_started_at": self._event_status.get("started_at"),
            "node_finished_at": self._event_status.get("finished_at"),
        }
        return node_info
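For a model mid-run, node_info would yield a dict shaped roughly like the following; the values are purely illustrative, and the exact status strings come from the event-tracking code, which is not part of this diff:

    {
        "node_path": "staging/stg_orders.sql",
        "node_name": "stg_orders",
        "unique_id": "model.my_project.stg_orders",
        "resource_type": "model",
        "materialized": "view",
        "node_status": "started",
        "node_started_at": "2021-12-01T00:00:00",
        "node_finished_at": None,
    }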
||||
|
||||
|
||||
@dataclass
|
||||
class ParsedNodeDefaults(NodeInfoMixin, ParsedNodeMandatory):
|
||||
tags: List[str] = field(default_factory=list)
|
||||
refs: List[List[str]] = field(default_factory=list)
|
||||
sources: List[List[str]] = field(default_factory=list)
|
||||
depends_on: DependsOn = field(default_factory=DependsOn)
|
||||
description: str = field(default='')
|
||||
description: str = field(default="")
|
||||
columns: Dict[str, ColumnInfo] = field(default_factory=dict)
|
||||
meta: Dict[str, Any] = field(default_factory=dict)
|
||||
docs: Docs = field(default_factory=Docs)
|
||||
@@ -194,38 +210,33 @@ class ParsedNodeDefaults(ParsedNodeMandatory):
|
||||
unrendered_config: Dict[str, Any] = field(default_factory=dict)
|
||||
created_at: float = field(default_factory=lambda: time.time())
|
||||
config_call_dict: Dict[str, Any] = field(default_factory=dict)
|
||||
_event_status: Dict[str, Any] = field(default_factory=dict)
|
||||
|
||||
def write_node(self, target_path: str, subdirectory: str, payload: str):
|
||||
if (os.path.basename(self.path) ==
|
||||
os.path.basename(self.original_file_path)):
|
||||
if os.path.basename(self.path) == os.path.basename(self.original_file_path):
|
||||
# One-to-one relationship of nodes to files.
|
||||
path = self.original_file_path
|
||||
else:
|
||||
# Many-to-one relationship of nodes to files.
|
||||
path = os.path.join(self.original_file_path, self.path)
|
||||
full_path = os.path.join(
|
||||
target_path, subdirectory, self.package_name, path
|
||||
)
|
||||
full_path = os.path.join(target_path, subdirectory, self.package_name, path)
|
||||
|
||||
write_file(full_path, payload)
|
||||
return full_path
|
||||
|
||||
|
||||
T = TypeVar('T', bound='ParsedNode')
|
||||
T = TypeVar("T", bound="ParsedNode")
|
||||
|
||||
|
||||
@dataclass
|
||||
class ParsedNode(ParsedNodeDefaults, ParsedNodeMixins, SerializableType):
|
||||
|
||||
def _serialize(self):
|
||||
return self.to_dict()
|
||||
|
||||
def __post_serialize__(self, dct):
|
||||
if 'config_call_dict' in dct:
|
||||
del dct['config_call_dict']
|
||||
if '_event_status' in dct:
|
||||
del dct['_event_status']
|
||||
if "config_call_dict" in dct:
|
||||
del dct["config_call_dict"]
|
||||
if "_event_status" in dct:
|
||||
del dct["_event_status"]
|
||||
return dct
|
||||
|
||||
@classmethod
|
||||
@@ -233,41 +244,41 @@ class ParsedNode(ParsedNodeDefaults, ParsedNodeMixins, SerializableType):
|
||||
# The serialized ParsedNodes do not differ from each other
|
||||
# in fields that would allow 'from_dict' to distinguis
|
||||
# between them.
|
||||
resource_type = dct['resource_type']
|
||||
if resource_type == 'model':
|
||||
resource_type = dct["resource_type"]
|
||||
if resource_type == "model":
|
||||
return ParsedModelNode.from_dict(dct)
|
||||
elif resource_type == 'analysis':
|
||||
elif resource_type == "analysis":
|
||||
return ParsedAnalysisNode.from_dict(dct)
|
||||
elif resource_type == 'seed':
|
||||
elif resource_type == "seed":
|
||||
return ParsedSeedNode.from_dict(dct)
|
||||
elif resource_type == 'rpc':
|
||||
elif resource_type == "rpc":
|
||||
return ParsedRPCNode.from_dict(dct)
|
||||
elif resource_type == 'sql':
|
||||
elif resource_type == "sql":
|
||||
return ParsedSqlNode.from_dict(dct)
|
||||
elif resource_type == 'test':
|
||||
if 'test_metadata' in dct:
|
||||
elif resource_type == "test":
|
||||
if "test_metadata" in dct:
|
||||
return ParsedGenericTestNode.from_dict(dct)
|
||||
else:
|
||||
return ParsedSingularTestNode.from_dict(dct)
|
||||
elif resource_type == 'operation':
|
||||
elif resource_type == "operation":
|
||||
return ParsedHookNode.from_dict(dct)
|
||||
elif resource_type == 'seed':
|
||||
elif resource_type == "seed":
|
||||
return ParsedSeedNode.from_dict(dct)
|
||||
elif resource_type == 'snapshot':
|
||||
elif resource_type == "snapshot":
|
||||
return ParsedSnapshotNode.from_dict(dct)
|
||||
else:
|
||||
return cls.from_dict(dct)
|
||||
|
||||
def _persist_column_docs(self) -> bool:
|
||||
if hasattr(self.config, 'persist_docs'):
|
||||
if hasattr(self.config, "persist_docs"):
|
||||
assert isinstance(self.config, NodeConfig)
|
||||
return bool(self.config.persist_docs.get('columns'))
|
||||
return bool(self.config.persist_docs.get("columns"))
|
||||
return False
|
||||
|
||||
def _persist_relation_docs(self) -> bool:
|
||||
if hasattr(self.config, 'persist_docs'):
|
||||
if hasattr(self.config, "persist_docs"):
|
||||
assert isinstance(self.config, NodeConfig)
|
||||
return bool(self.config.persist_docs.get('relation'))
|
||||
return bool(self.config.persist_docs.get("relation"))
|
||||
return False
|
||||
|
||||
def same_body(self: T, other: T) -> bool:
|
||||
@@ -283,12 +294,8 @@ class ParsedNode(ParsedNodeDefaults, ParsedNodeMixins, SerializableType):
|
||||
|
||||
if self._persist_column_docs():
|
||||
# assert other._persist_column_docs()
|
||||
column_descriptions = {
|
||||
k: v.description for k, v in self.columns.items()
|
||||
}
|
||||
other_column_descriptions = {
|
||||
k: v.description for k, v in other.columns.items()
|
||||
}
|
||||
column_descriptions = {k: v.description for k, v in self.columns.items()}
|
||||
other_column_descriptions = {k: v.description for k, v in other.columns.items()}
|
||||
if column_descriptions != other_column_descriptions:
|
||||
return False
|
||||
|
||||
@@ -299,7 +306,7 @@ class ParsedNode(ParsedNodeDefaults, ParsedNodeMixins, SerializableType):
|
||||
# compares the configured value, rather than the ultimate value (so
|
||||
# generate_*_name and unset values derived from the target are
|
||||
# ignored)
|
||||
keys = ('database', 'schema', 'alias')
|
||||
keys = ("database", "schema", "alias")
|
||||
for key in keys:
|
||||
mine = self.unrendered_config.get(key)
|
||||
others = other.unrendered_config.get(key)
|
||||
@@ -318,42 +325,40 @@ class ParsedNode(ParsedNodeDefaults, ParsedNodeMixins, SerializableType):
|
||||
return False
|
||||
|
||||
return (
|
||||
self.same_body(old) and
|
||||
self.same_config(old) and
|
||||
self.same_persisted_description(old) and
|
||||
self.same_fqn(old) and
|
||||
self.same_database_representation(old) and
|
||||
True
|
||||
self.same_body(old)
|
||||
and self.same_config(old)
|
||||
and self.same_persisted_description(old)
|
||||
and self.same_fqn(old)
|
||||
and self.same_database_representation(old)
|
||||
and True
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class ParsedAnalysisNode(ParsedNode):
|
||||
resource_type: NodeType = field(metadata={'restrict': [NodeType.Analysis]})
|
||||
resource_type: NodeType = field(metadata={"restrict": [NodeType.Analysis]})
|
||||
|
||||
|
||||
@dataclass
|
||||
class ParsedHookNode(ParsedNode):
|
||||
resource_type: NodeType = field(
|
||||
metadata={'restrict': [NodeType.Operation]}
|
||||
)
|
||||
resource_type: NodeType = field(metadata={"restrict": [NodeType.Operation]})
|
||||
index: Optional[int] = None
|
||||
|
||||
|
||||
@dataclass
|
||||
class ParsedModelNode(ParsedNode):
|
||||
resource_type: NodeType = field(metadata={'restrict': [NodeType.Model]})
|
||||
resource_type: NodeType = field(metadata={"restrict": [NodeType.Model]})
|
||||
|
||||
|
||||
# TODO: rm?
|
||||
@dataclass
|
||||
class ParsedRPCNode(ParsedNode):
|
||||
resource_type: NodeType = field(metadata={'restrict': [NodeType.RPCCall]})
|
||||
resource_type: NodeType = field(metadata={"restrict": [NodeType.RPCCall]})
|
||||
|
||||
|
||||
@dataclass
|
||||
class ParsedSqlNode(ParsedNode):
|
||||
resource_type: NodeType = field(metadata={'restrict': [NodeType.SqlOperation]})
|
||||
resource_type: NodeType = field(metadata={"restrict": [NodeType.SqlOperation]})
|
||||
|
||||
|
||||
def same_seeds(first: ParsedNode, second: ParsedNode) -> bool:
|
||||
@@ -363,31 +368,31 @@ def same_seeds(first: ParsedNode, second: ParsedNode) -> bool:
|
||||
# if the current checksum is a path, we want to log a warning.
|
||||
result = first.checksum == second.checksum
|
||||
|
||||
if first.checksum.name == 'path':
|
||||
if first.checksum.name == "path":
|
||||
msg: str
|
||||
if second.checksum.name != 'path':
|
||||
if second.checksum.name != "path":
|
||||
msg = (
|
||||
f'Found a seed ({first.package_name}.{first.name}) '
|
||||
f'>{MAXIMUM_SEED_SIZE_NAME} in size. The previous file was '
|
||||
f'<={MAXIMUM_SEED_SIZE_NAME}, so it has changed'
|
||||
f"Found a seed ({first.package_name}.{first.name}) "
|
||||
f">{MAXIMUM_SEED_SIZE_NAME} in size. The previous file was "
|
||||
f"<={MAXIMUM_SEED_SIZE_NAME}, so it has changed"
|
||||
)
|
||||
elif result:
|
||||
msg = (
|
||||
f'Found a seed ({first.package_name}.{first.name}) '
|
||||
f'>{MAXIMUM_SEED_SIZE_NAME} in size at the same path, dbt '
|
||||
f'cannot tell if it has changed: assuming they are the same'
|
||||
f"Found a seed ({first.package_name}.{first.name}) "
|
||||
f">{MAXIMUM_SEED_SIZE_NAME} in size at the same path, dbt "
|
||||
f"cannot tell if it has changed: assuming they are the same"
|
||||
)
|
||||
elif not result:
|
||||
msg = (
|
||||
f'Found a seed ({first.package_name}.{first.name}) '
|
||||
f'>{MAXIMUM_SEED_SIZE_NAME} in size. The previous file was in '
|
||||
f'a different location, assuming it has changed'
|
||||
f"Found a seed ({first.package_name}.{first.name}) "
|
||||
f">{MAXIMUM_SEED_SIZE_NAME} in size. The previous file was in "
|
||||
f"a different location, assuming it has changed"
|
||||
)
|
||||
else:
|
||||
msg = (
|
||||
f'Found a seed ({first.package_name}.{first.name}) '
|
||||
f'>{MAXIMUM_SEED_SIZE_NAME} in size. The previous file had a '
|
||||
f'checksum type of {second.checksum.name}, so it has changed'
|
||||
f"Found a seed ({first.package_name}.{first.name}) "
|
||||
f">{MAXIMUM_SEED_SIZE_NAME} in size. The previous file had a "
|
||||
f"checksum type of {second.checksum.name}, so it has changed"
|
||||
)
|
||||
warn_or_error(msg, node=first)
|
||||
|
||||
@@ -397,12 +402,12 @@ def same_seeds(first: ParsedNode, second: ParsedNode) -> bool:
|
||||
@dataclass
|
||||
class ParsedSeedNode(ParsedNode):
|
||||
# keep this in sync with CompiledSeedNode!
|
||||
resource_type: NodeType = field(metadata={'restrict': [NodeType.Seed]})
|
||||
resource_type: NodeType = field(metadata={"restrict": [NodeType.Seed]})
|
||||
config: SeedConfig = field(default_factory=SeedConfig)
|
||||
|
||||
@property
|
||||
def empty(self):
|
||||
""" Seeds are never empty"""
|
||||
"""Seeds are never empty"""
|
||||
return False
|
||||
|
||||
def same_body(self: T, other: T) -> bool:
|
||||
@@ -426,16 +431,20 @@ class HasTestMetadata(dbtClassMixin):
|
||||
|
||||
@dataclass
|
||||
class ParsedSingularTestNode(ParsedNode):
|
||||
resource_type: NodeType = field(metadata={'restrict': [NodeType.Test]})
|
||||
resource_type: NodeType = field(metadata={"restrict": [NodeType.Test]})
|
||||
# Was not able to make mypy happy and keep the code working. We need to
|
||||
# refactor the various configs.
|
||||
config: TestConfig = field(default_factory=TestConfig) # type: ignore
|
||||
|
||||
@property
|
||||
def test_node_type(self):
|
||||
return "singular"
|
||||
|
||||
|
||||
@dataclass
|
||||
class ParsedGenericTestNode(ParsedNode, HasTestMetadata):
|
||||
# keep this in sync with CompiledGenericTestNode!
|
||||
resource_type: NodeType = field(metadata={'restrict': [NodeType.Test]})
|
||||
resource_type: NodeType = field(metadata={"restrict": [NodeType.Test]})
|
||||
column_name: Optional[str] = None
|
||||
file_key_name: Optional[str] = None
|
||||
# Was not able to make mypy happy and keep the code working. We need to
|
||||
@@ -446,11 +455,11 @@ class ParsedGenericTestNode(ParsedNode, HasTestMetadata):
|
||||
if other is None:
|
||||
return False
|
||||
|
||||
return (
|
||||
self.same_config(other) and
|
||||
self.same_fqn(other) and
|
||||
True
|
||||
)
|
||||
return self.same_config(other) and self.same_fqn(other) and True
|
||||
|
||||
@property
|
||||
def test_node_type(self):
|
||||
return "generic"
|
||||
|
||||
|
||||
@dataclass
|
||||
@@ -461,13 +470,13 @@ class IntermediateSnapshotNode(ParsedNode):
|
||||
# defined in config blocks. To fix that, we have an intermediate type that
|
||||
# uses a regular node config, which the snapshot parser will then convert
|
||||
# into a full ParsedSnapshotNode after rendering.
|
||||
resource_type: NodeType = field(metadata={'restrict': [NodeType.Snapshot]})
|
||||
resource_type: NodeType = field(metadata={"restrict": [NodeType.Snapshot]})
|
||||
config: EmptySnapshotConfig = field(default_factory=EmptySnapshotConfig)
|
||||
|
||||
|
||||
@dataclass
|
||||
class ParsedSnapshotNode(ParsedNode):
|
||||
resource_type: NodeType = field(metadata={'restrict': [NodeType.Snapshot]})
|
||||
resource_type: NodeType = field(metadata={"restrict": [NodeType.Snapshot]})
|
||||
config: SnapshotConfig
|
||||
|
||||
|
||||
@@ -497,12 +506,12 @@ class ParsedMacroPatch(ParsedPatch):
|
||||
class ParsedMacro(UnparsedBaseNode, HasUniqueID):
|
||||
name: str
|
||||
macro_sql: str
|
||||
resource_type: NodeType = field(metadata={'restrict': [NodeType.Macro]})
|
||||
resource_type: NodeType = field(metadata={"restrict": [NodeType.Macro]})
|
||||
# TODO: can macros even have tags?
|
||||
tags: List[str] = field(default_factory=list)
|
||||
# TODO: is this ever populated?
|
||||
depends_on: MacroDependsOn = field(default_factory=MacroDependsOn)
|
||||
description: str = ''
|
||||
description: str = ""
|
||||
meta: Dict[str, Any] = field(default_factory=dict)
|
||||
docs: Docs = field(default_factory=Docs)
|
||||
patch_path: Optional[str] = None
|
||||
@@ -517,7 +526,7 @@ class ParsedMacro(UnparsedBaseNode, HasUniqueID):
|
||||
self.docs = patch.docs
|
||||
self.arguments = patch.arguments
|
||||
|
||||
def same_contents(self, other: Optional['ParsedMacro']) -> bool:
|
||||
def same_contents(self, other: Optional["ParsedMacro"]) -> bool:
|
||||
if other is None:
|
||||
return False
|
||||
# the only thing that makes one macro different from another with the
|
||||
@@ -534,7 +543,7 @@ class ParsedDocumentation(UnparsedDocumentation, HasUniqueID):
|
||||
def search_name(self):
|
||||
return self.name
|
||||
|
||||
def same_contents(self, other: Optional['ParsedDocumentation']) -> bool:
|
||||
def same_contents(self, other: Optional["ParsedDocumentation"]) -> bool:
|
||||
if other is None:
|
||||
return False
|
||||
# the only thing that makes one doc different from another with the
|
||||
@@ -553,11 +562,11 @@ def normalize_test(testdef: TestDef) -> Dict[str, Any]:
|
||||
class UnpatchedSourceDefinition(UnparsedBaseNode, HasUniqueID, HasFqn):
|
||||
source: UnparsedSourceDefinition
|
||||
table: UnparsedSourceTableDefinition
|
||||
resource_type: NodeType = field(metadata={'restrict': [NodeType.Source]})
|
||||
resource_type: NodeType = field(metadata={"restrict": [NodeType.Source]})
|
||||
patch_path: Optional[Path] = None
|
||||
|
||||
def get_full_source_name(self):
|
||||
return f'{self.source.name}_{self.table.name}'
|
||||
return f"{self.source.name}_{self.table.name}"
|
||||
|
||||
def get_source_representation(self):
|
||||
return f'source("{self.source.name}", "{self.table.name}")'
|
||||
@@ -582,9 +591,7 @@ class UnpatchedSourceDefinition(UnparsedBaseNode, HasUniqueID, HasFqn):
|
||||
else:
|
||||
return self.table.columns
|
||||
|
||||
def get_tests(
|
||||
self
|
||||
) -> Iterator[Tuple[Dict[str, Any], Optional[UnparsedColumn]]]:
|
||||
def get_tests(self) -> Iterator[Tuple[Dict[str, Any], Optional[UnparsedColumn]]]:
|
||||
for test in self.tests:
|
||||
yield normalize_test(test), None
|
||||
|
||||
@@ -602,24 +609,27 @@ class UnpatchedSourceDefinition(UnparsedBaseNode, HasUniqueID, HasFqn):
|
||||
|
||||
|
||||
@dataclass
|
||||
class ParsedSourceDefinition(
|
||||
class ParsedSourceMandatory(
|
||||
UnparsedBaseNode,
|
||||
HasUniqueID,
|
||||
HasRelationMetadata,
|
||||
HasFqn,
|
||||
|
||||
):
|
||||
name: str
|
||||
source_name: str
|
||||
source_description: str
|
||||
loader: str
|
||||
identifier: str
|
||||
resource_type: NodeType = field(metadata={'restrict': [NodeType.Source]})
|
||||
resource_type: NodeType = field(metadata={"restrict": [NodeType.Source]})
|
||||
|
||||
|
||||
@dataclass
|
||||
class ParsedSourceDefinition(NodeInfoMixin, ParsedSourceMandatory):
|
||||
quoting: Quoting = field(default_factory=Quoting)
|
||||
loaded_at_field: Optional[str] = None
|
||||
freshness: Optional[FreshnessThreshold] = None
|
||||
external: Optional[ExternalTable] = None
|
||||
description: str = ''
|
||||
description: str = ""
|
||||
columns: Dict[str, ColumnInfo] = field(default_factory=dict)
|
||||
meta: Dict[str, Any] = field(default_factory=dict)
|
||||
source_meta: Dict[str, Any] = field(default_factory=dict)
|
||||
@@ -629,43 +639,40 @@ class ParsedSourceDefinition(
|
||||
unrendered_config: Dict[str, Any] = field(default_factory=dict)
|
||||
relation_name: Optional[str] = None
|
||||
created_at: float = field(default_factory=lambda: time.time())
|
||||
_event_status: Dict[str, Any] = field(default_factory=dict)
|
||||
|
||||
def __post_serialize__(self, dct):
|
||||
if '_event_status' in dct:
|
||||
del dct['_event_status']
|
||||
if "_event_status" in dct:
|
||||
del dct["_event_status"]
|
||||
return dct
|
||||
|
||||
def same_database_representation(
|
||||
self, other: 'ParsedSourceDefinition'
|
||||
) -> bool:
|
||||
def same_database_representation(self, other: "ParsedSourceDefinition") -> bool:
|
||||
return (
|
||||
self.database == other.database and
|
||||
self.schema == other.schema and
|
||||
self.identifier == other.identifier and
|
||||
True
|
||||
self.database == other.database
|
||||
and self.schema == other.schema
|
||||
and self.identifier == other.identifier
|
||||
and True
|
||||
)
|
||||
|
||||
def same_quoting(self, other: 'ParsedSourceDefinition') -> bool:
|
||||
def same_quoting(self, other: "ParsedSourceDefinition") -> bool:
|
||||
return self.quoting == other.quoting
|
||||
|
||||
def same_freshness(self, other: 'ParsedSourceDefinition') -> bool:
|
||||
def same_freshness(self, other: "ParsedSourceDefinition") -> bool:
|
||||
return (
|
||||
self.freshness == other.freshness and
|
||||
self.loaded_at_field == other.loaded_at_field and
|
||||
True
|
||||
self.freshness == other.freshness
|
||||
and self.loaded_at_field == other.loaded_at_field
|
||||
and True
|
||||
)
|
||||
|
||||
def same_external(self, other: 'ParsedSourceDefinition') -> bool:
|
||||
def same_external(self, other: "ParsedSourceDefinition") -> bool:
|
||||
return self.external == other.external
|
||||
|
||||
def same_config(self, old: 'ParsedSourceDefinition') -> bool:
|
||||
def same_config(self, old: "ParsedSourceDefinition") -> bool:
|
||||
return self.config.same_contents(
|
||||
self.unrendered_config,
|
||||
old.unrendered_config,
|
||||
)
|
||||
|
||||
def same_contents(self, old: Optional['ParsedSourceDefinition']) -> bool:
|
||||
def same_contents(self, old: Optional["ParsedSourceDefinition"]) -> bool:
|
||||
# existing when it didn't before is a change!
|
||||
if old is None:
|
||||
return True
|
||||
@@ -679,17 +686,17 @@ class ParsedSourceDefinition(
|
||||
# metadata/tags changes are not "changes"
|
||||
# patching/description changes are not "changes"
|
||||
return (
|
||||
self.same_database_representation(old) and
|
||||
self.same_fqn(old) and
|
||||
self.same_config(old) and
|
||||
self.same_quoting(old) and
|
||||
self.same_freshness(old) and
|
||||
self.same_external(old) and
|
||||
True
|
||||
self.same_database_representation(old)
|
||||
and self.same_fqn(old)
|
||||
and self.same_config(old)
|
||||
and self.same_quoting(old)
|
||||
and self.same_freshness(old)
|
||||
and self.same_external(old)
|
||||
and True
|
||||
)
|
||||
|
||||
def get_full_source_name(self):
|
||||
return f'{self.source_name}_{self.name}'
|
||||
return f"{self.source_name}_{self.name}"
|
||||
|
||||
def get_source_representation(self):
|
||||
return f'source("{self.source.name}", "{self.table.name}")'
|
||||
@@ -728,7 +735,7 @@ class ParsedSourceDefinition(
|
||||
|
||||
@property
|
||||
def search_name(self):
|
||||
return f'{self.source_name}.{self.name}'
|
||||
return f"{self.source_name}.{self.name}"
|
||||
|
||||
|
||||
@dataclass
|
||||
@@ -737,7 +744,7 @@ class ParsedExposure(UnparsedBaseNode, HasUniqueID, HasFqn):
|
||||
type: ExposureType
|
||||
owner: ExposureOwner
|
||||
resource_type: NodeType = NodeType.Exposure
|
||||
description: str = ''
|
||||
description: str = ""
|
||||
maturity: Optional[MaturityType] = None
|
||||
meta: Dict[str, Any] = field(default_factory=dict)
|
||||
tags: List[str] = field(default_factory=list)
|
||||
@@ -755,39 +762,39 @@ class ParsedExposure(UnparsedBaseNode, HasUniqueID, HasFqn):
|
||||
def search_name(self):
|
||||
return self.name
|
||||
|
||||
def same_depends_on(self, old: 'ParsedExposure') -> bool:
|
||||
def same_depends_on(self, old: "ParsedExposure") -> bool:
|
||||
return set(self.depends_on.nodes) == set(old.depends_on.nodes)
|
||||
|
||||
def same_description(self, old: 'ParsedExposure') -> bool:
|
||||
def same_description(self, old: "ParsedExposure") -> bool:
|
||||
return self.description == old.description
|
||||
|
||||
def same_maturity(self, old: 'ParsedExposure') -> bool:
|
||||
def same_maturity(self, old: "ParsedExposure") -> bool:
|
||||
return self.maturity == old.maturity
|
||||
|
||||
def same_owner(self, old: 'ParsedExposure') -> bool:
|
||||
def same_owner(self, old: "ParsedExposure") -> bool:
|
||||
return self.owner == old.owner
|
||||
|
||||
def same_exposure_type(self, old: 'ParsedExposure') -> bool:
|
||||
def same_exposure_type(self, old: "ParsedExposure") -> bool:
|
||||
return self.type == old.type
|
||||
|
||||
def same_url(self, old: 'ParsedExposure') -> bool:
|
||||
def same_url(self, old: "ParsedExposure") -> bool:
|
||||
return self.url == old.url
|
||||
|
||||
def same_contents(self, old: Optional['ParsedExposure']) -> bool:
|
||||
def same_contents(self, old: Optional["ParsedExposure"]) -> bool:
|
||||
# existing when it didn't before is a change!
|
||||
# metadata/tags changes are not "changes"
|
||||
if old is None:
|
||||
return True
|
||||
|
||||
return (
|
||||
self.same_fqn(old) and
|
||||
self.same_exposure_type(old) and
|
||||
self.same_owner(old) and
|
||||
self.same_maturity(old) and
|
||||
self.same_url(old) and
|
||||
self.same_description(old) and
|
||||
self.same_depends_on(old) and
|
||||
True
|
||||
self.same_fqn(old)
|
||||
and self.same_exposure_type(old)
|
||||
and self.same_owner(old)
|
||||
and self.same_maturity(old)
|
||||
and self.same_url(old)
|
||||
and self.same_description(old)
|
||||
and self.same_depends_on(old)
|
||||
and True
|
||||
)
|
||||
|
||||
|
||||
@@ -819,50 +826,50 @@ class ParsedMetric(UnparsedBaseNode, HasUniqueID, HasFqn):
|
||||
def search_name(self):
|
||||
return self.name
|
||||
|
||||
def same_model(self, old: 'ParsedMetric') -> bool:
|
||||
def same_model(self, old: "ParsedMetric") -> bool:
|
||||
return self.model == old.model
|
||||
|
||||
def same_dimensions(self, old: 'ParsedMetric') -> bool:
|
||||
def same_dimensions(self, old: "ParsedMetric") -> bool:
|
||||
return self.dimensions == old.dimensions
|
||||
|
||||
def same_filters(self, old: 'ParsedMetric') -> bool:
|
||||
def same_filters(self, old: "ParsedMetric") -> bool:
|
||||
return self.filters == old.filters
|
||||
|
||||
def same_description(self, old: 'ParsedMetric') -> bool:
|
||||
def same_description(self, old: "ParsedMetric") -> bool:
|
||||
return self.description == old.description
|
||||
|
||||
def same_label(self, old: 'ParsedMetric') -> bool:
|
||||
def same_label(self, old: "ParsedMetric") -> bool:
|
||||
return self.label == old.label
|
||||
|
||||
def same_type(self, old: 'ParsedMetric') -> bool:
|
||||
def same_type(self, old: "ParsedMetric") -> bool:
|
||||
return self.type == old.type
|
||||
|
||||
def same_sql(self, old: 'ParsedMetric') -> bool:
|
||||
def same_sql(self, old: "ParsedMetric") -> bool:
|
||||
return self.sql == old.sql
|
||||
|
||||
def same_timestamp(self, old: 'ParsedMetric') -> bool:
|
||||
def same_timestamp(self, old: "ParsedMetric") -> bool:
|
||||
return self.timestamp == old.timestamp
|
||||
|
||||
def same_time_grains(self, old: 'ParsedMetric') -> bool:
|
||||
def same_time_grains(self, old: "ParsedMetric") -> bool:
|
||||
return self.time_grains == old.time_grains
|
||||
|
||||
def same_contents(self, old: Optional['ParsedMetric']) -> bool:
|
||||
def same_contents(self, old: Optional["ParsedMetric"]) -> bool:
|
||||
# existing when it didn't before is a change!
|
||||
# metadata/tags changes are not "changes"
|
||||
if old is None:
|
||||
return True
|
||||
|
||||
return (
|
||||
self.same_model(old) and
|
||||
self.same_dimensions(old) and
|
||||
self.same_filters(old) and
|
||||
self.same_description(old) and
|
||||
self.same_label(old) and
|
||||
self.same_type(old) and
|
||||
self.same_sql(old) and
|
||||
self.same_timestamp(old) and
|
||||
self.same_time_grains(old) and
|
||||
True
|
||||
self.same_model(old)
|
||||
and self.same_dimensions(old)
|
||||
and self.same_filters(old)
|
||||
and self.same_description(old)
|
||||
and self.same_label(old)
|
||||
and self.same_type(old)
|
||||
and self.same_sql(old)
|
||||
and self.same_timestamp(old)
|
||||
and self.same_time_grains(old)
|
||||
and True
|
||||
)
|
||||
|
||||
|
||||
|
||||
@@ -4,13 +4,12 @@ from dbt.contracts.util import (
|
||||
Mergeable,
|
||||
Replaceable,
|
||||
)
|
||||
|
||||
# trigger the PathEncoder
|
||||
import dbt.helper_types # noqa:F401
|
||||
from dbt.exceptions import CompilationException
|
||||
|
||||
from dbt.dataclass_schema import (
|
||||
dbtClassMixin, StrEnum, ExtensibleDbtClassMixin
|
||||
)
|
||||
from dbt.dataclass_schema import dbtClassMixin, StrEnum, ExtensibleDbtClassMixin
|
||||
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import timedelta
|
||||
@@ -27,7 +26,7 @@ class UnparsedBaseNode(dbtClassMixin, Replaceable):
|
||||
|
||||
@property
|
||||
def file_id(self):
|
||||
return f'{self.package_name}://{self.original_file_path}'
|
||||
return f"{self.package_name}://{self.original_file_path}"
|
||||
|
||||
|
||||
@dataclass
|
||||
@@ -41,27 +40,31 @@ class HasSQL:
|
||||
|
||||
@dataclass
|
||||
class UnparsedMacro(UnparsedBaseNode, HasSQL):
|
||||
resource_type: NodeType = field(metadata={'restrict': [NodeType.Macro]})
|
||||
resource_type: NodeType = field(metadata={"restrict": [NodeType.Macro]})
|
||||
|
||||
|
||||
@dataclass
|
||||
class UnparsedGenericTest(UnparsedBaseNode, HasSQL):
|
||||
resource_type: NodeType = field(metadata={'restrict': [NodeType.Macro]})
|
||||
resource_type: NodeType = field(metadata={"restrict": [NodeType.Macro]})
|
||||
|
||||
|
||||
@dataclass
|
||||
class UnparsedNode(UnparsedBaseNode, HasSQL):
|
||||
name: str
|
||||
resource_type: NodeType = field(metadata={'restrict': [
|
||||
NodeType.Model,
|
||||
NodeType.Analysis,
|
||||
NodeType.Test,
|
||||
NodeType.Snapshot,
|
||||
NodeType.Operation,
|
||||
NodeType.Seed,
|
||||
NodeType.RPCCall,
|
||||
NodeType.SqlOperation,
|
||||
]})
|
||||
resource_type: NodeType = field(
|
||||
metadata={
|
||||
"restrict": [
|
||||
NodeType.Model,
|
||||
NodeType.Analysis,
|
||||
NodeType.Test,
|
||||
NodeType.Snapshot,
|
||||
NodeType.Operation,
|
||||
NodeType.Seed,
|
||||
NodeType.RPCCall,
|
||||
NodeType.SqlOperation,
|
||||
]
|
||||
}
|
||||
)
|
||||
|
||||
@property
|
||||
def search_name(self):
|
||||
@@ -70,9 +73,7 @@ class UnparsedNode(UnparsedBaseNode, HasSQL):
|
||||
|
||||
@dataclass
|
||||
class UnparsedRunHook(UnparsedNode):
|
||||
resource_type: NodeType = field(
|
||||
metadata={'restrict': [NodeType.Operation]}
|
||||
)
|
||||
resource_type: NodeType = field(metadata={"restrict": [NodeType.Operation]})
|
||||
index: Optional[int] = None
|
||||
|
||||
|
||||
@@ -82,10 +83,9 @@ class Docs(dbtClassMixin, Replaceable):
|
||||
|
||||
|
||||
@dataclass
|
||||
class HasDocs(AdditionalPropertiesMixin, ExtensibleDbtClassMixin,
|
||||
Replaceable):
|
||||
class HasDocs(AdditionalPropertiesMixin, ExtensibleDbtClassMixin, Replaceable):
|
||||
name: str
|
||||
description: str = ''
|
||||
description: str = ""
|
||||
meta: Dict[str, Any] = field(default_factory=dict)
|
||||
data_type: Optional[str] = None
|
||||
docs: Docs = field(default_factory=Docs)
|
||||
@@ -128,11 +128,11 @@ class HasYamlMetadata(dbtClassMixin):
|
||||
|
||||
@property
|
||||
def file_id(self):
|
||||
return f'{self.package_name}://{self.original_file_path}'
|
||||
return f"{self.package_name}://{self.original_file_path}"
|
||||
|
||||
|
||||
@dataclass
|
||||
class HasConfig():
|
||||
class HasConfig:
|
||||
config: Dict[str, Any] = field(default_factory=dict)
|
||||
|
||||
|
||||
@@ -150,7 +150,7 @@ class UnparsedNodeUpdate(HasConfig, HasColumnTests, HasTests, HasYamlMetadata):
|
||||
class MacroArgument(dbtClassMixin):
|
||||
name: str
|
||||
type: Optional[str] = None
|
||||
description: str = ''
|
||||
description: str = ""
|
||||
|
||||
|
||||
@dataclass
|
||||
@@ -159,12 +159,12 @@ class UnparsedMacroUpdate(HasConfig, HasDocs, HasYamlMetadata):
|
||||
|
||||
|
||||
class TimePeriod(StrEnum):
|
||||
minute = 'minute'
|
||||
hour = 'hour'
|
||||
day = 'day'
|
||||
minute = "minute"
|
||||
hour = "hour"
|
||||
day = "day"
|
||||
|
||||
def plural(self) -> str:
|
||||
return str(self) + 's'
|
||||
return str(self) + "s"
|
||||
|
||||
|
||||
@dataclass
|
||||
@@ -191,6 +191,7 @@ class FreshnessThreshold(dbtClassMixin, Mergeable):
|
||||
|
||||
def status(self, age: float) -> "dbt.contracts.results.FreshnessStatus":
|
||||
from dbt.contracts.results import FreshnessStatus
|
||||
|
||||
if self.error_after and self.error_after.exceeded(age):
|
||||
return FreshnessStatus.Error
|
||||
elif self.warn_after and self.warn_after.exceeded(age):
|
||||
@@ -203,25 +204,20 @@ class FreshnessThreshold(dbtClassMixin, Mergeable):
|
||||
|
||||
|
||||
@dataclass
|
||||
class AdditionalPropertiesAllowed(
|
||||
AdditionalPropertiesMixin,
|
||||
ExtensibleDbtClassMixin
|
||||
):
|
||||
class AdditionalPropertiesAllowed(AdditionalPropertiesMixin, ExtensibleDbtClassMixin):
|
||||
_extra: Dict[str, Any] = field(default_factory=dict)
|
||||
|
||||
|
||||
@dataclass
|
||||
class ExternalPartition(AdditionalPropertiesAllowed, Replaceable):
|
||||
name: str = ''
|
||||
description: str = ''
|
||||
data_type: str = ''
|
||||
name: str = ""
|
||||
description: str = ""
|
||||
data_type: str = ""
|
||||
meta: Dict[str, Any] = field(default_factory=dict)
|
||||
|
||||
def __post_init__(self):
|
||||
if self.name == '' or self.data_type == '':
|
||||
raise CompilationException(
|
||||
'External partition columns must have names and data types'
|
||||
)
|
||||
if self.name == "" or self.data_type == "":
|
||||
raise CompilationException("External partition columns must have names and data types")
|
||||
|
||||
|
||||
@dataclass
|
||||
@@ -249,44 +245,40 @@ class UnparsedSourceTableDefinition(HasColumnTests, HasTests):
|
||||
loaded_at_field: Optional[str] = None
|
||||
identifier: Optional[str] = None
|
||||
quoting: Quoting = field(default_factory=Quoting)
|
||||
freshness: Optional[FreshnessThreshold] = field(
|
||||
default_factory=FreshnessThreshold
|
||||
)
|
||||
freshness: Optional[FreshnessThreshold] = field(default_factory=FreshnessThreshold)
|
||||
external: Optional[ExternalTable] = None
|
||||
tags: List[str] = field(default_factory=list)
|
||||
|
||||
def __post_serialize__(self, dct):
|
||||
dct = super().__post_serialize__(dct)
|
||||
if 'freshness' not in dct and self.freshness is None:
|
||||
dct['freshness'] = None
|
||||
if "freshness" not in dct and self.freshness is None:
|
||||
dct["freshness"] = None
|
||||
return dct
|
||||
|
||||
|
||||
@dataclass
|
||||
class UnparsedSourceDefinition(dbtClassMixin, Replaceable):
|
||||
name: str
|
||||
description: str = ''
|
||||
description: str = ""
|
||||
meta: Dict[str, Any] = field(default_factory=dict)
|
||||
database: Optional[str] = None
|
||||
schema: Optional[str] = None
|
||||
loader: str = ''
|
||||
loader: str = ""
|
||||
quoting: Quoting = field(default_factory=Quoting)
|
||||
freshness: Optional[FreshnessThreshold] = field(
|
||||
default_factory=FreshnessThreshold
|
||||
)
|
||||
freshness: Optional[FreshnessThreshold] = field(default_factory=FreshnessThreshold)
|
||||
loaded_at_field: Optional[str] = None
|
||||
tables: List[UnparsedSourceTableDefinition] = field(default_factory=list)
|
||||
tags: List[str] = field(default_factory=list)
|
||||
config: Dict[str, Any] = field(default_factory=dict)
|
||||
|
||||
@property
|
||||
def yaml_key(self) -> 'str':
|
||||
return 'sources'
|
||||
def yaml_key(self) -> "str":
|
||||
return "sources"
|
||||
|
||||
def __post_serialize__(self, dct):
|
||||
dct = super().__post_serialize__(dct)
|
||||
if 'freshness' not in dct and self.freshness is None:
|
||||
dct['freshness'] = None
|
||||
if "freshness" not in dct and self.freshness is None:
|
||||
dct["freshness"] = None
|
||||
return dct
|
||||
|
||||
|
||||
@@ -300,9 +292,7 @@ class SourceTablePatch(dbtClassMixin):
|
||||
loaded_at_field: Optional[str] = None
|
||||
identifier: Optional[str] = None
|
||||
quoting: Quoting = field(default_factory=Quoting)
|
||||
freshness: Optional[FreshnessThreshold] = field(
|
||||
default_factory=FreshnessThreshold
|
||||
)
|
||||
freshness: Optional[FreshnessThreshold] = field(default_factory=FreshnessThreshold)
|
||||
external: Optional[ExternalTable] = None
|
||||
tags: Optional[List[str]] = None
|
||||
tests: Optional[List[TestDef]] = None
|
||||
@@ -310,13 +300,13 @@ class SourceTablePatch(dbtClassMixin):
|
||||
|
||||
    def to_patch_dict(self) -> Dict[str, Any]:
        dct = self.to_dict(omit_none=True)
        remove_keys = ('name')
        remove_keys = "name"
        for key in remove_keys:
            if key in dct:
                del dct[key]

        if self.freshness is None:
            dct['freshness'] = None
            dct["freshness"] = None

        return dct
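One subtlety the reformat makes visible: ('name') is not a one-element tuple, it is just the string 'name', so the loop iterates characters ('n', 'a', 'm', 'e') and the 'name' key itself is never deleted. A short illustration:

    remove_keys = ("name")                 # parentheses alone do not make a tuple
    assert remove_keys == "name"
    assert list(remove_keys) == ["n", "a", "m", "e"]
    assert ("name",) != "name"             # a one-element tuple needs the trailing comma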
||||
@@ -324,13 +314,13 @@ class SourceTablePatch(dbtClassMixin):
|
||||
@dataclass
|
||||
class SourcePatch(dbtClassMixin, Replaceable):
|
||||
name: str = field(
|
||||
metadata=dict(description='The name of the source to override'),
|
||||
metadata=dict(description="The name of the source to override"),
|
||||
)
|
||||
overrides: str = field(
|
||||
metadata=dict(description='The package of the source to override'),
|
||||
metadata=dict(description="The package of the source to override"),
|
||||
)
|
||||
path: Path = field(
|
||||
metadata=dict(description='The path to the patch-defining yml file'),
|
||||
metadata=dict(description="The path to the patch-defining yml file"),
|
||||
)
|
||||
description: Optional[str] = None
|
||||
meta: Optional[Dict[str, Any]] = None
|
||||
@@ -338,22 +328,20 @@ class SourcePatch(dbtClassMixin, Replaceable):
|
||||
schema: Optional[str] = None
|
||||
loader: Optional[str] = None
|
||||
quoting: Optional[Quoting] = None
|
||||
freshness: Optional[Optional[FreshnessThreshold]] = field(
|
||||
default_factory=FreshnessThreshold
|
||||
)
|
||||
freshness: Optional[Optional[FreshnessThreshold]] = field(default_factory=FreshnessThreshold)
|
||||
loaded_at_field: Optional[str] = None
|
||||
tables: Optional[List[SourceTablePatch]] = None
|
||||
tags: Optional[List[str]] = None
|
||||
|
||||
def to_patch_dict(self) -> Dict[str, Any]:
|
||||
dct = self.to_dict(omit_none=True)
|
||||
remove_keys = ('name', 'overrides', 'tables', 'path')
|
||||
remove_keys = ("name", "overrides", "tables", "path")
|
||||
for key in remove_keys:
|
||||
if key in dct:
|
||||
del dct[key]
|
||||
|
||||
if self.freshness is None:
|
||||
dct['freshness'] = None
|
||||
dct["freshness"] = None
|
||||
|
||||
return dct
|
||||
|
||||
@@ -374,7 +362,7 @@ class UnparsedDocumentation(dbtClassMixin, Replaceable):
|
||||
|
||||
@property
|
||||
def file_id(self):
|
||||
return f'{self.package_name}://{self.original_file_path}'
|
||||
return f"{self.package_name}://{self.original_file_path}"
|
||||
|
||||
@property
|
||||
def resource_type(self):
|
||||
@@ -389,9 +377,9 @@ class UnparsedDocumentationFile(UnparsedDocumentation):
|
||||
# can't use total_ordering decorator here, as str provides an ordering already
|
||||
# and it's not the one we want.
|
||||
class Maturity(StrEnum):
|
||||
low = 'low'
|
||||
medium = 'medium'
|
||||
high = 'high'
|
||||
low = "low"
|
||||
medium = "medium"
|
||||
high = "high"
|
||||
|
||||
def __lt__(self, other):
|
||||
if not isinstance(other, Maturity):
|
||||
@@ -416,17 +404,17 @@ class Maturity(StrEnum):
|
||||
|
||||
|
||||
class ExposureType(StrEnum):
|
||||
Dashboard = 'dashboard'
|
||||
Notebook = 'notebook'
|
||||
Analysis = 'analysis'
|
||||
ML = 'ml'
|
||||
Application = 'application'
|
||||
Dashboard = "dashboard"
|
||||
Notebook = "notebook"
|
||||
Analysis = "analysis"
|
||||
ML = "ml"
|
||||
Application = "application"
|
||||
|
||||
|
||||
class MaturityType(StrEnum):
|
||||
Low = 'low'
|
||||
Medium = 'medium'
|
||||
High = 'high'
|
||||
Low = "low"
|
||||
Medium = "medium"
|
||||
High = "high"
|
||||
|
||||
|
||||
@dataclass
|
||||
@@ -440,7 +428,7 @@ class UnparsedExposure(dbtClassMixin, Replaceable):
|
||||
name: str
|
||||
type: ExposureType
|
||||
owner: ExposureOwner
|
||||
description: str = ''
|
||||
description: str = ""
|
||||
maturity: Optional[MaturityType] = None
|
||||
meta: Dict[str, Any] = field(default_factory=dict)
|
||||
tags: List[str] = field(default_factory=list)
|
||||
@@ -462,7 +450,7 @@ class UnparsedMetric(dbtClassMixin, Replaceable):
|
||||
name: str
|
||||
label: str
|
||||
type: str
|
||||
description: str = ''
|
||||
description: str = ""
|
||||
sql: Optional[str] = None
|
||||
timestamp: Optional[str] = None
|
||||
time_grains: List[str] = field(default_factory=list)
|
||||
|
||||
@@ -2,24 +2,40 @@ from dbt.contracts.util import Replaceable, Mergeable, list_str
|
||||
from dbt.contracts.connection import QueryComment, UserConfigContract
|
||||
from dbt.helper_types import NoValue
|
||||
from dbt.dataclass_schema import (
|
||||
dbtClassMixin, ValidationError,
|
||||
dbtClassMixin,
|
||||
ValidationError,
|
||||
HyphenatedDbtClassMixin,
|
||||
ExtensibleDbtClassMixin,
|
||||
register_pattern, ValidatedStringMixin
|
||||
register_pattern,
|
||||
ValidatedStringMixin,
|
||||
)
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Optional, List, Dict, Union, Any
|
||||
from mashumaro.types import SerializableType
|
||||
|
||||
PIN_PACKAGE_URL = 'https://docs.getdbt.com/docs/package-management#section-specifying-package-versions' # noqa
|
||||
PIN_PACKAGE_URL = (
|
||||
"https://docs.getdbt.com/docs/package-management#section-specifying-package-versions" # noqa
|
||||
)
|
||||
DEFAULT_SEND_ANONYMOUS_USAGE_STATS = True
|
||||
|
||||
|
||||
class Name(ValidatedStringMixin):
    ValidationRegex = r'^[^\d\W]\w*$'
    ValidationRegex = r"^[^\d\W]\w*$"

    @classmethod
    def is_valid(cls, value: Any) -> bool:
        if not isinstance(value, str):
            return False

        try:
            cls.validate(value)
        except ValidationError:
            return False

        return True


register_pattern(Name, r'^[^\d\W]\w*$')
register_pattern(Name, r"^[^\d\W]\w*$")
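The Name pattern only admits identifier-like strings: a leading letter or underscore followed by word characters. A quick check against the regex above:

    import re

    name_re = re.compile(r"^[^\d\W]\w*$")
    assert name_re.match("my_project")
    assert name_re.match("_staging")
    assert name_re.match("1st_project") is None   # cannot start with a digit
    assert name_re.match("my-project") is None    # hyphens are not word characters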
||||
|
||||
class SemverString(str, SerializableType):
@@ -27,16 +43,15 @@ class SemverString(str, SerializableType):
        return self

    @classmethod
    def _deserialize(cls, value: str) -> 'SemverString':
    def _deserialize(cls, value: str) -> "SemverString":
        return SemverString(value)


# this does not support the full semver (does not allow a trailing -fooXYZ) and
# is not restrictive enough for full semver, (allows '1.0'). But it's like
# 'semver lite'.
# this supports full semver,
# but also allows for 2 group version numbers, (allows '1.0').
register_pattern(
    SemverString,
    r'^(?:0|[1-9]\d*)\.(?:0|[1-9]\d*)(\.(?:0|[1-9]\d*))?$',
    r"^(0|[1-9]\d*)\.(0|[1-9]\d*)(\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?)?$", # noqa
)
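The replacement pattern accepts full semver (pre-release and build metadata) while still allowing two-part versions, which the old "semver lite" pattern also permitted. A standalone check against the new regex (the pattern is split across lines only for readability):

    import re

    semver_re = re.compile(
        r"^(0|[1-9]\d*)\.(0|[1-9]\d*)(\.(0|[1-9]\d*)"
        r"(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)"
        r"(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?"
        r"(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?)?$"
    )
    for ok in ("1.0", "1.2.3", "1.2.3-rc.1", "1.2.3-rc.1+build.5"):
        assert semver_re.match(ok), ok
    for bad in ("1", "01.2", "1.2.3-"):
        assert semver_re.match(bad) is None, bad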
||||
|
||||
|
||||
@@ -104,8 +119,7 @@ class ProjectPackageMetadata:
|
||||
|
||||
@classmethod
|
||||
def from_project(cls, project):
|
||||
return cls(name=project.project_name,
|
||||
packages=project.packages.packages)
|
||||
return cls(name=project.project_name, packages=project.packages.packages)
|
||||
|
||||
|
||||
@dataclass
|
||||
@@ -123,46 +137,46 @@ class RegistryPackageMetadata(
|
||||
|
||||
# A list of all the reserved words that packages may not have as names.
|
||||
BANNED_PROJECT_NAMES = {
|
||||
'_sql_results',
|
||||
'adapter',
|
||||
'api',
|
||||
'column',
|
||||
'config',
|
||||
'context',
|
||||
'database',
|
||||
'env',
|
||||
'env_var',
|
||||
'exceptions',
|
||||
'execute',
|
||||
'flags',
|
||||
'fromjson',
|
||||
'fromyaml',
|
||||
'graph',
|
||||
'invocation_id',
|
||||
'load_agate_table',
|
||||
'load_result',
|
||||
'log',
|
||||
'model',
|
||||
'modules',
|
||||
'post_hooks',
|
||||
'pre_hooks',
|
||||
'ref',
|
||||
'render',
|
||||
'return',
|
||||
'run_started_at',
|
||||
'schema',
|
||||
'source',
|
||||
'sql',
|
||||
'sql_now',
|
||||
'store_result',
|
||||
'store_raw_result',
|
||||
'target',
|
||||
'this',
|
||||
'tojson',
|
||||
'toyaml',
|
||||
'try_or_compiler_error',
|
||||
'var',
|
||||
'write',
|
||||
"_sql_results",
|
||||
"adapter",
|
||||
"api",
|
||||
"column",
|
||||
"config",
|
||||
"context",
|
||||
"database",
|
||||
"env",
|
||||
"env_var",
|
||||
"exceptions",
|
||||
"execute",
|
||||
"flags",
|
||||
"fromjson",
|
||||
"fromyaml",
|
||||
"graph",
|
||||
"invocation_id",
|
||||
"load_agate_table",
|
||||
"load_result",
|
||||
"log",
|
||||
"model",
|
||||
"modules",
|
||||
"post_hooks",
|
||||
"pre_hooks",
|
||||
"ref",
|
||||
"render",
|
||||
"return",
|
||||
"run_started_at",
|
||||
"schema",
|
||||
"source",
|
||||
"sql",
|
||||
"sql_now",
|
||||
"store_result",
|
||||
"store_raw_result",
|
||||
"target",
|
||||
"this",
|
||||
"tojson",
|
||||
"toyaml",
|
||||
"try_or_compiler_error",
|
||||
"var",
|
||||
"write",
|
||||
}
|
||||
|
||||
|
||||
@@ -201,7 +215,7 @@ class Project(HyphenatedDbtClassMixin, Replaceable):
|
||||
vars: Optional[Dict[str, Any]] = field(
|
||||
default=None,
|
||||
metadata=dict(
|
||||
description='map project names to their vars override dicts',
|
||||
description="map project names to their vars override dicts",
|
||||
),
|
||||
)
|
||||
packages: List[PackageSpec] = field(default_factory=list)
|
||||
@@ -210,16 +224,17 @@ class Project(HyphenatedDbtClassMixin, Replaceable):
    @classmethod
    def validate(cls, data):
        super().validate(data)
        if data['name'] in BANNED_PROJECT_NAMES:
            raise ValidationError(
                f"Invalid project name: {data['name']} is a reserved word"
            )
        if data["name"] in BANNED_PROJECT_NAMES:
            raise ValidationError(f"Invalid project name: {data['name']} is a reserved word")
        # validate dispatch config
        if 'dispatch' in data and data['dispatch']:
            entries = data['dispatch']
        if "dispatch" in data and data["dispatch"]:
            entries = data["dispatch"]
            for entry in entries:
                if ('macro_namespace' not in entry or 'search_order' not in entry or
                        not isinstance(entry['search_order'], list)):
                if (
                    "macro_namespace" not in entry
                    or "search_order" not in entry
                    or not isinstance(entry["search_order"], list)
                ):
                    raise ValidationError(f"Invalid project dispatch config: {entry}")
||||
|
||||
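
A minimal sketch of the dispatch shape that the validation above accepts, assuming a
project dict like the one Project.validate receives (the project and package names here
are illustrative only):

valid_project = {
    "name": "my_project",
    "dispatch": [
        # each entry needs a macro_namespace plus a search_order list
        {"macro_namespace": "dbt_utils", "search_order": ["my_project", "dbt_utils"]},
    ],
}

invalid_project = {
    "name": "my_project",
    # rejected: search_order must be a list, not a string
    "dispatch": [{"macro_namespace": "dbt_utils", "search_order": "my_project"}],
}
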
@@ -231,7 +246,7 @@ class UserConfig(ExtensibleDbtClassMixin, Replaceable, UserConfigContract):
    printer_width: Optional[int] = None
    write_json: Optional[bool] = None
    warn_error: Optional[bool] = None
    log_format: Optional[bool] = None
    log_format: Optional[str] = None
    debug: Optional[bool] = None
    version_check: Optional[bool] = None
    fail_fast: Optional[bool] = None
@@ -242,9 +257,9 @@ class UserConfig(ExtensibleDbtClassMixin, Replaceable, UserConfigContract):

@dataclass
class ProfileConfig(HyphenatedDbtClassMixin, Replaceable):
    profile_name: str = field(metadata={'preserve_underscore': True})
    target_name: str = field(metadata={'preserve_underscore': True})
    user_config: UserConfig = field(metadata={'preserve_underscore': True})
    profile_name: str = field(metadata={"preserve_underscore": True})
    target_name: str = field(metadata={"preserve_underscore": True})
    user_config: UserConfig = field(metadata={"preserve_underscore": True})
    threads: int
    # TODO: make this a dynamic union of some kind?
    credentials: Optional[Dict[str, Any]]
@@ -262,7 +277,7 @@ class ConfiguredQuoting(Quoting, Replaceable):
class Configuration(Project, ProfileConfig):
    cli_vars: Dict[str, Any] = field(
        default_factory=dict,
        metadata={'preserve_underscore': True},
        metadata={"preserve_underscore": True},
    )
    quoting: Optional[ConfiguredQuoting] = None

@@ -1,7 +1,8 @@
from collections.abc import Mapping
from dataclasses import dataclass
from typing import (
    Optional, Dict,
    Optional,
    Dict,
)
from typing_extensions import Protocol

@@ -13,17 +14,17 @@ from dbt.utils import deep_merge


class RelationType(StrEnum):
    Table = 'table'
    View = 'view'
    CTE = 'cte'
    MaterializedView = 'materializedview'
    External = 'external'
    Table = "table"
    View = "view"
    CTE = "cte"
    MaterializedView = "materializedview"
    External = "external"


class ComponentName(StrEnum):
    Database = 'database'
    Schema = 'schema'
    Identifier = 'identifier'
    Database = "database"
    Schema = "schema"
    Identifier = "identifier"


class HasQuoting(Protocol):
@@ -68,8 +69,7 @@ class Policy(FakeAPIObject):
            return self.identifier
        else:
            raise ValueError(
                'Got a key of {}, expected one of {}'
                .format(key, list(ComponentName))
                "Got a key of {}, expected one of {}".format(key, list(ComponentName))
            )

    def replace_dict(self, dct: Dict[ComponentName, bool]):
@@ -88,16 +88,12 @@ class Path(FakeAPIObject):
    def __post_init__(self):
        # handle pesky jinja2.Undefined sneaking in here and messing up rende
        if not isinstance(self.database, (type(None), str)):
            raise CompilationException(
                'Got an invalid path database: {}'.format(self.database)
            )
            raise CompilationException("Got an invalid path database: {}".format(self.database))
        if not isinstance(self.schema, (type(None), str)):
            raise CompilationException(
                'Got an invalid path schema: {}'.format(self.schema)
            )
            raise CompilationException("Got an invalid path schema: {}".format(self.schema))
        if not isinstance(self.identifier, (type(None), str)):
            raise CompilationException(
                'Got an invalid path identifier: {}'.format(self.identifier)
                "Got an invalid path identifier: {}".format(self.identifier)
            )

    def get_lowered_part(self, key: ComponentName) -> Optional[str]:
@@ -115,8 +111,7 @@ class Path(FakeAPIObject):
            return self.identifier
        else:
            raise ValueError(
                'Got a key of {}, expected one of {}'
                .format(key, list(ComponentName))
                "Got a key of {}, expected one of {}".format(key, list(ComponentName))
            )

    def replace_dict(self, dct: Dict[ComponentName, str]):

@@ -1,7 +1,5 @@
from dbt.contracts.graph.manifest import CompileResultNode
from dbt.contracts.graph.unparsed import (
    FreshnessThreshold
)
from dbt.contracts.graph.unparsed import FreshnessThreshold
from dbt.contracts.graph.parsed import ParsedSourceDefinition
from dbt.contracts.util import (
    BaseArtifactMetadata,
@@ -25,7 +23,13 @@ import agate
from dataclasses import dataclass, field
from datetime import datetime
from typing import (
    Union, Dict, List, Optional, Any, NamedTuple, Sequence,
    Union,
    Dict,
    List,
    Optional,
    Any,
    NamedTuple,
    Sequence,
)

from dbt.clients.system import write_json
@@ -59,9 +63,9 @@ class collect_timing_info:


class RunningStatus(StrEnum):
    Started = 'started'
    Compiling = 'compiling'
    Executing = 'executing'
    Started = "started"
    Compiling = "compiling"
    Executing = "executing"


class NodeStatus(StrEnum):
@@ -81,6 +85,7 @@ class RunStatus(StrEnum):


class TestStatus(StrEnum):
    __test__ = False
    Pass = NodeStatus.Pass
    Error = NodeStatus.Error
    Fail = NodeStatus.Fail
@@ -108,10 +113,10 @@ class BaseResult(dbtClassMixin):
    @classmethod
    def __pre_deserialize__(cls, data):
        data = super().__pre_deserialize__(data)
        if 'message' not in data:
            data['message'] = None
        if 'failures' not in data:
            data['failures'] = None
        if "message" not in data:
            data["message"] = None
        if "failures" not in data:
            data["failures"] = None
        return data

@@ -123,9 +128,7 @@ class NodeResult(BaseResult):
@dataclass
class RunResult(NodeResult):
    agate_table: Optional[agate.Table] = field(
        default=None, metadata={
            'serialize': lambda x: None, 'deserialize': lambda x: None
        }
        default=None, metadata={"serialize": lambda x: None, "deserialize": lambda x: None}
    )

    @property
@@ -169,7 +172,7 @@ def process_run_result(result: RunResult) -> RunResultOutput:
        execution_time=result.execution_time,
        message=result.message,
        adapter_response=result.adapter_response,
        failures=result.failures
        failures=result.failures,
    )


@@ -192,7 +195,7 @@ class RunExecutionResult(


@dataclass
@schema_version('run-results', 4)
@schema_version("run-results", 4)
class RunResultsArtifact(ExecutionResult, ArtifactMixin):
    results: Sequence[RunResultOutput]
    args: Dict[str, Any] = field(default_factory=dict)
@@ -210,12 +213,7 @@ class RunResultsArtifact(ExecutionResult, ArtifactMixin):
            dbt_schema_version=str(cls.dbt_schema_version),
            generated_at=generated_at,
        )
        return cls(
            metadata=meta,
            results=processed_results,
            elapsed_time=elapsed_time,
            args=args
        )
        return cls(metadata=meta, results=processed_results, elapsed_time=elapsed_time, args=args)

    def write(self, path: str):
        write_json(path, self.to_dict(omit_none=False))
@@ -228,15 +226,14 @@ class RunOperationResult(ExecutionResult):

@dataclass
class RunOperationResultMetadata(BaseArtifactMetadata):
    dbt_schema_version: str = field(default_factory=lambda: str(
        RunOperationResultsArtifact.dbt_schema_version
    ))
    dbt_schema_version: str = field(
        default_factory=lambda: str(RunOperationResultsArtifact.dbt_schema_version)
    )


@dataclass
@schema_version('run-operation-result', 1)
@schema_version("run-operation-result", 1)
class RunOperationResultsArtifact(RunOperationResult, ArtifactMixin):

    @classmethod
    def from_success(
        cls,
@@ -255,6 +252,7 @@ class RunOperationResultsArtifact(RunOperationResult, ArtifactMixin):
            success=success,
        )


# due to issues with typing.Union collapsing subclasses, this can't subclass
# PartialResult

@@ -273,7 +271,7 @@ class SourceFreshnessResult(NodeResult):


class FreshnessErrorEnum(StrEnum):
    runtime_error = 'runtime error'
    runtime_error = "runtime error"


@dataclass
@@ -306,14 +304,11 @@ class PartialSourceFreshnessResult(NodeResult):
        return False


FreshnessNodeResult = Union[PartialSourceFreshnessResult,
                            SourceFreshnessResult]
FreshnessNodeResult = Union[PartialSourceFreshnessResult, SourceFreshnessResult]
FreshnessNodeOutput = Union[SourceFreshnessRuntimeError, SourceFreshnessOutput]


def process_freshness_result(
    result: FreshnessNodeResult
) -> FreshnessNodeOutput:
def process_freshness_result(result: FreshnessNodeResult) -> FreshnessNodeOutput:
    unique_id = result.node.unique_id
    if result.status == FreshnessStatus.RuntimeErr:
        return SourceFreshnessRuntimeError(
@@ -325,16 +320,14 @@ def process_freshness_result(
    # we know that this must be a SourceFreshnessResult
    if not isinstance(result, SourceFreshnessResult):
        raise InternalException(
            'Got {} instead of a SourceFreshnessResult for a '
            'non-error result in freshness execution!'
            .format(type(result))
            "Got {} instead of a SourceFreshnessResult for a "
            "non-error result in freshness execution!".format(type(result))
        )
    # if we're here, we must have a non-None freshness threshold
    criteria = result.node.freshness
    if criteria is None:
        raise InternalException(
            'Somehow evaluated a freshness result for a source '
            'that has no freshness criteria!'
            "Somehow evaluated a freshness result for a source " "that has no freshness criteria!"
        )
    return SourceFreshnessOutput(
        unique_id=unique_id,
@@ -353,9 +346,7 @@ def process_freshness_result(
@dataclass
class FreshnessMetadata(BaseArtifactMetadata):
    dbt_schema_version: str = field(
        default_factory=lambda: str(
            FreshnessExecutionResultArtifact.dbt_schema_version
        )
        default_factory=lambda: str(FreshnessExecutionResultArtifact.dbt_schema_version)
    )

@@ -376,7 +367,7 @@ class FreshnessResult(ExecutionResult):


@dataclass
@schema_version('sources', 3)
@schema_version("sources", 3)
class FreshnessExecutionResultArtifact(
    ArtifactMixin,
    VersionedSchema,
@@ -399,8 +390,7 @@ Primitive = Union[bool, str, float, None]
PrimitiveDict = Dict[str, Primitive]

CatalogKey = NamedTuple(
    'CatalogKey',
    [('database', Optional[str]), ('schema', str), ('name', str)]
    "CatalogKey", [("database", Optional[str]), ("schema", str), ("name", str)]
)
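
A small usage sketch for the CatalogKey named tuple above; the values are illustrative
only, and the database component may be None for adapters without a database concept:

from typing import NamedTuple, Optional

CatalogKey = NamedTuple(
    "CatalogKey", [("database", Optional[str]), ("schema", str), ("name", str)]
)

key = CatalogKey(database="analytics", schema="dbt_dev", name="orders")
catalog = {key: {"columns": {}}}  # hashes by value, so it works as a dict key
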
@@ -469,13 +459,13 @@ class CatalogResults(dbtClassMixin):

    def __post_serialize__(self, dct):
        dct = super().__post_serialize__(dct)
        if '_compile_results' in dct:
            del dct['_compile_results']
        if "_compile_results" in dct:
            del dct["_compile_results"]
        return dct


@dataclass
@schema_version('catalog', 1)
@schema_version("catalog", 1)
class CatalogArtifact(CatalogResults, ArtifactMixin):
    metadata: CatalogMetadata

@@ -486,8 +476,8 @@ class CatalogArtifact(CatalogResults, ArtifactMixin):
        nodes: Dict[str, CatalogTable],
        sources: Dict[str, CatalogTable],
        compile_results: Optional[Any],
        errors: Optional[List[str]]
    ) -> 'CatalogArtifact':
        errors: Optional[List[str]],
    ) -> "CatalogArtifact":
        meta = CatalogMetadata(generated_at=generated_at)
        return cls(
            metadata=meta,

@@ -8,7 +8,7 @@ from typing import List, Dict, Any, Union
class SelectorDefinition(dbtClassMixin):
    name: str
    definition: Union[str, Dict[str, Any]]
    description: str = ''
    description: str = ""
    default: bool = False


@@ -7,7 +7,9 @@ from dbt.dataclass_schema import dbtClassMixin

from dbt.contracts.graph.compiled import CompileResultNode
from dbt.contracts.results import (
    RunResult, RunResultsArtifact, TimingInfo,
    RunResult,
    RunResultsArtifact,
    TimingInfo,
    ExecutionResult,
    RunExecutionResult,
)
@@ -35,7 +37,7 @@ class RemoteCompileResultMixin(RemoteResult):


@dataclass
@schema_version('remote-compile-result', 1)
@schema_version("remote-compile-result", 1)
class RemoteCompileResult(RemoteCompileResultMixin):
    generated_at: datetime = field(default_factory=datetime.utcnow)

@@ -45,7 +47,7 @@ class RemoteCompileResult(RemoteCompileResultMixin):


@dataclass
@schema_version('remote-execution-result', 1)
@schema_version("remote-execution-result", 1)
class RemoteExecutionResult(ExecutionResult, RemoteResult):
    results: Sequence[RunResult]
    args: Dict[str, Any] = field(default_factory=dict)
@@ -65,7 +67,7 @@ class RemoteExecutionResult(ExecutionResult, RemoteResult):
        cls,
        base: RunExecutionResult,
        logs: List[LogMessage],
    ) -> 'RemoteExecutionResult':
    ) -> "RemoteExecutionResult":
        return cls(
            generated_at=base.generated_at,
            results=base.results,
@@ -82,7 +84,7 @@ class ResultTable(dbtClassMixin):


@dataclass
@schema_version('remote-run-result', 1)
@schema_version("remote-run-result", 1)
class RemoteRunResult(RemoteCompileResultMixin):
    table: ResultTable
    generated_at: datetime = field(default_factory=datetime.utcnow)

Some files were not shown because too many files have changed in this diff.