Compare commits


1 Commit

| Author | SHA1 | Message | Date |
| - | - | - | - |
| Gerda Shank | 845b95f3d0 | Initial file creation of code documentation READMEs | 2022-01-31 22:09:17 -05:00 |
5779 changed files with 90696 additions and 162026 deletions


@@ -1,19 +1,13 @@
[bumpversion]
current_version = 1.7.0a1
parse = (?P<major>[\d]+) # major version number
\.(?P<minor>[\d]+) # minor version number
\.(?P<patch>[\d]+) # patch version number
(?P<prerelease> # optional pre-release - ex: a1, b2, rc25
(?P<prekind>a|b|rc) # pre-release type
(?P<num>[\d]+) # pre-release version number
current_version = 1.0.1
parse = (?P<major>\d+)
\.(?P<minor>\d+)
\.(?P<patch>\d+)
((?P<prekind>a|b|rc)
(?P<pre>\d+) # pre-release version num
)?
( # optional nightly release indicator
\.(?P<nightly>dev[0-9]+) # ex: .dev02142023
)? # expected matches: `1.15.0`, `1.5.0a11`, `1.5.0a1.dev123`, `1.5.0.dev123457`, expected failures: `1`, `1.5`, `1.5.2-a1`, `text1.5.0`
serialize =
{major}.{minor}.{patch}{prekind}{num}.{nightly}
{major}.{minor}.{patch}.{nightly}
{major}.{minor}.{patch}{prekind}{num}
serialize =
{major}.{minor}.{patch}{prekind}{pre}
{major}.{minor}.{patch}
commit = False
tag = False
@@ -21,27 +15,23 @@ tag = False
[bumpversion:part:prekind]
first_value = a
optional_value = final
values =
values =
a
b
rc
final
[bumpversion:part:num]
[bumpversion:part:pre]
first_value = 1
[bumpversion:part:nightly]
[bumpversion:file:setup.py]
[bumpversion:file:core/setup.py]
[bumpversion:file:core/dbt/version.py]
[bumpversion:file:core/scripts/create_adapter_plugins.py]
[bumpversion:file:plugins/postgres/setup.py]
[bumpversion:file:plugins/postgres/dbt/adapters/postgres/__version__.py]
[bumpversion:file:docker/Dockerfile]
[bumpversion:file:tests/adapter/setup.py]
[bumpversion:file:tests/adapter/dbt/tests/adapter/__version__.py]


@@ -1,23 +0,0 @@
## Previous Releases
For information on prior major and minor releases, see their changelogs:
* [1.6](https://github.com/dbt-labs/dbt-core/blob/1.6.latest/CHANGELOG.md)
* [1.5](https://github.com/dbt-labs/dbt-core/blob/1.5.latest/CHANGELOG.md)
* [1.4](https://github.com/dbt-labs/dbt-core/blob/1.4.latest/CHANGELOG.md)
* [1.3](https://github.com/dbt-labs/dbt-core/blob/1.3.latest/CHANGELOG.md)
* [1.2](https://github.com/dbt-labs/dbt-core/blob/1.2.latest/CHANGELOG.md)
* [1.1](https://github.com/dbt-labs/dbt-core/blob/1.1.latest/CHANGELOG.md)
* [1.0](https://github.com/dbt-labs/dbt-core/blob/1.0.latest/CHANGELOG.md)
* [0.21](https://github.com/dbt-labs/dbt-core/blob/0.21.latest/CHANGELOG.md)
* [0.20](https://github.com/dbt-labs/dbt-core/blob/0.20.latest/CHANGELOG.md)
* [0.19](https://github.com/dbt-labs/dbt-core/blob/0.19.latest/CHANGELOG.md)
* [0.18](https://github.com/dbt-labs/dbt-core/blob/0.18.latest/CHANGELOG.md)
* [0.17](https://github.com/dbt-labs/dbt-core/blob/0.17.latest/CHANGELOG.md)
* [0.16](https://github.com/dbt-labs/dbt-core/blob/0.16.latest/CHANGELOG.md)
* [0.15](https://github.com/dbt-labs/dbt-core/blob/0.15.latest/CHANGELOG.md)
* [0.14](https://github.com/dbt-labs/dbt-core/blob/0.14.latest/CHANGELOG.md)
* [0.13](https://github.com/dbt-labs/dbt-core/blob/0.13.latest/CHANGELOG.md)
* [0.12](https://github.com/dbt-labs/dbt-core/blob/0.12.latest/CHANGELOG.md)
* [0.11 and earlier](https://github.com/dbt-labs/dbt-core/blob/0.11.latest/CHANGELOG.md)


@@ -1,53 +0,0 @@
# CHANGELOG Automation
We use [changie](https://changie.dev/) to automate `CHANGELOG` generation. For installation and format/command specifics, see the documentation.
### Quick Tour
- All new change entries get generated under `/.changes/unreleased` as yaml files
- `header.tpl.md` contains the contents of the entire CHANGELOG file
- `0.0.0.md` contains the contents of the footer for the entire CHANGELOG file. Changie appears to be in the process of supporting a footer file the same way it supports a header file; switch to that when it becomes available. For now, the `0.0.0` in the file name forces it to the bottom of the changelog no matter what version we are releasing.
- `.changie.yaml` contains the fields in a change, the format of a single change, as well as the format of the Contributors section for each version.
### Workflow
#### Daily workflow
Almost every code change we make associated with an issue will require a `CHANGELOG` entry. After you have created the PR in GitHub, run `changie new` and follow the command prompts to generate a yaml file with your change details. This only needs to be done once per PR.
The `changie new` command will ensure correct file format and file name. There is a one-to-one mapping of issues to changes; multiple issues cannot be lumped into a single entry. If you make a mistake, the yaml file may be directly modified and saved as long as the format is preserved.
Note: If your PR has been cleared by the Core Team as not needing a changelog entry, the `Skip Changelog` label may be put on the PR to bypass the GitHub action that blocks PRs from being merged when they are missing a `CHANGELOG` entry.
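For reference, the entry generated by `changie new` is a small yaml file; a hypothetical example (the kind, body, timestamp, username, and issue number below are placeholders) looks roughly like this:
```
kind: Fixes
body: Short description of the change
time: 2023-01-01T00:00:00.000000-05:00
custom:
  Author: your-github-username
  Issue: "1234"
```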
#### Prerelease Workflow
These commands batch up changes in `/.changes/unreleased` to be included in this prerelease and move those files to a directory named for the release version. The directory passed to `--move-dir` is created under `/.changes` if it does not already exist.
```
changie batch <version> --move-dir '<version>' --prerelease 'rc1'
changie merge
```
Example
```
changie batch 1.0.5 --move-dir '1.0.5' --prerelease 'rc1'
changie merge
```
#### Final Release Workflow
These commands batch up changes in `/.changes/unreleased` as well as `/.changes/<version>` to be included in this final release and delete all prereleases. This rolls all prereleases up into a single final release. All `yaml` files in `/unreleased` and `<version>` will be deleted at this point.
```
changie batch <version> --include '<version>' --remove-prereleases
changie merge
```
Example
```
changie batch 1.0.5 --include '1.0.5' --remove-prereleases
changie merge
```
### A Note on Manual Edits & Gotchas
- Changie generates markdown files in the `.changes` directory that are parsed together with the `changie merge` command. Every time `changie merge` is run, it regenerates the entire file. For this reason, any changes made directly to `CHANGELOG.md` will be overwritten on the next run of `changie merge`.
- If changes need to be made to the `CHANGELOG.md`, make the changes to the relevant `<version>.md` file located in the `/.changes` directory. You will then run `changie merge` to regenerate the `CHANGELOG.md`.
- Do not run `changie batch` again on released versions. Our final release workflow deletes all of the yaml files associated with individual changes. If for some reason modifications to the `CHANGELOG.md` are required after we've generated the final release `CHANGELOG.md`, the modifications need to be done manually to the `<version>.md` file in the `/.changes` directory.
- changie can modify, create and delete files depending on the command you run. This is expected. Be sure to commit everything that has been modified and deleted.


@@ -1,6 +0,0 @@
# dbt Core Changelog
- This file provides a full account of all changes to `dbt-core` and `dbt-postgres`
- Changes are listed under the (pre)release in which they first appear. Subsequent releases include changes from previous releases.
- "Breaking changes" listed under a version may require action from end users or external maintainers when upgrading to that version.
- Do not edit this file directly. This file is auto-generated using [changie](https://github.com/miniscruff/changie). For details on how to document a change, see [the contributing guide](https://github.com/dbt-labs/dbt-core/blob/main/CONTRIBUTING.md#adding-changelog-entry)


@@ -1,6 +0,0 @@
kind: Dependencies
body: Pin click<9 + sqlparse<0.5
time: 2023-07-19T12:37:43.716495+02:00
custom:
Author: jtcohen6
PR: "8146"


@@ -1,6 +0,0 @@
kind: Docs
body: Fix for column tests not rendering on quoted columns
time: 2023-05-31T11:54:19.687363-04:00
custom:
Author: drewbanin
Issue: "201"


@@ -1,6 +0,0 @@
kind: Docs
body: Remove static SQL codeblock for metrics
time: 2023-07-18T19:24:22.155323+02:00
custom:
Author: marcodamore
Issue: "436"


@@ -1,6 +0,0 @@
kind: Fixes
body: Enable converting deprecation warnings to errors
time: 2023-07-18T12:55:18.03914-04:00
custom:
Author: michelleark
Issue: "8130"


@@ -1,140 +0,0 @@
changesDir: .changes
unreleasedDir: unreleased
headerPath: header.tpl.md
versionHeaderPath: ""
changelogPath: CHANGELOG.md
versionExt: md
envPrefix: "CHANGIE_"
versionFormat: '## dbt-core {{.Version}} - {{.Time.Format "January 02, 2006"}}'
kindFormat: '### {{.Kind}}'
changeFormat: |-
{{- $IssueList := list }}
{{- $changes := splitList " " $.Custom.Issue }}
{{- range $issueNbr := $changes }}
{{- $changeLink := "[#nbr](https://github.com/dbt-labs/dbt-core/issues/nbr)" | replace "nbr" $issueNbr }}
{{- $IssueList = append $IssueList $changeLink }}
{{- end -}}
- {{.Body}} ({{ range $index, $element := $IssueList }}{{if $index}}, {{end}}{{$element}}{{end}})
kinds:
- label: Breaking Changes
- label: Features
- label: Fixes
- label: Docs
changeFormat: |-
{{- $IssueList := list }}
{{- $changes := splitList " " $.Custom.Issue }}
{{- range $issueNbr := $changes }}
{{- $changeLink := "[dbt-docs/#nbr](https://github.com/dbt-labs/dbt-docs/issues/nbr)" | replace "nbr" $issueNbr }}
{{- $IssueList = append $IssueList $changeLink }}
{{- end -}}
- {{.Body}} ({{ range $index, $element := $IssueList }}{{if $index}}, {{end}}{{$element}}{{end}})
- label: Under the Hood
- label: Dependencies
changeFormat: |-
{{- $PRList := list }}
{{- $changes := splitList " " $.Custom.PR }}
{{- range $pullrequest := $changes }}
{{- $changeLink := "[#nbr](https://github.com/dbt-labs/dbt-core/pull/nbr)" | replace "nbr" $pullrequest }}
{{- $PRList = append $PRList $changeLink }}
{{- end -}}
- {{.Body}} ({{ range $index, $element := $PRList }}{{if $index}}, {{end}}{{$element}}{{end}})
skipGlobalChoices: true
additionalChoices:
- key: Author
label: GitHub Username(s) (separated by a single space if multiple)
type: string
minLength: 3
- key: PR
label: GitHub Pull Request Number (separated by a single space if multiple)
type: string
minLength: 1
- label: Security
changeFormat: |-
{{- $PRList := list }}
{{- $changes := splitList " " $.Custom.PR }}
{{- range $pullrequest := $changes }}
{{- $changeLink := "[#nbr](https://github.com/dbt-labs/dbt-core/pull/nbr)" | replace "nbr" $pullrequest }}
{{- $PRList = append $PRList $changeLink }}
{{- end -}}
- {{.Body}} ({{ range $index, $element := $PRList }}{{if $index}}, {{end}}{{$element}}{{end}})
skipGlobalChoices: true
additionalChoices:
- key: Author
label: GitHub Username(s) (separated by a single space if multiple)
type: string
minLength: 3
- key: PR
label: GitHub Pull Request Number (separated by a single space if multiple)
type: string
minLength: 1
newlines:
afterChangelogHeader: 1
afterKind: 1
afterChangelogVersion: 1
beforeKind: 1
endOfVersion: 1
custom:
- key: Author
label: GitHub Username(s) (separated by a single space if multiple)
type: string
minLength: 3
- key: Issue
label: GitHub Issue Number (separated by a single space if multiple)
type: string
minLength: 1
footerFormat: |
{{- $contributorDict := dict }}
{{- /* ensure all names in this list are all lowercase for later matching purposes */}}
{{- $core_team := splitList " " .Env.CORE_TEAM }}
{{- /* ensure we always skip snyk and dependabot in addition to the core team */}}
{{- $maintainers := list "dependabot[bot]" "snyk-bot"}}
{{- range $team_member := $core_team }}
{{- $team_member_lower := lower $team_member }}
{{- $maintainers = append $maintainers $team_member_lower }}
{{- end }}
{{- range $change := .Changes }}
{{- $authorList := splitList " " $change.Custom.Author }}
{{- /* loop through all authors for a single changelog */}}
{{- range $author := $authorList }}
{{- $authorLower := lower $author }}
{{- /* we only want to include non-core team contributors */}}
{{- if not (has $authorLower $maintainers)}}
{{- $changeList := splitList " " $change.Custom.Author }}
{{- $IssueList := list }}
{{- $changeLink := $change.Kind }}
{{- if or (eq $change.Kind "Dependencies") (eq $change.Kind "Security") }}
{{- $changes := splitList " " $change.Custom.PR }}
{{- range $issueNbr := $changes }}
{{- $changeLink := "[#nbr](https://github.com/dbt-labs/dbt-core/pull/nbr)" | replace "nbr" $issueNbr }}
{{- $IssueList = append $IssueList $changeLink }}
{{- end -}}
{{- else }}
{{- $changes := splitList " " $change.Custom.Issue }}
{{- range $issueNbr := $changes }}
{{- $changeLink := "[#nbr](https://github.com/dbt-labs/dbt-core/issues/nbr)" | replace "nbr" $issueNbr }}
{{- $IssueList = append $IssueList $changeLink }}
{{- end -}}
{{- end }}
{{- /* check if this contributor has other changes associated with them already */}}
{{- if hasKey $contributorDict $author }}
{{- $contributionList := get $contributorDict $author }}
{{- $contributionList = concat $contributionList $IssueList }}
{{- $contributorDict := set $contributorDict $author $contributionList }}
{{- else }}
{{- $contributionList := $IssueList }}
{{- $contributorDict := set $contributorDict $author $contributionList }}
{{- end }}
{{- end}}
{{- end}}
{{- end }}
{{- /* no indentation here for formatting so the final markdown doesn't have unneeded indentations */}}
{{- if $contributorDict}}
### Contributors
{{- range $k,$v := $contributorDict }}
- [@{{$k}}](https://github.com/{{$k}}) ({{ range $index, $element := $v }}{{if $index}}, {{end}}{{$element}}{{end}})
{{- end }}
{{- end }}

.flake8

@@ -1,12 +0,0 @@
[flake8]
select =
E
W
F
ignore =
W503 # makes Flake8 work like black
W504
E203 # makes Flake8 work like black
E741
E501 # long line checking is done in black
exclude = test/


@@ -1,2 +0,0 @@
# Reformatting dbt-core via black, flake8, mypy, and assorted pre-commit hooks.
43e3fc22c4eae4d3d901faba05e33c40f1f1dc5a

.gitattributes

@@ -1,6 +0,0 @@
core/dbt/include/index.html binary
tests/functional/artifacts/data/state/*/manifest.json binary
core/dbt/docs/build/html/searchindex.js binary
core/dbt/docs/build/html/index.html binary
performance/runner/Cargo.lock binary
core/dbt/events/types_pb2.py binary

.github/CODEOWNERS

@@ -1,5 +1,5 @@
# This file contains the code owners for the dbt-core repo.
# PRs will be automatically assigned for review to the associated
# PRs will be automatically assigned for review to the associated
# team(s) or person(s) that touches any files that are mapped to them.
#
# A statement takes precedence over the statements above it so more general
@@ -9,52 +9,35 @@
# Consult GitHub documentation for formatting guidelines:
# https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners#example-of-a-codeowners-file
# As a default for areas with no assignment,
# As a default for areas with no assignment,
# the core team as a whole will be assigned
* @dbt-labs/core-team
* @dbt-labs/core
### OSS Tooling Guild
# Changes to GitHub configurations including Actions
/.github/ @leahwicz
/.github/ @dbt-labs/guild-oss-tooling
.bumpversion.cfg @dbt-labs/guild-oss-tooling
# Language core modules
/core/dbt/config/ @dbt-labs/core-language
/core/dbt/context/ @dbt-labs/core-language
/core/dbt/contracts/ @dbt-labs/core-language
/core/dbt/deps/ @dbt-labs/core-language
/core/dbt/parser/ @dbt-labs/core-language
.changie.yaml @dbt-labs/guild-oss-tooling
# Execution core modules
/core/dbt/events/ @dbt-labs/core-execution @dbt-labs/core-language # eventually remove language but they have knowledge here now
/core/dbt/graph/ @dbt-labs/core-execution
/core/dbt/task/ @dbt-labs/core-execution
pre-commit-config.yaml @dbt-labs/guild-oss-tooling
pytest.ini @dbt-labs/guild-oss-tooling
tox.ini @dbt-labs/guild-oss-tooling
# Adapter interface, scaffold, Postgres plugin
/core/dbt/adapters @dbt-labs/core-adapters
/core/scripts/create_adapter_plugin.py @dbt-labs/core-adapters
/plugins/ @dbt-labs/core-adapters
pyproject.toml @dbt-labs/guild-oss-tooling
requirements.txt @dbt-labs/guild-oss-tooling
dev_requirements.txt @dbt-labs/guild-oss-tooling
/core/setup.py @dbt-labs/guild-oss-tooling
/core/MANIFEST.in @dbt-labs/guild-oss-tooling
### ADAPTERS
# Adapter interface ("base" + "sql" adapter defaults, cache)
/core/dbt/adapters @dbt-labs/core-adapters
# Global project (default macros + materializations), starter project
/core/dbt/include @dbt-labs/core-adapters
# Postgres plugin
/plugins/ @dbt-labs/core-adapters
/plugins/postgres/setup.py @dbt-labs/core-adapters @dbt-labs/guild-oss-tooling
# Functional tests for adapter plugins
/tests/adapter @dbt-labs/core-adapters
### TESTS
# Overlapping ownership for vast majority of unit + functional tests
# Global project: default macros, including generic tests + materializations
/core/dbt/include/global_project @dbt-labs/core-execution @dbt-labs/core-adapters
# Perf regression testing framework
# This excludes the test project files itself since those aren't specific
# This excludes the test project files itself since those aren't specific
# framework changes (excluded by not setting an owner next to it- no owner)
/performance @nathaniel-may
/performance @nathaniel-may
/performance/projects
### ARTIFACTS
/schemas/dbt @dbt-labs/cloud-artifacts


@@ -9,33 +9,23 @@ body:
Thanks for taking the time to fill out this bug report!
- type: checkboxes
attributes:
label: Is this a new bug in dbt-core?
description: >
In other words, is this an error, flaw, failure or fault in our software?
If this is a bug that broke existing functionality that used to work, please open a regression issue.
If this is a bug in an adapter plugin, please open an issue in the adapter's repository.
If this is a bug experienced while using dbt Cloud, please report to [support](mailto:support@getdbt.com).
If this is a request for help or troubleshooting code in your own dbt project, please join our [dbt Community Slack](https://www.getdbt.com/community/join-the-community/) or open a [Discussion question](https://github.com/dbt-labs/docs.getdbt.com/discussions).
Please search to see if an issue already exists for the bug you encountered.
label: Is there an existing issue for this?
description: Please search to see if an issue already exists for the bug you encountered.
options:
- label: I believe this is a new bug in dbt-core
required: true
- label: I have searched the existing issues, and I could not find an existing issue for this bug
- label: I have searched the existing issues
required: true
- type: textarea
attributes:
label: Current Behavior
description: A concise description of what you're experiencing.
validations:
required: true
required: false
- type: textarea
attributes:
label: Expected Behavior
description: A concise description of what you expected to happen.
validations:
required: true
required: false
- type: textarea
attributes:
label: Steps To Reproduce
@@ -46,7 +36,7 @@ body:
3. Run '...'
4. See error...
validations:
required: true
required: false
- type: textarea
id: logs
attributes:
@@ -62,8 +52,8 @@ body:
description: |
examples:
- **OS**: Ubuntu 20.04
- **Python**: 3.9.12 (`python3 --version`)
- **dbt-core**: 1.1.1 (`dbt --version`)
- **Python**: 3.7.2 (`python --version`)
- **dbt**: 0.21.0 (`dbt --version`)
value: |
- OS:
- Python:
@@ -74,15 +64,13 @@ body:
- type: dropdown
id: database
attributes:
label: Which database adapter are you using with dbt?
description: If the bug is specific to the database or adapter, please open the issue in that adapter's repository instead
label: What database are you using dbt with?
multiple: true
options:
- postgres
- redshift
- snowflake
- bigquery
- spark
- other (mention it in "Additional Context")
validations:
required: false


@@ -1,14 +1,4 @@
blank_issues_enabled: false
contact_links:
- name: Ask the community for help
url: https://github.com/dbt-labs/docs.getdbt.com/discussions
about: Need help troubleshooting? Check out our guide on how to ask
- name: Contact dbt Cloud support
url: mailto:support@getdbt.com
about: Are you using dbt Cloud? Contact our support team for help!
- name: Participate in Discussions
url: https://github.com/dbt-labs/dbt-core/discussions
about: Do you have a Big Idea for dbt? Read open discussions, or start a new one
- name: Create an issue for dbt-redshift
url: https://github.com/dbt-labs/dbt-redshift/issues/new/choose
about: Report a bug or request a feature for dbt-redshift
@@ -18,6 +8,9 @@ contact_links:
- name: Create an issue for dbt-snowflake
url: https://github.com/dbt-labs/dbt-snowflake/issues/new/choose
about: Report a bug or request a feature for dbt-snowflake
- name: Create an issue for dbt-spark
url: https://github.com/dbt-labs/dbt-spark/issues/new/choose
about: Report a bug or request a feature for dbt-spark
- name: Ask a question or get support
url: https://docs.getdbt.com/docs/guides/getting-help
about: Ask a question or request support
- name: Questions on Stack Overflow
url: https://stackoverflow.com/questions/tagged/dbt
about: Look at questions/answers at Stack Overflow


@@ -1,32 +1,22 @@
name: ✨ Feature
description: Propose a straightforward extension of dbt functionality
description: Suggest an idea for dbt
title: "[Feature] <title>"
labels: ["enhancement", "triage"]
body:
- type: markdown
attributes:
value: |
Thanks for taking the time to fill out this feature request!
Thanks for taking the time to fill out this feature requests!
- type: checkboxes
attributes:
label: Is this your first time submitting a feature request?
description: >
We want to make sure that features are distinct and discoverable,
so that other members of the community can find them and offer their thoughts.
Issues are the right place to request straightforward extensions of existing dbt functionality.
For "big ideas" about future capabilities of dbt, we ask that you open a
[discussion](https://github.com/dbt-labs/dbt-core/discussions) in the "Ideas" category instead.
label: Is there an existing feature request for this?
description: Please search to see if an issue already exists for the feature you would like.
options:
- label: I have read the [expectations for open source contributors](https://docs.getdbt.com/docs/contributing/oss-expectations)
required: true
- label: I have searched the existing issues, and I could not find an existing issue for this feature
required: true
- label: I am requesting a straightforward extension of existing dbt functionality, rather than a Big Idea better suited to a discussion
- label: I have searched the existing issues
required: true
- type: textarea
attributes:
label: Describe the feature
label: Describe the Feature
description: A clear and concise description of what you want to happen.
validations:
required: true


@@ -1,93 +0,0 @@
name: ☣️ Regression
description: Report a regression you've observed in a newer version of dbt
title: "[Regression] <title>"
labels: ["bug", "regression", "triage"]
body:
- type: markdown
attributes:
value: |
Thanks for taking the time to fill out this regression report!
- type: checkboxes
attributes:
label: Is this a regression in a recent version of dbt-core?
description: >
A regression is when documented functionality works as expected in an older version of dbt-core,
and no longer works after upgrading to a newer version of dbt-core
options:
- label: I believe this is a regression in dbt-core functionality
required: true
- label: I have searched the existing issues, and I could not find an existing issue for this regression
required: true
- type: textarea
attributes:
label: Current Behavior
description: A concise description of what you're experiencing.
validations:
required: true
- type: textarea
attributes:
label: Expected/Previous Behavior
description: A concise description of what you expected to happen.
validations:
required: true
- type: textarea
attributes:
label: Steps To Reproduce
description: Steps to reproduce the behavior.
placeholder: |
1. In this environment...
2. With this config...
3. Run '...'
4. See error...
validations:
required: true
- type: textarea
id: logs
attributes:
label: Relevant log output
description: |
If applicable, log output to help explain your problem.
render: shell
validations:
required: false
- type: textarea
attributes:
label: Environment
description: |
examples:
- **OS**: Ubuntu 20.04
- **Python**: 3.9.12 (`python3 --version`)
- **dbt-core (working version)**: 1.1.1 (`dbt --version`)
- **dbt-core (regression version)**: 1.2.0 (`dbt --version`)
value: |
- OS:
- Python:
- dbt (working version):
- dbt (regression version):
render: markdown
validations:
required: true
- type: dropdown
id: database
attributes:
label: Which database adapter are you using with dbt?
description: If the regression is specific to the database or adapter, please open the issue in that adapter's repository instead
multiple: true
options:
- postgres
- redshift
- snowflake
- bigquery
- spark
- other (mention it in "Additional Context")
validations:
required: false
- type: textarea
attributes:
label: Additional Context
description: |
Links? References? Anything that will give us more context about the issue you are encountering!
Tip: You can attach images or log files by clicking this area to highlight it and then dragging files in.
validations:
required: false

.github/_README.md

@@ -1,216 +0,0 @@
<!-- GitHub will publish this readme on the main repo page if the name is `README.md` so we've added the leading underscore to prevent this -->
<!-- Do not rename this file `README.md` -->
<!-- See https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-readmes -->
## What are GitHub Actions?
GitHub Actions are used for many different purposes. We use them to run tests in CI, validate PRs are in an expected state, and automate processes.
- [Overview of GitHub Actions](https://docs.github.com/en/actions/learn-github-actions/understanding-github-actions)
- [What's a workflow?](https://docs.github.com/en/actions/using-workflows/about-workflows)
- [GitHub Actions guides](https://docs.github.com/en/actions/guides)
___
## Where do actions and workflows live
We try to maintain actions that are shared across repositories in a single place so that necessary changes only need to be made once.
[dbt-labs/actions](https://github.com/dbt-labs/actions/) is the central repository of actions and workflows we use across repositories.
GitHub Actions also live locally within a repository. The workflows can be found at `.github/workflows` from the root of the repository. These should be specific to that code base.
Note: We are actively moving actions into the central Action repository so there is currently some duplication across repositories.
___
## Basics of Using Actions
### Viewing Output
- View the detailed action output for your PR in the **Checks** tab of the PR. This only shows the most recent run. You can also view high-level **Checks** output at the bottom of the PR.
- View _all_ action output for a repository from the [**Actions**](https://github.com/dbt-labs/dbt-core/actions) tab. Workflow results last 1 year. Artifacts last 90 days, unless specified otherwise in individual workflows (see the sketch after this list).
This view often shows what seem like duplicates of the same workflow. This occurs when files are renamed but the workflow name has not changed. These are in fact _not_ duplicates.
You can see the branch the workflow runs from in this view. It is listed in the table between the workflow name and the time/duration of the run. When blank, the workflow is running in the context of the `main` branch.
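As a rough sketch, overriding the default 90-day artifact retention for a single upload might look like the following (the 14-day value here is arbitrary, chosen only for illustration):
```yaml
      - name: Upload logs with a shorter retention period
        uses: actions/upload-artifact@v2
        with:
          name: logs
          path: ./logs
          # keep this artifact for 14 days instead of the repository default
          retention-days: 14
```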
### How to view what workflow file is being referenced from a run
- When viewing the output of a specific workflow run, click the 3 dots at the top right of the display. There will be an option to `View workflow file`.
### How to manually run a workflow
- If a workflow has the `on: workflow_dispatch` trigger, it can be manually triggered
- From the [**Actions**](https://github.com/dbt-labs/dbt-core/actions) tab, find the workflow you want to run, select it, and fill in any required inputs. That's it!
### How to re-run jobs
- Some checks cannot be rerun from the GitHub UI, namely the Snyk checks and the CLA check. Snyk checks are rerun by closing and reopening the PR. You can retrigger the CLA check by commenting on the PR with `@cla-bot check`.
___
## General Standards
### Permissions
- By default, workflows have read permissions in the repository for the contents scope only when no permissions are explicitly set.
- It is best practice to always define the permissions explicitly. This will allow actions to continue to work when the default permissions on the repository are changed. It also allows explicit grants of the least permissions possible.
- There are a lot of permissions available. [Read up on them](https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs) if you're unsure what to use.
```yaml
permissions:
contents: read
pull-requests: write
```
### Secrets
- When to use a [Personal Access Token (PAT)](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token) vs the [GITHUB_TOKEN](https://docs.github.com/en/actions/security-guides/automatic-token-authentication) generated for the action?
The `GITHUB_TOKEN` is used by default. In most cases it is sufficient for what you need.
If you expect the workflow to result in a commit that should retrigger workflows, you will need to use a Personal Access Token for the bot to commit the file. When using the `GITHUB_TOKEN`, the resulting commit will not trigger another GitHub Actions workflow run. This is due to limitations set by GitHub. See [the docs](https://docs.github.com/en/actions/security-guides/automatic-token-authentication#using-the-github_token-in-a-workflow) for a more detailed explanation.
For example, we must use a PAT in our workflow to commit a new changelog yaml file for bot PRs. Once the file has been committed to the branch, it should retrigger the check to validate that a changelog exists on the PR. Otherwise, it would stay in a failed state since the check would never retrigger.
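As a minimal sketch (reusing the `FISHTOWN_BOT_PAT` secret referenced elsewhere in this repository), a checkout step that uses a PAT so commits pushed by the workflow still retrigger other workflows might look like:
```yaml
      - name: Check out the repository with a PAT
        uses: actions/checkout@v3
        with:
          # commits pushed using this token will still trigger other workflows,
          # unlike commits pushed with the default GITHUB_TOKEN
          token: ${{ secrets.FISHTOWN_BOT_PAT }}
```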
### Triggers
You can configure your workflows to run when specific activity on GitHub happens, at a scheduled time, or when an event outside of GitHub occurs. Read more details in the [GitHub docs](https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows).
These triggers are under the `on` key of the workflow and more than one can be listed.
```yaml
on:
push:
branches:
- "main"
- "*.latest"
- "releases/*"
pull_request:
# catch when the PR is opened with the label or when the label is added
types: [opened, labeled]
workflow_dispatch:
```
Some triggers of note that we use:
- `push` - Runs your workflow when you push a commit or tag.
- `pull_request` - Runs your workflow when activity on a pull request in the workflow's repository occurs. Takes in a list of activity types (opened, labeled, etc) if appropriate.
- `pull_request_target` - Same as `pull_request` but runs in the context of the PR target branch.
- `workflow_call` - used with reusable workflows. Triggered by another workflow calling it.
- `workflow_dispatch` - Gives the ability to manually trigger a workflow from the GitHub API, GitHub CLI, or GitHub browser interface.
### Basic Formatting
- Add a description of what your workflow does at the top in this format
```
# **what?**
# Describe what the action does.
# **why?**
# Why does this action exist?
# **when?**
# How/when will it be triggered?
```
- Leave blank lines between steps and jobs
```yaml
jobs:
dependency_changelog:
runs-on: ubuntu-latest
steps:
- name: Get File Name Timestamp
id: filename_time
uses: nanzm/get-time-action@v1.1
with:
format: 'YYYYMMDD-HHmmss'
- name: Get File Content Timestamp
id: file_content_time
uses: nanzm/get-time-action@v1.1
with:
format: 'YYYY-MM-DDTHH:mm:ss.000000-05:00'
- name: Generate Filepath
id: fp
run: |
FILEPATH=.changes/unreleased/Dependencies-${{ steps.filename_time.outputs.time }}.yaml
echo "FILEPATH=$FILEPATH" >> $GITHUB_OUTPUT
```
- Print out all variables you will reference as the first step of a job. This allows for easier debugging. The first job should log all inputs. Subsequent jobs should reference outputs of other jobs, if present.
When possible, generate variables at the top of your workflow in a single place to reference later. This is not always strictly possible since you may generate a value to be used later mid-workflow.
Be sure to use quotes around these logs so special characters are not interpreted.
```yaml
job1:
- name: "[DEBUG] Print Variables"
run: |
echo "all variables defined as inputs"
echo "The last commit sha in the release: ${{ inputs.sha }}"
echo "The release version number: ${{ inputs.version_number }}"
echo "The changelog_path: ${{ inputs.changelog_path }}"
echo "The build_script_path: ${{ inputs.build_script_path }}"
echo "The s3_bucket_name: ${{ inputs.s3_bucket_name }}"
echo "The package_test_command: ${{ inputs.package_test_command }}"
# collect all the variables that need to be used in subsequent jobs
- name: Set Variables
id: variables
run: |
echo "important_path='performance/runner/Cargo.toml'" >> $GITHUB_OUTPUT
echo "release_id=${{github.event.inputs.release_id}}" >> $GITHUB_OUTPUT
echo "open_prs=${{github.event.inputs.open_prs}}" >> $GITHUB_OUTPUT
job2:
needs: [job1]
- name: "[DEBUG] Print Variables"
run: |
echo "all variables defined in job1 > Set Variables > outputs"
echo "important_path: ${{ needs.job1.outputs.important_path }}"
echo "release_id: ${{ needs.job1.outputs.release_id }}"
echo "open_prs: ${{ needs.job1.outputs.open_prs }}"
```
- When it's not obvious what something does, add a comment!
___
## Tips
### Context
- The [GitHub CLI](https://cli.github.com/) is available in the default runners
- Actions run in your context, i.e., an action from the marketplace that uses the `GITHUB_TOKEN` uses the `GITHUB_TOKEN` generated by your workflow run.
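As a small sketch combining both points, a step that calls the GitHub CLI with the token generated for the workflow run (the comment body here is only an illustration):
```yaml
      - name: Comment on the PR with the gh CLI
        env:
          # gh picks up GITHUB_TOKEN from the environment, so this step acts
          # with the permissions granted to this workflow run
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: gh pr comment ${{ github.event.pull_request.number }} --body "Thanks for the contribution!"
```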
### Actions from the Marketplace
- Don't use external actions for things that can easily be accomplished manually.
- Always read through what an external action does before using it! Often an action in the GitHub Actions Marketplace can be replaced with a few lines of bash. This is much more maintainable (and won't change under us) and makes it clear what's actually happening. It also prevents any unexpected behavior from upstream changes to the action.
- Pin actions _we don't control_ to tags.
### Connecting to AWS
- Authenticate with the AWS-maintained `configure-aws-credentials` action
```yaml
- name: Configure AWS credentials from Test account
uses: aws-actions/configure-aws-credentials@v2
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: us-east-1
```
- Then access AWS with the `aws` CLI, which comes preinstalled on the action runner machines
```yaml
- name: Copy Artifacts from S3 via CLI
run: aws s3 cp ${{ env.s3_bucket }} . --recursive
```
### Testing
- Depending on what your action does, you may be able to use [`act`](https://github.com/nektos/act) to test the action locally. Some features of GitHub Actions do not work with `act`, among those are reusable workflows. If you can't use `act`, you'll have to push your changes up before being able to test. This can be slow.


@@ -1,14 +0,0 @@
FROM python:3-slim AS builder
ADD . /app
WORKDIR /app
# We are installing a dependency here directly into our app source dir
RUN pip install --target=/app requests packaging
# A distroless container image with Python and some basics like SSL certificates
# https://github.com/GoogleContainerTools/distroless
FROM gcr.io/distroless/python3-debian10
COPY --from=builder /app /app
WORKDIR /app
ENV PYTHONPATH /app
CMD ["/app/main.py"]


@@ -1,50 +0,0 @@
# Github package 'latest' tag wrangler for containers
## Usage
Plug in the necessary inputs to determine if the container being built should be tagged 'latest' at the package level, for example `dbt-redshift:latest`.
## Inputs
| Input | Description |
| - | - |
| `package` | Name of the GH package to check against |
| `new_version` | Semver of new container |
| `gh_token` | GH token with package read scope |
| `halt_on_missing` | Return non-zero exit code if requested package does not exist (defaults to false) |
## Outputs
| Output | Description |
| - | - |
| `latest` | Whether or not the new container should be tagged 'latest' |
| `minor_latest` | Whether or not the new container should be tagged major.minor.latest |
## Example workflow
```yaml
name: Ship it!
on:
workflow_dispatch:
inputs:
package:
description: The package to publish
required: true
version_number:
description: The version number
required: true
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Wrangle latest tag
id: is_latest
uses: ./.github/actions/latest-wrangler
with:
package: ${{ github.event.inputs.package }}
new_version: ${{ github.event.inputs.new_version }}
gh_token: ${{ secrets.GITHUB_TOKEN }}
- name: Print the results
run: |
echo "Is it latest? Survey says: ${{ steps.is_latest.outputs.latest }} !"
echo "Is it minor.latest? Survey says: ${{ steps.is_latest.outputs.minor_latest }} !"
```


@@ -1,20 +0,0 @@
name: "Github package 'latest' tag wrangler for containers"
description: "Determines wether or not a given dbt container should be given a bare 'latest' tag (I.E. dbt-core:latest)"
inputs:
package_name:
description: "Package to check (I.E. dbt-core, dbt-redshift, etc)"
required: true
new_version:
description: "Semver of the container being built (I.E. 1.0.4)"
required: true
gh_token:
description: "Auth token for github (must have view packages scope)"
required: true
outputs:
latest:
description: "Wether or not built container should be tagged latest (bool)"
minor_latest:
description: "Wether or not built container should be tagged minor.latest (bool)"
runs:
using: "docker"
image: "Dockerfile"


@@ -1,26 +0,0 @@
name: Ship it!
on:
workflow_dispatch:
inputs:
package:
description: The package to publish
required: true
version_number:
description: The version number
required: true
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Wrangle latest tag
id: is_latest
uses: ./.github/actions/latest-wrangler
with:
package: ${{ github.event.inputs.package }}
new_version: ${{ github.event.inputs.new_version }}
gh_token: ${{ secrets.GITHUB_TOKEN }}
- name: Print the results
run: |
echo "Is it latest? Survey says: ${{ steps.is_latest.outputs.latest }} !"


@@ -1,6 +0,0 @@
{
"inputs": {
"version_number": "1.0.1",
"package": "dbt-redshift"
}
}


@@ -1,98 +0,0 @@
import os
import sys
import requests
from distutils.util import strtobool
from typing import Union
from packaging.version import parse, Version
if __name__ == "__main__":
# get inputs
package = os.environ["INPUT_PACKAGE"]
new_version = parse(os.environ["INPUT_NEW_VERSION"])
gh_token = os.environ["INPUT_GH_TOKEN"]
halt_on_missing = strtobool(os.environ.get("INPUT_HALT_ON_MISSING", "False"))
# get package metadata from github
package_request = requests.get(
f"https://api.github.com/orgs/dbt-labs/packages/container/{package}/versions",
auth=("", gh_token),
)
package_meta = package_request.json()
# Log info if we don't get a 200
if package_request.status_code != 200:
print(f"Call to GH API failed: {package_request.status_code} {package_meta['message']}")
# Make an early exit if there is no matching package in github
if package_request.status_code == 404:
if halt_on_missing:
sys.exit(1)
# everything is the latest if the package doesn't exist
github_output = os.environ.get("GITHUB_OUTPUT")
with open(github_output, "at", encoding="utf-8") as gh_output:
gh_output.write("latest=True")
gh_output.write("minor_latest=True")
sys.exit(0)
# TODO: verify package meta is "correct"
# https://github.com/dbt-labs/dbt-core/issues/4640
# map versions and tags
version_tag_map = {
version["id"]: version["metadata"]["container"]["tags"] for version in package_meta
}
# is pre-release
pre_rel = True if any(x in str(new_version) for x in ["a", "b", "rc"]) else False
# semver of current latest
for version, tags in version_tag_map.items():
if "latest" in tags:
# N.B. This seems counterintuitive, but we expect any version tagged
# 'latest' to have exactly three associated tags:
# latest, major.minor.latest, and major.minor.patch.
# Subtracting everything that contains the string 'latest' gets us
# the major.minor.patch which is what's needed for comparison.
current_latest = parse([tag for tag in tags if "latest" not in tag][0])
else:
current_latest = False
# semver of current_minor_latest
for version, tags in version_tag_map.items():
if f"{new_version.major}.{new_version.minor}.latest" in tags:
# Similar to above, only now we expect exactly two tags:
# major.minor.patch and major.minor.latest
current_minor_latest = parse([tag for tag in tags if "latest" not in tag][0])
else:
current_minor_latest = False
def is_latest(
pre_rel: bool, new_version: Version, remote_latest: Union[bool, Version]
) -> bool:
"""Determine if a given contaier should be tagged 'latest' based on:
- it's pre-release status
- it's version
- the version of a previously identified container tagged 'latest'
:param pre_rel: Wether or not the version of the new container is a pre-release
:param new_version: The version of the new container
:param remote_latest: The version of the previously identified container that's
already tagged latest or False
"""
# is a pre-release = not latest
if pre_rel:
return False
# + no latest tag found = is latest
if not remote_latest:
return True
# + if remote version is lower than current = is latest, else not latest
return True if remote_latest <= new_version else False
latest = is_latest(pre_rel, new_version, current_latest)
minor_latest = is_latest(pre_rel, new_version, current_minor_latest)
github_output = os.environ.get("GITHUB_OUTPUT")
with open(github_output, "at", encoding="utf-8") as gh_output:
gh_output.write(f"latest={latest}")
gh_output.write(f"minor_latest={minor_latest}")


@@ -1,35 +1,21 @@
resolves #
[docs](https://github.com/dbt-labs/docs.getdbt.com/issues/new/choose) dbt-labs/docs.getdbt.com/#
resolves #
<!---
Include the number of the issue addressed by this PR above if applicable.
PRs for code changes without an associated issue *will not be merged*.
See CONTRIBUTING.md for more information.
Include the number of the docs issue that was opened for this PR. If
this change has no user-facing implications, "N/A" suffices instead. New
docs tickets can be created by clicking the link above or by going to
https://github.com/dbt-labs/docs.getdbt.com/issues/new/choose.
-->
### Problem
### Description
<!---
Describe the problem this PR is solving. What is the application state
before this PR is merged?
-->
### Solution
<!---
Describe the way this PR solves the above problem. Add as much detail as you
can to help reviewers understand your changes. Include any alternatives and
tradeoffs you considered.
Describe the Pull Request here. Add any references and info to help reviewers
understand your changes. Include any tradeoffs you considered.
-->
### Checklist
- [ ] I have read [the contributing guide](https://github.com/dbt-labs/dbt-core/blob/main/CONTRIBUTING.md) and understand what's expected of me
- [ ] I have run this code in development and it appears to resolve the stated issue
- [ ] I have signed the [CLA](https://docs.getdbt.com/docs/contributor-license-agreements)
- [ ] I have run this code in development and it appears to resolve the stated issue
- [ ] This PR includes tests, or tests are not required/relevant for this PR
- [ ] This PR has no interface changes (e.g. macros, cli, logs, json artifacts, config files, adapter interface, etc) or this PR has already received feedback and approval from Product or DX
- [ ] I have updated the `CHANGELOG.md` and added information about my change


@@ -0,0 +1,95 @@
module.exports = ({ context }) => {
const defaultPythonVersion = "3.8";
const supportedPythonVersions = ["3.7", "3.8", "3.9"];
const supportedAdapters = ["postgres"];
// if PR, generate matrix based on files changed and PR labels
if (context.eventName.includes("pull_request")) {
// `changes` is a list of adapter names that have related
// file changes in the PR
// ex: ['postgres', 'snowflake']
const changes = JSON.parse(process.env.CHANGES);
const labels = context.payload.pull_request.labels.map(({ name }) => name);
console.log("labels", labels);
console.log("changes", changes);
const testAllLabel = labels.includes("test all");
const include = [];
for (const adapter of supportedAdapters) {
if (
changes.includes(adapter) ||
testAllLabel ||
labels.includes(`test ${adapter}`)
) {
for (const pythonVersion of supportedPythonVersions) {
if (
pythonVersion === defaultPythonVersion ||
labels.includes(`test python${pythonVersion}`) ||
testAllLabel
) {
// always run tests on ubuntu by default
include.push({
os: "ubuntu-latest",
adapter,
"python-version": pythonVersion,
});
if (labels.includes("test windows") || testAllLabel) {
include.push({
os: "windows-latest",
adapter,
"python-version": pythonVersion,
});
}
if (labels.includes("test macos") || testAllLabel) {
include.push({
os: "macos-latest",
adapter,
"python-version": pythonVersion,
});
}
}
}
}
}
console.log("matrix", { include });
return {
include,
};
}
// if not PR, generate matrix of python version, adapter, and operating
// system to run integration tests on
const include = [];
// run for all adapters and python versions on ubuntu
for (const adapter of supportedAdapters) {
for (const pythonVersion of supportedPythonVersions) {
include.push({
os: 'ubuntu-latest',
adapter: adapter,
"python-version": pythonVersion,
});
}
}
// additionally include runs for all adapters, on macos and windows,
// but only for the default python version
for (const adapter of supportedAdapters) {
for (const operatingSystem of ["windows-latest", "macos-latest"]) {
include.push({
os: operatingSystem,
adapter: adapter,
"python-version": defaultPythonVersion,
});
}
}
console.log("matrix", { include });
return {
include,
};
};


@@ -1,5 +1,5 @@
# **what?**
# When a PR is merged, if it has the backport label, it will create
# When a PR is merged, if it has the backport label, it will create
# a new PR to backport those changes to the given branch. If it can't
# cleanly do a backport, it will comment on the merged PR of the failure.
#
@@ -13,28 +13,22 @@
# This automates the backporting process
# **when?**
# Once a PR is "Squash and merge"'d, by adding a backport label, this is triggered
# Once a PR is "Squash and merge"'d and it has been correctly labeled
# according to the naming convention.
name: Backport
on:
pull_request:
types:
- closed
- labeled
permissions:
contents: write
pull-requests: write
jobs:
backport:
runs-on: ubuntu-18.04
name: Backport
runs-on: ubuntu-latest
# Only react to merged PRs for security reasons.
# See https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#pull_request_target.
if: >
github.event.pull_request.merged
&& contains(github.event.label.name, 'backport')
steps:
- uses: tibdex/backport@v2.0.3
- name: Backport
uses: tibdex/backport@v1.1.1
with:
github_token: ${{ secrets.GITHUB_TOKEN }}


@@ -1,61 +0,0 @@
# **what?**
# When bots create a PR, this action will add a corresponding changie yaml file to that
# PR when a specific label is added.
#
# The file is created off a template:
#
# kind: <per action matrix>
# body: <PR title>
# time: <current timestamp>
# custom:
# Author: <PR User Login (generally the bot)>
# Issue: 4904
# PR: <PR number>
#
# **why?**
# Automate changelog generation for more visibility with automated bot PRs.
#
# **when?**
# Once a PR is created, label should be added to PR before or after creation. You can also
# manually trigger this by adding the appropriate label at any time.
#
# **how to add another bot?**
# Add the label and changie kind to the include matrix. That's it!
#
name: Bot Changelog
on:
pull_request:
# catch when the PR is opened with the label or when the label is added
types: [labeled]
permissions:
contents: write
pull-requests: read
jobs:
generate_changelog:
strategy:
matrix:
include:
- label: "dependencies"
changie_kind: "Dependencies"
- label: "snyk"
changie_kind: "Security"
runs-on: ubuntu-latest
steps:
- name: Create and commit changelog on bot PR
if: ${{ contains(github.event.pull_request.labels.*.name, matrix.label) }}
id: bot_changelog
uses: emmyoop/changie_bot@v1.1.0
with:
GITHUB_TOKEN: ${{ secrets.FISHTOWN_BOT_PAT }}
commit_author_name: "Github Build Bot"
commit_author_email: "<buildbot@fishtownanalytics.com>"
commit_message: "Add automated changelog yaml from template for bot PR"
changie_kind: ${{ matrix.changie_kind }}
label: ${{ matrix.label }}
custom_changelog_string: "custom:\n Author: ${{ github.event.pull_request.user.login }}\n PR: ${{ github.event.pull_request.number }}"


@@ -1,40 +0,0 @@
# **what?**
# Checks that a file has been committed under the /.changes directory
# as a new CHANGELOG entry. Cannot check for a specific filename as
# it is dynamically generated by change type and timestamp.
# This workflow should not require any secrets since it runs for PRs
# from forked repos.
# By default, secrets are not passed to workflows running from
# a forked repo.
# **why?**
# Ensure code change gets reflected in the CHANGELOG.
# **when?**
# This will run for all PRs going into main and *.latest. It will
# run when they are opened, reopened, when any label is added or removed,
# and when new code is pushed to the branch. The action will then get
# skipped if the 'Skip Changelog' label is present in any of the labels.
name: Check Changelog Entry
on:
pull_request:
types: [opened, reopened, labeled, unlabeled, synchronize]
workflow_dispatch:
defaults:
run:
shell: bash
permissions:
contents: read
pull-requests: write
jobs:
changelog:
uses: dbt-labs/actions/.github/workflows/changelog-existence.yml@main
with:
changelog_comment: 'Thank you for your pull request! We could not find a changelog entry for this change. For details on how to document a change, see [the contributing guide](https://github.com/dbt-labs/dbt-core/blob/main/CONTRIBUTING.md#adding-changelog-entry).'
skip_label: 'Skip Changelog'
secrets: inherit


@@ -1,41 +0,0 @@
# **what?**
# Cuts a new `*.latest` branch
# Also cleans up all files in `.changes/unreleased` and `.changes/previous version` on
# `main` and bumps `main` to the input version.
# **why?**
# Generally reduces the workload of engineers and reduces error. Allow automation.
# **when?**
# This will run when called manually.
name: Cut new release branch
on:
workflow_dispatch:
inputs:
version_to_bump_main:
description: 'The alpha version main should bump to (ex. 1.6.0a1)'
required: true
new_branch_name:
description: 'The full name of the new branch (ex. 1.5.latest)'
required: true
defaults:
run:
shell: bash
permissions:
contents: write
jobs:
cut_branch:
name: "Cut branch and clean up main for dbt-core"
uses: dbt-labs/actions/.github/workflows/cut-release-branch.yml@main
with:
version_to_bump_main: ${{ inputs.version_to_bump_main }}
new_branch_name: ${{ inputs.new_branch_name }}
PR_title: "Cleanup main after cutting new ${{ inputs.new_branch_name }} branch"
PR_body: "All adapter PRs will fail CI until the dbt-core PR has been merged due to release version conflicts."
secrets:
FISHTOWN_BOT_PAT: ${{ secrets.FISHTOWN_BOT_PAT }}

.github/workflows/integration.yml

@@ -0,0 +1,222 @@
# **what?**
# This workflow runs all integration tests for supported OS
# and python versions and core adapters. If triggered by PR,
# the workflow will only run tests for adapters related
# to code changes. Use the `test all` and `test ${adapter}`
# label to run all or additional tests. Use `ok to test`
# label to mark PRs from forked repositories that are safe
# to run integration tests for. Requires secrets to run
# against different warehouses.
# **why?**
# This checks the functionality of dbt from a user's perspective
# and attempts to catch functional regressions.
# **when?**
# This workflow will run on every push to a protected branch
# and when manually triggered. It will also run for all PRs, including
# PRs from forks. The workflow will be skipped until there is a label
# to mark the PR as safe to run.
name: Adapter Integration Tests
on:
# pushes to release branches
push:
branches:
- "main"
- "develop"
- "*.latest"
- "releases/*"
# all PRs, important to note that `pull_request_target` workflows
# will run in the context of the target branch of a PR
pull_request_target:
# manual trigger
workflow_dispatch:
# explicitly turn off permissions for `GITHUB_TOKEN`
permissions: read-all
# will cancel previous workflows triggered by the same event and for the same ref for PRs or same SHA otherwise
concurrency:
group: ${{ github.workflow }}-${{ github.event_name }}-${{ contains(github.event_name, 'pull_request') && github.event.pull_request.head.ref || github.sha }}
cancel-in-progress: true
# sets default shell to bash, for all operating systems
defaults:
run:
shell: bash
jobs:
# generate test metadata about what files changed and the testing matrix to use
test-metadata:
# run if not a PR from a forked repository or has a label to mark as safe to test
if: >-
github.event_name != 'pull_request_target' ||
github.event.pull_request.head.repo.full_name == github.repository ||
contains(github.event.pull_request.labels.*.name, 'ok to test')
runs-on: ubuntu-latest
outputs:
matrix: ${{ steps.generate-matrix.outputs.result }}
steps:
- name: Check out the repository (non-PR)
if: github.event_name != 'pull_request_target'
uses: actions/checkout@v2
with:
persist-credentials: false
- name: Check out the repository (PR)
if: github.event_name == 'pull_request_target'
uses: actions/checkout@v2
with:
persist-credentials: false
ref: ${{ github.event.pull_request.head.sha }}
- name: Check if relevant files changed
# https://github.com/marketplace/actions/paths-changes-filter
# For each filter, it sets output variable named by the filter to the text:
# 'true' - if any of changed files matches any of filter rules
# 'false' - if none of changed files matches any of filter rules
# also, returns:
# `changes` - JSON array with names of all filters matching any of the changed files
uses: dorny/paths-filter@v2
id: get-changes
with:
token: ${{ secrets.GITHUB_TOKEN }}
filters: |
postgres:
- 'core/**'
- 'plugins/postgres/**'
- 'dev-requirements.txt'
- name: Generate integration test matrix
id: generate-matrix
uses: actions/github-script@v4
env:
CHANGES: ${{ steps.get-changes.outputs.changes }}
with:
script: |
const script = require('./.github/scripts/integration-test-matrix.js')
const matrix = script({ context })
console.log(matrix)
return matrix
test:
name: ${{ matrix.adapter }} / python ${{ matrix.python-version }} / ${{ matrix.os }}
# run if not a PR from a forked repository or has a label to mark as safe to test
# also checks that the matrix generated is not empty
if: >-
needs.test-metadata.outputs.matrix &&
fromJSON( needs.test-metadata.outputs.matrix ).include[0] &&
(
github.event_name != 'pull_request_target' ||
github.event.pull_request.head.repo.full_name == github.repository ||
contains(github.event.pull_request.labels.*.name, 'ok to test')
)
runs-on: ${{ matrix.os }}
needs: test-metadata
strategy:
fail-fast: false
matrix: ${{ fromJSON(needs.test-metadata.outputs.matrix) }}
env:
TOXENV: integration-${{ matrix.adapter }}
PYTEST_ADDOPTS: "-v --color=yes -n4 --csv integration_results.csv"
DBT_INVOCATION_ENV: github-actions
steps:
- name: Check out the repository
if: github.event_name != 'pull_request_target'
uses: actions/checkout@v2
with:
persist-credentials: false
# explicitly check out the branch for the PR,
# this is necessary for the `pull_request_target` event
- name: Check out the repository (PR)
if: github.event_name == 'pull_request_target'
uses: actions/checkout@v2
with:
persist-credentials: false
ref: ${{ github.event.pull_request.head.sha }}
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Set up postgres (linux)
if: |
matrix.adapter == 'postgres' &&
runner.os == 'Linux'
uses: ./.github/actions/setup-postgres-linux
- name: Set up postgres (macos)
if: |
matrix.adapter == 'postgres' &&
runner.os == 'macOS'
uses: ./.github/actions/setup-postgres-macos
- name: Set up postgres (windows)
if: |
matrix.adapter == 'postgres' &&
runner.os == 'Windows'
uses: ./.github/actions/setup-postgres-windows
- name: Install python dependencies
run: |
pip install --user --upgrade pip
pip install tox
pip --version
tox --version
- name: Run tox (postgres)
if: matrix.adapter == 'postgres'
run: tox
- uses: actions/upload-artifact@v2
if: always()
with:
name: logs
path: ./logs
- name: Get current date
if: always()
id: date
run: echo "::set-output name=date::$(date +'%Y-%m-%dT%H_%M_%S')" #no colons allowed for artifacts
- uses: actions/upload-artifact@v2
if: always()
with:
name: integration_results_${{ matrix.python-version }}_${{ matrix.os }}_${{ matrix.adapter }}-${{ steps.date.outputs.date }}.csv
path: integration_results.csv
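As an aside, the `::set-output` syntax used in the date step above has since been deprecated by GitHub Actions; an equivalent step written against the `$GITHUB_OUTPUT` environment file (the form that appears later in this diff) would look roughly like:
- name: Get current date
  if: always()
  id: date
  run: |
    CURRENT_DATE=$(date +'%Y-%m-%dT%H_%M_%S') # no colons allowed for artifacts
    echo "date=$CURRENT_DATE" >> $GITHUB_OUTPUT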
require-label-comment:
runs-on: ubuntu-latest
needs: test
permissions:
pull-requests: write
steps:
- name: Needs permission PR comment
if: >-
needs.test.result == 'skipped' &&
github.event_name == 'pull_request_target' &&
github.event.pull_request.head.repo.full_name != github.repository
uses: unsplash/comment-on-pr@master
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
msg: |
"You do not have permissions to run integration tests, @dbt-labs/core "\
"needs to label this PR with `ok to test` in order to run integration tests!"
check_for_duplicate_msg: true

View File

@@ -13,13 +13,13 @@ name: Jira Issue Creation
on:
issues:
types: [opened, labeled]
permissions:
issues: write
jobs:
call-creation-action:
uses: dbt-labs/actions/.github/workflows/jira-creation-actions.yml@main
call-label-action:
uses: dbt-labs/jira-actions/.github/workflows/jira-creation.yml@main
secrets:
JIRA_BASE_URL: ${{ secrets.JIRA_BASE_URL }}
JIRA_USER_EMAIL: ${{ secrets.JIRA_USER_EMAIL }}

View File

@@ -13,14 +13,15 @@ name: Jira Label Mirroring
on:
issues:
types: [labeled, unlabeled]
permissions:
issues: read
jobs:
call-label-action:
uses: dbt-labs/actions/.github/workflows/jira-label-actions.yml@main
uses: dbt-labs/jira-actions/.github/workflows/jira-label.yml@main
secrets:
JIRA_BASE_URL: ${{ secrets.JIRA_BASE_URL }}
JIRA_USER_EMAIL: ${{ secrets.JIRA_USER_EMAIL }}
JIRA_API_TOKEN: ${{ secrets.JIRA_API_TOKEN }}

View File

@@ -15,13 +15,10 @@ on:
issues:
types: [closed, deleted, reopened]
# no special access is needed
permissions: read-all
jobs:
call-transition-action:
uses: dbt-labs/actions/.github/workflows/jira-transition-actions.yml@main
call-label-action:
uses: dbt-labs/jira-actions/.github/workflows/jira-transition.yml@main
secrets:
JIRA_BASE_URL: ${{ secrets.JIRA_BASE_URL }}
JIRA_USER_EMAIL: ${{ secrets.JIRA_USER_EMAIL }}
JIRA_API_TOKEN: ${{ secrets.JIRA_API_TOKEN }}
JIRA_API_TOKEN: ${{ secrets.JIRA_API_TOKEN }}

View File

@@ -1,8 +1,9 @@
# **what?**
# Runs code quality checks, unit tests, integration tests and
# verifies python build on all code committed to the repository. This workflow
# should not require any secrets since it runs for PRs from forked repos. By
# default, secrets are not passed to workflows running from a forked repos.
# Runs code quality checks, unit tests, and verifies python build on
# all code committed to the repository. This workflow should not
# require any secrets since it runs for PRs from forked repos.
# By default, secrets are not passed to workflows running from
# a forked repo.
# **why?**
# Ensure code for dbt meets a certain quality standard.
@@ -17,6 +18,7 @@ on:
push:
branches:
- "main"
- "develop"
- "*.latest"
- "releases/*"
pull_request:
@@ -35,59 +37,68 @@ defaults:
jobs:
code-quality:
name: code-quality
name: ${{ matrix.toxenv }}
runs-on: ubuntu-latest
timeout-minutes: 10
strategy:
fail-fast: false
matrix:
toxenv: [flake8, mypy]
env:
TOXENV: ${{ matrix.toxenv }}
PYTEST_ADDOPTS: "-v --color=yes"
steps:
- name: Check out the repository
uses: actions/checkout@v3
uses: actions/checkout@v2
with:
persist-credentials: false
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: '3.8'
uses: actions/setup-python@v2
- name: Install python dependencies
run: |
python -m pip install --user --upgrade pip
python -m pip --version
make dev
mypy --version
dbt --version
pip install --user --upgrade pip
pip install tox
pip --version
tox --version
- name: Run pre-commit hooks
run: pre-commit run --all-files --show-diff-on-failure
- name: Run tox
run: tox
unit:
name: unit test / python ${{ matrix.python-version }}
runs-on: ubuntu-latest
timeout-minutes: 10
strategy:
fail-fast: false
matrix:
python-version: ["3.8", "3.9", "3.10", "3.11"]
python-version: [3.7, 3.8, 3.9]
env:
TOXENV: "unit"
PYTEST_ADDOPTS: "-v --color=yes --csv unit_results.csv"
steps:
- name: Check out the repository
uses: actions/checkout@v3
uses: actions/checkout@v2
with:
persist-credentials: false
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v4
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Install python dependencies
run: |
python -m pip install --user --upgrade pip
python -m pip --version
python -m pip install tox
pip install --user --upgrade pip
pip install tox
pip --version
tox --version
- name: Run tox
@@ -96,94 +107,13 @@ jobs:
- name: Get current date
if: always()
id: date
run: |
CURRENT_DATE=$(date +'%Y-%m-%dT%H_%M_%S') # no colons allowed for artifacts
echo "date=$CURRENT_DATE" >> $GITHUB_OUTPUT
run: echo "::set-output name=date::$(date +'%Y-%m-%dT%H_%M_%S')" #no colons allowed for artifacts
- name: Upload Unit Test Coverage to Codecov
if: ${{ matrix.python-version == '3.11' }}
uses: codecov/codecov-action@v3
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
integration:
name: integration test / python ${{ matrix.python-version }} / ${{ matrix.os }}
runs-on: ${{ matrix.os }}
timeout-minutes: 60
strategy:
fail-fast: false
matrix:
python-version: ["3.8", "3.9", "3.10", "3.11"]
os: [ubuntu-20.04]
include:
- python-version: 3.8
os: windows-latest
- python-version: 3.8
os: macos-latest
env:
TOXENV: integration
DBT_INVOCATION_ENV: github-actions
DBT_TEST_USER_1: dbt_test_user_1
DBT_TEST_USER_2: dbt_test_user_2
DBT_TEST_USER_3: dbt_test_user_3
DD_CIVISIBILITY_AGENTLESS_ENABLED: true
DD_API_KEY: ${{ secrets.DATADOG_API_KEY }}
DD_SITE: datadoghq.com
DD_ENV: ci
DD_SERVICE: ${{ github.event.repository.name }}
steps:
- name: Check out the repository
uses: actions/checkout@v3
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
- name: Set up postgres (linux)
if: runner.os == 'Linux'
uses: ./.github/actions/setup-postgres-linux
- name: Set up postgres (macos)
if: runner.os == 'macOS'
uses: ./.github/actions/setup-postgres-macos
- name: Set up postgres (windows)
if: runner.os == 'Windows'
uses: ./.github/actions/setup-postgres-windows
- name: Install python tools
run: |
python -m pip install --user --upgrade pip
python -m pip --version
python -m pip install tox
tox --version
- name: Run tests
run: tox -- --ddtrace
- name: Get current date
if: always()
id: date
run: |
CURRENT_DATE=$(date +'%Y-%m-%dT%H_%M_%S') # no colons allowed for artifacts
echo "date=$CURRENT_DATE" >> $GITHUB_OUTPUT
- uses: actions/upload-artifact@v3
- uses: actions/upload-artifact@v2
if: always()
with:
name: logs_${{ matrix.python-version }}_${{ matrix.os }}_${{ steps.date.outputs.date }}
path: ./logs
- name: Upload Integration Test Coverage to Codecov
if: ${{ matrix.python-version == '3.11' }}
uses: codecov/codecov-action@v3
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
name: unit_results_${{ matrix.python-version }}-${{ steps.date.outputs.date }}.csv
path: unit_results.csv
build:
name: build packages
@@ -192,18 +122,20 @@ jobs:
steps:
- name: Check out the repository
uses: actions/checkout@v3
uses: actions/checkout@v2
with:
persist-credentials: false
- name: Set up Python
uses: actions/setup-python@v4
uses: actions/setup-python@v2
with:
python-version: '3.8'
python-version: 3.8
- name: Install python dependencies
run: |
python -m pip install --user --upgrade pip
python -m pip install --upgrade setuptools wheel twine check-wheel-contents
python -m pip --version
pip install --user --upgrade pip
pip install --upgrade setuptools wheel twine check-wheel-contents
pip --version
- name: Build distributions
run: ./scripts/build-dist.sh
@@ -219,9 +151,47 @@ jobs:
run: |
check-wheel-contents dist/*.whl --ignore W007,W008
- uses: actions/upload-artifact@v2
with:
name: dist
path: dist/
test-build:
name: verify packages / python ${{ matrix.python-version }} / ${{ matrix.os }}
needs: build
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest, macos-latest, windows-latest]
python-version: [3.7, 3.8, 3.9]
steps:
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Install python dependencies
run: |
pip install --user --upgrade pip
pip install --upgrade wheel
pip --version
- uses: actions/download-artifact@v2
with:
name: dist
path: dist/
- name: Show distributions
run: ls -lh dist/
- name: Install wheel distributions
run: |
find ./dist/*.whl -maxdepth 1 -type f | xargs python -m pip install --force-reinstall --find-links=dist/
find ./dist/*.whl -maxdepth 1 -type f | xargs pip install --force-reinstall --find-links=dist/
- name: Check wheel distributions
run: |
@@ -230,7 +200,7 @@ jobs:
- name: Install source distributions
# ignore dbt-1.0.0, which intentionally raises an error when installed from source
run: |
find ./dist/dbt-[a-z]*.gz -maxdepth 1 -type f | xargs python -m pip install --force-reinstall --find-links=dist/
find ./dist/dbt-[a-z]*.gz -maxdepth 1 -type f | xargs pip install --force-reinstall --find-links=dist/
- name: Check source distributions
run: |

View File

@@ -1,265 +0,0 @@
# **what?**
# This workflow models the performance characteristics of a point in time in dbt.
# It runs specific dbt commands on committed projects multiple times to create and
# commit information about the distribution to the current branch. For more information
# see the readme in the performance module at /performance/README.md.
#
# **why?**
# When developing new features, we can take quick performance samples and compare
# them against the committed baseline measurements produced by this workflow to detect
# some performance regressions at development time before they reach users.
#
# **when?**
# This is only run once directly after each release (for non-prereleases). If for some
# reason the results of a run are not satisfactory, it can also be triggered manually.
name: Model Performance Characteristics
on:
# runs after non-prereleases are published.
release:
types: [released]
# run manually from the actions tab
workflow_dispatch:
inputs:
release_id:
description: 'dbt version to model (must be non-prerelease in Pypi)'
type: string
required: true
env:
RUNNER_CACHE_PATH: performance/runner/target/release/runner
# both jobs need to write
permissions:
contents: write
pull-requests: write
jobs:
set-variables:
name: Setting Variables
runs-on: ubuntu-latest
outputs:
cache_key: ${{ steps.variables.outputs.cache_key }}
release_id: ${{ steps.semver.outputs.base-version }}
release_branch: ${{ steps.variables.outputs.release_branch }}
steps:
# explicitly checkout the performance runner from main regardless of which
# version we are modeling.
- name: Checkout
uses: actions/checkout@v3
with:
ref: main
- name: Parse version into parts
id: semver
uses: dbt-labs/actions/parse-semver@v1
with:
version: ${{ github.event.inputs.release_id || github.event.release.tag_name }}
# collect all the variables that need to be used in subsequent jobs
- name: Set variables
id: variables
run: |
# create a cache key that will be used in the next job. without this the
# next job would have to checkout from main and hash the files itself.
echo "cache_key=${{ runner.os }}-${{ hashFiles('performance/runner/Cargo.toml')}}-${{ hashFiles('performance/runner/src/*') }}" >> $GITHUB_OUTPUT
branch_name="${{steps.semver.outputs.major}}.${{steps.semver.outputs.minor}}.latest"
echo "release_branch=$branch_name" >> $GITHUB_OUTPUT
echo "release branch is inferred to be ${branch_name}"
latest-runner:
name: Build or Fetch Runner
runs-on: ubuntu-latest
needs: [set-variables]
env:
RUSTFLAGS: "-D warnings"
steps:
- name: '[DEBUG] print variables'
run: |
echo "all variables defined in set-variables"
echo "cache_key: ${{ needs.set-variables.outputs.cache_key }}"
echo "release_id: ${{ needs.set-variables.outputs.release_id }}"
echo "release_branch: ${{ needs.set-variables.outputs.release_branch }}"
# explicitly checkout the performance runner from main regardless of which
# version we are modeling.
- name: Checkout
uses: actions/checkout@v3
with:
ref: main
# attempts to access a previously cached runner
- uses: actions/cache@v3
id: cache
with:
path: ${{ env.RUNNER_CACHE_PATH }}
key: ${{ needs.set-variables.outputs.cache_key }}
- name: Fetch Rust Toolchain
if: steps.cache.outputs.cache-hit != 'true'
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
override: true
- name: Add fmt
if: steps.cache.outputs.cache-hit != 'true'
run: rustup component add rustfmt
- name: Cargo fmt
if: steps.cache.outputs.cache-hit != 'true'
uses: actions-rs/cargo@v1
with:
command: fmt
args: --manifest-path performance/runner/Cargo.toml --all -- --check
- name: Test
if: steps.cache.outputs.cache-hit != 'true'
uses: actions-rs/cargo@v1
with:
command: test
args: --manifest-path performance/runner/Cargo.toml
- name: Build (optimized)
if: steps.cache.outputs.cache-hit != 'true'
uses: actions-rs/cargo@v1
with:
command: build
args: --release --manifest-path performance/runner/Cargo.toml
# the cache action automatically caches this binary at the end of the job
model:
# depends on `latest-runner` as a separate job so that failures in this job do not prevent
# a successfully tested and built binary from being cached.
needs: [set-variables, latest-runner]
name: Model a release
runs-on: ubuntu-latest
steps:
- name: '[DEBUG] print variables'
run: |
echo "all variables defined in set-variables"
echo "cache_key: ${{ needs.set-variables.outputs.cache_key }}"
echo "release_id: ${{ needs.set-variables.outputs.release_id }}"
echo "release_branch: ${{ needs.set-variables.outputs.release_branch }}"
- name: Setup Python
uses: actions/setup-python@v4
with:
python-version: "3.8"
- name: Install dbt
run: pip install dbt-postgres==${{ needs.set-variables.outputs.release_id }}
- name: Install Hyperfine
run: wget https://github.com/sharkdp/hyperfine/releases/download/v1.11.0/hyperfine_1.11.0_amd64.deb && sudo dpkg -i hyperfine_1.11.0_amd64.deb
# explicitly checkout main to get the latest project definitions
- name: Checkout
uses: actions/checkout@v3
with:
ref: main
# this was built in the previous job so it will be there.
- name: Fetch Runner
uses: actions/cache@v3
id: cache
with:
path: ${{ env.RUNNER_CACHE_PATH }}
key: ${{ needs.set-variables.outputs.cache_key }}
- name: Move Runner
run: mv performance/runner/target/release/runner performance/app
- name: Change Runner Permissions
run: chmod +x ./performance/app
- name: '[DEBUG] ls baseline directory before run'
run: ls -R performance/baselines/
# `${{ github.workspace }}` is used to pass the absolute path
- name: Create directories
run: |
mkdir ${{ github.workspace }}/performance/tmp/
mkdir -p performance/baselines/${{ needs.set-variables.outputs.release_id }}/
# Run modeling with taking 20 samples
- name: Run Measurement
run: |
performance/app model -v ${{ needs.set-variables.outputs.release_id }} -b ${{ github.workspace }}/performance/baselines/ -p ${{ github.workspace }}/performance/projects/ -t ${{ github.workspace }}/performance/tmp/ -n 20
- name: '[DEBUG] ls baseline directory after run'
run: ls -R performance/baselines/
- uses: actions/upload-artifact@v3
with:
name: baseline
path: performance/baselines/${{ needs.set-variables.outputs.release_id }}/
create-pr:
name: Open PR for ${{ matrix.base-branch }}
# depends on `model` as a separate job so that the baseline can be committed to more than one branch
# i.e. release branch and main
needs: [set-variables, latest-runner, model]
runs-on: ubuntu-latest
strategy:
matrix:
include:
- base-branch: refs/heads/main
target-branch: performance-bot/main_${{ needs.set-variables.outputs.release_id }}_${{GITHUB.RUN_ID}}
- base-branch: refs/heads/${{ needs.set-variables.outputs.release_branch }}
target-branch: performance-bot/release_${{ needs.set-variables.outputs.release_id }}_${{GITHUB.RUN_ID}}
steps:
- name: '[DEBUG] print variables'
run: |
echo "all variables defined in set-variables"
echo "cache_key: ${{ needs.set-variables.outputs.cache_key }}"
echo "release_id: ${{ needs.set-variables.outputs.release_id }}"
echo "release_branch: ${{ needs.set-variables.outputs.release_branch }}"
- name: Checkout
uses: actions/checkout@v3
with:
ref: ${{ matrix.base-branch }}
- name: Create PR branch
run: |
git checkout -b ${{ matrix.target-branch }}
git push origin ${{ matrix.target-branch }}
git branch --set-upstream-to=origin/${{ matrix.target-branch }} ${{ matrix.target-branch }}
- uses: actions/download-artifact@v3
with:
name: baseline
path: performance/baselines/${{ needs.set-variables.outputs.release_id }}
- name: '[DEBUG] ls baselines after artifact download'
run: ls -R performance/baselines/
- name: Commit baseline
uses: EndBug/add-and-commit@v9
with:
add: 'performance/baselines/*'
author_name: 'Github Build Bot'
author_email: 'buildbot@fishtownanalytics.com'
message: 'adding performance baseline for ${{ needs.set-variables.outputs.release_id }}'
push: 'origin origin/${{ matrix.target-branch }}'
- name: Create Pull Request
uses: peter-evans/create-pull-request@v5
with:
author: 'Github Build Bot <buildbot@fishtownanalytics.com>'
base: ${{ matrix.base-branch }}
branch: '${{ matrix.target-branch }}'
title: 'Adding performance modeling for ${{needs.set-variables.outputs.release_id}} to ${{ matrix.base-branch }}'
body: 'Committing perf results for tracking for the ${{needs.set-variables.outputs.release_id}}'
labels: |
Skip Changelog
Performance

View File

@@ -1,109 +0,0 @@
# **what?**
# Nightly releases to GitHub and PyPI. This workflow produces the following outcomes:
# - generate and validate data for the nightly release (commit SHA, version number, release branch);
# - pass data to the release workflow;
# - the nightly release is pushed to GitHub as a draft release;
# - the nightly build is pushed to test PyPI;
#
# **why?**
# Ensure an automated and tested release process for nightly builds
#
# **when?**
# This workflow runs on schedule or can be run manually on demand.
name: Nightly Test Release to GitHub and PyPI
on:
workflow_dispatch: # for manual triggering
schedule:
- cron: 0 9 * * *
permissions:
contents: write # this is the permission that allows creating a new release
defaults:
run:
shell: bash
env:
RELEASE_BRANCH: "main"
jobs:
aggregate-release-data:
runs-on: ubuntu-latest
outputs:
commit_sha: ${{ steps.resolve-commit-sha.outputs.release_commit }}
version_number: ${{ steps.nightly-release-version.outputs.number }}
release_branch: ${{ steps.release-branch.outputs.name }}
steps:
- name: "Checkout ${{ github.repository }} Branch ${{ env.RELEASE_BRANCH }}"
uses: actions/checkout@v3
with:
ref: ${{ env.RELEASE_BRANCH }}
- name: "Resolve Commit To Release"
id: resolve-commit-sha
run: |
commit_sha=$(git rev-parse HEAD)
echo "release_commit=$commit_sha" >> $GITHUB_OUTPUT
- name: "Get Current Version Number"
id: version-number-sources
run: |
current_version=`awk -F"current_version = " '{print $2}' .bumpversion.cfg | tr '\n' ' '`
echo "current_version=$current_version" >> $GITHUB_OUTPUT
- name: "Audit Version And Parse Into Parts"
id: semver
uses: dbt-labs/actions/parse-semver@v1.1.0
with:
version: ${{ steps.version-number-sources.outputs.current_version }}
- name: "Get Current Date"
id: current-date
run: echo "date=$(date +'%m%d%Y')" >> $GITHUB_OUTPUT
- name: "Generate Nightly Release Version Number"
id: nightly-release-version
run: |
number="${{ steps.semver.outputs.version }}.dev${{ steps.current-date.outputs.date }}"
echo "number=$number" >> $GITHUB_OUTPUT
- name: "Audit Nightly Release Version And Parse Into Parts"
uses: dbt-labs/actions/parse-semver@v1.1.0
with:
version: ${{ steps.nightly-release-version.outputs.number }}
- name: "Set Release Branch"
id: release-branch
run: |
echo "name=${{ env.RELEASE_BRANCH }}" >> $GITHUB_OUTPUT
log-outputs-aggregate-release-data:
runs-on: ubuntu-latest
needs: [aggregate-release-data]
steps:
- name: "[DEBUG] Log Outputs"
run: |
echo commit_sha : ${{ needs.aggregate-release-data.outputs.commit_sha }}
echo version_number: ${{ needs.aggregate-release-data.outputs.version_number }}
echo release_branch: ${{ needs.aggregate-release-data.outputs.release_branch }}
release-github-pypi:
needs: [aggregate-release-data]
uses: ./.github/workflows/release.yml
with:
sha: ${{ needs.aggregate-release-data.outputs.commit_sha }}
target_branch: ${{ needs.aggregate-release-data.outputs.release_branch }}
version_number: ${{ needs.aggregate-release-data.outputs.version_number }}
build_script_path: "scripts/build-dist.sh"
env_setup_script_path: "scripts/env-setup.sh"
s3_bucket_name: "core-team-artifacts"
package_test_command: "dbt --version"
test_run: true
nightly_release: true
secrets: inherit

176
.github/workflows/performance.yml vendored Normal file
View File

@@ -0,0 +1,176 @@
name: Performance Regression Tests
# Schedule triggers
on:
# runs twice a day at 10:05am and 10:05pm
schedule:
- cron: "5 10,22 * * *"
# Allows you to run this workflow manually from the Actions tab
workflow_dispatch:
jobs:
# checks fmt of runner code
# purposefully not a dependency of any other job
# will block merging, but not prevent developing
fmt:
name: Cargo fmt
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
override: true
- run: rustup component add rustfmt
- uses: actions-rs/cargo@v1
with:
command: fmt
args: --manifest-path performance/runner/Cargo.toml --all -- --check
# runs any tests associated with the runner
# these tests make sure the runner logic is correct
test-runner:
name: Test Runner
runs-on: ubuntu-latest
env:
# turns warnings into errors
RUSTFLAGS: "-D warnings"
steps:
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
override: true
- uses: actions-rs/cargo@v1
with:
command: test
args: --manifest-path performance/runner/Cargo.toml
# build an optimized binary to be used as the runner in later steps
build-runner:
needs: [test-runner]
name: Build Runner
runs-on: ubuntu-latest
env:
RUSTFLAGS: "-D warnings"
steps:
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
override: true
- uses: actions-rs/cargo@v1
with:
command: build
args: --release --manifest-path performance/runner/Cargo.toml
- uses: actions/upload-artifact@v2
with:
name: runner
path: performance/runner/target/release/runner
# run the performance measurements on the current or default branch
measure-dev:
needs: [build-runner]
name: Measure Dev Branch
runs-on: ubuntu-latest
steps:
- name: checkout dev
uses: actions/checkout@v2
- name: Setup Python
uses: actions/setup-python@v2.2.2
with:
python-version: "3.8"
- name: install dbt
run: pip install -r dev-requirements.txt -r editable-requirements.txt
- name: install hyperfine
run: wget https://github.com/sharkdp/hyperfine/releases/download/v1.11.0/hyperfine_1.11.0_amd64.deb && sudo dpkg -i hyperfine_1.11.0_amd64.deb
- uses: actions/download-artifact@v2
with:
name: runner
- name: change permissions
run: chmod +x ./runner
- name: run
run: ./runner measure -b dev -p ${{ github.workspace }}/performance/projects/
- uses: actions/upload-artifact@v2
with:
name: dev-results
path: performance/results/
# run the performance measurements on the release branch which we use
# as a performance baseline. This part takes by far the longest, so
# we do everything we can first so the job fails fast.
# -----
# we need to check out dbt twice in this job: once for the baseline dbt
# version, and once to get the latest regression testing projects,
# metrics, and runner code from the develop or current branch so that
# the calculations match for both versions of dbt we are comparing.
measure-baseline:
needs: [build-runner]
name: Measure Baseline Branch
runs-on: ubuntu-latest
steps:
- name: checkout latest
uses: actions/checkout@v2
with:
ref: "0.20.latest"
- name: Setup Python
uses: actions/setup-python@v2.2.2
with:
python-version: "3.8"
- name: move repo up a level
run: mkdir ${{ github.workspace }}/../baseline/ && cp -r ${{ github.workspace }} ${{ github.workspace }}/../baseline
- name: "[debug] ls new dbt location"
run: ls ${{ github.workspace }}/../baseline/dbt/
# installation creates egg-links so we have to preserve source
- name: install dbt from new location
run: cd ${{ github.workspace }}/../baseline/dbt/ && pip install -r dev-requirements.txt -r editable-requirements.txt
# checkout the current branch to get all the target projects
# this deletes the old checked out code which is why we had to copy before
- name: checkout dev
uses: actions/checkout@v2
- name: install hyperfine
run: wget https://github.com/sharkdp/hyperfine/releases/download/v1.11.0/hyperfine_1.11.0_amd64.deb && sudo dpkg -i hyperfine_1.11.0_amd64.deb
- uses: actions/download-artifact@v2
with:
name: runner
- name: change permissions
run: chmod +x ./runner
- name: run runner
run: ./runner measure -b baseline -p ${{ github.workspace }}/performance/projects/
- uses: actions/upload-artifact@v2
with:
name: baseline-results
path: performance/results/
# detect regressions on the output generated from measuring
# the two branches. Exits with non-zero code if a regression is detected.
calculate-regressions:
needs: [measure-dev, measure-baseline]
name: Compare Results
runs-on: ubuntu-latest
steps:
- uses: actions/download-artifact@v2
with:
name: dev-results
- uses: actions/download-artifact@v2
with:
name: baseline-results
- name: "[debug] ls result files"
run: ls
- uses: actions/download-artifact@v2
with:
name: runner
- name: change permissions
run: chmod +x ./runner
- name: make results directory
run: mkdir ./final-output/
- name: run calculation
run: ./runner calculate -r ./ -o ./final-output/
# always attempt to upload the results even if there were regressions found
- uses: actions/upload-artifact@v2
if: ${{ always() }}
with:
name: final-calculations
path: ./final-output/*
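For local debugging, a rough sketch of the same measure-and-compare flow outside of CI is below. It assumes the runner is built from performance/runner, that `measure` writes its output under performance/results/ (as the artifact uploads above suggest), and that a baseline measurement from a release-branch checkout is also produced before `calculate` runs:
- name: Measure and compare locally (sketch)
  run: |
    cargo build --release --manifest-path performance/runner/Cargo.toml
    cp performance/runner/target/release/runner ./runner && chmod +x ./runner
    ./runner measure -b dev -p "$PWD/performance/projects/"
    # ...repeat the measurement from a baseline checkout before calculating...
    mkdir -p ./final-output/
    ./runner calculate -r ./performance/results/ -o ./final-output/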

View File

@@ -1,31 +0,0 @@
# **what?**
# The purpose of this workflow is to trigger CI to run for each
# release branch and main branch on a regular cadence. If the CI workflow
# fails for a branch, it will post to #dev-core-alerts to raise awareness.
# **why?**
# Ensures release branches and main are always shippable and not broken.
# It can also catch any dependencies shifting beneath us that might
# introduce breaking changes (could also impact Cloud).
# **when?**
# Mainly on a schedule of 9:00, 13:00, 18:00 UTC every day.
# Manual trigger can also test on demand
name: Release branch scheduled testing
on:
schedule:
- cron: '0 9,13,18 * * *' # 9:00, 13:00, 18:00 UTC
workflow_dispatch: # for manual triggering
# no special access is needed
permissions: read-all
jobs:
run_tests:
uses: dbt-labs/actions/.github/workflows/release-branch-tests.yml@main
with:
workflows_to_run: '["main.yml"]'
secrets: inherit

View File

@@ -1,118 +0,0 @@
# **what?**
# This workflow will generate a series of docker images for dbt and push them to the github container registry
# **why?**
# Docker images for dbt are used in a number of important places throughout the dbt ecosystem. This is how we keep those images up-to-date.
# **when?**
# This is triggered manually
# **next steps**
# - build this into the release workflow (or conversely, break out the different release methods into their own workflow files)
name: Docker release
permissions:
packages: write
on:
workflow_dispatch:
inputs:
package:
description: The package to release. _One_ of [dbt-core, dbt-redshift, dbt-bigquery, dbt-snowflake, dbt-spark, dbt-postgres]
required: true
version_number:
description: The release version number (i.e. 1.0.0b1). Do not include `latest` tags or a leading `v`!
required: true
jobs:
get_version_meta:
name: Get version meta
runs-on: ubuntu-latest
outputs:
major: ${{ steps.version.outputs.major }}
minor: ${{ steps.version.outputs.minor }}
patch: ${{ steps.version.outputs.patch }}
latest: ${{ steps.latest.outputs.latest }}
minor_latest: ${{ steps.latest.outputs.minor_latest }}
steps:
- uses: actions/checkout@v3
- name: Split version
id: version
run: |
IFS="." read -r MAJOR MINOR PATCH <<< ${{ github.event.inputs.version_number }}
echo "major=$MAJOR" >> $GITHUB_OUTPUT
echo "minor=$MINOR" >> $GITHUB_OUTPUT
echo "patch=$PATCH" >> $GITHUB_OUTPUT
- name: Is pkg 'latest'
id: latest
uses: ./.github/actions/latest-wrangler
with:
package: ${{ github.event.inputs.package }}
new_version: ${{ github.event.inputs.version_number }}
gh_token: ${{ secrets.GITHUB_TOKEN }}
halt_on_missing: False
setup_image_builder:
name: Set up docker image builder
runs-on: ubuntu-latest
needs: [get_version_meta]
steps:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
build_and_push:
name: Build images and push to GHCR
runs-on: ubuntu-latest
needs: [setup_image_builder, get_version_meta]
steps:
- name: Get docker build arg
id: build_arg
run: |
BUILD_ARG_NAME=$(echo ${{ github.event.inputs.package }} | sed 's/\-/_/g')
BUILD_ARG_VALUE=$(echo ${{ github.event.inputs.package }} | sed 's/postgres/core/g')
echo "build_arg_name=$BUILD_ARG_NAME" >> $GITHUB_OUTPUT
echo "build_arg_value=$BUILD_ARG_VALUE" >> $GITHUB_OUTPUT
- name: Log in to the GHCR
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build and push MAJOR.MINOR.PATCH tag
uses: docker/build-push-action@v4
with:
file: docker/Dockerfile
push: True
target: ${{ github.event.inputs.package }}
build-args: |
${{ steps.build_arg.outputs.build_arg_name }}_ref=${{ steps.build_arg.outputs.build_arg_value }}@v${{ github.event.inputs.version_number }}
tags: |
ghcr.io/dbt-labs/${{ github.event.inputs.package }}:${{ github.event.inputs.version_number }}
- name: Build and push MINOR.latest tag
uses: docker/build-push-action@v4
if: ${{ needs.get_version_meta.outputs.minor_latest == 'True' }}
with:
file: docker/Dockerfile
push: True
target: ${{ github.event.inputs.package }}
build-args: |
${{ steps.build_arg.outputs.build_arg_name }}_ref=${{ steps.build_arg.outputs.build_arg_value }}@v${{ github.event.inputs.version_number }}
tags: |
ghcr.io/dbt-labs/${{ github.event.inputs.package }}:${{ needs.get_version_meta.outputs.major }}.${{ needs.get_version_meta.outputs.minor }}.latest
- name: Build and push latest tag
uses: docker/build-push-action@v4
if: ${{ needs.get_version_meta.outputs.latest == 'True' }}
with:
file: docker/Dockerfile
push: True
target: ${{ github.event.inputs.package }}
build-args: |
${{ steps.build_arg.outputs.build_arg_name }}_ref=${{ steps.build_arg.outputs.build_arg_value }}@v${{ github.event.inputs.version_number }}
tags: |
ghcr.io/dbt-labs/${{ github.event.inputs.package }}:latest

View File

@@ -1,229 +1,200 @@
# **what?**
# Release workflow provides the following steps:
# - checkout the given commit;
# - validate version in sources and changelog file for given version;
# - bump the version and generate a changelog if needed;
# - merge all changes to the target branch if needed;
# - run unit and integration tests against given commit;
# - build and package that SHA;
# - release it to GitHub and PyPI with that specific build;
#
# Take the given commit, run unit tests specifically on that sha, build and
# package it, and then release to GitHub and PyPI with that specific build
# **why?**
# Ensure an automated and tested release process
#
# **when?**
# This workflow can be run manually on demand or can be called by other workflows
name: Release to GitHub and PyPI
# **when?**
# This will only run manually with a given sha and version
name: Release to GitHub and PyPi
on:
workflow_dispatch:
inputs:
sha:
description: "The last commit sha in the release"
type: string
required: true
target_branch:
description: "The branch to release from"
type: string
required: true
description: 'The last commit sha in the release'
required: true
version_number:
description: "The release version number (i.e. 1.0.0b1)"
type: string
required: true
build_script_path:
description: "Build script path"
type: string
default: "scripts/build-dist.sh"
required: true
env_setup_script_path:
description: "Environment setup script path"
type: string
default: "scripts/env-setup.sh"
required: false
s3_bucket_name:
description: "AWS S3 bucket name"
type: string
default: "core-team-artifacts"
required: true
package_test_command:
description: "Package test command"
type: string
default: "dbt --version"
required: true
test_run:
description: "Test run (Publish release as draft)"
type: boolean
default: true
required: false
nightly_release:
description: "Nightly release to dev environment"
type: boolean
default: false
required: false
workflow_call:
inputs:
sha:
description: "The last commit sha in the release"
type: string
required: true
target_branch:
description: "The branch to release from"
type: string
required: true
version_number:
description: "The release version number (i.e. 1.0.0b1)"
type: string
required: true
build_script_path:
description: "Build script path"
type: string
default: "scripts/build-dist.sh"
required: true
env_setup_script_path:
description: "Environment setup script path"
type: string
default: "scripts/env-setup.sh"
required: false
s3_bucket_name:
description: "AWS S3 bucket name"
type: string
default: "core-team-artifacts"
required: true
package_test_command:
description: "Package test command"
type: string
default: "dbt --version"
required: true
test_run:
description: "Test run (Publish release as draft)"
type: boolean
default: true
required: false
nightly_release:
description: "Nightly release to dev environment"
type: boolean
default: false
required: false
permissions:
contents: write # this is the permission that allows creating a new release
description: 'The release version number (i.e. 1.0.0b1)'
required: true
defaults:
run:
shell: bash
jobs:
log-inputs:
name: Log Inputs
unit:
name: Unit test
runs-on: ubuntu-latest
env:
TOXENV: "unit"
steps:
- name: "[DEBUG] Print Variables"
- name: Check out the repository
uses: actions/checkout@v2
with:
persist-credentials: false
ref: ${{ github.event.inputs.sha }}
- name: Set up Python
uses: actions/setup-python@v2
with:
python-version: 3.8
- name: Install python dependencies
run: |
echo The last commit sha in the release: ${{ inputs.sha }}
echo The branch to release from: ${{ inputs.target_branch }}
echo The release version number: ${{ inputs.version_number }}
echo Build script path: ${{ inputs.build_script_path }}
echo Environment setup script path: ${{ inputs.env_setup_script_path }}
echo AWS S3 bucket name: ${{ inputs.s3_bucket_name }}
echo Package test command: ${{ inputs.package_test_command }}
echo Test run: ${{ inputs.test_run }}
echo Nightly release: ${{ inputs.nightly_release }}
pip install --user --upgrade pip
pip install tox
pip --version
tox --version
bump-version-generate-changelog:
name: Bump package version, Generate changelog
- name: Run tox
run: tox
uses: dbt-labs/dbt-release/.github/workflows/release-prep.yml@main
with:
sha: ${{ inputs.sha }}
version_number: ${{ inputs.version_number }}
target_branch: ${{ inputs.target_branch }}
env_setup_script_path: ${{ inputs.env_setup_script_path }}
test_run: ${{ inputs.test_run }}
nightly_release: ${{ inputs.nightly_release }}
secrets: inherit
log-outputs-bump-version-generate-changelog:
name: "[Log output] Bump package version, Generate changelog"
if: ${{ !failure() && !cancelled() }}
needs: [bump-version-generate-changelog]
build:
name: build packages
runs-on: ubuntu-latest
steps:
- name: Print variables
- name: Check out the repository
uses: actions/checkout@v2
with:
persist-credentials: false
ref: ${{ github.event.inputs.sha }}
- name: Set up Python
uses: actions/setup-python@v2
with:
python-version: 3.8
- name: Install python dependencies
run: |
echo Final SHA : ${{ needs.bump-version-generate-changelog.outputs.final_sha }}
echo Changelog path: ${{ needs.bump-version-generate-changelog.outputs.changelog_path }}
pip install --user --upgrade pip
pip install --upgrade setuptools wheel twine check-wheel-contents
pip --version
build-test-package:
name: Build, Test, Package
if: ${{ !failure() && !cancelled() }}
needs: [bump-version-generate-changelog]
- name: Build distributions
run: ./scripts/build-dist.sh
uses: dbt-labs/dbt-release/.github/workflows/build.yml@main
- name: Show distributions
run: ls -lh dist/
with:
sha: ${{ needs.bump-version-generate-changelog.outputs.final_sha }}
version_number: ${{ inputs.version_number }}
changelog_path: ${{ needs.bump-version-generate-changelog.outputs.changelog_path }}
build_script_path: ${{ inputs.build_script_path }}
s3_bucket_name: ${{ inputs.s3_bucket_name }}
package_test_command: ${{ inputs.package_test_command }}
test_run: ${{ inputs.test_run }}
nightly_release: ${{ inputs.nightly_release }}
- name: Check distribution descriptions
run: |
twine check dist/*
- name: Check wheel contents
run: |
check-wheel-contents dist/*.whl --ignore W007,W008
- uses: actions/upload-artifact@v2
with:
name: dist
path: |
dist/
!dist/dbt-${{github.event.inputs.version_number}}.tar.gz
test-build:
name: verify packages
needs: [build, unit]
runs-on: ubuntu-latest
steps:
- name: Set up Python
uses: actions/setup-python@v2
with:
python-version: 3.8
- name: Install python dependencies
run: |
pip install --user --upgrade pip
pip install --upgrade wheel
pip --version
- uses: actions/download-artifact@v2
with:
name: dist
path: dist/
- name: Show distributions
run: ls -lh dist/
- name: Install wheel distributions
run: |
find ./dist/*.whl -maxdepth 1 -type f | xargs pip install --force-reinstall --find-links=dist/
- name: Check wheel distributions
run: |
dbt --version
- name: Install source distributions
run: |
find ./dist/*.gz -maxdepth 1 -type f | xargs pip install --force-reinstall --find-links=dist/
- name: Check source distributions
run: |
dbt --version
secrets:
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
github-release:
name: GitHub Release
if: ${{ !failure() && !cancelled() }}
name: GitHub Release
needs: [bump-version-generate-changelog, build-test-package]
needs: test-build
uses: dbt-labs/dbt-release/.github/workflows/github-release.yml@main
runs-on: ubuntu-latest
with:
sha: ${{ needs.bump-version-generate-changelog.outputs.final_sha }}
version_number: ${{ inputs.version_number }}
changelog_path: ${{ needs.bump-version-generate-changelog.outputs.changelog_path }}
test_run: ${{ inputs.test_run }}
steps:
- uses: actions/download-artifact@v2
with:
name: dist
path: '.'
# Need to set an output variable because env variables can't be taken as input
# This is needed for the next step with releasing to GitHub
- name: Find release type
id: release_type
env:
IS_PRERELEASE: ${{ contains(github.event.inputs.version_number, 'rc') || contains(github.event.inputs.version_number, 'b') }}
run: |
echo ::set-output name=isPrerelease::$IS_PRERELEASE
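A quick worked example of the prerelease check above, with illustrative version strings:
# version_number = 1.0.1    -> IS_PRERELEASE = false (final release)
# version_number = 1.2.0b1  -> IS_PRERELEASE = true  (contains 'b')
# version_number = 1.2.0rc1 -> IS_PRERELEASE = true  (contains 'rc')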
- name: Creating GitHub Release
uses: softprops/action-gh-release@v1
with:
name: dbt-core v${{github.event.inputs.version_number}}
tag_name: v${{github.event.inputs.version_number}}
prerelease: ${{ steps.release_type.outputs.isPrerelease }}
target_commitish: ${{github.event.inputs.sha}}
body: |
[Release notes](https://github.com/dbt-labs/dbt-core/blob/main/CHANGELOG.md)
files: |
dbt_postgres-${{github.event.inputs.version_number}}-py3-none-any.whl
dbt_core-${{github.event.inputs.version_number}}-py3-none-any.whl
dbt-postgres-${{github.event.inputs.version_number}}.tar.gz
dbt-core-${{github.event.inputs.version_number}}.tar.gz
pypi-release:
name: PyPI Release
name: Pypi release
needs: [github-release]
runs-on: ubuntu-latest
uses: dbt-labs/dbt-release/.github/workflows/pypi-release.yml@main
needs: github-release
with:
version_number: ${{ inputs.version_number }}
test_run: ${{ inputs.test_run }}
secrets:
PYPI_API_TOKEN: ${{ secrets.PYPI_API_TOKEN }}
TEST_PYPI_API_TOKEN: ${{ secrets.TEST_PYPI_API_TOKEN }}
slack-notification:
name: Slack Notification
if: ${{ failure() && (!inputs.test_run || inputs.nightly_release) }}
needs:
[
bump-version-generate-changelog,
build-test-package,
github-release,
pypi-release,
]
uses: dbt-labs/dbt-release/.github/workflows/slack-post-notification.yml@main
with:
status: "failure"
secrets:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_DEV_CORE_ALERTS }}
environment: PypiProd
steps:
- uses: actions/download-artifact@v2
with:
name: dist
path: 'dist'
- name: Publish distribution to PyPI
uses: pypa/gh-action-pypi-publish@v1.4.2
with:
password: ${{ secrets.PYPI_API_TOKEN }}

View File

@@ -1,5 +1,5 @@
# **what?**
# Compares the schema of the dbt version of the given ref vs
# Compares the schema of the dbt version of the given ref vs
# the latest official schema releases found in schemas.getdbt.com.
# If there are differences, the workflow will fail and upload the
# diff as an artifact. The metadata team should be alerted to the change.
@@ -21,9 +21,6 @@ on:
- "*.latest"
- "releases/*"
# no special access is needed
permissions: read-all
env:
LATEST_SCHEMA_PATH: ${{ github.workspace }}/new_schemas
SCHEMA_DIFF_ARTIFACT: ${{ github.workspace }}//schema_schanges.txt
@@ -37,23 +34,23 @@ jobs:
steps:
- name: Set up Python
uses: actions/setup-python@v4
uses: actions/setup-python@v2
with:
python-version: 3.8
- name: Checkout dbt repo
uses: actions/checkout@v3
uses: actions/checkout@v2.3.4
with:
path: ${{ env.DBT_REPO_DIRECTORY }}
- name: Checkout schemas.getdbt.com repo
uses: actions/checkout@v3
with:
uses: actions/checkout@v2.3.4
with:
repository: dbt-labs/schemas.getdbt.com
ref: 'main'
ssh-key: ${{ secrets.SCHEMA_SSH_PRIVATE_KEY }}
path: ${{ env.SCHEMA_REPO_DIRECTORY }}
- name: Generate current schema
run: |
cd ${{ env.DBT_REPO_DIRECTORY }}
@@ -62,7 +59,7 @@ jobs:
pip install --upgrade pip
pip install -r dev-requirements.txt -r editable-requirements.txt
python scripts/collect-artifact-schema.py --path ${{ env.LATEST_SCHEMA_PATH }}
# Copy generated schema files into the schemas.getdbt.com repo
# Do a git diff to find any changes
# Ignore any date or version changes though
@@ -83,7 +80,7 @@ jobs:
fi
- name: Upload schema diff
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v2.2.4
if: ${{ failure() }}
with:
name: 'schema_schanges.txt'

View File

@@ -3,10 +3,16 @@ on:
schedule:
- cron: "30 1 * * *"
permissions:
issues: write
pull-requests: write
jobs:
stale:
uses: dbt-labs/actions/.github/workflows/stale-bot-matrix.yml@main
runs-on: ubuntu-latest
steps:
# pinned at v4 (https://github.com/actions/stale/releases/tag/v4.0.0)
- uses: actions/stale@cdf15f641adb27a71842045a94023bef6945e3aa
with:
stale-issue-message: "This issue has been marked as Stale because it has been open for 180 days with no activity. If you would like the issue to remain open, please remove the stale label or comment on the issue, or it will be closed in 7 days."
stale-pr-message: "This PR has been marked as Stale because it has been open for 180 days with no activity. If you would like the PR to remain open, please remove the stale label or comment on the PR, or it will be closed in 7 days."
# mark issues/PRs stale when they haven't seen activity in 180 days
days-before-stale: 180
# ignore checking issues with the following labels
exempt-issue-labels: "epic,discussion"

View File

@@ -1,11 +1,12 @@
# This Action makes a dbt run to sample JSON structured logs
# and checks that they conform to the currently documented schema.
#
#
# If this action fails it either means we have unintentionally deviated
# from our documented structured logging schema, or we need to bump the
# version of our structured logging and add new documentation to
# communicate these changes.
name: Structured Logging Schema Check
on:
push:
@@ -22,38 +23,34 @@ jobs:
# run the performance measurements on the current or default branch
test-schema:
name: Test Log Schema
runs-on: ubuntu-20.04
runs-on: ubuntu-latest
env:
# turns warnings into errors
RUSTFLAGS: "-D warnings"
# points tests to the log file
LOG_DIR: "/home/runner/work/dbt-core/dbt-core/logs"
# tells integration tests to output into json format
DBT_LOG_FORMAT: "json"
# tell eventmgr to convert logging events into bytes
DBT_TEST_BINARY_SERIALIZATION: "true"
# Additional test users
DBT_TEST_USER_1: dbt_test_user_1
DBT_TEST_USER_2: dbt_test_user_2
DBT_TEST_USER_3: dbt_test_user_3
DBT_LOG_FORMAT: 'json'
steps:
- name: checkout dev
uses: actions/checkout@v3
uses: actions/checkout@v2
with:
persist-credentials: false
- name: Setup Python
uses: actions/setup-python@v4
uses: actions/setup-python@v2.2.2
with:
python-version: "3.8"
- name: Install python dependencies
run: |
pip install --user --upgrade pip
pip --version
pip install tox
tox --version
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
override: true
- name: install dbt
run: pip install -r dev-requirements.txt -r editable-requirements.txt
- name: Set up postgres
uses: ./.github/actions/setup-postgres-linux
@@ -64,4 +61,11 @@ jobs:
# integration tests generate a ton of logs in different files. the next step will find them all.
# we actually care if these pass, because the normal test run doesn't usually include many json log outputs
- name: Run integration tests
run: tox -e integration -- -nauto
run: tox -e py38-postgres -- -nauto
# apply our schema tests to every log event from the previous step
# skips any output that isn't valid json
- uses: actions-rs/cargo@v1
with:
command: run
args: --manifest-path test/interop/log_parsing/Cargo.toml

View File

@@ -1,155 +0,0 @@
# **what?**
# This workflow runs the test(s) at the input path a given number of times to determine whether they are flaky. You can test with any supported OS/Python combination.
# Runs are split into 10 batches so more test iterations can complete faster.
# **why?**
# To check whether a test is flaky, and whether a previously flaky test has been fixed. This allows easy testing across supported python versions and OS combinations.
# **when?**
# This is triggered manually from dbt-core.
name: Flaky Tester
on:
workflow_dispatch:
inputs:
branch:
description: 'Branch to check out'
type: string
required: true
default: 'main'
test_path:
description: 'Path to single test to run (ex: tests/functional/retry/test_retry.py::TestRetry::test_fail_fast)'
type: string
required: true
default: 'tests/functional/...'
python_version:
description: 'Version of Python to Test Against'
type: choice
options:
- '3.8'
- '3.9'
- '3.10'
- '3.11'
os:
description: 'OS to run test in'
type: choice
options:
- 'ubuntu-latest'
- 'macos-latest'
- 'windows-latest'
num_runs_per_batch:
description: 'Max number of times to run the test per batch. We always run 10 batches.'
type: number
required: true
default: '50'
permissions: read-all
defaults:
run:
shell: bash
jobs:
debug:
runs-on: ubuntu-latest
steps:
- name: "[DEBUG] Output Inputs"
run: |
echo "Branch: ${{ inputs.branch }}"
echo "test_path: ${{ inputs.test_path }}"
echo "python_version: ${{ inputs.python_version }}"
echo "os: ${{ inputs.os }}"
echo "num_runs_per_batch: ${{ inputs.num_runs_per_batch }}"
pytest:
runs-on: ${{ inputs.os }}
strategy:
# run all batches, even if one fails. This informs how flaky the test may be.
fail-fast: false
# using a matrix to speed up the jobs since the matrix will run in parallel when runners are available
matrix:
batch: ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"]
env:
PYTEST_ADDOPTS: "-v --color=yes -n4 --csv integration_results.csv"
DBT_TEST_USER_1: dbt_test_user_1
DBT_TEST_USER_2: dbt_test_user_2
DBT_TEST_USER_3: dbt_test_user_3
DD_CIVISIBILITY_AGENTLESS_ENABLED: true
DD_API_KEY: ${{ secrets.DATADOG_API_KEY }}
DD_SITE: datadoghq.com
DD_ENV: ci
DD_SERVICE: ${{ github.event.repository.name }}
steps:
- name: "Checkout code"
uses: actions/checkout@v3
with:
ref: ${{ inputs.branch }}
- name: "Setup Python"
uses: actions/setup-python@v4
with:
python-version: "${{ inputs.python_version }}"
- name: "Setup Dev Environment"
run: make dev
- name: "Set up postgres (linux)"
if: inputs.os == 'ubuntu-latest'
run: make setup-db
# macOS and Windows don't use make due to limitations with Docker on those GitHub-hosted runners
- name: "Set up postgres (macos)"
if: inputs.os == 'macos-latest'
uses: ./.github/actions/setup-postgres-macos
- name: "Set up postgres (windows)"
if: inputs.os == 'windows-latest'
uses: ./.github/actions/setup-postgres-windows
- name: "Test Command"
id: command
run: |
test_command="python -m pytest ${{ inputs.test_path }}"
echo "test_command=$test_command" >> $GITHUB_OUTPUT
- name: "Run test ${{ inputs.num_runs_per_batch }} times"
id: pytest
run: |
set +e
for ((i=1; i<=${{ inputs.num_runs_per_batch }}; i++))
do
echo "Running pytest iteration $i..."
python -m pytest --ddtrace ${{ inputs.test_path }}
exit_code=$?
if [[ $exit_code -eq 0 ]]; then
success=$((success + 1))
echo "Iteration $i: Success"
else
failure=$((failure + 1))
echo "Iteration $i: Failure"
fi
echo
echo "==========================="
echo "Successful runs: $success"
echo "Failed runs: $failure"
echo "==========================="
echo
done
echo "failure=$failure" >> $GITHUB_OUTPUT
- name: "Success and Failure Summary: ${{ inputs.os }}/Python ${{ inputs.python_version }}"
run: |
echo "Batch: ${{ matrix.batch }}"
echo "Successful runs: ${{ steps.pytest.outputs.success }}"
echo "Failed runs: ${{ steps.pytest.outputs.failure }}"
- name: "Error for Failures"
if: ${{ steps.pytest.outputs.failure }}
run: |
echo "Batch ${{ matrix.batch }} failed ${{ steps.pytest.outputs.failure }} of ${{ inputs.num_runs_per_batch }} tests"
exit 1
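One nit in the loop above: `success` and `failure` start out unset, and only `failure` is ever written to `$GITHUB_OUTPUT`, so the summary step's `steps.pytest.outputs.success` is always empty. A hedged sketch of the same step with both counters initialized and both outputs emitted:
- name: "Run test ${{ inputs.num_runs_per_batch }} times"
  id: pytest
  run: |
    set +e
    success=0
    failure=0
    for ((i=1; i<=${{ inputs.num_runs_per_batch }}; i++)); do
      echo "Running pytest iteration $i..."
      if python -m pytest --ddtrace ${{ inputs.test_path }}; then
        success=$((success + 1))
      else
        failure=$((failure + 1))
      fi
    done
    echo "success=$success" >> $GITHUB_OUTPUT
    echo "failure=$failure" >> $GITHUB_OUTPUT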

View File

@@ -1 +0,0 @@
-P ubuntu-latest=ghcr.io/catthehacker/ubuntu:act-latest

View File

@@ -1 +0,0 @@
.secrets

View File

@@ -1 +0,0 @@
GITHUB_TOKEN=GH_PERSONAL_ACCESS_TOKEN_GOES_HERE

View File

@@ -1,6 +0,0 @@
{
"inputs": {
"version_number": "1.0.1",
"package": "dbt-postgres"
}
}

View File

@@ -1,31 +0,0 @@
# **what?**
# When the core team triages, we sometimes need more information from the issue creator. In
# those cases we remove the `triage` label and add the `awaiting_response` label. Once we
# receive a response in the form of a comment, we want the `awaiting_response` label removed
# in favor of the `triage` label so we are aware that the issue needs action.
# **why?**
# To help with our team's triage issue tracking
# **when?**
# This will run when a comment is added to an issue and that issue has the `awaiting_response` label.
name: Update Triage Label
on: issue_comment
defaults:
run:
shell: bash
permissions:
issues: write
jobs:
triage_label:
if: contains(github.event.issue.labels.*.name, 'awaiting_response')
uses: dbt-labs/actions/.github/workflows/swap-labels.yml@main
with:
add_label: "triage"
remove_label: "awaiting_response"
secrets: inherit

View File

@@ -1,15 +1,18 @@
# **what?**
# This workflow will take the new version number to bump to. With that
# it will run bumpversion to update the version number everywhere in the
# code base and then run changie to create the corresponding changelog.
# A PR will be created with the changes that can be reviewed before committing.
# This workflow will take a version number and a dry run flag. With that
# it will run bumpversion to update the version number everywhere in the
# code base and then generate an updated Docker requirements file. If this
# is a dry run, a draft PR will open with the changes. If this isn't a dry
# run, the changes will be committed to the branch this is run on.
# **why?**
# This is to aid in releasing dbt and making sure we have updated
# the version in all places and generated the changelog.
# This is to aid in releasing dbt and making sure we have updated
# the versions and Docker requirements in all places.
# **when?**
# This is triggered manually
# This is triggered either manually OR
# from the repository_dispatch event "version-bump" which is sent from
# the dbt-release repo Action
name: Version Bump
@@ -17,12 +20,90 @@ on:
workflow_dispatch:
inputs:
version_number:
description: 'The version number to bump to (ex. 1.2.0, 1.3.0b1)'
description: 'The version number to bump to'
required: true
is_dry_run:
description: 'Creates a draft PR to allow testing instead of committing to a branch'
required: true
default: 'true'
repository_dispatch:
types: [version-bump]
jobs:
version_bump_and_changie:
uses: dbt-labs/actions/.github/workflows/version-bump.yml@main
with:
version_number: ${{ inputs.version_number }}
secrets: inherit # ok since what we are calling is internally maintained
bump:
runs-on: ubuntu-latest
steps:
- name: Check out the repository
uses: actions/checkout@v2
- name: Set version and dry run values
id: variables
env:
VERSION_NUMBER: "${{ github.event.client_payload.version_number == '' && github.event.inputs.version_number || github.event.client_payload.version_number }}"
IS_DRY_RUN: "${{ github.event.client_payload.is_dry_run == '' && github.event.inputs.is_dry_run || github.event.client_payload.is_dry_run }}"
run: |
echo Repository dispatch event version: ${{ github.event.client_payload.version_number }}
echo Repository dispatch event dry run: ${{ github.event.client_payload.is_dry_run }}
echo Workflow dispatch event version: ${{ github.event.inputs.version_number }}
echo Workflow dispatch event dry run: ${{ github.event.inputs.is_dry_run }}
echo ::set-output name=VERSION_NUMBER::$VERSION_NUMBER
echo ::set-output name=IS_DRY_RUN::$IS_DRY_RUN
- uses: actions/setup-python@v2
with:
python-version: "3.8"
- name: Install python dependencies
run: |
python3 -m venv env
source env/bin/activate
pip install --upgrade pip
- name: Create PR branch
if: ${{ steps.variables.outputs.IS_DRY_RUN == 'true' }}
run: |
git checkout -b bumping-version/${{steps.variables.outputs.VERSION_NUMBER}}_$GITHUB_RUN_ID
git push origin bumping-version/${{steps.variables.outputs.VERSION_NUMBER}}_$GITHUB_RUN_ID
git branch --set-upstream-to=origin/bumping-version/${{steps.variables.outputs.VERSION_NUMBER}}_$GITHUB_RUN_ID bumping-version/${{steps.variables.outputs.VERSION_NUMBER}}_$GITHUB_RUN_ID
# - name: Generate Docker requirements
# run: |
# source env/bin/activate
# pip install -r requirements.txt
# pip freeze -l > docker/requirements/requirements.txt
# git status
- name: Bump version
run: |
source env/bin/activate
pip install -r dev-requirements.txt
env/bin/bumpversion --allow-dirty --new-version ${{steps.variables.outputs.VERSION_NUMBER}} major
git status
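For context on the bumpversion invocation above: because `--new-version` is passed explicitly, the `major` positional argument is only a required placeholder, and bumpversion rewrites every file tracked in `.bumpversion.cfg` to the supplied value. A hypothetical invocation:
# hypothetical example: set every tracked version string to 1.0.2
env/bin/bumpversion --allow-dirty --new-version 1.0.2 major
git status   # shows the files that were rewritten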
- name: Commit version bump directly
uses: EndBug/add-and-commit@v7
if: ${{ steps.variables.outputs.IS_DRY_RUN == 'false' }}
with:
author_name: 'Github Build Bot'
author_email: 'buildbot@fishtownanalytics.com'
message: 'Bumping version to ${{steps.variables.outputs.VERSION_NUMBER}}'
- name: Commit version bump to branch
uses: EndBug/add-and-commit@v7
if: ${{ steps.variables.outputs.IS_DRY_RUN == 'true' }}
with:
author_name: 'Github Build Bot'
author_email: 'buildbot@fishtownanalytics.com'
message: 'Bumping version to ${{steps.variables.outputs.VERSION_NUMBER}}'
branch: 'bumping-version/${{steps.variables.outputs.VERSION_NUMBER}}_${{GITHUB.RUN_ID}}'
push: 'origin origin/bumping-version/${{steps.variables.outputs.VERSION_NUMBER}}_${{GITHUB.RUN_ID}}'
- name: Create Pull Request
uses: peter-evans/create-pull-request@v3
if: ${{ steps.variables.outputs.IS_DRY_RUN == 'true' }}
with:
author: 'Github Build Bot <buildbot@fishtownanalytics.com>'
draft: true
base: ${{github.ref}}
title: 'Bumping version to ${{steps.variables.outputs.VERSION_NUMBER}}'
branch: 'bumping-version/${{steps.variables.outputs.VERSION_NUMBER}}_${{GITHUB.RUN_ID}}'

21
.gitignore vendored
View File

@@ -11,8 +11,6 @@ __pycache__/
env*/
dbt_env/
build/
!tests/functional/build
!core/dbt/docs/build
develop-eggs/
dist/
downloads/
@@ -26,11 +24,8 @@ var/
*.egg-info/
.installed.cfg
*.egg
.mypy_cache/
.dmypy.json
*.mypy_cache/
logs/
.user.yml
profiles.yml
# PyInstaller
# Usually these files are written by a python script from a template
@@ -54,9 +49,9 @@ coverage.xml
*,cover
.hypothesis/
test.env
makefile.test.env
*.pytest_cache/
# Mypy
.mypy_cache/
# Translations
*.mo
@@ -71,10 +66,10 @@ docs/_build/
# PyBuilder
target/
# Ipython Notebook
#Ipython Notebook
.ipynb_checkpoints
# Emacs
#Emacs
*~
# Sublime Text
@@ -83,7 +78,6 @@ target/
# Vim
*.sw*
# Pyenv
.python-version
# Vim
@@ -96,12 +90,7 @@ venv/
# AWS credentials
.aws/
# MacOS
.DS_Store
# vscode
.vscode/
*.code-workspace
# poetry
poetry.lock

View File

@@ -1,62 +0,0 @@
# Configuration for pre-commit hooks (see https://pre-commit.com/).
# Eventually the hooks described here will be run as tests before merging each PR.
exclude: ^(core/dbt/docs/build/|core/dbt/events/types_pb2.py)
# Force all unspecified python hooks to run python 3.8
default_language_version:
python: python3
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v3.2.0
hooks:
- id: check-yaml
args: [--unsafe]
- id: check-json
- id: end-of-file-fixer
- id: trailing-whitespace
exclude_types:
- "markdown"
- id: check-case-conflict
- repo: https://github.com/psf/black
rev: 22.3.0
hooks:
- id: black
- id: black
alias: black-check
stages: [manual]
args:
- "--check"
- "--diff"
- repo: https://github.com/pycqa/flake8
rev: 4.0.1
hooks:
- id: flake8
- id: flake8
alias: flake8-check
stages: [manual]
- repo: https://github.com/pre-commit/mirrors-mypy
rev: v1.3.0
hooks:
- id: mypy
# N.B.: Mypy is... a bit fragile.
#
# By using `language: system` we run this hook in the local
# environment instead of a pre-commit isolated one. This is needed
# to ensure mypy correctly parses the project.
# It may cause trouble
# in that it adds environmental variables out of our control to the
# mix. Unfortunately, there's nothing we can do about that, per pre-commit's
# author.
# See https://github.com/pre-commit/pre-commit/issues/730 for details.
args: [--show-error-codes]
files: ^core/dbt/
language: system
- id: mypy
alias: mypy-check
stages: [manual]
args: [--show-error-codes, --pretty]
files: ^core/dbt/
language: system


@@ -2,10 +2,8 @@ The core function of dbt is SQL compilation and execution. Users create projects
## dbt-core
Most of the python code in the repository is within the `core/dbt` directory.
- [`single python files`](core/dbt/README.md): A number of individual files, such as 'compilation.py' and 'exceptions.py'
Most of the python code in the repository is within the `core/dbt` directory. Currently the main subdirectories are:
The main subdirectories of core/dbt:
- [`adapters`](core/dbt/adapters/README.md): Define base classes for behavior that is likely to differ across databases
- [`clients`](core/dbt/clients/README.md): Interface with dependencies (agate, jinja) or across operating systems
- [`config`](core/dbt/config/README.md): Reconcile user-supplied configuration from connection profiles, project files, and Jinja macros
@@ -18,10 +16,6 @@ The main subdirectories of core/dbt:
- [`parser`](core/dbt/parser/README.md): Read project files, validate, construct python objects
- [`task`](core/dbt/task/README.md): Set forth the actions that dbt can perform when invoked
Legacy tests are found in the 'test' directory:
- [`unit tests`](core/dbt/test/unit/README.md): Unit tests
- [`integration tests`](core/dbt/test/integration/README.md): Integration tests
### Invoking dbt
The "tasks" map to top-level dbt commands. So `dbt run` => task.run.RunTask, etc. Some are more like abstract base classes (GraphRunnableTask, for example) but all the concrete types outside of task should map to tasks. Currently one executes at a time. The tasks kick off their “Runners” and those do execute in parallel. The parallelism is managed via a thread pool, in GraphRunnableTask.

CHANGELOG.md Executable file → Normal file

File diff suppressed because it is too large


@@ -1,30 +1,78 @@
# Contributing to `dbt-core`
`dbt-core` is open source software. It is what it is today because community members have opened issues, provided feedback, and [contributed to the knowledge loop](https://www.getdbt.com/dbt-labs/values/). Whether you are a seasoned open source contributor or a first-time committer, we welcome and encourage you to contribute code, documentation, ideas, or problem statements to this project.
# Contributing to `dbt`
1. [About this document](#about-this-document)
2. [Getting the code](#getting-the-code)
3. [Setting up an environment](#setting-up-an-environment)
4. [Running dbt-core in development](#running-dbt-core-in-development)
5. [Testing dbt-core](#testing)
6. [Debugging](#debugging)
7. [Adding or modifying a changelog entry](#adding-or-modifying-a-changelog-entry)
8. [Submitting a Pull Request](#submitting-a-pull-request)
2. [Proposing a change](#proposing-a-change)
3. [Getting the code](#getting-the-code)
4. [Setting up an environment](#setting-up-an-environment)
5. [Running `dbt` in development](#running-dbt-in-development)
6. [Testing](#testing)
7. [Submitting a Pull Request](#submitting-a-pull-request)
## About this document
There are many ways to contribute to the ongoing development of `dbt-core`, such as by participating in discussions and issues. We encourage you to first read our higher-level document: ["Expectations for Open Source Contributors"](https://docs.getdbt.com/docs/contributing/oss-expectations).
This document is a guide intended for folks interested in contributing to `dbt-core`. Below, we document the process by which members of the community should create issues and submit pull requests (PRs) in this repository. It is not intended as a guide for using `dbt-core`, and it assumes a certain level of familiarity with Python concepts such as virtualenvs, `pip`, python modules, filesystems, and so on. This guide assumes you are using macOS or Linux and are comfortable with the command line.
The rest of this document serves as a more granular guide for contributing code changes to `dbt-core` (this repository). It is not intended as a guide for using `dbt-core`, and some pieces assume a level of familiarity with Python development (virtualenvs, `pip`, etc). Specific code snippets in this guide assume you are using macOS or Linux and are comfortable with the command line.
If you're new to python development or contributing to open-source software, we encourage you to read this document from start to finish. If you get stuck, drop us a line in the `#dbt-core-development` channel on [slack](https://community.getdbt.com).
If you get stuck, we're happy to help! Drop us a line in the `#dbt-core-development` channel in the [dbt Community Slack](https://community.getdbt.com).
#### Adapters
### Notes
If you have an issue or code change suggestion related to a specific database [adapter](https://docs.getdbt.com/docs/available-adapters), please refer to that supported database's separate repository for those contributions.
- **Adapters:** Is your issue or proposed code change related to a specific [database adapter](https://docs.getdbt.com/docs/available-adapters)? If so, please open issues, PRs, and discussions in that adapter's repository instead. The sole exception is Postgres; the `dbt-postgres` plugin lives in this repository (`dbt-core`).
- **CLA:** Please note that anyone contributing code to `dbt-core` must sign the [Contributor License Agreement](https://docs.getdbt.com/docs/contributor-license-agreements). If you are unable to sign the CLA, the `dbt-core` maintainers will unfortunately be unable to merge any of your Pull Requests. We welcome you to participate in discussions, open issues, and comment on existing ones.
- **Branches:** All pull requests from community contributors should target the `main` branch (default). If the change is needed as a patch for a minor version of dbt that has already been released (or is already a release candidate), a maintainer will backport the changes in your PR to the relevant "latest" release branch (`1.0.latest`, `1.1.latest`, ...). If an issue fix applies to a release branch, that fix should be first committed to the development branch and then to the release branch (rarely release-branch fixes may not apply to `main`).
- **Releases**: Before releasing a new minor version of Core, we prepare a series of alphas and release candidates to allow users (especially employees of dbt Labs!) to test the new version in live environments. This is an important quality assurance step, as it exposes the new code to a wide variety of complicated deployments and can surface bugs before official release. Releases are accessible via pip, homebrew, and dbt Cloud.
### Signing the CLA
Please note that all contributors to `dbt-core` must sign the [Contributor License Agreement](https://docs.getdbt.com/docs/contributor-license-agreements) to have their Pull Request merged into the `dbt-core` codebase. If you are unable to sign the CLA, then the `dbt-core` maintainers will unfortunately be unable to merge your Pull Request. You are, however, welcome to open issues and comment on existing ones.
## Proposing a change
`dbt-core` is Apache 2.0-licensed open source software. `dbt-core` is what it is today because community members like you have opened issues, provided feedback, and contributed to the knowledge loop for the entire community. Whether you are a seasoned open source contributor or a first-time committer, we welcome and encourage you to contribute code, documentation, ideas, or problem statements to this project.
### Defining the problem
If you have an idea for a new feature or if you've discovered a bug in `dbt-core`, the first step is to open an issue. Please check the list of [open issues](https://github.com/dbt-labs/dbt-core/issues) before creating a new one. If you find a relevant issue, please add a comment to the open issue instead of creating a new one. There are hundreds of open issues in this repository and it can be hard to know where to look for a relevant open issue. **The `dbt-core` maintainers are always happy to point contributors in the right direction**, so please err on the side of documenting your idea in a new issue if you are unsure where a problem statement belongs.
> **Note:** All community-contributed Pull Requests _must_ be associated with an open issue. If you submit a Pull Request that does not pertain to an open issue, you will be asked to create an issue describing the problem before the Pull Request can be reviewed.
### Discussing the idea
After you open an issue, a `dbt-core` maintainer will follow up by commenting on your issue (usually within 1-3 days) to explore your idea further and advise on how to implement the suggested changes. In many cases, community members will chime in with their own thoughts on the problem statement. If you as the issue creator are interested in submitting a Pull Request to address the issue, you should indicate this in the body of the issue. The `dbt-core` maintainers are _always_ happy to help contributors with the implementation of fixes and features, so please also indicate if there's anything you're unsure about or could use guidance around in the issue.
### Submitting a change
If an issue is appropriately well scoped and describes a beneficial change to the `dbt-core` codebase, then anyone may submit a Pull Request to implement the functionality described in the issue. See the sections below on how to do this.
The `dbt-core` maintainers will add a `good first issue` label if an issue is suitable for a first-time contributor. This label often means that the required code change is small, limited to one database adapter, or a net-new addition that does not impact existing functionality. You can see the list of currently open issues on the [Contribute](https://github.com/dbt-labs/dbt-core/contribute) page.
Here's a good workflow:
- Comment on the open issue, expressing your interest in contributing the required code change
- Outline your planned implementation. If you want help getting started, ask!
- Follow the steps outlined below to develop locally. Once you have opened a PR, one of the `dbt-core` maintainers will work with you to review your code.
- Add a test! Tests are crucial for both fixes and new features alike. We want to make sure that code works as intended, and that it avoids any bugs previously encountered. Currently, the best resource for understanding `dbt-core`'s [unit](test/unit) and [integration](test/integration) tests is the tests themselves. One of the maintainers can help by pointing out relevant examples.
In some cases, the right resolution to an open issue might be tangential to the `dbt-core` codebase. The right path forward might be a documentation update or a change that can be made in user-space. In other cases, the issue might describe functionality that the `dbt-core` maintainers are unwilling or unable to incorporate into the `dbt-core` codebase. When it is determined that an open issue describes functionality that will not translate to a code change in the `dbt-core` repository, the issue will be tagged with the `wontfix` label (see below) and closed.
### Using issue labels
The `dbt-core` maintainers use labels to categorize open issues. Most labels describe the domain in the `dbt-core` codebase germane to the discussion.
| tag | description |
| --- | ----------- |
| [triage](https://github.com/dbt-labs/dbt-core/labels/triage) | This is a new issue which has not yet been reviewed by a `dbt-core` maintainer. This label is removed when a maintainer reviews and responds to the issue. |
| [bug](https://github.com/dbt-labs/dbt-core/labels/bug) | This issue represents a defect or regression in `dbt-core` |
| [enhancement](https://github.com/dbt-labs/dbt-core/labels/enhancement) | This issue represents net-new functionality in `dbt-core` |
| [good first issue](https://github.com/dbt-labs/dbt-core/labels/good%20first%20issue) | This issue does not require deep knowledge of the `dbt-core` codebase to implement. This issue is appropriate for a first-time contributor. |
| [help wanted](https://github.com/dbt-labs/dbt-core/labels/help%20wanted) / [discussion](https://github.com/dbt-labs/dbt-core/labels/discussion) | Conversation around this issue is ongoing, and there isn't yet a clear path forward. Input from community members is most welcome. |
| [duplicate](https://github.com/dbt-labs/dbt-core/issues/duplicate) | This issue is functionally identical to another open issue. The `dbt-core` maintainers will close this issue and encourage community members to focus conversation on the other one. |
| [snoozed](https://github.com/dbt-labs/dbt-core/labels/snoozed) | This issue describes a good idea, but one which will probably not be addressed in a six-month time horizon. The `dbt-core` maintainers will revisit these issues periodically and re-prioritize them accordingly. |
| [stale](https://github.com/dbt-labs/dbt-core/labels/stale) | This is an old issue which has not recently been updated. Stale issues will periodically be closed by `dbt-core` maintainers, but they can be re-opened if the discussion is restarted. |
| [wontfix](https://github.com/dbt-labs/dbt-core/labels/wontfix) | This issue does not require a code change in the `dbt-core` repository, or the maintainers are unwilling/unable to merge a Pull Request which implements the behavior described in the issue. |
#### Branching Strategy
`dbt-core` has three types of branches:
- **Trunks** are where active development of the next release takes place. There is one trunk, named `main` at the time of writing, and it is the default branch of the repository.
- **Release Branches** track a specific, not yet complete release of `dbt-core`. Each minor version release has a corresponding release branch. For example, the `0.11.x` series of releases has a branch called `0.11.latest`. This allows us to release new patch versions under `0.11` without necessarily needing to pull them into the latest version of `dbt-core`.
- **Feature Branches** track individual features and fixes. On completion they should be merged into the trunk branch or a specific release branch.
## Getting the code
@@ -36,17 +84,15 @@ You will need `git` in order to download and modify the `dbt-core` source code.
If you are not a member of the `dbt-labs` GitHub organization, you can contribute to `dbt-core` by forking the `dbt-core` repository. For a detailed overview on forking, check out the [GitHub docs on forking](https://help.github.com/en/articles/fork-a-repo). In short, you will need to:
1. Fork the `dbt-core` repository
2. Clone your fork locally
3. Check out a new branch for your proposed changes
4. Push changes to your fork
5. Open a pull request against `dbt-labs/dbt-core` from your forked repository
1. fork the `dbt-core` repository
2. clone your fork locally
3. check out a new branch for your proposed changes
4. push changes to your fork
5. open a pull request against `dbt-labs/dbt` from your forked repository
### dbt Labs contributors
If you are a member of the `dbt-labs` GitHub organization, you will have push access to the `dbt-core` repo. Rather than forking `dbt-core` to make your changes, just clone the repository, check out a new branch, and push directly to that branch. Branch names should be prefixed with `CT-XXX/` where:
* CT stands for 'core team'
* XXX stands for a JIRA ticket number
If you are a member of the `dbt-labs` GitHub organization, you will have push access to the `dbt-core` repo. Rather than forking `dbt-core` to make your changes, just clone the repository, check out a new branch, and push directly to that branch.
## Setting up an environment
@@ -54,21 +100,18 @@ There are some tools that will be helpful to you in developing locally. While th
### Tools
These are the tools used in `dbt-core` development and testing:
A short list of tools used in `dbt-core` testing that will be helpful to your understanding:
- [`tox`](https://tox.readthedocs.io/en/latest/) to manage virtualenvs across python versions. We currently target the latest patch releases for Python 3.8, 3.9, 3.10 and 3.11
- [`pytest`](https://docs.pytest.org/en/latest/) to define, discover, and run tests
- [`tox`](https://tox.readthedocs.io/en/latest/) to manage virtualenvs across python versions. We currently target the latest patch releases for Python 3.7, Python 3.8, and Python 3.9
- [`pytest`](https://docs.pytest.org/en/latest/) to discover/run tests
- [`make`](https://users.cs.duke.edu/~ola/courses/programming/Makefiles/Makefiles.html) - but don't worry too much, nobody _really_ understands how make works and our Makefile is super simple
- [`flake8`](https://flake8.pycqa.org/en/latest/) for code linting
- [`black`](https://github.com/psf/black) for code formatting
- [`mypy`](https://mypy.readthedocs.io/en/stable/) for static type checking
- [`pre-commit`](https://pre-commit.com) to easily run those checks
- [`changie`](https://changie.dev/) to create changelog entries, without merge conflicts
- [`make`](https://users.cs.duke.edu/~ola/courses/programming/Makefiles/Makefiles.html) to run multiple setup or test steps in combination. Don't worry too much, nobody _really_ understands how `make` works, and our Makefile aims to be super simple.
- [GitHub Actions](https://github.com/features/actions) for automating tests and checks, once a PR is pushed to the `dbt-core` repository
- [Github Actions](https://github.com/features/actions)
A deep understanding of these tools is not required to effectively contribute to `dbt-core`, but we recommend checking out the attached documentation if you're interested in learning more about each one.
A deep understanding of these tools is not required to effectively contribute to `dbt-core`, but we recommend checking out the attached documentation if you're interested in learning more about them.
#### Virtual environments
#### virtual environments
We strongly recommend using virtual environments when developing code in `dbt-core`. We recommend creating this virtualenv
in the root of the `dbt-core` repository. To create a new virtualenv, run:
@@ -79,12 +122,12 @@ source env/bin/activate
This will create and activate a new Python virtual environment.
#### Docker and `docker-compose`
#### docker and docker-compose
Docker and `docker-compose` are both used in testing. Specific instructions for your OS can be found [here](https://docs.docker.com/get-docker/).
Docker and docker-compose are both used in testing. Specific instructions for your OS can be found [here](https://docs.docker.com/get-docker/).
#### Postgres (optional)
#### postgres (optional)
For testing, and later in the examples in this document, you may want to have `psql` available so you can poke around in the database and see what happened. We recommend that you use [homebrew](https://brew.sh/) for that on macOS, and your package manager on Linux. You can install any version of the postgres client that you'd like. On macOS, with homebrew setup, you can run:
@@ -96,37 +139,32 @@ brew install postgresql
### Installation
First make sure that you set up your `virtualenv` as described in [Setting up an environment](#setting-up-an-environment). Also ensure you have the latest version of pip installed with `pip install --upgrade pip`. Next, install `dbt-core` (and its dependencies):
First make sure that you set up your `virtualenv` as described in [Setting up an environment](#setting-up-an-environment). Also ensure you have the latest version of pip installed with `pip install --upgrade pip`. Next, install `dbt-core` (and its dependencies) with:
```sh
make dev
```
or, alternatively:
```sh
# or
pip install -r dev-requirements.txt -r editable-requirements.txt
pre-commit install
```
When installed in this way, any changes you make to your local copy of the source code will be reflected immediately in your next `dbt` run.
When `dbt-core` is installed this way, any changes you make to the `dbt-core` source code will be reflected immediately in your next `dbt-core` run.
### Running `dbt-core`
With your virtualenv activated, the `dbt` script should point back to the source code you've cloned on your machine. You can verify this by running `which dbt`. This command should show you a path to an executable in your virtualenv.
With your virtualenv activated, the `dbt-core` script should point back to the source code you've cloned on your machine. You can verify this by running `which dbt`. This command should show you a path to an executable in your virtualenv.
Configure your [profile](https://docs.getdbt.com/docs/configure-your-profile) as necessary to connect to your target databases. It may be a good idea to add a new profile pointing to a local Postgres instance, or a specific test sandbox within your data warehouse if appropriate. Make sure to create a profile before running integration tests.
Configure your [profile](https://docs.getdbt.com/docs/configure-your-profile) as necessary to connect to your target databases. It may be a good idea to add a new profile pointing to a local postgres instance, or a specific test sandbox within your data warehouse if appropriate.
## Testing
Once you're able to manually test that your code change is working as expected, it's important to run existing automated tests, as well as adding some new ones. These tests will ensure that:
- Your code changes do not unexpectedly break other established functionality
- Your code changes can handle all known edge cases
- The functionality you're adding will _keep_ working in the future
Getting the `dbt-core` integration tests set up in your local environment will be very helpful as you start to make changes to your local version of `dbt-core`. The section that follows outlines some helpful tips for setting up the test environment.
Although `dbt-core` works with a number of different databases, you won't need to supply credentials for every one of these databases in your test environment. Instead, you can test most `dbt-core` code changes with Python and Postgres.
Although `dbt-core` works with a number of different databases, you won't need to supply credentials for every one of these databases in your test environment. Instead you can test all dbt-core code changes with Python and Postgres.
### Initial setup
Postgres tests offer the easiest way to test most `dbt-core` functionality today. They are the fastest to run and the easiest to set up. To run the Postgres integration tests, you'll have to do one extra step of setting up the test database:
We recommend starting with `dbt-core`'s Postgres tests. These tests cover most of the functionality in `dbt-core`, are the fastest to run, and are the easiest to set up. To run the Postgres integration tests, you'll have to do one extra step of setting up the test database:
```sh
make setup-db
@@ -152,79 +190,40 @@ make test
# Runs postgres integration tests with py38 in "fail fast" mode.
make integration
```
> These make targets assume you have a local installation of a recent version of [`tox`](https://tox.readthedocs.io/en/latest/) for unit/integration testing and pre-commit for code quality checks,
> These make targets assume you have a recent version of [`tox`](https://tox.readthedocs.io/en/latest/) installed locally,
> unless you choose a Docker container to run tests. Run `make help` for more info.
Check out the other targets in the Makefile to see other commonly used test
suites.
#### `pre-commit`
[`pre-commit`](https://pre-commit.com) takes care of running all code-checks for formatting and linting. Run `make dev` to install `pre-commit` in your local environment (we recommend running this command with a python virtual environment active). This command installs several pip executables including black, mypy, and flake8. Once this is done you can use any of the linter-based make targets as well as a git pre-commit hook that will ensure proper formatting and linting.
#### `tox`
[`tox`](https://tox.readthedocs.io/en/latest/) takes care of managing virtualenvs and installing dependencies in order to run tests. You can also run tests in parallel; for example, you can run unit tests for Python 3.8, 3.9, 3.10, and 3.11 in parallel with `tox -p`. Also, you can run unit tests for specific python versions with `tox -e py38`. The configuration for these tests is located in `tox.ini`.
[`tox`](https://tox.readthedocs.io/en/latest/) takes care of managing virtualenvs and installing dependencies in order to run
tests. You can also run tests in parallel, for example, you can run unit tests
for Python 3.7, Python 3.8, Python 3.9, `flake8` checks, and `mypy` checks in
parallel with `tox -p`. Also, you can run unit tests for specific python versions
with `tox -e py37`. The configuration for these tests is located in `tox.ini`.
#### `pytest`
Finally, you can also run a specific test or group of tests using [`pytest`](https://docs.pytest.org/en/latest/) directly. With a virtualenv active and dev dependencies installed you can do things like:
Finally, you can also run a specific test or group of tests using [`pytest`](https://docs.pytest.org/en/latest/) directly. With a virtualenv
active and dev dependencies installed you can do things like:
```sh
# run specific postgres integration tests
python -m pytest -m profile_postgres test/integration/001_simple_copy_test
# run all unit tests in a file
python3 -m pytest tests/unit/test_graph.py
python -m pytest test/unit/test_graph.py
# run a specific unit test
python3 -m pytest tests/unit/test_graph.py::GraphTest::test__dependency_list
# run specific Postgres functional tests
python3 -m pytest tests/functional/sources
python -m pytest test/unit/test_graph.py::GraphTest::test__dependency_list
```
> See [pytest usage docs](https://docs.pytest.org/en/6.2.x/usage.html) for an overview of useful command-line options.
### Unit, Integration, Functional?
Here are some general rules for adding tests:
* unit tests (`tests/unit`) don't need to access a database; "pure Python" tests should be written as unit tests (a minimal sketch follows this list)
* functional tests (`tests/functional`) cover anything that interacts with a database, namely adapter
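Here is a minimal sketch of the "pure Python" unit-test style, using a hypothetical helper rather than a real dbt-core module or test:

```python
# tests/unit/test_semver_example.py -- hypothetical example, not a real dbt-core test.
# A "pure Python" unit test: no database connection, no profiles, just logic.
import pytest


def bump_patch(version: str) -> str:
    # Toy helper under test; dbt-core's real version logic lives elsewhere (dbt.semver).
    major, minor, patch = (int(part) for part in version.split("."))
    return f"{major}.{minor}.{patch + 1}"


def test_bump_patch() -> None:
    assert bump_patch("1.0.1") == "1.0.2"


def test_bump_patch_rejects_garbage() -> None:
    with pytest.raises(ValueError):
        bump_patch("not-a-version")
```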
## Debugging
1. The logs for a `dbt run` have stack traces and other information for debugging errors (in `logs/dbt.log` in your project directory).
2. Try using a debugger, like `ipdb`. For pytest: `--pdb --pdbcls=IPython.terminal.debugger:pdb`
3. Sometimes, it's easier to debug on a single thread: `dbt --single-threaded run`
4. To make print statements from Jinja macros: `{{ log(msg, info=true) }}`
5. You can also add `{{ debug() }}` statements, which will drop you into some auto-generated code that the macro wrote.
6. The dbt “artifacts” are written out to the target directory of your dbt project. They are in unformatted json, which can be hard to read. Format them with:
> python -m json.tool target/run_results.json > run_results.json
### Assorted development tips
* Append `# type: ignore` to the end of a line if you need to disable `mypy` on that line.
* Sometimes flake8 complains about lines that are actually fine, in which case you can put a comment on the line such as `# noqa` or `# noqa: ANNN`, where ANNN is the error code that flake8 issues.
* To collect output for `cProfile`, run dbt with the `-r` option and the name of an output file, e.g. `dbt -r dbt.cprof run`. If you just want to profile parsing, you can do: `dbt -r dbt.cprof parse`. `pip` install `snakeviz` to view the output. Run `snakeviz dbt.cprof` and output will be rendered in a browser window.
## Adding or modifying a CHANGELOG Entry
We use [changie](https://changie.dev) to generate `CHANGELOG` entries. **Note:** Do not edit the `CHANGELOG.md` directly. Your modifications will be lost.
Follow the steps to [install `changie`](https://changie.dev/guide/installation/) for your system.
Once changie is installed and your PR is created for a new feature, simply run the following command and changie will walk you through the process of creating a changelog entry:
```shell
changie new
```
Commit the file that's created and your changelog entry is complete!
If you are contributing to a feature already in progress, you will modify the changie yaml file in dbt/.changes/unreleased/ related to your change. If you need help finding this file, please ask within the discussion for the pull request!
You don't need to worry about which `dbt-core` version your change will go into. Just create the changelog entry with `changie`, and open your PR against the `main` branch. All merged changes will be included in the next minor version of `dbt-core`. The Core maintainers _may_ choose to "backport" specific changes in order to patch older minor versions. In that case, a maintainer will take care of that backport after merging your PR, before releasing the new version of `dbt-core`.
> [Here](https://docs.pytest.org/en/reorganize-docs/new-docs/user/commandlineuseful.html)
> is a list of useful command-line options for `pytest` to use while developing.
## Submitting a Pull Request
Code can be merged into the current development branch `main` by opening a pull request. A `dbt-core` maintainer will review your PR. They may suggest code revision for style or clarity, or request that you add unit or integration test(s). These are good things! We believe that, with a little bit of help, anyone can contribute high-quality code.
dbt Labs provides a CI environment to test changes to specific adapters, and periodic maintenance checks of `dbt-core` through Github Actions. For example, if you submit a pull request to the `dbt-redshift` repo, GitHub will trigger automated code checks and tests against Redshift.
A `dbt-core` maintainer will review your PR. They may suggest code revision for style or clarity, or request that you add unit or integration test(s). These are good things! We believe that, with a little bit of help, anyone can contribute high-quality code.
- First-time contributors should note that code checks and unit tests require a maintainer to approve.
Automated tests run via GitHub Actions. If you're a first-time contributor, all tests (including code checks and unit tests) will require a maintainer to approve. Changes in the `dbt-core` repository trigger integration tests against Postgres. dbt Labs also provides CI environments in which to test changes to other adapters, triggered by PRs in those adapters' repositories, as well as periodic maintenance checks of each adapter in concert with the latest `dbt-core` code changes.
Once all tests are passing and your PR has been approved, a `dbt-core` maintainer will merge your changes into the active development branch. And that's it! Happy developing :tada:
Sometimes, the contributor license agreement (CLA) auto-check bot doesn't find a user's entry in its roster. If you need to force a rerun, add `@cla-bot check` in a comment on the pull request.


@@ -3,13 +3,13 @@
# See `/docker` for a generic and production-ready docker file
##
FROM ubuntu:22.04
FROM ubuntu:20.04
ENV DEBIAN_FRONTEND noninteractive
RUN apt-get update \
&& apt-get install -y --no-install-recommends \
software-properties-common gpg-agent \
software-properties-common \
&& add-apt-repository ppa:git-core/ppa -y \
&& apt-get dist-upgrade -y \
&& apt-get install -y --no-install-recommends \
@@ -30,21 +30,22 @@ RUN apt-get update \
unixodbc-dev \
&& add-apt-repository ppa:deadsnakes/ppa \
&& apt-get install -y \
python-is-python3 \
python-dev-is-python3 \
python \
python-dev \
python3-pip \
python3.6 \
python3.6-dev \
python3-pip \
python3.6-venv \
python3.7 \
python3.7-dev \
python3.7-venv \
python3.8 \
python3.8-dev \
python3.8-venv \
python3.9 \
python3.9-dev \
python3.9-venv \
python3.10 \
python3.10-dev \
python3.10-venv \
python3.11 \
python3.11-dev \
python3.11-venv \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

Makefile

@@ -6,97 +6,47 @@ ifeq ($(USE_DOCKER),true)
DOCKER_CMD := docker-compose run --rm test
endif
#
# To override CI_FLAGS, create a file at this repo's root dir named `makefile.test.env`. Fill it
# with any ENV_VAR overrides required by your test environment, e.g.
# DBT_TEST_USER_1=user
# LOG_DIR="dir with a space in it"
#
# Warn: Restrict each line to one variable only.
#
ifeq (./makefile.test.env,$(wildcard ./makefile.test.env))
include ./makefile.test.env
endif
CI_FLAGS =\
DBT_TEST_USER_1=$(if $(DBT_TEST_USER_1),$(DBT_TEST_USER_1),dbt_test_user_1)\
DBT_TEST_USER_2=$(if $(DBT_TEST_USER_2),$(DBT_TEST_USER_2),dbt_test_user_2)\
DBT_TEST_USER_3=$(if $(DBT_TEST_USER_3),$(DBT_TEST_USER_3),dbt_test_user_3)\
RUSTFLAGS=$(if $(RUSTFLAGS),$(RUSTFLAGS),"-D warnings")\
LOG_DIR=$(if $(LOG_DIR),$(LOG_DIR),./logs)\
DBT_LOG_FORMAT=$(if $(DBT_LOG_FORMAT),$(DBT_LOG_FORMAT),json)
.PHONY: dev_req
dev_req: ## Installs dbt-* packages in develop mode along with only development dependencies.
@\
pip install -r dev-requirements.txt
pip install -r editable-requirements.txt
.PHONY: dev
dev: dev_req ## Installs dbt-* packages in develop mode along with development dependencies and pre-commit.
@\
pre-commit install
.PHONY: proto_types
proto_types: ## generates google protobuf python file from types.proto
protoc -I=./core/dbt/events --python_out=./core/dbt/events ./core/dbt/events/types.proto
dev: ## Installs dbt-* packages in develop mode along with development dependencies.
pip install -r dev-requirements.txt -r editable-requirements.txt
.PHONY: mypy
mypy: .env ## Runs mypy against staged changes for static type checking.
@\
$(DOCKER_CMD) pre-commit run --hook-stage manual mypy-check | grep -v "INFO"
mypy: .env ## Runs mypy for static type checking.
$(DOCKER_CMD) tox -e mypy
.PHONY: flake8
flake8: .env ## Runs flake8 against staged changes to enforce style guide.
@\
$(DOCKER_CMD) pre-commit run --hook-stage manual flake8-check | grep -v "INFO"
.PHONY: black
black: .env ## Runs black against staged changes to enforce style guide.
@\
$(DOCKER_CMD) pre-commit run --hook-stage manual black-check -v | grep -v "INFO"
flake8: .env ## Runs flake8 to enforce style guide.
$(DOCKER_CMD) tox -e flake8
.PHONY: lint
lint: .env ## Runs flake8 and mypy code checks against staged changes.
@\
$(DOCKER_CMD) pre-commit run flake8-check --hook-stage manual | grep -v "INFO"; \
$(DOCKER_CMD) pre-commit run mypy-check --hook-stage manual | grep -v "INFO"
lint: .env ## Runs all code checks in parallel.
$(DOCKER_CMD) tox -p -e flake8,mypy
.PHONY: unit
unit: .env ## Runs unit tests with py
@\
$(DOCKER_CMD) tox -e py
unit: .env ## Runs unit tests with py38.
$(DOCKER_CMD) tox -e py38
.PHONY: test
test: .env ## Runs unit tests with py and code checks against staged changes.
@\
$(DOCKER_CMD) tox -e py; \
$(DOCKER_CMD) pre-commit run black-check --hook-stage manual | grep -v "INFO"; \
$(DOCKER_CMD) pre-commit run flake8-check --hook-stage manual | grep -v "INFO"; \
$(DOCKER_CMD) pre-commit run mypy-check --hook-stage manual | grep -v "INFO"
test: .env ## Runs unit tests with py38 and code checks in parallel.
$(DOCKER_CMD) tox -p -e py38,flake8,mypy
.PHONY: integration
integration: .env ## Runs postgres integration tests with py-integration
@\
$(CI_FLAGS) $(DOCKER_CMD) tox -e py-integration -- -nauto
integration: .env integration-postgres ## Alias for integration-postgres.
.PHONY: integration-fail-fast
integration-fail-fast: .env ## Runs postgres integration tests with py-integration in "fail fast" mode.
@\
$(DOCKER_CMD) tox -e py-integration -- -x -nauto
integration-fail-fast: .env integration-postgres-fail-fast ## Alias for integration-postgres-fail-fast.
.PHONY: interop
interop: clean
@\
mkdir $(LOG_DIR) && \
$(CI_FLAGS) $(DOCKER_CMD) tox -e py-integration -- -nauto && \
LOG_DIR=$(LOG_DIR) cargo run --manifest-path test/interop/log_parsing/Cargo.toml
.PHONY: integration-postgres
integration-postgres: .env ## Runs postgres integration tests with py38.
$(DOCKER_CMD) tox -e py38-postgres -- -nauto
.PHONY: integration-postgres-fail-fast
integration-postgres-fail-fast: .env ## Runs postgres integration tests with py38 in "fail fast" mode.
$(DOCKER_CMD) tox -e py38-postgres -- -x -nauto
.PHONY: setup-db
setup-db: ## Setup Postgres database with docker-compose for system testing.
@\
docker-compose up -d database && \
docker-compose up -d database
PGHOST=localhost PGUSER=root PGPASSWORD=password PGDATABASE=postgres bash test/setup_db.sh
# This rule creates a file named .env that is used by docker-compose for passing
@@ -112,30 +62,27 @@ endif
.PHONY: clean
clean: ## Resets development environment.
@echo 'cleaning repo...'
@rm -f .coverage
@rm -f .coverage.*
@rm -rf .eggs/
@rm -f .env
@rm -rf .tox/
@rm -rf build/
@rm -rf dbt.egg-info/
@rm -f dbt_project.yml
@rm -rf dist/
@rm -f htmlcov/*.{css,html,js,json,png}
@rm -rf logs/
@rm -rf target/
@find . -type f -name '*.pyc' -delete
@find . -type d -name '__pycache__' -depth -delete
@echo 'done.'
rm -f .coverage
rm -rf .eggs/
rm -f .env
rm -rf .tox/
rm -rf build/
rm -rf dbt.egg-info/
rm -f dbt_project.yml
rm -rf dist/
rm -f htmlcov/*.{css,html,js,json,png}
rm -rf logs/
rm -rf target/
find . -type f -name '*.pyc' -delete
find . -type d -name '__pycache__' -depth -delete
.PHONY: help
help: ## Show this help message.
@echo 'usage: make [target] [USE_DOCKER=true]'
@echo
@echo 'targets:'
@grep -E '^[8+a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
@echo
@echo 'options:'
@echo 'use USE_DOCKER=true to run target in a docker container'


@@ -3,13 +3,16 @@
</p>
<p align="center">
<a href="https://github.com/dbt-labs/dbt-core/actions/workflows/main.yml">
<img src="https://github.com/dbt-labs/dbt-core/actions/workflows/main.yml/badge.svg?event=push" alt="CI Badge"/>
<img src="https://github.com/dbt-labs/dbt-core/actions/workflows/main.yml/badge.svg?event=push" alt="Unit Tests Badge"/>
</a>
<a href="https://github.com/dbt-labs/dbt-core/actions/workflows/integration.yml">
<img src="https://github.com/dbt-labs/dbt-core/actions/workflows/integration.yml/badge.svg?event=push" alt="Integration Tests Badge"/>
</a>
</p>
**[dbt](https://www.getdbt.com/)** enables data analysts and engineers to transform their data using the same practices that software engineers use to build applications.
![architecture](https://github.com/dbt-labs/dbt-core/blob/202cb7e51e218c7b29eb3b11ad058bd56b7739de/etc/dbt-transform.png)
![architecture](https://raw.githubusercontent.com/dbt-labs/dbt-core/6c6649f9129d5d108aa3b0526f634cd8f3a9d1ed/etc/dbt-arch.png)
## Understanding dbt
@@ -21,7 +24,7 @@ These select statements, or "models", form a dbt project. Models frequently buil
## Getting started
- [Install dbt](https://docs.getdbt.com/docs/get-started/installation)
- [Install dbt](https://docs.getdbt.com/docs/installation)
- Read the [introduction](https://docs.getdbt.com/docs/introduction/) and [viewpoint](https://docs.getdbt.com/docs/about/viewpoint/)
## Join the dbt Community


@@ -1,2 +1 @@
recursive-include dbt/include *.py *.sql *.yml *.html *.md .gitkeep .gitignore
include dbt/py.typed


@@ -3,7 +3,10 @@
</p>
<p align="center">
<a href="https://github.com/dbt-labs/dbt-core/actions/workflows/main.yml">
<img src="https://github.com/dbt-labs/dbt-core/actions/workflows/main.yml/badge.svg?event=push" alt="CI Badge"/>
<img src="https://github.com/dbt-labs/dbt-core/actions/workflows/main.yml/badge.svg?event=push" alt="Unit Tests Badge"/>
</a>
<a href="https://github.com/dbt-labs/dbt-core/actions/workflows/integration.yml">
<img src="https://github.com/dbt-labs/dbt-core/actions/workflows/integration.yml/badge.svg?event=push" alt="Integration Tests Badge"/>
</a>
</p>


@@ -2,59 +2,51 @@
## The following are individual files in this directory.
### compilation.py
### constants.py
### dataclass_schema.py
### deprecations.py
### exceptions.py
### flags.py
### flags.py
### main.py
### helper_types.py
### hooks.py
### lib.py
### links.py
### logger.py
### main.py
### node_types.py
### profiler.py
### selected_resources.py
### semver.py
### tracking.py
### ui.py
### utils.py
### tracking.py
### version.py
### lib.py
### node_types.py
### helper_types.py
### links.py
### semver.py
### ui.py
### compilation.py
### dataclass_schema.py
### exceptions.py
### hooks.py
### logger.py
### profiler.py
### utils.py
## The subdirectories will be documented in a README in the subdirectory
* adapters
* cli
* clients
* config
* context
* contracts
* deps
* docs
* events
* graph
* include
* parser
* adapters
* context
* deps
* graph
* task
* tests
* clients
* events


@@ -1,7 +0,0 @@
# N.B.
# This will add to the packages __path__ all subdirectories of directories on sys.path named after the package which effectively combines both modules into a single namespace (dbt.adapters)
# The matching statement is in plugins/postgres/dbt/__init__.py
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)


@@ -1,30 +1 @@
# Adapters README
The Adapters module is responsible for defining database connection methods, caching information from databases, how relations are defined, and the two major connection types we have - base and sql.
# Directories
## `base`
Defines the base implementation Adapters can use to build out full functionality.
## `sql`
Defines a SQL implementation for adapters that initially inherits the above base implementation and comes with some premade methods and macros that can be overwritten as needed per adapter (the most common type of adapter).
# Files
## `cache.py`
Caches information from the database.
## `factory.py`
Defines how we generate adapter objects
## `protocol.py`
Defines various interfaces for various adapter objects. Helps mypy correctly resolve methods.
## `reference_keys.py`
Configures naming scheme for cache elements to be universal.


@@ -1,7 +0,0 @@
# N.B.
# This will add to the packages __path__ all subdirectories of directories on sys.path named after the package which effectively combines both modules into a single namespace (dbt.adapters)
# The matching statement is in plugins/postgres/dbt/adapters/__init__.py
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)


@@ -1,10 +0,0 @@
## Base adapters
### impl.py
The class `SQLAdapter` in [base/impl.py](https://github.com/dbt-labs/dbt-core/blob/main/core/dbt/adapters/base/impl.py) is a (mostly) abstract object that adapter objects inherit from. The base class scaffolds out methods that every adapter project usually should implement for smooth communication between dbt and the database.
Some target databases require more or fewer methods; it all depends on the warehouse's feature set.
Look into the class for function-level comments.

View File

@@ -1,19 +1,14 @@
# these are all just exports, #noqa them so flake8 will be happy
# TODO: Should we still include this in the `adapters` namespace?
from dbt.contracts.connection import Credentials # noqa: F401
from dbt.adapters.base.meta import available # noqa: F401
from dbt.adapters.base.connections import BaseConnectionManager # noqa: F401
from dbt.adapters.base.relation import ( # noqa: F401
from dbt.contracts.connection import Credentials # noqa
from dbt.adapters.base.meta import available # noqa
from dbt.adapters.base.connections import BaseConnectionManager # noqa
from dbt.adapters.base.relation import ( # noqa
BaseRelation,
RelationType,
SchemaSearchMap,
)
from dbt.adapters.base.column import Column # noqa: F401
from dbt.adapters.base.impl import ( # noqa: F401
AdapterConfig,
BaseAdapter,
PythonJobHelper,
ConstraintSupport,
)
from dbt.adapters.base.plugin import AdapterPlugin # noqa: F401
from dbt.adapters.base.column import Column # noqa
from dbt.adapters.base.impl import AdapterConfig, BaseAdapter # noqa
from dbt.adapters.base.plugin import AdapterPlugin # noqa


@@ -2,17 +2,16 @@ from dataclasses import dataclass
import re
from typing import Dict, ClassVar, Any, Optional
from dbt.exceptions import DbtRuntimeError
from dbt.exceptions import RuntimeException
@dataclass
class Column:
TYPE_LABELS: ClassVar[Dict[str, str]] = {
"STRING": "TEXT",
"TIMESTAMP": "TIMESTAMP",
"FLOAT": "FLOAT",
"INTEGER": "INT",
"BOOLEAN": "BOOLEAN",
'STRING': 'TEXT',
'TIMESTAMP': 'TIMESTAMP',
'FLOAT': 'FLOAT',
'INTEGER': 'INT'
}
column: str
dtype: str
@@ -25,7 +24,7 @@ class Column:
return cls.TYPE_LABELS.get(dtype.upper(), dtype)
@classmethod
def create(cls, name, label_or_dtype: str) -> "Column":
def create(cls, name, label_or_dtype: str) -> 'Column':
column_type = cls.translate_type(label_or_dtype)
return cls(name, column_type)
@@ -40,14 +39,16 @@ class Column:
@property
def data_type(self) -> str:
if self.is_string():
return self.string_type(self.string_size())
return Column.string_type(self.string_size())
elif self.is_numeric():
return self.numeric_type(self.dtype, self.numeric_precision, self.numeric_scale)
return Column.numeric_type(self.dtype, self.numeric_precision,
self.numeric_scale)
else:
return self.dtype
def is_string(self) -> bool:
return self.dtype.lower() in ["text", "character varying", "character", "varchar"]
return self.dtype.lower() in ['text', 'character varying', 'character',
'varchar']
def is_number(self):
return any([self.is_integer(), self.is_numeric(), self.is_float()])
@@ -55,46 +56,33 @@ class Column:
def is_float(self):
return self.dtype.lower() in [
# floats
"real",
"float4",
"float",
"double precision",
"float8",
"double",
'real', 'float4', 'float', 'double precision', 'float8'
]
def is_integer(self) -> bool:
return self.dtype.lower() in [
# real types
"smallint",
"integer",
"bigint",
"smallserial",
"serial",
"bigserial",
'smallint', 'integer', 'bigint',
'smallserial', 'serial', 'bigserial',
# aliases
"int2",
"int4",
"int8",
"serial2",
"serial4",
"serial8",
'int2', 'int4', 'int8',
'serial2', 'serial4', 'serial8',
]
def is_numeric(self) -> bool:
return self.dtype.lower() in ["numeric", "decimal"]
return self.dtype.lower() in ['numeric', 'decimal']
def string_size(self) -> int:
if not self.is_string():
raise DbtRuntimeError("Called string_size() on non-string field!")
raise RuntimeException("Called string_size() on non-string field!")
if self.dtype == "text" or self.char_size is None:
if self.dtype == 'text' or self.char_size is None:
# char_size should never be None. Handle it reasonably just in case
return 256
else:
return int(self.char_size)
def can_expand_to(self, other_column: "Column") -> bool:
def can_expand_to(self, other_column: 'Column') -> bool:
"""returns True if this column can be expanded to the size of the
other column"""
if not self.is_string() or not other_column.is_string():
@@ -122,10 +110,12 @@ class Column:
return "<Column {} ({})>".format(self.name, self.data_type)
@classmethod
def from_description(cls, name: str, raw_data_type: str) -> "Column":
match = re.match(r"([^(]+)(\([^)]+\))?", raw_data_type)
def from_description(cls, name: str, raw_data_type: str) -> 'Column':
match = re.match(r'([^(]+)(\([^)]+\))?', raw_data_type)
if match is None:
raise DbtRuntimeError(f'Could not interpret data type "{raw_data_type}"')
raise RuntimeException(
f'Could not interpret data type "{raw_data_type}"'
)
data_type, size_info = match.groups()
char_size = None
numeric_precision = None
@@ -133,12 +123,12 @@ class Column:
if size_info is not None:
# strip out the parentheses
size_info = size_info[1:-1]
parts = size_info.split(",")
parts = size_info.split(',')
if len(parts) == 1:
try:
char_size = int(parts[0])
except ValueError:
raise DbtRuntimeError(
raise RuntimeException(
f'Could not interpret data_type "{raw_data_type}": '
f'could not convert "{parts[0]}" to an integer'
)
@@ -146,16 +136,18 @@ class Column:
try:
numeric_precision = int(parts[0])
except ValueError:
raise DbtRuntimeError(
raise RuntimeException(
f'Could not interpret data_type "{raw_data_type}": '
f'could not convert "{parts[0]}" to an integer'
)
try:
numeric_scale = int(parts[1])
except ValueError:
raise DbtRuntimeError(
raise RuntimeException(
f'Could not interpret data_type "{raw_data_type}": '
f'could not convert "{parts[1]}" to an integer'
)
return cls(name, data_type, char_size, numeric_precision, numeric_scale)
return cls(
name, data_type, char_size, numeric_precision, numeric_scale
)


@@ -1,59 +1,35 @@
import abc
import os
from time import sleep
import sys
import traceback
# multiprocessing.RLock is a function returning this type
from multiprocessing.synchronize import RLock
from threading import get_ident
from typing import (
Any,
Dict,
Tuple,
Hashable,
Optional,
ContextManager,
List,
Type,
Union,
Iterable,
Callable,
Dict, Tuple, Hashable, Optional, ContextManager, List, Union
)
import agate
import dbt.exceptions
from dbt.contracts.connection import (
Connection,
Identifier,
ConnectionState,
AdapterRequiredConfig,
LazyHandle,
AdapterResponse,
Connection, Identifier, ConnectionState,
AdapterRequiredConfig, LazyHandle, AdapterResponse
)
from dbt.contracts.graph.manifest import Manifest
from dbt.adapters.base.query_headers import (
MacroQueryStringSetter,
)
from dbt.events import AdapterLogger
from dbt.events.functions import fire_event
from dbt.events.types import (
NewConnection,
ConnectionReused,
ConnectionLeftOpenInCleanup,
ConnectionLeftOpen,
ConnectionClosedInCleanup,
ConnectionLeftOpen2,
ConnectionClosed,
ConnectionClosed2,
Rollback,
RollbackFailed,
RollbackFailed
)
from dbt.events.contextvars import get_node_info
from dbt import flags
from dbt.utils import cast_to_str
SleepTime = Union[int, float] # As taken by time.sleep.
AdapterHandle = Any # Adapter connection handle objects can be any class.
class BaseConnectionManager(metaclass=abc.ABCMeta):
@@ -69,7 +45,6 @@ class BaseConnectionManager(metaclass=abc.ABCMeta):
You must also set the 'TYPE' class attribute with a class-unique constant
string.
"""
TYPE: str = NotImplemented
def __init__(self, profile: AdapterRequiredConfig):
@@ -91,14 +66,16 @@ class BaseConnectionManager(metaclass=abc.ABCMeta):
key = self.get_thread_identifier()
with self.lock:
if key not in self.thread_connections:
raise dbt.exceptions.InvalidConnectionError(key, list(self.thread_connections))
raise dbt.exceptions.InvalidConnectionException(
key, list(self.thread_connections)
)
return self.thread_connections[key]
def set_thread_connection(self, conn: Connection) -> None:
key = self.get_thread_identifier()
if key in self.thread_connections:
raise dbt.exceptions.DbtInternalError(
"In set_thread_connection, existing connection exists for {}"
raise dbt.exceptions.InternalException(
'In set_thread_connection, existing connection exists for {}'
)
self.thread_connections[key] = conn
@@ -137,148 +114,56 @@ class BaseConnectionManager(metaclass=abc.ABCMeta):
:return: A context manager that handles exceptions raised by the
underlying database.
"""
raise dbt.exceptions.NotImplementedError(
"`exception_handler` is not implemented for this adapter!"
)
raise dbt.exceptions.NotImplementedException(
'`exception_handler` is not implemented for this adapter!')
def set_connection_name(self, name: Optional[str] = None) -> Connection:
"""Called by 'acquire_connection' in BaseAdapter, which is called by
'connection_named', called by 'connection_for(node)'.
Creates a connection for this thread if one doesn't already
exist, and will rename an existing connection."""
conn_name: str
if name is None:
# if a name isn't specified, we'll re-use a single handle
# named 'master'
conn_name = 'master'
else:
if not isinstance(name, str):
raise dbt.exceptions.CompilerException(
f'For connection name, got {name} - not a string!'
)
assert isinstance(name, str)
conn_name = name
conn_name: str = "master" if name is None else name
# Get a connection for this thread
conn = self.get_if_exists()
if conn and conn.name == conn_name and conn.state == "open":
# Found a connection and nothing to do, so just return it
return conn
if conn is None:
# Create a new connection
conn = Connection(
type=Identifier(self.TYPE),
name=conn_name,
name=None,
state=ConnectionState.INIT,
transaction_open=False,
handle=None,
credentials=self.profile.credentials,
credentials=self.profile.credentials
)
conn.handle = LazyHandle(self.open)
# Add the connection to thread_connections for this thread
self.set_thread_connection(conn)
fire_event(
NewConnection(conn_name=conn_name, conn_type=self.TYPE, node_info=get_node_info())
)
else: # existing connection either wasn't open or didn't have the right name
if conn.state != "open":
conn.handle = LazyHandle(self.open)
if conn.name != conn_name:
orig_conn_name: str = conn.name or ""
conn.name = conn_name
fire_event(ConnectionReused(orig_conn_name=orig_conn_name, conn_name=conn_name))
if conn.name == conn_name and conn.state == 'open':
return conn
fire_event(NewConnection(conn_name=conn_name, conn_type=self.TYPE))
if conn.state == 'open':
fire_event(ConnectionReused(conn_name=conn_name))
else:
conn.handle = LazyHandle(self.open)
conn.name = conn_name
return conn
@classmethod
def retry_connection(
cls,
connection: Connection,
connect: Callable[[], AdapterHandle],
logger: AdapterLogger,
retryable_exceptions: Iterable[Type[Exception]],
retry_limit: int = 1,
retry_timeout: Union[Callable[[int], SleepTime], SleepTime] = 1,
_attempts: int = 0,
) -> Connection:
"""Given a Connection, set its handle by calling connect.
The calls to connect will be retried up to retry_limit times to deal with transient
connection errors. By default, one retry will be attempted if retryable_exceptions is set.
:param Connection connection: An instance of a Connection that needs a handle to be set,
usually when attempting to open it.
:param connect: A callable that returns the appropriate connection handle for a
given adapter. This callable will be retried retry_limit times if a subclass of any
Exception in retryable_exceptions is raised by connect.
:type connect: Callable[[], AdapterHandle]
:param AdapterLogger logger: A logger to emit messages on retry attempts or errors. When
handling expected errors, we call debug, and call warning on unexpected errors or when
all retry attempts have been exhausted.
:param retryable_exceptions: An iterable of exception classes that if raised by
connect should trigger a retry.
:type retryable_exceptions: Iterable[Type[Exception]]
:param int retry_limit: How many times to retry the call to connect. If this limit
is exceeded before a successful call, a FailedToConnectError will be raised.
Must be non-negative.
:param retry_timeout: Time to wait between attempts to connect. Can also take a
Callable that takes the number of attempts so far, beginning at 0, and returns an int
or float to be passed to time.sleep.
:type retry_timeout: Union[Callable[[int], SleepTime], SleepTime] = 1
:param int _attempts: Parameter used to keep track of the number of attempts in calling the
connect function across recursive calls. Passed as an argument to retry_timeout if it
is a Callable. This parameter should not be set by the initial caller.
:raises dbt.exceptions.FailedToConnectError: Upon exhausting all retry attempts without
successfully acquiring a handle.
:return: The given connection with its appropriate state and handle attributes set
depending on whether we successfully acquired a handle or not.
"""
timeout = retry_timeout(_attempts) if callable(retry_timeout) else retry_timeout
if timeout < 0:
raise dbt.exceptions.FailedToConnectError(
"retry_timeout cannot be negative or return a negative time."
)
if retry_limit < 0 or retry_limit > sys.getrecursionlimit():
# This guard is not perfect; others may add to the recursion limit (e.g. built-ins).
connection.handle = None
connection.state = ConnectionState.FAIL
raise dbt.exceptions.FailedToConnectError("retry_limit cannot be negative")
try:
connection.handle = connect()
connection.state = ConnectionState.OPEN
return connection
except tuple(retryable_exceptions) as e:
if retry_limit <= 0:
connection.handle = None
connection.state = ConnectionState.FAIL
raise dbt.exceptions.FailedToConnectError(str(e))
logger.debug(
f"Got a retryable error when attempting to open a {cls.TYPE} connection.\n"
f"{retry_limit} attempts remaining. Retrying in {timeout} seconds.\n"
f"Error:\n{e}"
)
sleep(timeout)
return cls.retry_connection(
connection=connection,
connect=connect,
logger=logger,
retry_limit=retry_limit - 1,
retry_timeout=retry_timeout,
retryable_exceptions=retryable_exceptions,
_attempts=_attempts + 1,
)
except Exception as e:
connection.handle = None
connection.state = ConnectionState.FAIL
raise dbt.exceptions.FailedToConnectError(str(e))
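# A minimal usage sketch, assuming a psycopg2-backed adapter (the psycopg2
# names and credential handling below are illustrative, not taken from this module):
#
#     @classmethod
#     def open(cls, connection):
#         def connect():
#             return psycopg2.connect(**connection.credentials.to_dict())
#
#         return cls.retry_connection(
#             connection,
#             connect=connect,
#             logger=logger,  # an AdapterLogger instance
#             retry_limit=3,
#             retry_timeout=lambda attempt: attempt * 2,
#             retryable_exceptions=[psycopg2.OperationalError],
#         )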
@abc.abstractmethod
def cancel_open(self) -> Optional[List[str]]:
"""Cancel all open connections on the adapter. (passable)"""
raise dbt.exceptions.NotImplementedError(
"`cancel_open` is not implemented for this adapter!"
raise dbt.exceptions.NotImplementedException(
'`cancel_open` is not implemented for this adapter!'
)
@classmethod
@abc.abstractmethod
@abc.abstractclassmethod
def open(cls, connection: Connection) -> Connection:
"""Open the given connection on the adapter and return it.
@@ -288,7 +173,9 @@ class BaseConnectionManager(metaclass=abc.ABCMeta):
This should be thread-safe, or hold the lock if necessary. The given
connection should not be in either in_use or available.
"""
raise dbt.exceptions.NotImplementedError("`open` is not implemented for this adapter!")
raise dbt.exceptions.NotImplementedException(
'`open` is not implemented for this adapter!'
)
def release(self) -> None:
with self.lock:
@@ -308,10 +195,10 @@ class BaseConnectionManager(metaclass=abc.ABCMeta):
def cleanup_all(self) -> None:
with self.lock:
for connection in self.thread_connections.values():
if connection.state not in {"closed", "init"}:
fire_event(ConnectionLeftOpenInCleanup(conn_name=cast_to_str(connection.name)))
if connection.state not in {'closed', 'init'}:
fire_event(ConnectionLeftOpen(conn_name=connection.name))
else:
fire_event(ConnectionClosedInCleanup(conn_name=cast_to_str(connection.name)))
fire_event(ConnectionClosed(conn_name=connection.name))
self.close(connection)
# garbage collect these connections
@@ -320,12 +207,16 @@ class BaseConnectionManager(metaclass=abc.ABCMeta):
@abc.abstractmethod
def begin(self) -> None:
"""Begin a transaction. (passable)"""
raise dbt.exceptions.NotImplementedError("`begin` is not implemented for this adapter!")
raise dbt.exceptions.NotImplementedException(
'`begin` is not implemented for this adapter!'
)
@abc.abstractmethod
def commit(self) -> None:
"""Commit a transaction. (passable)"""
raise dbt.exceptions.NotImplementedError("`commit` is not implemented for this adapter!")
raise dbt.exceptions.NotImplementedException(
'`commit` is not implemented for this adapter!'
)
@classmethod
def _rollback_handle(cls, connection: Connection) -> None:
@@ -333,40 +224,28 @@ class BaseConnectionManager(metaclass=abc.ABCMeta):
try:
connection.handle.rollback()
except Exception:
fire_event(
RollbackFailed(
conn_name=cast_to_str(connection.name),
exc_info=traceback.format_exc(),
node_info=get_node_info(),
)
)
fire_event(RollbackFailed(conn_name=connection.name))
@classmethod
def _close_handle(cls, connection: Connection) -> None:
"""Perform the actual close operation."""
# On windows, sometimes connection handles don't have a close() attr.
if hasattr(connection.handle, "close"):
fire_event(
ConnectionClosed(conn_name=cast_to_str(connection.name), node_info=get_node_info())
)
if hasattr(connection.handle, 'close'):
fire_event(ConnectionClosed2(conn_name=connection.name))
connection.handle.close()
else:
fire_event(
ConnectionLeftOpen(
conn_name=cast_to_str(connection.name), node_info=get_node_info()
)
)
fire_event(ConnectionLeftOpen2(conn_name=connection.name))
@classmethod
def _rollback(cls, connection: Connection) -> None:
"""Roll back the given connection."""
if connection.transaction_open is False:
raise dbt.exceptions.DbtInternalError(
f"Tried to rollback transaction on connection "
raise dbt.exceptions.InternalException(
f'Tried to rollback transaction on connection '
f'"{connection.name}", but it does not have one open!'
)
fire_event(Rollback(conn_name=cast_to_str(connection.name), node_info=get_node_info()))
fire_event(Rollback(conn_name=connection.name))
cls._rollback_handle(connection)
connection.transaction_open = False
@@ -378,7 +257,7 @@ class BaseConnectionManager(metaclass=abc.ABCMeta):
return connection
if connection.transaction_open and connection.handle:
fire_event(Rollback(conn_name=cast_to_str(connection.name), node_info=get_node_info()))
fire_event(Rollback(conn_name=connection.name))
cls._rollback_handle(connection)
connection.transaction_open = False
@@ -401,14 +280,16 @@ class BaseConnectionManager(metaclass=abc.ABCMeta):
@abc.abstractmethod
def execute(
self, sql: str, auto_begin: bool = False, fetch: bool = False
) -> Tuple[AdapterResponse, agate.Table]:
) -> Tuple[Union[str, AdapterResponse], agate.Table]:
"""Execute the given SQL.
:param str sql: The sql to execute.
:param bool auto_begin: If set, and dbt is not currently inside a
transaction, automatically begin one.
:param bool fetch: If set, fetch results.
:return: A tuple of the query status and results (empty if fetch=False).
:rtype: Tuple[AdapterResponse, agate.Table]
:return: A tuple of the status and the results (empty if fetch=False).
:rtype: Tuple[Union[str, AdapterResponse], agate.Table]
"""
raise dbt.exceptions.NotImplementedError("`execute` is not implemented for this adapter!")
raise dbt.exceptions.NotImplementedException(
'`execute` is not implemented for this adapter!'
)
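# A minimal usage sketch (names illustrative): a concrete connection manager
# implementing this method could be driven as
#
#     response, table = manager.execute("select 1 as id", fetch=True)
#     # `response` carries the query status; `table` is an agate.Table of rows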

File diff suppressed because it is too large

View File

@@ -30,11 +30,9 @@ class _Available:
x.update(big_expensive_db_query())
return x
"""
def inner(func):
func._parse_replacement_ = parse_replacement
return self(func)
return inner
def deprecated(
@@ -59,14 +57,13 @@ class _Available:
The optional parse_replacement, if given, supplies a parse-time
replacement for the actual method (see `available.parse`).
"""
def wrapper(func):
func_name = func.__name__
renamed_method(func_name, supported_name)
@wraps(func)
def inner(*args, **kwargs):
warn("adapter:{}".format(func_name))
warn('adapter:{}'.format(func_name))
return func(*args, **kwargs)
if parse_replacement:
@@ -74,7 +71,6 @@ class _Available:
else:
available_function = self
return available_function(inner)
return wrapper
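# A rough sketch of the intended use, assuming `available` is the shared
# _Available instance and the argument names the supported replacement method
# (method names illustrative):
#
#     @available.deprecated("list_relations_without_caching")
#     def list_relations(self, database, schema):
#         ...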
def parse_none(self, func: Callable) -> Callable:
@@ -99,7 +95,9 @@ class AdapterMeta(abc.ABCMeta):
# I'm not sure there is any benefit to it after poking around a bit,
# but having it doesn't hurt on the python side (and omitting it could
# hurt for obscure metaclass reasons, for all I know)
cls = abc.ABCMeta.__new__(mcls, name, bases, namespace, **kwargs) # type: ignore
cls = abc.ABCMeta.__new__( # type: ignore
mcls, name, bases, namespace, **kwargs
)
# this is very much inspired by ABCMeta's own implementation
@@ -111,14 +109,14 @@ class AdapterMeta(abc.ABCMeta):
# collect base class data first
for base in bases:
available.update(getattr(base, "_available_", set()))
replacements.update(getattr(base, "_parse_replacements_", set()))
available.update(getattr(base, '_available_', set()))
replacements.update(getattr(base, '_parse_replacements_', set()))
# override with local data if it exists
for name, value in namespace.items():
if getattr(value, "_is_available_", False):
if getattr(value, '_is_available_', False):
available.add(name)
parse_replacement = getattr(value, "_parse_replacement_", None)
parse_replacement = getattr(value, '_parse_replacement_', None)
if parse_replacement is not None:
replacements[name] = parse_replacement

View File

@@ -1,17 +1,18 @@
from typing import List, Optional, Type
from dbt.adapters.base import Credentials
from dbt.exceptions import CompilationError
from dbt.exceptions import CompilationException
from dbt.adapters.protocol import AdapterProtocol
def project_name_from_path(include_path: str) -> str:
# avoid an import cycle
from dbt.config.project import PartialProject
partial = PartialProject.from_project_root(include_path)
from dbt.config.project import Project
partial = Project.partial_load(include_path)
if partial.project_name is None:
raise CompilationError(f"Invalid project at {include_path}: name not set!")
raise CompilationException(
f'Invalid project at {include_path}: name not set!'
)
return partial.project_name
@@ -22,13 +23,12 @@ class AdapterPlugin:
:param dependencies: A list of adapter names that this adapter depends
upon.
"""
def __init__(
self,
adapter: Type[AdapterProtocol],
credentials: Type[Credentials],
include_path: str,
dependencies: Optional[List[str]] = None,
dependencies: Optional[List[str]] = None
):
self.adapter: Type[AdapterProtocol] = adapter

View File

@@ -5,9 +5,9 @@ from dbt.clients.jinja import QueryStringGenerator
from dbt.context.manifest import generate_query_header_context
from dbt.contracts.connection import AdapterRequiredConfig, QueryComment
from dbt.contracts.graph.nodes import ResultNode
from dbt.contracts.graph.compiled import CompileResultNode
from dbt.contracts.graph.manifest import Manifest
from dbt.exceptions import DbtRuntimeError
from dbt.exceptions import RuntimeException
class NodeWrapper:
@@ -15,7 +15,7 @@ class NodeWrapper:
self._inner_node = node
def __getattr__(self, name):
return getattr(self._inner_node, name, "")
return getattr(self._inner_node, name, '')
class _QueryComment(local):
@@ -24,7 +24,6 @@ class _QueryComment(local):
- the current thread's query comment.
- a source_name indicating what set the current thread's query comment
"""
def __init__(self, initial):
self.query_comment: Optional[str] = initial
self.append = False
@@ -36,19 +35,21 @@ class _QueryComment(local):
if self.append:
# replace last ';' with '<comment>;'
sql = sql.rstrip()
if sql[-1] == ";":
if sql[-1] == ';':
sql = sql[:-1]
return "{}\n/* {} */;".format(sql, self.query_comment.strip())
return '{}\n/* {} */;'.format(sql, self.query_comment.strip())
return "{}\n/* {} */".format(sql, self.query_comment.strip())
return '{}\n/* {} */'.format(sql, self.query_comment.strip())
return "/* {} */\n{}".format(self.query_comment.strip(), sql)
return '/* {} */\n{}'.format(self.query_comment.strip(), sql)
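# For example, with append disabled, a comment of "run by dbt" turns
#     select 1
# into
#     /* run by dbt */
#     select 1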
def set(self, comment: Optional[str], append: bool):
if isinstance(comment, str) and "*/" in comment:
if isinstance(comment, str) and '*/' in comment:
# tell the user "no" so they don't hurt themselves by writing
# garbage
raise DbtRuntimeError(f'query comment contains illegal value "*/": {comment}')
raise RuntimeException(
f'query comment contains illegal value "*/": {comment}'
)
self.query_comment = comment
self.append = append
@@ -62,17 +63,15 @@ class MacroQueryStringSetter:
self.config = config
comment_macro = self._get_comment_macro()
self.generator: QueryStringFunc = lambda name, model: ""
self.generator: QueryStringFunc = lambda name, model: ''
# if the comment value was None or the empty string, just skip it
if comment_macro:
assert isinstance(comment_macro, str)
macro = "\n".join(
(
"{%- macro query_comment_macro(connection_name, node) -%}",
comment_macro,
"{% endmacro %}",
)
)
macro = '\n'.join((
'{%- macro query_comment_macro(connection_name, node) -%}',
comment_macro,
'{% endmacro %}'
))
ctx = self._get_context()
self.generator = QueryStringGenerator(macro, ctx)
self.comment = _QueryComment(None)
@@ -88,9 +87,9 @@ class MacroQueryStringSetter:
return self.comment.add(sql)
def reset(self):
self.set("master", None)
self.set('master', None)
def set(self, name: str, node: Optional[ResultNode]):
def set(self, name: str, node: Optional[CompileResultNode]):
wrapped: Optional[NodeWrapper] = None
if node is not None:
wrapped = NodeWrapper(node)

View File

@@ -1,28 +1,22 @@
from collections.abc import Hashable
from dataclasses import dataclass, field
from typing import Optional, TypeVar, Any, Type, Dict, Iterator, Tuple, Set
from dataclasses import dataclass
from typing import (
Optional, TypeVar, Any, Type, Dict, Union, Iterator, Tuple, Set
)
from dbt.contracts.graph.nodes import SourceDefinition, ManifestNode, ResultNode, ParsedNode
from dbt.contracts.graph.compiled import CompiledNode
from dbt.contracts.graph.parsed import ParsedSourceDefinition, ParsedNode
from dbt.contracts.relation import (
RelationType,
ComponentName,
HasQuoting,
FakeAPIObject,
Policy,
Path,
)
from dbt.exceptions import (
ApproximateMatchError,
DbtInternalError,
MultipleDatabasesNotAllowedError,
RelationType, ComponentName, HasQuoting, FakeAPIObject, Policy, Path
)
from dbt.exceptions import InternalException
from dbt.node_types import NodeType
from dbt.utils import filter_null_values, deep_merge, classproperty
import dbt.exceptions
Self = TypeVar("Self", bound="BaseRelation")
Self = TypeVar('Self', bound='BaseRelation')
@dataclass(frozen=True, eq=False, repr=False)
@@ -30,10 +24,8 @@ class BaseRelation(FakeAPIObject, Hashable):
path: Path
type: Optional[RelationType] = None
quote_character: str = '"'
# Python 3.11 requires that these use default_factory instead of simple default
# ValueError: mutable default <class 'dbt.contracts.relation.Policy'> for field include_policy is not allowed: use default_factory
include_policy: Policy = field(default_factory=lambda: Policy())
quote_policy: Policy = field(default_factory=lambda: Policy())
include_policy: Policy = Policy()
quote_policy: Policy = Policy()
dbt_created: bool = False
def _is_exactish_match(self, field: ComponentName, value: str) -> bool:
@@ -44,11 +36,11 @@ class BaseRelation(FakeAPIObject, Hashable):
@classmethod
def _get_field_named(cls, field_name):
for f, _ in cls._get_fields():
if f.name == field_name:
return f
for field, _ in cls._get_fields():
if field.name == field_name:
return field
# this should be unreachable
raise ValueError(f"BaseRelation has no {field_name} field!")
raise ValueError(f'BaseRelation has no {field_name} field!')
def __eq__(self, other):
if not isinstance(other, self.__class__):
@@ -57,18 +49,20 @@ class BaseRelation(FakeAPIObject, Hashable):
@classmethod
def get_default_quote_policy(cls) -> Policy:
return cls._get_field_named("quote_policy").default_factory()
return cls._get_field_named('quote_policy').default
@classmethod
def get_default_include_policy(cls) -> Policy:
return cls._get_field_named("include_policy").default_factory()
return cls._get_field_named('include_policy').default
def get(self, key, default=None):
"""Override `.get` to return a metadata object so we don't break
dbt_utils.
"""
if key == "metadata":
return {"type": self.__class__.__name__}
if key == 'metadata':
return {
'type': self.__class__.__name__
}
return super().get(key, default)
def matches(
@@ -77,19 +71,16 @@ class BaseRelation(FakeAPIObject, Hashable):
schema: Optional[str] = None,
identifier: Optional[str] = None,
) -> bool:
search = filter_null_values(
{
ComponentName.Database: database,
ComponentName.Schema: schema,
ComponentName.Identifier: identifier,
}
)
search = filter_null_values({
ComponentName.Database: database,
ComponentName.Schema: schema,
ComponentName.Identifier: identifier
})
if not search:
# nothing was passed in
raise dbt.exceptions.DbtRuntimeError(
"Tried to match relation, but no search path was passed!"
)
raise dbt.exceptions.RuntimeException(
"Tried to match relation, but no search path was passed!")
exact_match = True
approximate_match = True
@@ -97,14 +88,18 @@ class BaseRelation(FakeAPIObject, Hashable):
for k, v in search.items():
if not self._is_exactish_match(k, v):
exact_match = False
if str(self.path.get_lowered_part(k)).strip(self.quote_character) != v.lower().strip(
self.quote_character
if (
self.path.get_lowered_part(k).strip(self.quote_character) !=
v.lower().strip(self.quote_character)
):
approximate_match = False # type: ignore[union-attr]
approximate_match = False
if approximate_match and not exact_match:
target = self.create(database=database, schema=schema, identifier=identifier)
raise ApproximateMatchError(target, self)
target = self.create(
database=database, schema=schema, identifier=identifier
)
dbt.exceptions.approximate_relation_match(target, self)
return exact_match
@@ -117,13 +112,11 @@ class BaseRelation(FakeAPIObject, Hashable):
schema: Optional[bool] = None,
identifier: Optional[bool] = None,
) -> Self:
policy = filter_null_values(
{
ComponentName.Database: database,
ComponentName.Schema: schema,
ComponentName.Identifier: identifier,
}
)
policy = filter_null_values({
ComponentName.Database: database,
ComponentName.Schema: schema,
ComponentName.Identifier: identifier
})
new_quote_policy = self.quote_policy.replace_dict(policy)
return self.replace(quote_policy=new_quote_policy)
@@ -134,18 +127,16 @@ class BaseRelation(FakeAPIObject, Hashable):
schema: Optional[bool] = None,
identifier: Optional[bool] = None,
) -> Self:
policy = filter_null_values(
{
ComponentName.Database: database,
ComponentName.Schema: schema,
ComponentName.Identifier: identifier,
}
)
policy = filter_null_values({
ComponentName.Database: database,
ComponentName.Schema: schema,
ComponentName.Identifier: identifier
})
new_include_policy = self.include_policy.replace_dict(policy)
return self.replace(include_policy=new_include_policy)
def information_schema(self, view_name=None) -> "InformationSchema":
def information_schema(self, view_name=None) -> 'InformationSchema':
# some of our data comes from jinja, where things can be `Undefined`.
if not isinstance(view_name, str):
view_name = None
@@ -155,10 +146,10 @@ class BaseRelation(FakeAPIObject, Hashable):
info_schema = InformationSchema.from_relation(self, view_name)
return info_schema.incorporate(path={"schema": None})
def information_schema_only(self) -> "InformationSchema":
def information_schema_only(self) -> 'InformationSchema':
return self.information_schema()
def without_identifier(self) -> "BaseRelation":
def without_identifier(self) -> 'BaseRelation':
"""Return a form of this relation that only has the database and schema
set to included. To get the appropriately-quoted form the schema out of
the result (for use as part of a query), use `.render()`. To get the
@@ -168,7 +159,9 @@ class BaseRelation(FakeAPIObject, Hashable):
"""
return self.include(identifier=False).replace_path(identifier=None)
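# For example (values illustrative), a relation created as
#     create(database="analytics", schema="staging", identifier="orders")
# renders its included parts joined with ".", quoted per the quote policy,
# while .without_identifier().render() yields only the database.schema part.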
def _render_iterator(self) -> Iterator[Tuple[Optional[ComponentName], Optional[str]]]:
def _render_iterator(
self
) -> Iterator[Tuple[Optional[ComponentName], Optional[str]]]:
for key in ComponentName:
path_part: Optional[str] = None
@@ -180,22 +173,27 @@ class BaseRelation(FakeAPIObject, Hashable):
def render(self) -> str:
# if there is nothing set, this will return the empty string.
return ".".join(part for _, part in self._render_iterator() if part is not None)
return '.'.join(
part for _, part in self._render_iterator()
if part is not None
)
def quoted(self, identifier):
return "{quote_char}{identifier}{quote_char}".format(
return '{quote_char}{identifier}{quote_char}'.format(
quote_char=self.quote_character,
identifier=identifier,
)
@classmethod
def create_from_source(cls: Type[Self], source: SourceDefinition, **kwargs: Any) -> Self:
def create_from_source(
cls: Type[Self], source: ParsedSourceDefinition, **kwargs: Any
) -> Self:
source_quoting = source.quoting.to_dict(omit_none=True)
source_quoting.pop("column", None)
source_quoting.pop('column', None)
quote_policy = deep_merge(
cls.get_default_quote_policy().to_dict(omit_none=True),
source_quoting,
kwargs.get("quote_policy", {}),
kwargs.get('quote_policy', {}),
)
return cls.create(
@@ -203,18 +201,18 @@ class BaseRelation(FakeAPIObject, Hashable):
schema=source.schema,
identifier=source.identifier,
quote_policy=quote_policy,
**kwargs,
**kwargs
)
@staticmethod
def add_ephemeral_prefix(name: str):
return f"__dbt__cte__{name}"
return f'__dbt__cte__{name}'
@classmethod
def create_ephemeral_from_node(
cls: Type[Self],
config: HasQuoting,
node: ManifestNode,
node: Union[ParsedNode, CompiledNode],
) -> Self:
# Note that ephemeral models are based on the name.
identifier = cls.add_ephemeral_prefix(node.name)
@@ -227,7 +225,7 @@ class BaseRelation(FakeAPIObject, Hashable):
def create_from_node(
cls: Type[Self],
config: HasQuoting,
node,
node: Union[ParsedNode, CompiledNode],
quote_policy: Optional[Dict[str, bool]] = None,
**kwargs: Any,
) -> Self:
@@ -241,27 +239,27 @@ class BaseRelation(FakeAPIObject, Hashable):
schema=node.schema,
identifier=node.alias,
quote_policy=quote_policy,
**kwargs,
)
**kwargs)
@classmethod
def create_from(
cls: Type[Self],
config: HasQuoting,
node: ResultNode,
node: Union[CompiledNode, ParsedNode, ParsedSourceDefinition],
**kwargs: Any,
) -> Self:
if node.resource_type == NodeType.Source:
if not isinstance(node, SourceDefinition):
raise DbtInternalError(
"type mismatch, expected SourceDefinition but got {}".format(type(node))
if not isinstance(node, ParsedSourceDefinition):
raise InternalException(
'type mismatch, expected ParsedSourceDefinition but got {}'
.format(type(node))
)
return cls.create_from_source(node, **kwargs)
else:
# Can't use ManifestNode here because of parameterized generics
if not isinstance(node, (ParsedNode)):
raise DbtInternalError(
f"type mismatch, expected ManifestNode but got {type(node)}"
if not isinstance(node, (ParsedNode, CompiledNode)):
raise InternalException(
'type mismatch, expected ParsedNode or CompiledNode but '
'got {}'.format(type(node))
)
return cls.create_from_node(config, node, **kwargs)
@@ -274,16 +272,14 @@ class BaseRelation(FakeAPIObject, Hashable):
type: Optional[RelationType] = None,
**kwargs,
) -> Self:
kwargs.update(
{
"path": {
"database": database,
"schema": schema,
"identifier": identifier,
},
"type": type,
}
)
kwargs.update({
'path': {
'database': database,
'schema': schema,
'identifier': identifier,
},
'type': type,
})
return cls.from_dict(kwargs)
def __repr__(self) -> str:
@@ -328,10 +324,6 @@ class BaseRelation(FakeAPIObject, Hashable):
def is_view(self) -> bool:
return self.type == RelationType.View
@property
def is_materialized_view(self) -> bool:
return self.type == RelationType.MaterializedView
@classproperty
def Table(cls) -> str:
return str(RelationType.Table)
@@ -348,16 +340,12 @@ class BaseRelation(FakeAPIObject, Hashable):
def External(cls) -> str:
return str(RelationType.External)
@classproperty
def MaterializedView(cls) -> str:
return str(RelationType.MaterializedView)
@classproperty
def get_relation_type(cls) -> Type[RelationType]:
return RelationType
Info = TypeVar("Info", bound="InformationSchema")
Info = TypeVar('Info', bound='InformationSchema')
@dataclass(frozen=True, eq=False, repr=False)
@@ -366,16 +354,18 @@ class InformationSchema(BaseRelation):
def __post_init__(self):
if not isinstance(self.information_schema_view, (type(None), str)):
raise dbt.exceptions.CompilationError(
"Got an invalid name: {}".format(self.information_schema_view)
raise dbt.exceptions.CompilationException(
'Got an invalid name: {}'.format(self.information_schema_view)
)
@classmethod
def get_path(cls, relation: BaseRelation, information_schema_view: Optional[str]) -> Path:
def get_path(
cls, relation: BaseRelation, information_schema_view: Optional[str]
) -> Path:
return Path(
database=relation.database,
schema=relation.schema,
identifier="INFORMATION_SCHEMA",
identifier='INFORMATION_SCHEMA',
)
@classmethod
@@ -406,7 +396,9 @@ class InformationSchema(BaseRelation):
relation: BaseRelation,
information_schema_view: Optional[str],
) -> Info:
include_policy = cls.get_include_policy(relation, information_schema_view)
include_policy = cls.get_include_policy(
relation, information_schema_view
)
quote_policy = cls.get_quote_policy(relation, information_schema_view)
path = cls.get_path(relation, information_schema_view)
return cls(
@@ -428,7 +420,6 @@ class SchemaSearchMap(Dict[InformationSchema, Set[Optional[str]]]):
search for what schemas. The schema values are all lowercased to avoid
duplication.
"""
def add(self, relation: BaseRelation):
key = relation.information_schema_only()
if key not in self:
@@ -438,7 +429,9 @@ class SchemaSearchMap(Dict[InformationSchema, Set[Optional[str]]]):
schema = relation.schema.lower()
self[key].add(schema)
def search(self) -> Iterator[Tuple[InformationSchema, Optional[str]]]:
def search(
self
) -> Iterator[Tuple[InformationSchema, Optional[str]]]:
for information_schema_name, schemas in self.items():
for schema in schemas:
yield information_schema_name, schema
@@ -450,16 +443,17 @@ class SchemaSearchMap(Dict[InformationSchema, Set[Optional[str]]]):
if not allow_multiple_databases:
seen = {r.database.lower() for r in self if r.database}
if len(seen) > 1:
raise MultipleDatabasesNotAllowedError(seen)
dbt.exceptions.raise_compiler_error(str(seen))
for information_schema_name, schema in self.search():
path = {"database": information_schema_name.database, "schema": schema}
new.add(
information_schema_name.incorporate(
path=path,
quote_policy={"database": False},
include_policy={"database": False},
)
)
path = {
'database': information_schema_name.database,
'schema': schema
}
new.add(information_schema_name.incorporate(
path=path,
quote_policy={'database': False},
include_policy={'database': False},
))
return new

View File

@@ -2,22 +2,26 @@ import threading
from copy import deepcopy
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple
from dbt.adapters.reference_keys import (
_make_ref_key,
_make_ref_key_dict,
_ReferenceKey,
from dbt.adapters.reference_keys import _make_key, _ReferenceKey
import dbt.exceptions
from dbt.events.functions import fire_event
from dbt.events.types import (
AddLink,
AddRelation,
DropCascade,
DropMissingRelation,
DropRelation,
DumpAfterAddGraph,
DumpAfterRenameSchema,
DumpBeforeAddGraph,
DumpBeforeRenameSchema,
RenameSchema,
TemporaryRelation,
UncachedRelation,
UpdateReference
)
from dbt.exceptions import (
DependentLinkNotCachedError,
NewNameAlreadyInCacheError,
NoneRelationFoundError,
ReferencedLinkNotCachedError,
TruncatedModelNameCausedCollisionError,
)
from dbt.events.functions import fire_event, fire_event_if
from dbt.events.types import CacheAction, CacheDumpGraph
from dbt.flags import get_flags
from dbt.utils import lowercase
from dbt.helper_types import Lazy
def dot_separated(key: _ReferenceKey) -> str:
@@ -25,7 +29,7 @@ def dot_separated(key: _ReferenceKey) -> str:
:param _ReferenceKey key: The key to stringify.
"""
return ".".join(map(str, key))
return '.'.join(map(str, key))
class _CachedRelation:
@@ -37,15 +41,14 @@ class _CachedRelation:
that refer to this relation.
:attr BaseRelation inner: The underlying dbt relation.
"""
def __init__(self, inner):
self.referenced_by = {}
self.inner = inner
def __str__(self) -> str:
return ("_CachedRelation(database={}, schema={}, identifier={}, inner={})").format(
self.database, self.schema, self.identifier, self.inner
)
return (
'_CachedRelation(database={}, schema={}, identifier={}, inner={})'
).format(self.database, self.schema, self.identifier, self.inner)
@property
def database(self) -> Optional[str]:
@@ -77,9 +80,9 @@ class _CachedRelation:
:return _ReferenceKey: A key for this relation.
"""
return _make_ref_key(self)
return _make_key(self)
def add_reference(self, referrer: "_CachedRelation"):
def add_reference(self, referrer: '_CachedRelation'):
"""Add a reference from referrer to self, indicating that if this node
were drop...cascaded, the referrer would be dropped as well.
@@ -123,9 +126,9 @@ class _CachedRelation:
# table_name is ever anything but the identifier (via .create())
self.inner = self.inner.incorporate(
path={
"database": new_relation.inner.database,
"schema": new_relation.inner.schema,
"identifier": new_relation.inner.identifier,
'database': new_relation.inner.database,
'schema': new_relation.inner.schema,
'identifier': new_relation.inner.identifier
},
)
@@ -140,7 +143,10 @@ class _CachedRelation:
:raises InternalError: If the new key already exists.
"""
if new_key in self.referenced_by:
raise NewNameAlreadyInCacheError(old_key, new_key)
dbt.exceptions.raise_cache_inconsistent(
'in rename of "{}" -> "{}", new name is in the cache already'
.format(old_key, new_key)
)
if old_key not in self.referenced_by:
return
@@ -164,16 +170,13 @@ class RelationsCache:
The adapters also hold this lock while filling the cache.
:attr Set[str] schemas: The set of known/cached schemas, all lowercased.
"""
def __init__(self) -> None:
self.relations: Dict[_ReferenceKey, _CachedRelation] = {}
self.lock = threading.RLock()
self.schemas: Set[Tuple[Optional[str], Optional[str]]] = set()
def add_schema(
self,
database: Optional[str],
schema: Optional[str],
self, database: Optional[str], schema: Optional[str],
) -> None:
"""Add a schema to the set of known schemas (case-insensitive)
@@ -183,9 +186,7 @@ class RelationsCache:
self.schemas.add((lowercase(database), lowercase(schema)))
def drop_schema(
self,
database: Optional[str],
schema: Optional[str],
self, database: Optional[str], schema: Optional[str],
) -> None:
"""Drop the given schema and remove it from the set of known schemas.
@@ -229,7 +230,10 @@ class RelationsCache:
# self.relations or any cache entry's referenced_by during iteration
# it's a runtime error!
with self.lock:
return {dot_separated(k): str(v.dump_graph_entry()) for k, v in self.relations.items()}
return {
dot_separated(k): v.dump_graph_entry()
for k, v in self.relations.items()
}
def _setdefault(self, relation: _CachedRelation):
"""Add a relation to the cache, or return it if it already exists.
@@ -256,17 +260,23 @@ class RelationsCache:
if referenced is None:
return
if referenced is None:
raise ReferencedLinkNotCachedError(referenced_key)
dbt.exceptions.raise_cache_inconsistent(
'in add_link, referenced link key {} not in cache!'
.format(referenced_key)
)
dependent = self.relations.get(dependent_key)
if dependent is None:
raise DependentLinkNotCachedError(dependent_key)
dbt.exceptions.raise_cache_inconsistent(
'in add_link, dependent link key {} not in cache!'
.format(dependent_key)
)
assert dependent is not None # we just raised!
referenced.add_reference(dependent)
# This is called in plugins/postgres/dbt/adapters/postgres/impl.py
# TODO: Is this dead code? I can't seem to find it grepping the codebase.
def add_link(self, referenced, dependent):
"""Add a link between two relations to the database. If either relation
does not exist, it will be added as an "external" relation.
@@ -281,34 +291,27 @@ class RelationsCache:
:param BaseRelation dependent: The dependent model.
:raises InternalError: If either entry does not exist.
"""
ref_key = _make_ref_key(referenced)
dep_key = _make_ref_key(dependent)
ref_key = _make_key(referenced)
dep_key = _make_key(dependent)
if (ref_key.database, ref_key.schema) not in self:
# if we have not cached the referenced schema at all, we must be
# referring to a table outside our control. There's no need to make
# a link - we will never drop the referenced relation during a run.
fire_event(
CacheAction(
ref_key=ref_key._asdict(),
ref_key_2=dep_key._asdict(),
)
)
fire_event(UncachedRelation(dep_key=dep_key, ref_key=ref_key))
return
if ref_key not in self.relations:
# Insert a dummy "external" relation.
referenced = referenced.replace(type=referenced.External)
referenced = referenced.replace(
type=referenced.External
)
self.add(referenced)
if dep_key not in self.relations:
# Insert a dummy "external" relation.
dependent = dependent.replace(type=referenced.External)
self.add(dependent)
fire_event(
CacheAction(
action="add_link",
ref_key=dep_key._asdict(),
ref_key_2=ref_key._asdict(),
dependent = dependent.replace(
type=referenced.External
)
)
self.add(dependent)
fire_event(AddLink(dep_key=dep_key, ref_key=ref_key))
with self.lock:
self._add_link(ref_key, dep_key)
@@ -318,20 +321,13 @@ class RelationsCache:
:param BaseRelation relation: The underlying relation.
"""
flags = get_flags()
cached = _CachedRelation(relation)
fire_event_if(
flags.LOG_CACHE_EVENTS,
lambda: CacheDumpGraph(before_after="before", action="adding", dump=self.dump_graph()),
)
fire_event(CacheAction(action="add_relation", ref_key=_make_ref_key_dict(cached)))
fire_event(AddRelation(relation=_make_key(cached)))
fire_event(DumpBeforeAddGraph(dump=Lazy.defer(lambda: self.dump_graph())))
with self.lock:
self._setdefault(cached)
fire_event_if(
flags.LOG_CACHE_EVENTS,
lambda: CacheDumpGraph(before_after="after", action="adding", dump=self.dump_graph()),
)
fire_event(DumpAfterAddGraph(dump=Lazy.defer(lambda: self.dump_graph())))
def _remove_refs(self, keys):
"""Removes all references to all entries in keys. This does not
@@ -346,6 +342,19 @@ class RelationsCache:
for cached in self.relations.values():
cached.release_references(keys)
def _drop_cascade_relation(self, dropped_key):
"""Drop the given relation and cascade it appropriately to all
dependent relations.
:param _CachedRelation dropped: An existing _CachedRelation to drop.
"""
if dropped_key not in self.relations:
fire_event(DropMissingRelation(relation=dropped_key))
return
consequences = self.relations[dropped_key].collect_consequences()
fire_event(DropCascade(dropped=dropped_key, consequences=consequences))
self._remove_refs(consequences)
def drop(self, relation):
"""Drop the named relation and cascade it appropriately to all
dependent relations.
@@ -357,22 +366,10 @@ class RelationsCache:
:param str schema: The schema of the relation to drop.
:param str identifier: The identifier of the relation to drop.
"""
dropped_key = _make_ref_key(relation)
dropped_key_msg = _make_ref_key_dict(relation)
fire_event(CacheAction(action="drop_relation", ref_key=dropped_key_msg))
dropped_key = _make_key(relation)
fire_event(DropRelation(dropped=dropped_key))
with self.lock:
if dropped_key not in self.relations:
fire_event(CacheAction(action="drop_missing_relation", ref_key=dropped_key_msg))
return
consequences = self.relations[dropped_key].collect_consequences()
# convert from a list of _ReferenceKeys to a list of ReferenceKeyMsgs
consequence_msgs = [key._asdict() for key in consequences]
fire_event(
CacheAction(
action="drop_cascade", ref_key=dropped_key_msg, ref_list=consequence_msgs
)
)
self._remove_refs(consequences)
self._drop_cascade_relation(dropped_key)
def _rename_relation(self, old_key, new_relation):
"""Rename a relation named old_key to new_key, updating references.
@@ -388,20 +385,14 @@ class RelationsCache:
relation = self.relations.pop(old_key)
new_key = new_relation.key()
# relation has to rename its innards, so it needs the _CachedRelation.
# relation has to rename its innards, so it needs the _CachedRelation.
relation.rename(new_relation)
# update all the relations that refer to it
for cached in self.relations.values():
if cached.is_referenced_by(old_key):
fire_event(
CacheAction(
action="update_reference",
ref_key=_make_ref_key_dict(old_key),
ref_key_2=_make_ref_key_dict(new_key),
ref_key_3=_make_ref_key_dict(cached.key()),
)
UpdateReference(old_key=old_key, new_key=new_key, cached_key=cached.key())
)
cached.rename_key(old_key, new_key)
self.relations[new_key] = relation
@@ -424,12 +415,13 @@ class RelationsCache:
:raises InternalError: If the new key is already present.
"""
if new_key in self.relations:
# Tell user when collision caused by model names truncated during
# materialization.
raise TruncatedModelNameCausedCollisionError(new_key, self.relations)
dbt.exceptions.raise_cache_inconsistent(
'in rename, new key {} already in cache: {}'
.format(new_key, list(self.relations.keys()))
)
if old_key not in self.relations:
fire_event(CacheAction(action="temporary_relation", ref_key=old_key._asdict()))
fire_event(TemporaryRelation(key=old_key))
return False
return True
@@ -445,20 +437,11 @@ class RelationsCache:
:param BaseRelation new: The new relation name information.
:raises InternalError: If the new key is already present.
"""
old_key = _make_ref_key(old)
new_key = _make_ref_key(new)
fire_event(
CacheAction(
action="rename_relation",
ref_key=old_key._asdict(),
ref_key_2=new_key._asdict(),
)
)
flags = get_flags()
fire_event_if(
flags.LOG_CACHE_EVENTS,
lambda: CacheDumpGraph(before_after="before", action="rename", dump=self.dump_graph()),
)
old_key = _make_key(old)
new_key = _make_key(new)
fire_event(RenameSchema(old_key=old_key, new_key=new_key))
fire_event(DumpBeforeRenameSchema(dump=Lazy.defer(lambda: self.dump_graph())))
with self.lock:
if self._check_rename_constraints(old_key, new_key):
@@ -466,12 +449,11 @@ class RelationsCache:
else:
self._setdefault(_CachedRelation(new))
fire_event_if(
flags.LOG_CACHE_EVENTS,
lambda: CacheDumpGraph(before_after="after", action="rename", dump=self.dump_graph()),
)
fire_event(DumpAfterRenameSchema(dump=Lazy.defer(lambda: self.dump_graph())))
def get_relations(self, database: Optional[str], schema: Optional[str]) -> List[Any]:
def get_relations(
self, database: Optional[str], schema: Optional[str]
) -> List[Any]:
"""Case-insensitively yield all relations matching the given schema.
:param str schema: The case-insensitive schema name to list from.
@@ -482,13 +464,15 @@ class RelationsCache:
schema = lowercase(schema)
with self.lock:
results = [
r.inner
for r in self.relations.values()
if (lowercase(r.schema) == schema and lowercase(r.database) == database)
r.inner for r in self.relations.values()
if (lowercase(r.schema) == schema and
lowercase(r.database) == database)
]
if None in results:
raise NoneRelationFoundError()
dbt.exceptions.raise_cache_inconsistent(
'in get_relations, a None relation was found in the cache!'
)
return results
def clear(self):
@@ -515,6 +499,6 @@ class RelationsCache:
"""
for relation in to_remove:
# it may have been cascaded out already
drop_key = _make_ref_key(relation)
drop_key = _make_key(relation)
if drop_key in self.relations:
self.drop(drop_key)

View File

@@ -1,19 +1,23 @@
import threading
import traceback
from contextlib import contextmanager
from importlib import import_module
from pathlib import Path
from typing import Any, Dict, List, Optional, Set, Type
from importlib import import_module
from typing import Type, Dict, Any, List, Optional, Set
from dbt.adapters.base.plugin import AdapterPlugin
from dbt.adapters.protocol import AdapterConfig, AdapterProtocol, RelationProtocol
from dbt.contracts.connection import AdapterRequiredConfig, Credentials
from dbt.exceptions import RuntimeException, InternalException
from dbt.include.global_project import (
PACKAGE_PATH as GLOBAL_PROJECT_PATH,
PROJECT_NAME as GLOBAL_PROJECT_NAME,
)
from dbt.events.functions import fire_event
from dbt.events.types import AdapterImportError, PluginLoadError, AdapterRegistered
from dbt.exceptions import DbtInternalError, DbtRuntimeError
from dbt.include.global_project import PACKAGE_PATH as GLOBAL_PROJECT_PATH
from dbt.include.global_project import PROJECT_NAME as GLOBAL_PROJECT_NAME
from dbt.semver import VersionSpecifier
from dbt.events.types import AdapterImportError, PluginLoadError
from dbt.contracts.connection import Credentials, AdapterRequiredConfig
from dbt.adapters.protocol import (
AdapterProtocol,
AdapterConfig,
RelationProtocol,
)
from dbt.adapters.base.plugin import AdapterPlugin
Adapter = AdapterProtocol
@@ -35,7 +39,7 @@ class AdapterContainer:
names = ", ".join(self.plugins.keys())
message = f"Invalid adapter type {name}! Must be one of {names}"
raise DbtRuntimeError(message)
raise RuntimeException(message)
def get_adapter_class_by_name(self, name: str) -> Type[Adapter]:
plugin = self.get_plugin_by_name(name)
@@ -45,7 +49,9 @@ class AdapterContainer:
adapter = self.get_adapter_class_by_name(name)
return adapter.Relation
def get_config_class_by_name(self, name: str) -> Type[AdapterConfig]:
def get_config_class_by_name(
self, name: str
) -> Type[AdapterConfig]:
adapter = self.get_adapter_class_by_name(name)
return adapter.AdapterSpecificConfigs
@@ -55,25 +61,25 @@ class AdapterContainer:
# singletons
try:
# mypy doesn't think modules have any attributes.
mod: Any = import_module("." + name, "dbt.adapters")
mod: Any = import_module('.' + name, 'dbt.adapters')
except ModuleNotFoundError as exc:
# if we failed to import the target module in particular, inform
# the user about it via a runtime error
if exc.name == "dbt.adapters." + name:
fire_event(AdapterImportError(exc=str(exc)))
raise DbtRuntimeError(f"Could not find adapter type {name}!")
if exc.name == 'dbt.adapters.' + name:
fire_event(AdapterImportError(exc=exc))
raise RuntimeException(f'Could not find adapter type {name}!')
# otherwise, the error had to have come from some underlying
# library. Log the stack trace.
fire_event(PluginLoadError(exc_info=traceback.format_exc()))
fire_event(PluginLoadError())
raise
plugin: AdapterPlugin = mod.Plugin
plugin_type = plugin.adapter.type()
if plugin_type != name:
raise DbtRuntimeError(
f"Expected to find adapter with type named {name}, got "
f"adapter with type {plugin_type}"
raise RuntimeException(
f'Expected to find adapter with type named {name}, got '
f'adapter with type {plugin_type}'
)
with self.lock:
@@ -90,13 +96,7 @@ class AdapterContainer:
def register_adapter(self, config: AdapterRequiredConfig) -> None:
adapter_name = config.credentials.type
adapter_type = self.get_adapter_class_by_name(adapter_name)
adapter_version = import_module(f".{adapter_name}.__version__", "dbt.adapters").version
adapter_version_specifier = VersionSpecifier.from_version_string(
adapter_version
).to_version_string()
fire_event(
AdapterRegistered(adapter_name=adapter_name, adapter_version=adapter_version_specifier)
)
with self.lock:
if adapter_name in self.adapters:
# this shouldn't really happen...
@@ -109,7 +109,8 @@ class AdapterContainer:
return self.adapters[adapter_name]
def reset_adapters(self):
"""Clear the adapters. This is useful for tests, which change configs."""
"""Clear the adapters. This is useful for tests, which change configs.
"""
with self.lock:
for adapter in self.adapters.values():
adapter.cleanup_connections()
@@ -139,16 +140,22 @@ class AdapterContainer:
try:
plugin = self.plugins[plugin_name]
except KeyError:
raise DbtInternalError(f"No plugin found for {plugin_name}") from None
raise InternalException(
f'No plugin found for {plugin_name}'
) from None
plugins.append(plugin)
seen.add(plugin_name)
if plugin.dependencies is None:
continue
for dep in plugin.dependencies:
if dep not in seen:
plugin_names.append(dep)
return plugins
def get_adapter_package_names(self, name: Optional[str]) -> List[str]:
package_names: List[str] = [p.project_name for p in self.get_adapter_plugins(name)]
package_names: List[str] = [
p.project_name for p in self.get_adapter_plugins(name)
]
package_names.append(GLOBAL_PROJECT_NAME)
return package_names
@@ -158,16 +165,15 @@ class AdapterContainer:
try:
path = self.packages[package_name]
except KeyError:
raise DbtInternalError(f"No internal package listing found for {package_name}")
raise InternalException(
f'No internal package listing found for {package_name}'
)
paths.append(path)
return paths
def get_adapter_type_names(self, name: Optional[str]) -> List[str]:
return [p.adapter.type() for p in self.get_adapter_plugins(name)]
def get_adapter_constraint_support(self, name: Optional[str]) -> List[str]:
return self.lookup_adapter(name).CONSTRAINT_SUPPORT # type: ignore
FACTORY: AdapterContainer = AdapterContainer()
@@ -180,12 +186,9 @@ def get_adapter(config: AdapterRequiredConfig):
return FACTORY.lookup_adapter(config.credentials.type)
def get_adapter_by_type(adapter_type):
return FACTORY.lookup_adapter(adapter_type)
def reset_adapters():
"""Clear the adapters. This is useful for tests, which change configs."""
"""Clear the adapters. This is useful for tests, which change configs.
"""
FACTORY.reset_adapters()
@@ -222,16 +225,3 @@ def get_adapter_package_names(name: Optional[str]) -> List[str]:
def get_adapter_type_names(name: Optional[str]) -> List[str]:
return FACTORY.get_adapter_type_names(name)
def get_adapter_constraint_support(name: Optional[str]) -> List[str]:
return FACTORY.get_adapter_constraint_support(name)
@contextmanager
def adapter_management():
reset_adapters()
try:
yield
finally:
cleanup_connections()

View File

@@ -1,22 +1,19 @@
from dataclasses import dataclass
from typing import (
Type,
Hashable,
Optional,
ContextManager,
List,
Generic,
TypeVar,
Tuple,
Dict,
Any,
Type, Hashable, Optional, ContextManager, List, Generic, TypeVar, ClassVar,
Tuple, Union, Dict, Any
)
from typing_extensions import Protocol
import agate
from dbt.contracts.connection import Connection, AdapterRequiredConfig, AdapterResponse
from dbt.contracts.graph.nodes import ResultNode, ManifestNode
from dbt.contracts.connection import (
Connection, AdapterRequiredConfig, AdapterResponse
)
from dbt.contracts.graph.compiled import (
CompiledNode, ManifestNode, NonSourceCompiledNode
)
from dbt.contracts.graph.parsed import ParsedNode, ParsedSourceDefinition
from dbt.contracts.graph.model_config import BaseConfig
from dbt.contracts.graph.manifest import Manifest
from dbt.contracts.relation import Policy, HasQuoting
@@ -37,7 +34,7 @@ class ColumnProtocol(Protocol):
pass
Self = TypeVar("Self", bound="RelationProtocol")
Self = TypeVar('Self', bound='RelationProtocol')
class RelationProtocol(Protocol):
@@ -46,7 +43,11 @@ class RelationProtocol(Protocol):
...
@classmethod
def create_from(cls: Type[Self], config: HasQuoting, node: ResultNode) -> Self:
def create_from(
cls: Type[Self],
config: HasQuoting,
node: Union[CompiledNode, ParsedNode, ParsedSourceDefinition],
) -> Self:
...
@@ -59,19 +60,26 @@ class CompilerProtocol(Protocol):
node: ManifestNode,
manifest: Manifest,
extra_context: Optional[Dict[str, Any]] = None,
) -> ManifestNode:
) -> NonSourceCompiledNode:
...
AdapterConfig_T = TypeVar("AdapterConfig_T", bound=AdapterConfig)
ConnectionManager_T = TypeVar("ConnectionManager_T", bound=ConnectionManagerProtocol)
Relation_T = TypeVar("Relation_T", bound=RelationProtocol)
Column_T = TypeVar("Column_T", bound=ColumnProtocol)
Compiler_T = TypeVar("Compiler_T", bound=CompilerProtocol)
AdapterConfig_T = TypeVar(
'AdapterConfig_T', bound=AdapterConfig
)
ConnectionManager_T = TypeVar(
'ConnectionManager_T', bound=ConnectionManagerProtocol
)
Relation_T = TypeVar(
'Relation_T', bound=RelationProtocol
)
Column_T = TypeVar(
'Column_T', bound=ColumnProtocol
)
Compiler_T = TypeVar('Compiler_T', bound=CompilerProtocol)
# TODO CT-211
class AdapterProtocol( # type: ignore[misc]
class AdapterProtocol(
Protocol,
Generic[
AdapterConfig_T,
@@ -79,15 +87,12 @@ class AdapterProtocol( # type: ignore[misc]
Relation_T,
Column_T,
Compiler_T,
],
]
):
# N.B. Technically these are ClassVars, but mypy doesn't support putting type vars in a
# ClassVar due to the restrictiveness of PEP-526
# See: https://github.com/python/mypy/issues/5144
AdapterSpecificConfigs: Type[AdapterConfig_T]
Column: Type[Column_T]
Relation: Type[Relation_T]
ConnectionManager: Type[ConnectionManager_T]
AdapterSpecificConfigs: ClassVar[Type[AdapterConfig_T]]
Column: ClassVar[Type[Column_T]]
Relation: ClassVar[Type[Relation_T]]
ConnectionManager: ClassVar[Type[ConnectionManager_T]]
connections: ConnectionManager_T
def __init__(self, config: AdapterRequiredConfig):
@@ -151,7 +156,7 @@ class AdapterProtocol( # type: ignore[misc]
def execute(
self, sql: str, auto_begin: bool = False, fetch: bool = False
) -> Tuple[AdapterResponse, agate.Table]:
) -> Tuple[Union[str, AdapterResponse], agate.Table]:
...
def get_compiler(self) -> Compiler_T:

View File

@@ -1,10 +1,10 @@
# this module exists to resolve circular imports with the events module
from collections import namedtuple
from typing import Any, Optional
from typing import Optional
_ReferenceKey = namedtuple("_ReferenceKey", "database schema identifier")
_ReferenceKey = namedtuple('_ReferenceKey', 'database schema identifier')
def lowercase(value: Optional[str]) -> Optional[str]:
@@ -14,24 +14,11 @@ def lowercase(value: Optional[str]) -> Optional[str]:
return value.lower()
# For backwards compatibility. New code should use _make_ref_key
def _make_key(relation: Any) -> _ReferenceKey:
return _make_ref_key(relation)
def _make_ref_key(relation: Any) -> _ReferenceKey:
def _make_key(relation) -> _ReferenceKey:
"""Make _ReferenceKeys with lowercase values for the cache so we don't have
to keep track of quoting
"""
# databases and schemas can both be None
return _ReferenceKey(
lowercase(relation.database), lowercase(relation.schema), lowercase(relation.identifier)
)
def _make_ref_key_dict(relation: Any):
return {
"database": relation.database,
"schema": relation.schema,
"identifier": relation.identifier,
}
return _ReferenceKey(lowercase(relation.database),
lowercase(relation.schema),
lowercase(relation.identifier))

View File

@@ -1,25 +0,0 @@
# RelationConfig
This package serves as an initial abstraction for managing the inspection of existing relations and determining
changes on those relations. It arose from the materialized view work and currently supports only
materialized views for Postgres and Redshift, as well as dynamic tables for Snowflake. There are three main
classes in this package.
## RelationConfigBase
This is a very small class that only has a `from_dict()` method and a default `NotImplementedError()`. At some
point this could be replaced by a more robust framework, like `mashumaro` or `pydantic`.
## RelationConfigChange
This class inherits from `RelationConfigBase`; however, it can be thought of as a separate class. The subclassing
merely points to the idea that both classes would likely inherit from the same class in a `mashumaro` or
`pydantic` implementation. This class is much more restrictive in its attributes. It should really only
ever need an `action` and a `context`. This can be thought of as being analogous to a web request. You need to
know what you're doing (`action`: 'create' = GET, 'drop' = DELETE, etc.) and the information (`context`) needed
to make the change. In our scenarios, the context tends to be an instance of `RelationConfigBase` corresponding
to the new state.
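As a rough sketch (the index-oriented names are hypothetical, not taken from any adapter), a change might be modeled like this:

```python
from dataclasses import dataclass

from dbt.adapters.relation_configs import (
    RelationConfigChange,
    RelationConfigChangeAction,
)


@dataclass(frozen=True, eq=True, unsafe_hash=True)
class IndexChange(RelationConfigChange):
    # `action` and `context` are inherited; `context` would carry the
    # hypothetical index config describing the desired end state
    @property
    def requires_full_refresh(self) -> bool:
        return False


change = IndexChange(action=RelationConfigChangeAction.create, context="idx_orders_by_date")
```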
## RelationConfigValidationMixin
This mixin provides optional validation mechanics that can be applied to either `RelationConfigBase` or
`RelationConfigChange` subclasses. A validation rule is a combination of a `validation_check`, something
that should evaluate to `True`, and an optional `validation_error`, an instance of `DbtRuntimeError`
that should be raised in the event the `validation_check` fails. While optional, it's recommended that
the `validation_error` be provided for clearer transparency to the end user.
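A rough sketch of a config opting into validation (the `table_name` rule below is purely illustrative):

```python
from dataclasses import dataclass
from typing import Set

from dbt.adapters.relation_configs import (
    RelationConfigBase,
    RelationConfigValidationMixin,
    RelationConfigValidationRule,
)
from dbt.exceptions import DbtRuntimeError


@dataclass(frozen=True)
class MyRelationConfig(RelationConfigBase, RelationConfigValidationMixin):
    table_name: str = ""

    @property
    def validation_rules(self) -> Set[RelationConfigValidationRule]:
        return {
            RelationConfigValidationRule(
                validation_check=len(self.table_name) <= 63,
                validation_error=DbtRuntimeError("table_name must be 63 characters or fewer"),
            )
        }
```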

View File

@@ -1,12 +0,0 @@
from dbt.adapters.relation_configs.config_base import ( # noqa: F401
RelationConfigBase,
RelationResults,
)
from dbt.adapters.relation_configs.config_change import ( # noqa: F401
RelationConfigChangeAction,
RelationConfigChange,
)
from dbt.adapters.relation_configs.config_validation import ( # noqa: F401
RelationConfigValidationMixin,
RelationConfigValidationRule,
)

View File

@@ -1,44 +0,0 @@
from dataclasses import dataclass
from typing import Union, Dict
import agate
from dbt.utils import filter_null_values
"""
This is what relation metadata from the database looks like. It's a dictionary because there will be
multiple grains of data for a single object. For example, a materialized view in Postgres has base-level information,
like its name. But it can also have multiple indexes, which require a separate query. It might look like this:
{
"base": agate.Row({"table_name": "table_abc", "query": "select * from table_def"})
"indexes": agate.Table("rows": [
agate.Row({"name": "index_a", "columns": ["column_a"], "type": "hash", "unique": False}),
agate.Row({"name": "index_b", "columns": ["time_dim_a"], "type": "btree", "unique": False}),
])
}
"""
RelationResults = Dict[str, Union[agate.Row, agate.Table]]
@dataclass(frozen=True)
class RelationConfigBase:
@classmethod
def from_dict(cls, kwargs_dict) -> "RelationConfigBase":
"""
This assumes the subclass of `RelationConfigBase` is flat, in the sense that no attribute is
itself another subclass of `RelationConfigBase`. If that's not the case, this should be overridden
to manually manage that complexity.
Args:
kwargs_dict: the dict representation of this instance
Returns: the `RelationConfigBase` representation associated with the provided dict
"""
return cls(**filter_null_values(kwargs_dict)) # type: ignore
@classmethod
def _not_implemented_error(cls) -> NotImplementedError:
return NotImplementedError(
"This relation type has not been fully configured for this adapter."
)

View File

@@ -1,23 +0,0 @@
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Hashable
from dbt.adapters.relation_configs.config_base import RelationConfigBase
from dbt.dataclass_schema import StrEnum
class RelationConfigChangeAction(StrEnum):
alter = "alter"
create = "create"
drop = "drop"
@dataclass(frozen=True, eq=True, unsafe_hash=True)
class RelationConfigChange(RelationConfigBase, ABC):
action: RelationConfigChangeAction
context: Hashable # this is usually a RelationConfig, e.g. IndexConfig, but shouldn't be limited
@property
@abstractmethod
def requires_full_refresh(self) -> bool:
raise self._not_implemented_error()

View File

@@ -1,57 +0,0 @@
from dataclasses import dataclass
from typing import Set, Optional
from dbt.exceptions import DbtRuntimeError
@dataclass(frozen=True, eq=True, unsafe_hash=True)
class RelationConfigValidationRule:
validation_check: bool
validation_error: Optional[DbtRuntimeError]
@property
def default_error(self):
return DbtRuntimeError(
"There was a validation error in preparing this relation config."
"No additional context was provided by this adapter."
)
@dataclass(frozen=True)
class RelationConfigValidationMixin:
def __post_init__(self):
self.run_validation_rules()
@property
def validation_rules(self) -> Set[RelationConfigValidationRule]:
"""
A set of validation rules to run against the object upon creation.
A validation rule is a combination of a validation check (bool) and an optional error message.
This defaults to no validation rules if not implemented. It's recommended to override this with values,
but that may not always be necessary.
Returns: a set of validation rules
"""
return set()
def run_validation_rules(self):
for validation_rule in self.validation_rules:
try:
assert validation_rule.validation_check
except AssertionError:
if validation_rule.validation_error:
raise validation_rule.validation_error
else:
raise validation_rule.default_error
self.run_child_validation_rules()
def run_child_validation_rules(self):
for attr_value in vars(self).values():
if hasattr(attr_value, "validation_rules"):
attr_value.run_validation_rules()
if isinstance(attr_value, set):
for member in attr_value:
if hasattr(member, "validation_rules"):
member.run_validation_rules()

View File

@@ -7,11 +7,11 @@ import agate
import dbt.clients.agate_helper
import dbt.exceptions
from dbt.adapters.base import BaseConnectionManager
from dbt.contracts.connection import Connection, ConnectionState, AdapterResponse
from dbt.contracts.connection import (
Connection, ConnectionState, AdapterResponse
)
from dbt.events.functions import fire_event
from dbt.events.types import ConnectionUsed, SQLQuery, SQLCommit, SQLQueryStatus
from dbt.events.contextvars import get_node_info
from dbt.utils import cast_to_str
class SQLConnectionManager(BaseConnectionManager):
@@ -23,11 +23,12 @@ class SQLConnectionManager(BaseConnectionManager):
- get_response
- open
"""
@abc.abstractmethod
def cancel(self, connection: Connection):
"""Cancel the given connection."""
raise dbt.exceptions.NotImplementedError("`cancel` is not implemented for this adapter!")
raise dbt.exceptions.NotImplementedException(
'`cancel` is not implemented for this adapter!'
)
def cancel_open(self) -> List[str]:
names = []
@@ -39,7 +40,10 @@ class SQLConnectionManager(BaseConnectionManager):
# if the connection failed, the handle will be None so we have
# nothing to cancel.
if connection.handle is not None and connection.state == ConnectionState.OPEN:
if (
connection.handle is not None and
connection.state == ConnectionState.OPEN
):
self.cancel(connection)
if connection.name is not None:
names.append(connection.name)
@@ -50,30 +54,20 @@ class SQLConnectionManager(BaseConnectionManager):
sql: str,
auto_begin: bool = True,
bindings: Optional[Any] = None,
abridge_sql_log: bool = False,
abridge_sql_log: bool = False
) -> Tuple[Connection, Any]:
connection = self.get_thread_connection()
if auto_begin and connection.transaction_open is False:
self.begin()
fire_event(
ConnectionUsed(
conn_type=self.TYPE,
conn_name=cast_to_str(connection.name),
node_info=get_node_info(),
)
)
fire_event(ConnectionUsed(conn_type=self.TYPE, conn_name=connection.name))
with self.exception_handler(sql):
if abridge_sql_log:
log_sql = "{}...".format(sql[:512])
log_sql = '{}...'.format(sql[:512])
else:
log_sql = sql
fire_event(
SQLQuery(
conn_name=cast_to_str(connection.name), sql=log_sql, node_info=get_node_info()
)
)
fire_event(SQLQuery(conn_name=connection.name, sql=log_sql))
pre = time.time()
cursor = connection.handle.cursor()
@@ -82,92 +76,74 @@ class SQLConnectionManager(BaseConnectionManager):
fire_event(
SQLQueryStatus(
status=str(self.get_response(cursor)),
elapsed=round((time.time() - pre)),
node_info=get_node_info(),
elapsed=round((time.time() - pre), 2)
)
)
return connection, cursor
@classmethod
@abc.abstractmethod
def get_response(cls, cursor: Any) -> AdapterResponse:
@abc.abstractclassmethod
def get_response(cls, cursor: Any) -> Union[AdapterResponse, str]:
"""Get the status of the cursor."""
raise dbt.exceptions.NotImplementedError(
"`get_response` is not implemented for this adapter!"
raise dbt.exceptions.NotImplementedException(
'`get_response` is not implemented for this adapter!'
)
@classmethod
def process_results(
cls, column_names: Iterable[str], rows: Iterable[Any]
cls,
column_names: Iterable[str],
rows: Iterable[Any]
) -> List[Dict[str, Any]]:
# TODO CT-211
unique_col_names = dict() # type: ignore[var-annotated]
# TODO CT-211
for idx in range(len(column_names)): # type: ignore[arg-type]
# TODO CT-211
col_name = column_names[idx] # type: ignore[index]
unique_col_names = dict()
for idx in range(len(column_names)):
col_name = column_names[idx]
if col_name in unique_col_names:
unique_col_names[col_name] += 1
# TODO CT-211
column_names[idx] = f"{col_name}_{unique_col_names[col_name]}" # type: ignore[index] # noqa
column_names[idx] = f'{col_name}_{unique_col_names[col_name]}'
else:
# TODO CT-211
unique_col_names[column_names[idx]] = 1 # type: ignore[index]
unique_col_names[column_names[idx]] = 1
return [dict(zip(column_names, row)) for row in rows]
@classmethod
def get_result_from_cursor(cls, cursor: Any, limit: Optional[int]) -> agate.Table:
def get_result_from_cursor(cls, cursor: Any) -> agate.Table:
data: List[Any] = []
column_names: List[str] = []
if cursor.description is not None:
column_names = [col[0] for col in cursor.description]
if limit:
rows = cursor.fetchmany(limit)
else:
rows = cursor.fetchall()
rows = cursor.fetchall()
data = cls.process_results(column_names, rows)
return dbt.clients.agate_helper.table_from_data_flat(data, column_names)
@classmethod
def data_type_code_to_name(cls, type_code: Union[int, str]) -> str:
"""Get the string representation of the data type from the type_code."""
# https://peps.python.org/pep-0249/#type-objects
raise dbt.exceptions.NotImplementedError(
"`data_type_code_to_name` is not implemented for this adapter!"
return dbt.clients.agate_helper.table_from_data_flat(
data,
column_names
)
def execute(
self, sql: str, auto_begin: bool = False, fetch: bool = False, limit: Optional[int] = None
) -> Tuple[AdapterResponse, agate.Table]:
self, sql: str, auto_begin: bool = False, fetch: bool = False
) -> Tuple[Union[AdapterResponse, str], agate.Table]:
sql = self._add_query_comment(sql)
_, cursor = self.add_query(sql, auto_begin)
response = self.get_response(cursor)
if fetch:
table = self.get_result_from_cursor(cursor, limit)
table = self.get_result_from_cursor(cursor)
else:
table = dbt.clients.agate_helper.empty_table()
return response, table
def add_begin_query(self):
return self.add_query("BEGIN", auto_begin=False)
return self.add_query('BEGIN', auto_begin=False)
def add_commit_query(self):
return self.add_query("COMMIT", auto_begin=False)
def add_select_query(self, sql: str) -> Tuple[Connection, Any]:
sql = self._add_query_comment(sql)
return self.add_query(sql, auto_begin=False)
return self.add_query('COMMIT', auto_begin=False)
def begin(self):
connection = self.get_thread_connection()
if connection.transaction_open is True:
raise dbt.exceptions.DbtInternalError(
raise dbt.exceptions.InternalException(
'Tried to begin a new transaction on connection "{}", but '
"it already had one open!".format(connection.name)
)
'it already had one open!'.format(connection.name))
self.add_begin_query()
@@ -177,12 +153,11 @@ class SQLConnectionManager(BaseConnectionManager):
def commit(self):
connection = self.get_thread_connection()
if connection.transaction_open is False:
raise dbt.exceptions.DbtInternalError(
raise dbt.exceptions.InternalException(
'Tried to commit transaction on connection "{}", but '
"it does not have one open!".format(connection.name)
)
'it does not have one open!'.format(connection.name))
fire_event(SQLCommit(conn_name=connection.name, node_info=get_node_info()))
fire_event(SQLCommit(conn_name=connection.name))
self.add_commit_query()
connection.transaction_open = False

View File

@@ -1,10 +1,11 @@
import agate
from typing import Any, Optional, Tuple, Type, List
from dbt.contracts.connection import Connection, AdapterResponse
from dbt.exceptions import RelationTypeNullError
import dbt.clients.agate_helper
from dbt.contracts.connection import Connection
import dbt.exceptions
from dbt.adapters.base import BaseAdapter, available
from dbt.adapters.cache import _make_ref_key_dict
from dbt.adapters.cache import _make_key
from dbt.adapters.sql import SQLConnectionManager
from dbt.events.functions import fire_event
from dbt.events.types import ColTypeChange, SchemaCreation, SchemaDrop
@@ -12,22 +13,21 @@ from dbt.events.types import ColTypeChange, SchemaCreation, SchemaDrop
from dbt.adapters.base.relation import BaseRelation
LIST_RELATIONS_MACRO_NAME = "list_relations_without_caching"
GET_COLUMNS_IN_RELATION_MACRO_NAME = "get_columns_in_relation"
LIST_SCHEMAS_MACRO_NAME = "list_schemas"
CHECK_SCHEMA_EXISTS_MACRO_NAME = "check_schema_exists"
CREATE_SCHEMA_MACRO_NAME = "create_schema"
DROP_SCHEMA_MACRO_NAME = "drop_schema"
RENAME_RELATION_MACRO_NAME = "rename_relation"
TRUNCATE_RELATION_MACRO_NAME = "truncate_relation"
DROP_RELATION_MACRO_NAME = "drop_relation"
ALTER_COLUMN_TYPE_MACRO_NAME = "alter_column_type"
VALIDATE_SQL_MACRO_NAME = "validate_sql"
LIST_RELATIONS_MACRO_NAME = 'list_relations_without_caching'
GET_COLUMNS_IN_RELATION_MACRO_NAME = 'get_columns_in_relation'
LIST_SCHEMAS_MACRO_NAME = 'list_schemas'
CHECK_SCHEMA_EXISTS_MACRO_NAME = 'check_schema_exists'
CREATE_SCHEMA_MACRO_NAME = 'create_schema'
DROP_SCHEMA_MACRO_NAME = 'drop_schema'
RENAME_RELATION_MACRO_NAME = 'rename_relation'
TRUNCATE_RELATION_MACRO_NAME = 'truncate_relation'
DROP_RELATION_MACRO_NAME = 'drop_relation'
ALTER_COLUMN_TYPE_MACRO_NAME = 'alter_column_type'
class SQLAdapter(BaseAdapter):
"""The default adapter with the common agate conversions and some SQL
methods was implemented. This adapter has a different much shorter list of
methods implemented. This adapter has a different much shorter list of
methods to implement, but some more macros that must be implemented.
To implement a macro, implement "${adapter_type}__${macro_name}". in the
@@ -63,24 +63,30 @@ class SQLAdapter(BaseAdapter):
:param abridge_sql_log: If set, limit the raw sql logged to 512
characters
"""
return self.connections.add_query(sql, auto_begin, bindings, abridge_sql_log)
return self.connections.add_query(sql, auto_begin, bindings,
abridge_sql_log)
@classmethod
def convert_text_type(cls, agate_table: agate.Table, col_idx: int) -> str:
return "text"
@classmethod
def convert_number_type(cls, agate_table: agate.Table, col_idx: int) -> str:
# TODO CT-211
decimals = agate_table.aggregate(agate.MaxPrecision(col_idx)) # type: ignore[attr-defined]
def convert_number_type(
cls, agate_table: agate.Table, col_idx: int
) -> str:
decimals = agate_table.aggregate(agate.MaxPrecision(col_idx))
return "float8" if decimals else "integer"
@classmethod
def convert_boolean_type(cls, agate_table: agate.Table, col_idx: int) -> str:
def convert_boolean_type(
cls, agate_table: agate.Table, col_idx: int
) -> str:
return "boolean"
@classmethod
def convert_datetime_type(cls, agate_table: agate.Table, col_idx: int) -> str:
def convert_datetime_type(
cls, agate_table: agate.Table, col_idx: int
) -> str:
return "timestamp without time zone"
@classmethod
@@ -96,27 +102,36 @@ class SQLAdapter(BaseAdapter):
return True
def expand_column_types(self, goal, current):
reference_columns = {c.name: c for c in self.get_columns_in_relation(goal)}
reference_columns = {
c.name: c for c in
self.get_columns_in_relation(goal)
}
target_columns = {c.name: c for c in self.get_columns_in_relation(current)}
target_columns = {
c.name: c for c
in self.get_columns_in_relation(current)
}
for column_name, reference_column in reference_columns.items():
target_column = target_columns.get(column_name)
if target_column is not None and target_column.can_expand_to(reference_column):
if target_column is not None and \
target_column.can_expand_to(reference_column):
col_string_size = reference_column.string_size()
new_type = self.Column.string_type(col_string_size)
fire_event(
ColTypeChange(
orig_type=target_column.data_type,
new_type=new_type,
table=_make_ref_key_dict(current),
table=_make_key(current),
)
)
self.alter_column_type(current, column_name, new_type)
def alter_column_type(self, relation, column_name, new_column_type) -> None:
def alter_column_type(
self, relation, column_name, new_column_type
) -> None:
"""
1. Create a new column (w/ temp name and correct type)
2. Copy data over to it
@@ -124,38 +139,53 @@ class SQLAdapter(BaseAdapter):
4. Rename the new column to existing column
"""
kwargs = {
"relation": relation,
"column_name": column_name,
"new_column_type": new_column_type,
'relation': relation,
'column_name': column_name,
'new_column_type': new_column_type,
}
self.execute_macro(ALTER_COLUMN_TYPE_MACRO_NAME, kwargs=kwargs)
self.execute_macro(
ALTER_COLUMN_TYPE_MACRO_NAME,
kwargs=kwargs
)
def drop_relation(self, relation):
if relation.type is None:
raise RelationTypeNullError(relation)
dbt.exceptions.raise_compiler_error(
'Tried to drop relation {}, but its type is null.'
.format(relation))
self.cache_dropped(relation)
self.execute_macro(DROP_RELATION_MACRO_NAME, kwargs={"relation": relation})
self.execute_macro(
DROP_RELATION_MACRO_NAME,
kwargs={'relation': relation}
)
def truncate_relation(self, relation):
self.execute_macro(TRUNCATE_RELATION_MACRO_NAME, kwargs={"relation": relation})
self.execute_macro(
TRUNCATE_RELATION_MACRO_NAME,
kwargs={'relation': relation}
)
def rename_relation(self, from_relation, to_relation):
self.cache_renamed(from_relation, to_relation)
kwargs = {"from_relation": from_relation, "to_relation": to_relation}
self.execute_macro(RENAME_RELATION_MACRO_NAME, kwargs=kwargs)
kwargs = {'from_relation': from_relation, 'to_relation': to_relation}
self.execute_macro(
RENAME_RELATION_MACRO_NAME,
kwargs=kwargs
)
def get_columns_in_relation(self, relation):
return self.execute_macro(
GET_COLUMNS_IN_RELATION_MACRO_NAME, kwargs={"relation": relation}
GET_COLUMNS_IN_RELATION_MACRO_NAME,
kwargs={'relation': relation}
)
def create_schema(self, relation: BaseRelation) -> None:
relation = relation.without_identifier()
fire_event(SchemaCreation(relation=_make_ref_key_dict(relation)))
fire_event(SchemaCreation(relation=_make_key(relation)))
kwargs = {
"relation": relation,
'relation': relation,
}
self.execute_macro(CREATE_SCHEMA_MACRO_NAME, kwargs=kwargs)
self.commit_if_has_connection()
@@ -164,46 +194,51 @@ class SQLAdapter(BaseAdapter):
def drop_schema(self, relation: BaseRelation) -> None:
relation = relation.without_identifier()
fire_event(SchemaDrop(relation=_make_ref_key_dict(relation)))
fire_event(SchemaDrop(relation=_make_key(relation)))
kwargs = {
"relation": relation,
'relation': relation,
}
self.execute_macro(DROP_SCHEMA_MACRO_NAME, kwargs=kwargs)
self.commit_if_has_connection()
# we can update the cache here
self.cache.drop_schema(relation.database, relation.schema)
def list_relations_without_caching(
self,
schema_relation: BaseRelation,
self, schema_relation: BaseRelation,
) -> List[BaseRelation]:
kwargs = {"schema_relation": schema_relation}
results = self.execute_macro(LIST_RELATIONS_MACRO_NAME, kwargs=kwargs)
kwargs = {'schema_relation': schema_relation}
results = self.execute_macro(
LIST_RELATIONS_MACRO_NAME,
kwargs=kwargs
)
relations = []
quote_policy = {"database": True, "schema": True, "identifier": True}
quote_policy = {
'database': True,
'schema': True,
'identifier': True
}
for _database, name, _schema, _type in results:
try:
_type = self.Relation.get_relation_type(_type)
except ValueError:
_type = self.Relation.External
relations.append(
self.Relation.create(
database=_database,
schema=_schema,
identifier=name,
quote_policy=quote_policy,
type=_type,
)
)
relations.append(self.Relation.create(
database=_database,
schema=_schema,
identifier=name,
quote_policy=quote_policy,
type=_type
))
return relations
@classmethod
def quote(self, identifier):
return '"{}"'.format(identifier)
def list_schemas(self, database: str) -> List[str]:
results = self.execute_macro(LIST_SCHEMAS_MACRO_NAME, kwargs={"database": database})
results = self.execute_macro(
LIST_SCHEMAS_MACRO_NAME,
kwargs={'database': database}
)
return [row[0] for row in results]
@@ -211,60 +246,13 @@ class SQLAdapter(BaseAdapter):
information_schema = self.Relation.create(
database=database,
schema=schema,
identifier="INFORMATION_SCHEMA",
quote_policy=self.config.quoting,
identifier='INFORMATION_SCHEMA',
quote_policy=self.config.quoting
).information_schema()
kwargs = {"information_schema": information_schema, "schema": schema}
results = self.execute_macro(CHECK_SCHEMA_EXISTS_MACRO_NAME, kwargs=kwargs)
return results[0][0] > 0
def validate_sql(self, sql: str) -> AdapterResponse:
"""Submit the given SQL to the engine for validation, but not execution.
By default we simply prefix the query with the explain keyword and allow the
exceptions thrown by the underlying engine on invalid SQL inputs to bubble up
to the exception handler. For adjustments to the explain statement - such as
for adapters that have different mechanisms for hinting at query validation
or dry-run - callers may be able to override the validate_sql_query macro with
the addition of an <adapter>__validate_sql implementation.
:param sql str: The sql to validate
"""
kwargs = {
"sql": sql,
}
result = self.execute_macro(VALIDATE_SQL_MACRO_NAME, kwargs=kwargs)
# The statement macro always returns an AdapterResponse in the output AttrDict's
# `response` property, and we preserve the full payload in case we want to
# return fetched output for engines where explain plans are emitted as columnar
# results. Any macro override that deviates from this behavior may encounter an
# assertion error in the runtime.
adapter_response = result.response # type: ignore[attr-defined]
assert isinstance(adapter_response, AdapterResponse), (
f"Expected AdapterResponse from validate_sql macro execution, "
f"got {type(adapter_response)}."
kwargs = {'information_schema': information_schema, 'schema': schema}
results = self.execute_macro(
CHECK_SCHEMA_EXISTS_MACRO_NAME,
kwargs=kwargs
)
return adapter_response
# This is for use in the test suite
def run_sql_for_tests(self, sql, fetch, conn):
cursor = conn.handle.cursor()
try:
cursor.execute(sql)
if hasattr(conn.handle, "commit"):
conn.handle.commit()
if fetch == "one":
return cursor.fetchone()
elif fetch == "all":
return cursor.fetchall()
else:
return
except BaseException as e:
if conn.handle and not getattr(conn.handle, "closed", True):
conn.handle.rollback()
print(sql)
print(e)
raise
finally:
conn.transaction_open = False
return results[0][0] > 0

View File

@@ -1,71 +0,0 @@
# Adding a new command
## `main.py`
Add the new command with all necessary decorators. Every command will need at minimum:
- a decorator for the click group it belongs to which also names the command
- the postflight decorator (must come before other decorators from the `requires` module for error handling)
- the preflight decorator
```py
@cli.command("my-new-command")
@requires.postflight
@requires.preflight
def my_new_command(ctx, **kwargs):
...
```
## `types.py`
Add an entry to the `Command` enum with your new command. Commands that are sub-commands should have entries
that represent their full command path (e.g. `source freshness -> SOURCE_FRESHNESS`, `docs serve -> DOCS_SERVE`).
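As a minimal sketch, a new entry could look like the following (member names follow the full command path as described above; the values and the `MY_NEW_COMMAND` member are purely illustrative, and the real enum in `dbt/cli/types.py` has many more members):
```python
from enum import Enum

# Sketch of dbt/cli/types.py -- only a few members shown, values illustrative.
class Command(Enum):
    RUN = "run"
    SOURCE_FRESHNESS = "source freshness"  # sub-command named for its full command path
    MY_NEW_COMMAND = "my-new-command"      # hypothetical new entry
```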
## `flags.py`
Add the new command to the dictionary within the `command_args` function.
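In practice this means adding one line to the `CMD_DICT` mapping inside `command_args` so the new enum member resolves to the new click command. A sketch, with most existing entries omitted (`my_new_command` is the hypothetical command from above):
```python
from typing import Dict

from click.core import Command as ClickCommand

import dbt.cli.main as cli
from dbt.cli.types import Command as CliCommand

# Sketch of the mapping inside command_args(); most existing entries omitted.
CMD_DICT: Dict[CliCommand, ClickCommand] = {
    CliCommand.RUN: cli.run,
    CliCommand.MY_NEW_COMMAND: cli.my_new_command,  # hypothetical new command
}
```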
# Exception Handling
## `requires.py`
### `postflight`
In the postflight decorator, the click command is invoked (i.e. `func(*args, **kwargs)`) and wrapped in a `try/except` block to handle any exceptions thrown.
Any exceptions caught in `postflight` are wrapped in custom exceptions from the `dbt.cli.exceptions` module (i.e. `ResultExit`, `ExceptionExit`), which instruct click to complete execution with a particular exit code.
Some `dbt-core` handled exceptions have an attribute named `results` which contains results from running nodes (e.g. `FailFastError`). These are wrapped in the `ResultExit` exception to represent runs that have failed in a way that `dbt-core` expects.
If the invocation of the command does not throw any exceptions but still does not succeed, `postflight` will raise the `ResultExit` exception so that the appropriate exit code is produced.
These exceptions produce an exit code of `1`.
Exceptions wrapped with `ExceptionExit` may be thrown by `dbt-core` intentionally (i.e. an exception that inherits from `dbt.exceptions.Exception`) or unintentionally (i.e. exceptions thrown by the python runtime). In either case these are considered errors that `dbt-core` did not expect and are treated as genuine exceptions.
These exceptions produce an exit code of `2`.
If no exceptions are thrown from invoking the command and the command succeeds, `postflight` will not raise any exceptions.
When no exceptions are raised, an exit code of `0` is produced.
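Taken together, the outcome handling in `postflight` behaves roughly like the sketch below. This is a simplification: the real decorator in `dbt/cli/requires.py` also sets up logging, tracking, and other concerns, and the helper name here is invented for illustration.
```python
from dbt.cli.exceptions import ExceptionExit, ResultExit

# Simplified sketch of how postflight maps command outcomes to exit codes.
def run_with_exit_codes(func, *args, **kwargs):
    try:
        result, success = func(*args, **kwargs)
    except Exception as e:
        if hasattr(e, "results"):        # dbt-handled exceptions such as FailFastError
            raise ResultExit(e.results)  # results preserved -> exit code 1
        raise ExceptionExit(e)           # unexpected exception -> exit code 2
    if not success:
        raise ResultExit(result)         # ran, but did not succeed -> exit code 1
    return result, success               # success -> exit code 0
```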
## `main.py`
### `dbtRunner`
`dbtRunner` provides a programmatic interface for our click CLI and wraps the invocation of the click commands to handle any exceptions thrown.
`dbtRunner.invoke` should only ever return an instantiated `dbtRunnerResult`, which contains the following fields:
- `success`: A boolean representing whether the command invocation was successful
- `result`: The optional result of the invoked command. This attribute can have many types; see the definition of `dbtRunnerResult` for more information
- `exception`: If an exception was thrown during command invocation it will be saved here; otherwise it will be `None`. Please note that the exceptions held in this attribute are not the exceptions thrown by `preflight` but instead the exceptions that `ResultExit` and `ExceptionExit` wrap
Programmatic exception handling might look like the following:
```python
res = dbtRunner().invoke(["run"])
if not res.success:
...
if type(res.exception) == SomeExceptionType:
...
```
## `dbt/tests/util.py`
### `run_dbt`
In many of our functional and integration tests, we want to be sure that an invocation of `dbt` raises a certain exception.
A common pattern for these assertions:
```python
class TestSomething:
def test_something(self, project):
with pytest.raises(SomeException):
run_dbt(["run"])
```
To allow these tests to assert that exceptions have been thrown, the `run_dbt` function will raise any exceptions it receives from the invocation of a `dbt` command.

View File

@@ -1 +0,0 @@
from .main import cli as dbt_cli # noqa

View File

@@ -1,16 +0,0 @@
import click
from typing import Optional
from dbt.cli.main import cli as dbt
def make_context(args, command=dbt) -> Optional[click.Context]:
try:
ctx = command.make_context(command.name, args)
except click.exceptions.Exit:
return None
ctx.invoked_subcommand = ctx.protected_args[0] if ctx.protected_args else None
ctx.obj = {}
return ctx

View File

@@ -1,43 +0,0 @@
from typing import Optional, IO
from click.exceptions import ClickException
from dbt.utils import ExitCodes
class DbtUsageException(Exception):
pass
class DbtInternalException(Exception):
pass
class CliException(ClickException):
"""The base exception class for our implementation of the click CLI.
The exit_code attribute is used by click to determine which exit code to produce
after an invocation."""
def __init__(self, exit_code: ExitCodes) -> None:
self.exit_code = exit_code.value
# the typing of _file is to satisfy the signature of ClickException.show
# overriding this method prevents click from printing any exceptions to stdout
def show(self, _file: Optional[IO] = None) -> None:
pass
class ResultExit(CliException):
"""This class wraps any exception that contains results while invoking dbt, or the
results of an invocation that did not succeed but did not throw any exceptions."""
def __init__(self, result) -> None:
super().__init__(ExitCodes.ModelError)
self.result = result
class ExceptionExit(CliException):
"""This class wraps any exception that does not contain results thrown while invoking dbt."""
def __init__(self, exception: Exception) -> None:
super().__init__(ExitCodes.UnhandledError)
self.exception = exception

View File

@@ -1,404 +0,0 @@
import os
import sys
from dataclasses import dataclass
from importlib import import_module
from multiprocessing import get_context
from pprint import pformat as pf
from typing import Any, Callable, Dict, List, Optional, Set, Union
from click import Context, get_current_context, Parameter
from click.core import Command as ClickCommand, Group, ParameterSource
from dbt.cli.exceptions import DbtUsageException
from dbt.cli.resolvers import default_log_path, default_project_dir
from dbt.cli.types import Command as CliCommand
from dbt.config.profile import read_user_config
from dbt.contracts.project import UserConfig
from dbt.exceptions import DbtInternalError
from dbt.deprecations import renamed_env_var
from dbt.helper_types import WarnErrorOptions
if os.name != "nt":
# https://bugs.python.org/issue41567
import multiprocessing.popen_spawn_posix # type: ignore # noqa: F401
FLAGS_DEFAULTS = {
"INDIRECT_SELECTION": "eager",
"TARGET_PATH": None,
# Cli args without user_config or env var option.
"FULL_REFRESH": False,
"STRICT_MODE": False,
"STORE_FAILURES": False,
"INTROSPECT": True,
}
DEPRECATED_PARAMS = {
"deprecated_defer": "defer",
"deprecated_favor_state": "favor_state",
"deprecated_print": "print",
"deprecated_state": "state",
}
WHICH_KEY = "which"
def convert_config(config_name, config_value):
"""Convert the values from config and original set_from_args to the correct type."""
ret = config_value
if config_name.lower() == "warn_error_options" and type(config_value) == dict:
ret = WarnErrorOptions(
include=config_value.get("include", []), exclude=config_value.get("exclude", [])
)
return ret
def args_to_context(args: List[str]) -> Context:
"""Convert a list of args to a click context with proper hierarchy for dbt commands"""
from dbt.cli.main import cli
cli_ctx = cli.make_context(cli.name, args)
# Split args if they're a comma-separated string.
if len(args) == 1 and "," in args[0]:
args = args[0].split(",")
sub_command_name, sub_command, args = cli.resolve_command(cli_ctx, args)
# Handle source and docs group.
if isinstance(sub_command, Group):
sub_command_name, sub_command, args = sub_command.resolve_command(cli_ctx, args)
assert isinstance(sub_command, ClickCommand)
sub_command_ctx = sub_command.make_context(sub_command_name, args)
sub_command_ctx.parent = cli_ctx
return sub_command_ctx
@dataclass(frozen=True)
class Flags:
"""Primary configuration artifact for running dbt"""
def __init__(
self, ctx: Optional[Context] = None, user_config: Optional[UserConfig] = None
) -> None:
# Set the default flags.
for key, value in FLAGS_DEFAULTS.items():
object.__setattr__(self, key, value)
if ctx is None:
ctx = get_current_context()
def _get_params_by_source(ctx: Context, source_type: ParameterSource):
"""Generates all params of a given source type."""
yield from [
name for name, source in ctx._parameter_source.items() if source is source_type
]
if ctx.parent:
yield from _get_params_by_source(ctx.parent, source_type)
# Ensure that any params sourced from the commandline are not present more than once.
# Click handles this exclusivity, but only at a per-subcommand level.
seen_params = []
for param in _get_params_by_source(ctx, ParameterSource.COMMANDLINE):
if param in seen_params:
raise DbtUsageException(
f"{param.lower()} was provided both before and after the subcommand, it can only be set either before or after.",
)
seen_params.append(param)
def _assign_params(
ctx: Context,
params_assigned_from_default: set,
deprecated_env_vars: Dict[str, Callable],
):
"""Recursively adds all click params to flag object"""
for param_name, param_value in ctx.params.items():
# N.B. You have to use the base MRO method (object.__setattr__) to set attributes
# when using frozen dataclasses.
# https://docs.python.org/3/library/dataclasses.html#frozen-instances
# Handle deprecated env vars while still respecting old values
# e.g. DBT_NO_PRINT -> DBT_PRINT if DBT_NO_PRINT is set, it is
# respected over DBT_PRINT or --print.
new_name: Union[str, None] = None
if param_name in DEPRECATED_PARAMS:
# Deprecated env vars can only be set via env var.
# We use the deprecated option in click to serialize the value
# from the env var string.
param_source = ctx.get_parameter_source(param_name)
if param_source == ParameterSource.DEFAULT:
continue
elif param_source != ParameterSource.ENVIRONMENT:
raise DbtUsageException(
"Deprecated parameters can only be set via environment variables",
)
# Rename for clarity.
dep_name = param_name
new_name = DEPRECATED_PARAMS.get(dep_name)
try:
assert isinstance(new_name, str)
except AssertionError:
raise Exception(
f"No deprecated param name match in DEPRECATED_PARAMS from {dep_name} to {new_name}"
)
# Find param objects for their envvar name.
try:
dep_param = [x for x in ctx.command.params if x.name == dep_name][0]
new_param = [x for x in ctx.command.params if x.name == new_name][0]
except IndexError:
raise Exception(
f"No deprecated param name match in context from {dep_name} to {new_name}"
)
# Remove param from defaulted set since the deprecated
# value is not set from default, but from an env var.
if new_name in params_assigned_from_default:
params_assigned_from_default.remove(new_name)
# Add the deprecation warning function to the set.
assert isinstance(dep_param.envvar, str)
assert isinstance(new_param.envvar, str)
deprecated_env_vars[new_name] = renamed_env_var(
old_name=dep_param.envvar,
new_name=new_param.envvar,
)
# Set the flag value.
is_duplicate = hasattr(self, param_name.upper())
is_default = ctx.get_parameter_source(param_name) == ParameterSource.DEFAULT
flag_name = (new_name or param_name).upper()
if (is_duplicate and not is_default) or not is_duplicate:
object.__setattr__(self, flag_name, param_value)
# Track default assigned params.
if is_default:
params_assigned_from_default.add(param_name)
if ctx.parent:
_assign_params(ctx.parent, params_assigned_from_default, deprecated_env_vars)
params_assigned_from_default = set() # type: Set[str]
deprecated_env_vars: Dict[str, Callable] = {}
_assign_params(ctx, params_assigned_from_default, deprecated_env_vars)
# Set deprecated_env_var_warnings to be fired later after events have been init.
object.__setattr__(
self, "deprecated_env_var_warnings", [x for x in deprecated_env_vars.values()]
)
# Get the invoked command flags.
invoked_subcommand_name = (
ctx.invoked_subcommand if hasattr(ctx, "invoked_subcommand") else None
)
if invoked_subcommand_name is not None:
invoked_subcommand = getattr(import_module("dbt.cli.main"), invoked_subcommand_name)
invoked_subcommand.allow_extra_args = True
invoked_subcommand.ignore_unknown_options = True
invoked_subcommand_ctx = invoked_subcommand.make_context(None, sys.argv)
_assign_params(
invoked_subcommand_ctx, params_assigned_from_default, deprecated_env_vars
)
if not user_config:
profiles_dir = getattr(self, "PROFILES_DIR", None)
user_config = read_user_config(profiles_dir) if profiles_dir else None
# Add entire invocation command to flags
object.__setattr__(self, "INVOCATION_COMMAND", "dbt " + " ".join(sys.argv[1:]))
# Overwrite default assignments with user config if available.
if user_config:
param_assigned_from_default_copy = params_assigned_from_default.copy()
for param_assigned_from_default in params_assigned_from_default:
user_config_param_value = getattr(user_config, param_assigned_from_default, None)
if user_config_param_value is not None:
object.__setattr__(
self,
param_assigned_from_default.upper(),
convert_config(param_assigned_from_default, user_config_param_value),
)
param_assigned_from_default_copy.remove(param_assigned_from_default)
params_assigned_from_default = param_assigned_from_default_copy
# Set hard coded flags.
object.__setattr__(self, "WHICH", invoked_subcommand_name or ctx.info_name)
object.__setattr__(self, "MP_CONTEXT", get_context("spawn"))
# Apply the lead/follow relationship between some parameters.
self._override_if_set("USE_COLORS", "USE_COLORS_FILE", params_assigned_from_default)
self._override_if_set("LOG_LEVEL", "LOG_LEVEL_FILE", params_assigned_from_default)
self._override_if_set("LOG_FORMAT", "LOG_FORMAT_FILE", params_assigned_from_default)
# Set default LOG_PATH from PROJECT_DIR, if available.
# Starting in v1.5, if `log-path` is set in `dbt_project.yml`, it will raise a deprecation warning,
# with the possibility of removing it in a future release.
if getattr(self, "LOG_PATH", None) is None:
project_dir = getattr(self, "PROJECT_DIR", default_project_dir())
version_check = getattr(self, "VERSION_CHECK", True)
object.__setattr__(self, "LOG_PATH", default_log_path(project_dir, version_check))
# Support console DO NOT TRACK initiative.
if os.getenv("DO_NOT_TRACK", "").lower() in ("1", "t", "true", "y", "yes"):
object.__setattr__(self, "SEND_ANONYMOUS_USAGE_STATS", False)
# Check mutual exclusivity once all flags are set.
self._assert_mutually_exclusive(
params_assigned_from_default, ["WARN_ERROR", "WARN_ERROR_OPTIONS"]
)
# Support lower cased access for legacy code.
params = set(
x for x in dir(self) if not callable(getattr(self, x)) and not x.startswith("__")
)
for param in params:
object.__setattr__(self, param.lower(), getattr(self, param))
def __str__(self) -> str:
return str(pf(self.__dict__))
def _override_if_set(self, lead: str, follow: str, defaulted: Set[str]) -> None:
"""If the value of the lead parameter was set explicitly, apply the value to follow, unless follow was also set explicitly."""
if lead.lower() not in defaulted and follow.lower() in defaulted:
object.__setattr__(self, follow.upper(), getattr(self, lead.upper(), None))
def _assert_mutually_exclusive(
self, params_assigned_from_default: Set[str], group: List[str]
) -> None:
"""
Ensure no elements from group are simultaneously provided by a user, as inferred from params_assigned_from_default.
Raises click.UsageError if any two elements from group are simultaneously provided by a user.
"""
set_flag = None
for flag in group:
flag_set_by_user = flag.lower() not in params_assigned_from_default
if flag_set_by_user and set_flag:
raise DbtUsageException(
f"{flag.lower()}: not allowed with argument {set_flag.lower()}"
)
elif flag_set_by_user:
set_flag = flag
def fire_deprecations(self):
"""Fires events for deprecated env_var usage."""
[dep_fn() for dep_fn in self.deprecated_env_var_warnings]
# It is necessary to remove this attr from the class so it does
# not get pickled when written to disk as json.
object.__delattr__(self, "deprecated_env_var_warnings")
@classmethod
def from_dict(cls, command: CliCommand, args_dict: Dict[str, Any]) -> "Flags":
command_arg_list = command_params(command, args_dict)
ctx = args_to_context(command_arg_list)
flags = cls(ctx=ctx)
flags.fire_deprecations()
return flags
CommandParams = List[str]
def command_params(command: CliCommand, args_dict: Dict[str, Any]) -> CommandParams:
"""Given a command and a dict, returns a list of strings representing
the CLI params for that command. The order of this list is consistent with
which flags are expected at the parent level vs the command level.
e.g. fn("run", {"defer": True, "print": False}) -> ["--no-print", "run", "--defer"]
The result of this function can be passed in to the args_to_context function
to produce a click context to instantiate Flags with.
"""
cmd_args = set(command_args(command))
prnt_args = set(parent_args())
default_args = set([x.lower() for x in FLAGS_DEFAULTS.keys()])
res = command.to_list()
for k, v in args_dict.items():
k = k.lower()
# if a "which" value exists in the args dict, it should match the command provided
if k == WHICH_KEY:
if v != command.value:
raise DbtInternalError(
f"Command '{command.value}' does not match value of which: '{v}'"
)
continue
# param was assigned from defaults and should not be included
if k not in (cmd_args | prnt_args) - default_args:
continue
# if the param is in parent args, it should come before the arg name
# e.g. ["--print", "run"] vs ["run", "--print"]
add_fn = res.append
if k in prnt_args:
def add_fn(x):
res.insert(0, x)
spinal_cased = k.replace("_", "-")
if k == "macro" and command == CliCommand.RUN_OPERATION:
add_fn(v)
elif v in (None, False):
add_fn(f"--no-{spinal_cased}")
elif v is True:
add_fn(f"--{spinal_cased}")
else:
add_fn(f"--{spinal_cased}={v}")
return res
ArgsList = List[str]
def parent_args() -> ArgsList:
"""Return a list representing the params the base click command takes."""
from dbt.cli.main import cli
return format_params(cli.params)
def command_args(command: CliCommand) -> ArgsList:
"""Given a command, return a list of strings representing the params
that command takes. This function only returns params assigned to a
specific command, not those of its parent command.
e.g. fn("run") -> ["defer", "favor_state", "exclude", ...]
"""
import dbt.cli.main as cli
CMD_DICT: Dict[CliCommand, ClickCommand] = {
CliCommand.BUILD: cli.build,
CliCommand.CLEAN: cli.clean,
CliCommand.CLONE: cli.clone,
CliCommand.COMPILE: cli.compile,
CliCommand.DOCS_GENERATE: cli.docs_generate,
CliCommand.DOCS_SERVE: cli.docs_serve,
CliCommand.DEBUG: cli.debug,
CliCommand.DEPS: cli.deps,
CliCommand.INIT: cli.init,
CliCommand.LIST: cli.list,
CliCommand.PARSE: cli.parse,
CliCommand.RUN: cli.run,
CliCommand.RUN_OPERATION: cli.run_operation,
CliCommand.SEED: cli.seed,
CliCommand.SHOW: cli.show,
CliCommand.SNAPSHOT: cli.snapshot,
CliCommand.SOURCE_FRESHNESS: cli.freshness,
CliCommand.TEST: cli.test,
CliCommand.RETRY: cli.retry,
}
click_cmd: Optional[ClickCommand] = CMD_DICT.get(command, None)
if click_cmd is None:
raise DbtInternalError(f"No command found for name '{command.name}'")
return format_params(click_cmd.params)
def format_params(params: List[Parameter]) -> ArgsList:
return [str(x.name) for x in params if not str(x.name).lower().startswith("deprecated_")]

View File

@@ -1,850 +0,0 @@
from copy import copy
from dataclasses import dataclass
from typing import Callable, List, Optional, Union
import click
from click.exceptions import (
Exit as ClickExit,
BadOptionUsage,
NoSuchOption,
UsageError,
)
from dbt.cli import requires, params as p
from dbt.cli.exceptions import (
DbtInternalException,
DbtUsageException,
)
from dbt.contracts.graph.manifest import Manifest
from dbt.contracts.results import (
CatalogArtifact,
RunExecutionResult,
)
from dbt.events.base_types import EventMsg
from dbt.task.build import BuildTask
from dbt.task.clean import CleanTask
from dbt.task.clone import CloneTask
from dbt.task.compile import CompileTask
from dbt.task.debug import DebugTask
from dbt.task.deps import DepsTask
from dbt.task.freshness import FreshnessTask
from dbt.task.generate import GenerateTask
from dbt.task.init import InitTask
from dbt.task.list import ListTask
from dbt.task.retry import RetryTask
from dbt.task.run import RunTask
from dbt.task.run_operation import RunOperationTask
from dbt.task.seed import SeedTask
from dbt.task.serve import ServeTask
from dbt.task.show import ShowTask
from dbt.task.snapshot import SnapshotTask
from dbt.task.test import TestTask
@dataclass
class dbtRunnerResult:
"""Contains the result of an invocation of the dbtRunner"""
success: bool
exception: Optional[BaseException] = None
result: Union[
bool, # debug
CatalogArtifact, # docs generate
List[str], # list/ls
Manifest, # parse
None, # clean, deps, init, source
RunExecutionResult, # build, compile, run, seed, snapshot, test, run-operation
] = None
# Programmatic invocation
class dbtRunner:
def __init__(
self,
manifest: Optional[Manifest] = None,
callbacks: Optional[List[Callable[[EventMsg], None]]] = None,
):
self.manifest = manifest
if callbacks is None:
callbacks = []
self.callbacks = callbacks
def invoke(self, args: List[str], **kwargs) -> dbtRunnerResult:
try:
dbt_ctx = cli.make_context(cli.name, args)
dbt_ctx.obj = {
"manifest": self.manifest,
"callbacks": self.callbacks,
}
for key, value in kwargs.items():
dbt_ctx.params[key] = value
# Hack to set parameter source to custom string
dbt_ctx.set_parameter_source(key, "kwargs") # type: ignore
result, success = cli.invoke(dbt_ctx)
return dbtRunnerResult(
result=result,
success=success,
)
except requires.ResultExit as e:
return dbtRunnerResult(
result=e.result,
success=False,
)
except requires.ExceptionExit as e:
return dbtRunnerResult(
exception=e.exception,
success=False,
)
except (BadOptionUsage, NoSuchOption, UsageError) as e:
return dbtRunnerResult(
exception=DbtUsageException(e.message),
success=False,
)
except ClickExit as e:
if e.exit_code == 0:
return dbtRunnerResult(success=True)
return dbtRunnerResult(
exception=DbtInternalException(f"unhandled exit code {e.exit_code}"),
success=False,
)
except BaseException as e:
return dbtRunnerResult(
exception=e,
success=False,
)
# dbt
@click.group(
context_settings={"help_option_names": ["-h", "--help"]},
invoke_without_command=True,
no_args_is_help=True,
epilog="Specify one of these sub-commands and you can find more help from there.",
)
@click.pass_context
@p.cache_selected_only
@p.debug
@p.deprecated_print
@p.enable_legacy_logger
@p.fail_fast
@p.log_cache_events
@p.log_format
@p.log_format_file
@p.log_level
@p.log_level_file
@p.log_path
@p.macro_debugging
@p.partial_parse
@p.partial_parse_file_path
@p.populate_cache
@p.print
@p.printer_width
@p.quiet
@p.record_timing_info
@p.send_anonymous_usage_stats
@p.single_threaded
@p.static_parser
@p.use_colors
@p.use_colors_file
@p.use_experimental_parser
@p.version
@p.version_check
@p.warn_error
@p.warn_error_options
@p.write_json
def cli(ctx, **kwargs):
"""An ELT tool for managing your SQL transformations and data models.
For more documentation on these commands, visit: docs.getdbt.com
"""
# dbt build
@cli.command("build")
@click.pass_context
@p.defer
@p.deprecated_defer
@p.exclude
@p.fail_fast
@p.favor_state
@p.deprecated_favor_state
@p.full_refresh
@p.indirect_selection
@p.profile
@p.profiles_dir
@p.project_dir
@p.resource_type
@p.select
@p.selector
@p.show
@p.state
@p.defer_state
@p.deprecated_state
@p.store_failures
@p.target
@p.target_path
@p.threads
@p.vars
@p.version_check
@requires.postflight
@requires.preflight
@requires.profile
@requires.project
@requires.runtime_config
@requires.manifest
def build(ctx, **kwargs):
"""Run all seeds, models, snapshots, and tests in DAG order"""
task = BuildTask(
ctx.obj["flags"],
ctx.obj["runtime_config"],
ctx.obj["manifest"],
)
results = task.run()
success = task.interpret_results(results)
return results, success
# dbt clean
@cli.command("clean")
@click.pass_context
@p.profile
@p.profiles_dir
@p.project_dir
@p.target
@p.target_path
@p.vars
@requires.postflight
@requires.preflight
@requires.unset_profile
@requires.project
def clean(ctx, **kwargs):
"""Delete all folders in the clean-targets list (usually the dbt_packages and target directories.)"""
task = CleanTask(ctx.obj["flags"], ctx.obj["project"])
results = task.run()
success = task.interpret_results(results)
return results, success
# dbt docs
@cli.group()
@click.pass_context
def docs(ctx, **kwargs):
"""Generate or serve the documentation website for your project"""
# dbt docs generate
@docs.command("generate")
@click.pass_context
@p.compile_docs
@p.defer
@p.deprecated_defer
@p.exclude
@p.favor_state
@p.deprecated_favor_state
@p.profile
@p.profiles_dir
@p.project_dir
@p.select
@p.selector
@p.empty_catalog
@p.state
@p.defer_state
@p.deprecated_state
@p.target
@p.target_path
@p.threads
@p.vars
@p.version_check
@requires.postflight
@requires.preflight
@requires.profile
@requires.project
@requires.runtime_config
@requires.manifest(write=False)
def docs_generate(ctx, **kwargs):
"""Generate the documentation website for your project"""
task = GenerateTask(
ctx.obj["flags"],
ctx.obj["runtime_config"],
ctx.obj["manifest"],
)
results = task.run()
success = task.interpret_results(results)
return results, success
# dbt docs serve
@docs.command("serve")
@click.pass_context
@p.browser
@p.port
@p.profile
@p.profiles_dir
@p.project_dir
@p.target
@p.target_path
@p.vars
@requires.postflight
@requires.preflight
@requires.profile
@requires.project
@requires.runtime_config
def docs_serve(ctx, **kwargs):
"""Serve the documentation website for your project"""
task = ServeTask(
ctx.obj["flags"],
ctx.obj["runtime_config"],
)
results = task.run()
success = task.interpret_results(results)
return results, success
# dbt compile
@cli.command("compile")
@click.pass_context
@p.defer
@p.deprecated_defer
@p.exclude
@p.favor_state
@p.deprecated_favor_state
@p.full_refresh
@p.show_output_format
@p.indirect_selection
@p.introspect
@p.profile
@p.profiles_dir
@p.project_dir
@p.select
@p.selector
@p.inline
@p.state
@p.defer_state
@p.deprecated_state
@p.target
@p.target_path
@p.threads
@p.vars
@p.version_check
@requires.postflight
@requires.preflight
@requires.profile
@requires.project
@requires.runtime_config
@requires.manifest
def compile(ctx, **kwargs):
"""Generates executable SQL from source, model, test, and analysis files. Compiled SQL files are written to the
target/ directory."""
task = CompileTask(
ctx.obj["flags"],
ctx.obj["runtime_config"],
ctx.obj["manifest"],
)
results = task.run()
success = task.interpret_results(results)
return results, success
# dbt show
@cli.command("show")
@click.pass_context
@p.defer
@p.deprecated_defer
@p.exclude
@p.favor_state
@p.deprecated_favor_state
@p.full_refresh
@p.show_output_format
@p.show_limit
@p.indirect_selection
@p.introspect
@p.profile
@p.profiles_dir
@p.project_dir
@p.select
@p.selector
@p.inline
@p.state
@p.defer_state
@p.deprecated_state
@p.target
@p.target_path
@p.threads
@p.vars
@p.version_check
@requires.postflight
@requires.preflight
@requires.profile
@requires.project
@requires.runtime_config
@requires.manifest
def show(ctx, **kwargs):
"""Generates executable SQL for a named resource or inline query, runs that SQL, and returns a preview of the
results. Does not materialize anything to the warehouse."""
task = ShowTask(
ctx.obj["flags"],
ctx.obj["runtime_config"],
ctx.obj["manifest"],
)
results = task.run()
success = task.interpret_results(results)
return results, success
# dbt debug
@cli.command("debug")
@click.pass_context
@p.debug_connection
@p.config_dir
@p.profile
@p.profiles_dir_exists_false
@p.project_dir
@p.target
@p.vars
@p.version_check
@requires.postflight
@requires.preflight
def debug(ctx, **kwargs):
"""Show information on the current dbt environment and check dependencies, then test the database connection. Not to be confused with the --debug option which increases verbosity."""
task = DebugTask(
ctx.obj["flags"],
None,
)
results = task.run()
success = task.interpret_results(results)
return results, success
# dbt deps
@cli.command("deps")
@click.pass_context
@p.profile
@p.profiles_dir_exists_false
@p.project_dir
@p.target
@p.vars
@requires.postflight
@requires.preflight
@requires.unset_profile
@requires.project
def deps(ctx, **kwargs):
"""Pull the most recent version of the dependencies listed in packages.yml"""
task = DepsTask(ctx.obj["flags"], ctx.obj["project"])
results = task.run()
success = task.interpret_results(results)
return results, success
# dbt init
@cli.command("init")
@click.pass_context
# for backwards compatibility, accept 'project_name' as an optional positional argument
@click.argument("project_name", required=False)
@p.profile
@p.profiles_dir_exists_false
@p.project_dir
@p.skip_profile_setup
@p.target
@p.vars
@requires.postflight
@requires.preflight
def init(ctx, **kwargs):
"""Initialize a new dbt project."""
task = InitTask(ctx.obj["flags"], None)
results = task.run()
success = task.interpret_results(results)
return results, success
# dbt list
@cli.command("list")
@click.pass_context
@p.exclude
@p.indirect_selection
@p.models
@p.output
@p.output_keys
@p.profile
@p.profiles_dir
@p.project_dir
@p.resource_type
@p.raw_select
@p.selector
@p.state
@p.defer_state
@p.deprecated_state
@p.target
@p.target_path
@p.vars
@requires.postflight
@requires.preflight
@requires.profile
@requires.project
@requires.runtime_config
@requires.manifest
def list(ctx, **kwargs):
"""List the resources in your project"""
task = ListTask(
ctx.obj["flags"],
ctx.obj["runtime_config"],
ctx.obj["manifest"],
)
results = task.run()
success = task.interpret_results(results)
return results, success
# Alias "list" to "ls"
ls = copy(cli.commands["list"])
ls.hidden = True
cli.add_command(ls, "ls")
# dbt parse
@cli.command("parse")
@click.pass_context
@p.profile
@p.profiles_dir
@p.project_dir
@p.target
@p.target_path
@p.threads
@p.vars
@p.version_check
@requires.postflight
@requires.preflight
@requires.profile
@requires.project
@requires.runtime_config
@requires.manifest(write_perf_info=True)
def parse(ctx, **kwargs):
"""Parses the project and provides information on performance"""
# manifest generation and writing happens in @requires.manifest
return ctx.obj["manifest"], True
# dbt run
@cli.command("run")
@click.pass_context
@p.defer
@p.deprecated_defer
@p.favor_state
@p.deprecated_favor_state
@p.exclude
@p.fail_fast
@p.full_refresh
@p.profile
@p.profiles_dir
@p.project_dir
@p.select
@p.selector
@p.state
@p.defer_state
@p.deprecated_state
@p.target
@p.target_path
@p.threads
@p.vars
@p.version_check
@requires.postflight
@requires.preflight
@requires.profile
@requires.project
@requires.runtime_config
@requires.manifest
def run(ctx, **kwargs):
"""Compile SQL and execute against the current target database."""
task = RunTask(
ctx.obj["flags"],
ctx.obj["runtime_config"],
ctx.obj["manifest"],
)
results = task.run()
success = task.interpret_results(results)
return results, success
# dbt retry
@cli.command("retry")
@click.pass_context
@p.project_dir
@p.profiles_dir
@p.vars
@p.profile
@p.target
@p.state
@p.threads
@p.fail_fast
@requires.postflight
@requires.preflight
@requires.profile
@requires.project
@requires.runtime_config
@requires.manifest
def retry(ctx, **kwargs):
"""Retry the nodes that failed in the previous run."""
task = RetryTask(
ctx.obj["flags"],
ctx.obj["runtime_config"],
ctx.obj["manifest"],
)
results = task.run()
success = task.interpret_results(results)
return results, success
# dbt clone
@cli.command("clone")
@click.pass_context
@p.defer_state
@p.exclude
@p.full_refresh
@p.profile
@p.profiles_dir
@p.project_dir
@p.resource_type
@p.select
@p.selector
@p.state # required
@p.target
@p.target_path
@p.threads
@p.vars
@p.version_check
@requires.preflight
@requires.profile
@requires.project
@requires.runtime_config
@requires.manifest
@requires.postflight
def clone(ctx, **kwargs):
"""Create clones of selected nodes based on their location in the manifest provided to --state."""
task = CloneTask(
ctx.obj["flags"],
ctx.obj["runtime_config"],
ctx.obj["manifest"],
)
results = task.run()
success = task.interpret_results(results)
return results, success
# dbt run operation
@cli.command("run-operation")
@click.pass_context
@click.argument("macro")
@p.args
@p.profile
@p.profiles_dir
@p.project_dir
@p.target
@p.target_path
@p.threads
@p.vars
@requires.postflight
@requires.preflight
@requires.profile
@requires.project
@requires.runtime_config
@requires.manifest
def run_operation(ctx, **kwargs):
"""Run the named macro with any supplied arguments."""
task = RunOperationTask(
ctx.obj["flags"],
ctx.obj["runtime_config"],
ctx.obj["manifest"],
)
results = task.run()
success = task.interpret_results(results)
return results, success
# dbt seed
@cli.command("seed")
@click.pass_context
@p.exclude
@p.full_refresh
@p.profile
@p.profiles_dir
@p.project_dir
@p.select
@p.selector
@p.show
@p.state
@p.defer_state
@p.deprecated_state
@p.target
@p.target_path
@p.threads
@p.vars
@p.version_check
@requires.postflight
@requires.preflight
@requires.profile
@requires.project
@requires.runtime_config
@requires.manifest
def seed(ctx, **kwargs):
"""Load data from csv files into your data warehouse."""
task = SeedTask(
ctx.obj["flags"],
ctx.obj["runtime_config"],
ctx.obj["manifest"],
)
results = task.run()
success = task.interpret_results(results)
return results, success
# dbt snapshot
@cli.command("snapshot")
@click.pass_context
@p.defer
@p.deprecated_defer
@p.exclude
@p.favor_state
@p.deprecated_favor_state
@p.profile
@p.profiles_dir
@p.project_dir
@p.select
@p.selector
@p.state
@p.defer_state
@p.deprecated_state
@p.target
@p.target_path
@p.threads
@p.vars
@requires.postflight
@requires.preflight
@requires.profile
@requires.project
@requires.runtime_config
@requires.manifest
def snapshot(ctx, **kwargs):
"""Execute snapshots defined in your project"""
task = SnapshotTask(
ctx.obj["flags"],
ctx.obj["runtime_config"],
ctx.obj["manifest"],
)
results = task.run()
success = task.interpret_results(results)
return results, success
# dbt source
@cli.group()
@click.pass_context
def source(ctx, **kwargs):
"""Manage your project's sources"""
# dbt source freshness
@source.command("freshness")
@click.pass_context
@p.exclude
@p.output_path # TODO: Is this ok to re-use? We have three different output params, how much can we consolidate?
@p.profile
@p.profiles_dir
@p.project_dir
@p.select
@p.selector
@p.state
@p.defer_state
@p.deprecated_state
@p.target
@p.target_path
@p.threads
@p.vars
@requires.postflight
@requires.preflight
@requires.profile
@requires.project
@requires.runtime_config
@requires.manifest
def freshness(ctx, **kwargs):
"""check the current freshness of the project's sources"""
task = FreshnessTask(
ctx.obj["flags"],
ctx.obj["runtime_config"],
ctx.obj["manifest"],
)
results = task.run()
success = task.interpret_results(results)
return results, success
# Alias "source freshness" to "snapshot-freshness"
snapshot_freshness = copy(cli.commands["source"].commands["freshness"]) # type: ignore
snapshot_freshness.hidden = True
cli.commands["source"].add_command(snapshot_freshness, "snapshot-freshness") # type: ignore
# dbt test
@cli.command("test")
@click.pass_context
@p.defer
@p.deprecated_defer
@p.exclude
@p.fail_fast
@p.favor_state
@p.deprecated_favor_state
@p.indirect_selection
@p.profile
@p.profiles_dir
@p.project_dir
@p.select
@p.selector
@p.state
@p.defer_state
@p.deprecated_state
@p.store_failures
@p.target
@p.target_path
@p.threads
@p.vars
@p.version_check
@requires.postflight
@requires.preflight
@requires.profile
@requires.project
@requires.runtime_config
@requires.manifest
def test(ctx, **kwargs):
"""Runs tests on data in deployed models. Run this after `dbt run`"""
task = TestTask(
ctx.obj["flags"],
ctx.obj["runtime_config"],
ctx.obj["manifest"],
)
results = task.run()
success = task.interpret_results(results)
return results, success
# Support running as a module
if __name__ == "__main__":
cli()

View File

@@ -1,62 +0,0 @@
from click import ParamType, Choice
from dbt.config.utils import parse_cli_yaml_string
from dbt.exceptions import ValidationError, DbtValidationError, OptionNotYamlDictError
from dbt.helper_types import WarnErrorOptions
class YAML(ParamType):
"""The Click YAML type. Converts YAML strings into objects."""
name = "YAML"
def convert(self, value, param, ctx):
# assume non-string values are a problem
if not isinstance(value, str):
self.fail(f"Cannot load YAML from type {type(value)}", param, ctx)
try:
param_option_name = param.opts[0] if param.opts else param.name
return parse_cli_yaml_string(value, param_option_name.strip("-"))
except (ValidationError, DbtValidationError, OptionNotYamlDictError):
self.fail(f"String '{value}' is not valid YAML", param, ctx)
class WarnErrorOptionsType(YAML):
"""The Click WarnErrorOptions type. Converts YAML strings into objects."""
name = "WarnErrorOptionsType"
def convert(self, value, param, ctx):
# this function is being used by param in click
include_exclude = super().convert(value, param, ctx)
return WarnErrorOptions(
include=include_exclude.get("include", []), exclude=include_exclude.get("exclude", [])
)
class Truthy(ParamType):
"""The Click Truthy type. Converts strings into a "truthy" type"""
name = "TRUTHY"
def convert(self, value, param, ctx):
# assume non-string / non-None values are a problem
if not isinstance(value, (str, None)):
self.fail(f"Cannot load TRUTHY from type {type(value)}", param, ctx)
if value is None or value.lower() in ("0", "false", "f"):
return None
else:
return value
class ChoiceTuple(Choice):
name = "CHOICE_TUPLE"
def convert(self, value, param, ctx):
for value_item in value:
super().convert(value_item, param, ctx)
return value

View File

@@ -1,75 +0,0 @@
import click
import inspect
import typing as t
from click import Context
from dbt.cli.option_types import ChoiceTuple
# Implementation from: https://stackoverflow.com/a/48394004
# Note MultiOption options must be specified with type=tuple or type=ChoiceTuple (https://github.com/pallets/click/issues/2012)
class MultiOption(click.Option):
def __init__(self, *args, **kwargs):
self.save_other_options = kwargs.pop("save_other_options", True)
nargs = kwargs.pop("nargs", -1)
assert nargs == -1, "nargs, if set, must be -1 not {}".format(nargs)
super(MultiOption, self).__init__(*args, **kwargs)
self._previous_parser_process = None
self._eat_all_parser = None
# validate that multiple=True
multiple = kwargs.pop("multiple", None)
msg = f"MultiOption named `{self.name}` must have multiple=True (rather than {multiple})"
assert multiple, msg
# validate that type=tuple or type=ChoiceTuple
option_type = kwargs.pop("type", None)
msg = f"MultiOption named `{self.name}` must be tuple or ChoiceTuple (rather than {option_type})"
if inspect.isclass(option_type):
assert issubclass(option_type, tuple), msg
else:
assert isinstance(option_type, ChoiceTuple), msg
def add_to_parser(self, parser, ctx):
def parser_process(value, state):
# method to hook to the parser.process
done = False
value = [value]
if self.save_other_options:
# grab everything up to the next option
while state.rargs and not done:
for prefix in self._eat_all_parser.prefixes:
if state.rargs[0].startswith(prefix):
done = True
if not done:
value.append(state.rargs.pop(0))
else:
# grab everything remaining
value += state.rargs
state.rargs[:] = []
value = tuple(value)
# call the actual process
self._previous_parser_process(value, state)
retval = super(MultiOption, self).add_to_parser(parser, ctx)
for name in self.opts:
our_parser = parser._long_opt.get(name) or parser._short_opt.get(name)
if our_parser:
self._eat_all_parser = our_parser
self._previous_parser_process = our_parser.process
our_parser.process = parser_process
break
return retval
def type_cast_value(self, ctx: Context, value: t.Any) -> t.Any:
def flatten(data):
if isinstance(data, tuple):
for x in data:
yield from flatten(x)
else:
yield data
# there will be nested tuples to flatten when multiple=True
value = super(MultiOption, self).type_cast_value(ctx, value)
if value:
value = tuple(flatten(value))
return value

View File

@@ -1,583 +0,0 @@
from pathlib import Path
import click
from dbt.cli.options import MultiOption
from dbt.cli.option_types import YAML, ChoiceTuple, WarnErrorOptionsType
from dbt.cli.resolvers import default_project_dir, default_profiles_dir
from dbt.version import get_version_information
args = click.option(
"--args",
envvar=None,
help="Supply arguments to the macro. This dictionary will be mapped to the keyword arguments defined in the selected macro. This argument should be a YAML string, eg. '{my_variable: my_value}'",
type=YAML(),
)
browser = click.option(
"--browser/--no-browser",
envvar=None,
help="Wether or not to open a local web browser after starting the server",
default=True,
)
cache_selected_only = click.option(
"--cache-selected-only/--no-cache-selected-only",
envvar="DBT_CACHE_SELECTED_ONLY",
help="At start of run, populate relational cache only for schemas containing selected nodes, or for all schemas of interest.",
)
introspect = click.option(
"--introspect/--no-introspect",
envvar="DBT_INTROSPECT",
help="Whether to scaffold introspective queries as part of compilation",
default=True,
)
compile_docs = click.option(
"--compile/--no-compile",
envvar=None,
help="Whether or not to run 'dbt compile' as part of docs generation",
default=True,
)
config_dir = click.option(
"--config-dir",
envvar=None,
help="Print a system-specific command to access the directory that the current dbt project is searching for a profiles.yml. Then, exit. This flag renders other debug step flags no-ops.",
is_flag=True,
)
debug = click.option(
"--debug/--no-debug",
"-d/ ",
envvar="DBT_DEBUG",
help="Display debug logging during dbt execution. Useful for debugging and making bug reports.",
)
# flag was previously named DEFER_MODE
defer = click.option(
"--defer/--no-defer",
envvar="DBT_DEFER",
help="If set, resolve unselected nodes by deferring to the manifest within the --state directory.",
)
deprecated_defer = click.option(
"--deprecated-defer",
envvar="DBT_DEFER_TO_STATE",
help="Internal flag for deprecating old env var.",
default=False,
hidden=True,
)
enable_legacy_logger = click.option(
"--enable-legacy-logger/--no-enable-legacy-logger",
envvar="DBT_ENABLE_LEGACY_LOGGER",
hidden=True,
)
exclude = click.option(
"--exclude",
envvar=None,
type=tuple,
cls=MultiOption,
multiple=True,
help="Specify the nodes to exclude.",
)
fail_fast = click.option(
"--fail-fast/--no-fail-fast",
"-x/ ",
envvar="DBT_FAIL_FAST",
help="Stop execution on first failure.",
)
favor_state = click.option(
"--favor-state/--no-favor-state",
envvar="DBT_FAVOR_STATE",
help="If set, defer to the argument provided to the state flag for resolving unselected nodes, even if the node(s) exist as a database object in the current environment.",
)
deprecated_favor_state = click.option(
"--deprecated-favor-state",
envvar="DBT_FAVOR_STATE_MODE",
help="Internal flag for deprecating old env var.",
)
full_refresh = click.option(
"--full-refresh",
"-f",
envvar="DBT_FULL_REFRESH",
help="If specified, dbt will drop incremental models and fully-recalculate the incremental table from the model definition.",
is_flag=True,
)
indirect_selection = click.option(
"--indirect-selection",
envvar="DBT_INDIRECT_SELECTION",
help="Choose which tests to select that are adjacent to selected resources. Eager is most inclusive, cautious is most exclusive, and buildable is in between. Empty includes no tests at all.",
type=click.Choice(["eager", "cautious", "buildable", "empty"], case_sensitive=False),
default="eager",
)
log_cache_events = click.option(
"--log-cache-events/--no-log-cache-events",
help="Enable verbose logging for relational cache events to help when debugging.",
envvar="DBT_LOG_CACHE_EVENTS",
)
log_format = click.option(
"--log-format",
envvar="DBT_LOG_FORMAT",
help="Specify the format of logging to the console and the log file. Use --log-format-file to configure the format for the log file differently than the console.",
type=click.Choice(["text", "debug", "json", "default"], case_sensitive=False),
default="default",
)
log_format_file = click.option(
"--log-format-file",
envvar="DBT_LOG_FORMAT_FILE",
help="Specify the format of logging to the log file by overriding the default value and the general --log-format setting.",
type=click.Choice(["text", "debug", "json", "default"], case_sensitive=False),
default="debug",
)
log_level = click.option(
"--log-level",
envvar="DBT_LOG_LEVEL",
help="Specify the minimum severity of events that are logged to the console and the log file. Use --log-level-file to configure the severity for the log file differently than the console.",
type=click.Choice(["debug", "info", "warn", "error", "none"], case_sensitive=False),
default="info",
)
log_level_file = click.option(
"--log-level-file",
envvar="DBT_LOG_LEVEL_FILE",
help="Specify the minimum severity of events that are logged to the log file by overriding the default value and the general --log-level setting.",
type=click.Choice(["debug", "info", "warn", "error", "none"], case_sensitive=False),
default="debug",
)
use_colors = click.option(
"--use-colors/--no-use-colors",
envvar="DBT_USE_COLORS",
help="Specify whether log output is colorized in the console and the log file. Use --use-colors-file/--no-use-colors-file to colorize the log file differently than the console.",
default=True,
)
use_colors_file = click.option(
"--use-colors-file/--no-use-colors-file",
envvar="DBT_USE_COLORS_FILE",
help="Specify whether log file output is colorized by overriding the default value and the general --use-colors/--no-use-colors setting.",
default=True,
)
log_path = click.option(
"--log-path",
envvar="DBT_LOG_PATH",
help="Configure the 'log-path'. Only applies this setting for the current run. Overrides the 'DBT_LOG_PATH' if it is set.",
default=None,
type=click.Path(resolve_path=True, path_type=Path),
)
macro_debugging = click.option(
"--macro-debugging/--no-macro-debugging",
envvar="DBT_MACRO_DEBUGGING",
hidden=True,
)
# This is a less standard usage of --output; output_path below is the more standard form
output = click.option(
"--output",
envvar=None,
help="Specify the output format: either JSON or a newline-delimited list of selectors, paths, or names",
type=click.Choice(["json", "name", "path", "selector"], case_sensitive=False),
default="selector",
)
show_output_format = click.option(
"--output",
envvar=None,
help="Output format for dbt compile and dbt show",
type=click.Choice(["json", "text"], case_sensitive=False),
default="text",
)
show_limit = click.option(
"--limit",
envvar=None,
help="Limit the number of results returned by dbt show",
type=click.INT,
default=5,
)
output_keys = click.option(
"--output-keys",
envvar=None,
help=(
"Space-delimited listing of node properties to include as custom keys for JSON output "
"(e.g. `--output json --output-keys name resource_type description`)"
),
type=tuple,
cls=MultiOption,
multiple=True,
default=[],
)
output_path = click.option(
"--output",
"-o",
envvar=None,
help="Specify the output path for the JSON report. By default, outputs to 'target/sources.json'",
type=click.Path(file_okay=True, dir_okay=False, writable=True),
default=None,
)
partial_parse = click.option(
"--partial-parse/--no-partial-parse",
envvar="DBT_PARTIAL_PARSE",
help="Allow for partial parsing by looking for and writing to a pickle file in the target directory. This overrides the user configuration file.",
default=True,
)
partial_parse_file_path = click.option(
"--partial-parse-file-path",
envvar="DBT_PARTIAL_PARSE_FILE_PATH",
help="Internal flag for path to partial_parse.manifest file.",
default=None,
hidden=True,
type=click.Path(exists=True, dir_okay=False, resolve_path=True),
)
populate_cache = click.option(
"--populate-cache/--no-populate-cache",
envvar="DBT_POPULATE_CACHE",
help="At start of run, use `show` or `information_schema` queries to populate a relational cache, which can speed up subsequent materializations.",
default=True,
)
port = click.option(
"--port",
envvar=None,
help="Specify the port number for the docs server",
default=8080,
type=click.INT,
)
print = click.option(
"--print/--no-print",
envvar="DBT_PRINT",
help="Output all {{ print() }} macro calls.",
default=True,
)
deprecated_print = click.option(
"--deprecated-print/--deprecated-no-print",
envvar="DBT_NO_PRINT",
help="Internal flag for deprecating old env var.",
default=True,
hidden=True,
callback=lambda ctx, param, value: not value,
)
printer_width = click.option(
"--printer-width",
envvar="DBT_PRINTER_WIDTH",
help="Sets the width of terminal output",
type=click.INT,
default=80,
)
profile = click.option(
"--profile",
envvar=None,
help="Which profile to load. Overrides setting in dbt_project.yml.",
)
profiles_dir = click.option(
"--profiles-dir",
envvar="DBT_PROFILES_DIR",
help="Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/",
default=default_profiles_dir,
type=click.Path(exists=True),
)
# `dbt debug` uses this because it implements custom behaviour for non-existent profiles.yml directories
# `dbt deps` does not load a profile at all
# `dbt init` will write profiles.yml if it doesn't yet exist
profiles_dir_exists_false = click.option(
"--profiles-dir",
envvar="DBT_PROFILES_DIR",
help="Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/",
default=default_profiles_dir,
type=click.Path(exists=False),
)
project_dir = click.option(
"--project-dir",
envvar="DBT_PROJECT_DIR",
help="Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.",
default=default_project_dir,
type=click.Path(exists=True),
)
quiet = click.option(
"--quiet/--no-quiet",
"-q",
envvar="DBT_QUIET",
help="Suppress all non-error logging to stdout. Does not affect {{ print() }} macro calls.",
)
record_timing_info = click.option(
"--record-timing-info",
"-r",
envvar=None,
help="When this option is passed, dbt will output low-level timing stats to the specified file. Example: `--record-timing-info output.profile`",
type=click.Path(exists=False),
)
resource_type = click.option(
"--resource-types",
"--resource-type",
envvar=None,
help="Restricts the types of resources that dbt will include",
type=ChoiceTuple(
[
"metric",
"source",
"analysis",
"model",
"test",
"exposure",
"snapshot",
"seed",
"default",
"all",
],
case_sensitive=False,
),
cls=MultiOption,
multiple=True,
default=(),
)
model_decls = ("-m", "--models", "--model")
select_decls = ("-s", "--select")
select_attrs = {
"envvar": None,
"help": "Specify the nodes to include.",
"cls": MultiOption,
"multiple": True,
"type": tuple,
}
inline = click.option(
"--inline",
envvar=None,
help="Pass SQL inline to dbt compile and show",
)
# `--select` and `--models` are analogous for most commands except `dbt list` for legacy reasons.
# Most CLI arguments should use the combined `select` option that aliases `--models` to `--select`.
# However, if you need to expose `--models` and `--select` as separate options (as `dbt ls` does), use the `models` and `raw_select` options instead.
# See https://github.com/dbt-labs/dbt-core/pull/6774#issuecomment-1408476095 for more info.
models = click.option(*model_decls, **select_attrs)
raw_select = click.option(*select_decls, **select_attrs)
select = click.option(*select_decls, *model_decls, **select_attrs)
selector = click.option(
"--selector",
envvar=None,
help="The selector name to use, as defined in selectors.yml",
)
send_anonymous_usage_stats = click.option(
"--send-anonymous-usage-stats/--no-send-anonymous-usage-stats",
envvar="DBT_SEND_ANONYMOUS_USAGE_STATS",
help="Send anonymous usage stats to dbt Labs.",
default=True,
)
show = click.option(
"--show",
envvar=None,
help="Show a sample of the loaded data in the terminal",
is_flag=True,
)
# TODO: The env var is a correction!
# The original env var was `DBT_TEST_SINGLE_THREADED`.
# This broke the existing naming convention.
# This will need to be communicated as a change to the community!
#
# N.B. This flag is only used for testing, hence it's hidden from help text.
single_threaded = click.option(
"--single-threaded/--no-single-threaded",
envvar="DBT_SINGLE_THREADED",
default=False,
hidden=True,
)
skip_profile_setup = click.option(
"--skip-profile-setup",
"-s",
envvar=None,
help="Skip interactive profile setup.",
is_flag=True,
)
empty_catalog = click.option(
"--empty-catalog",
help="If specified, generate empty catalog.json file during the `dbt docs generate` command.",
default=False,
is_flag=True,
)
state = click.option(
"--state",
envvar="DBT_STATE",
help="Unless overridden, use this state directory for both state comparison and deferral.",
type=click.Path(
dir_okay=True,
file_okay=False,
readable=True,
resolve_path=False,
path_type=Path,
),
)
defer_state = click.option(
"--defer-state",
envvar="DBT_DEFER_STATE",
help="Override the state directory for deferral only.",
type=click.Path(
dir_okay=True,
file_okay=False,
readable=True,
resolve_path=False,
path_type=Path,
),
)
deprecated_state = click.option(
"--deprecated-state",
envvar="DBT_ARTIFACT_STATE_PATH",
help="Internal flag for deprecating old env var.",
hidden=True,
type=click.Path(
dir_okay=True,
file_okay=False,
readable=True,
resolve_path=True,
path_type=Path,
),
)
static_parser = click.option(
"--static-parser/--no-static-parser",
envvar="DBT_STATIC_PARSER",
help="Use the static parser.",
default=True,
)
store_failures = click.option(
"--store-failures",
envvar="DBT_STORE_FAILURES",
help="Store test results (failing rows) in the database",
is_flag=True,
)
target = click.option(
"--target",
"-t",
envvar=None,
help="Which target to load for the given profile",
)
target_path = click.option(
"--target-path",
envvar="DBT_TARGET_PATH",
help="Configure the 'target-path'. Only applies this setting for the current run. Overrides the 'DBT_TARGET_PATH' if it is set.",
type=click.Path(),
)
debug_connection = click.option(
"--connection",
envvar=None,
help="Test the connection to the target database independent of dependency checks.",
is_flag=True,
)
threads = click.option(
"--threads",
envvar=None,
help="Specify number of threads to use while executing models. Overrides settings in profiles.yml.",
default=None,
type=click.INT,
)
use_experimental_parser = click.option(
"--use-experimental-parser/--no-use-experimental-parser",
envvar="DBT_USE_EXPERIMENTAL_PARSER",
help="Enable experimental parsing features.",
)
vars = click.option(
"--vars",
envvar=None,
help="Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, eg. '{my_variable: my_value}'",
type=YAML(),
default="{}",
)
# TODO: when legacy flags are deprecated use
# click.version_option instead of a callback
def _version_callback(ctx, _param, value):
if not value or ctx.resilient_parsing:
return
click.echo(get_version_information())
ctx.exit()
version = click.option(
"--version",
"-V",
"-v",
callback=_version_callback,
envvar=None,
expose_value=False,
help="Show version information and exit",
is_eager=True,
is_flag=True,
)
version_check = click.option(
"--version-check/--no-version-check",
envvar="DBT_VERSION_CHECK",
help="If set, ensure the installed dbt version matches the require-dbt-version specified in the dbt_project.yml file (if any). Otherwise, allow them to differ.",
default=True,
)
warn_error = click.option(
"--warn-error",
envvar="DBT_WARN_ERROR",
help="If dbt would normally warn, instead raise an exception. Examples include --select that selects nothing, deprecations, configurations with no associated models, invalid test configurations, and missing sources/refs in tests.",
default=None,
is_flag=True,
)
warn_error_options = click.option(
"--warn-error-options",
envvar="DBT_WARN_ERROR_OPTIONS",
default="{}",
help="""If dbt would normally warn, instead raise an exception based on include/exclude configuration. Examples include --select that selects nothing, deprecations, configurations with no associated models, invalid test configurations,
and missing sources/refs in tests. This argument should be a YAML string, with keys 'include' or 'exclude'. eg. '{"include": "all", "exclude": ["NoNodesForSelectionCriteria"]}'""",
type=WarnErrorOptionsType(),
)
write_json = click.option(
"--write-json/--no-write-json",
envvar="DBT_WRITE_JSON",
help="Whether or not to write the manifest.json and run_results.json files to the target directory",
default=True,
)


@@ -1,267 +0,0 @@
import dbt.tracking
from dbt.version import installed as installed_version
from dbt.adapters.factory import adapter_management, register_adapter
from dbt.flags import set_flags, get_flag_dict
from dbt.cli.exceptions import (
ExceptionExit,
ResultExit,
)
from dbt.cli.flags import Flags
from dbt.config import RuntimeConfig
from dbt.config.runtime import load_project, load_profile, UnsetProfile
from dbt.events.functions import fire_event, LOG_VERSION, set_invocation_id, setup_event_logger
from dbt.events.types import (
CommandCompleted,
MainReportVersion,
MainReportArgs,
MainTrackingUserState,
)
from dbt.events.helpers import get_json_string_utcnow
from dbt.events.types import MainEncounteredError, MainStackTrace
from dbt.exceptions import Exception as DbtException, DbtProjectError, FailFastError
from dbt.parser.manifest import ManifestLoader, write_manifest
from dbt.profiler import profiler
from dbt.tracking import active_user, initialize_from_flags, track_run
from dbt.utils import cast_dict_to_dict_of_strings
from dbt.plugins import set_up_plugin_manager, get_plugin_manager
from click import Context
from functools import update_wrapper
import time
import traceback
def preflight(func):
def wrapper(*args, **kwargs):
ctx = args[0]
assert isinstance(ctx, Context)
ctx.obj = ctx.obj or {}
# Flags
flags = Flags(ctx)
ctx.obj["flags"] = flags
set_flags(flags)
# Logging
callbacks = ctx.obj.get("callbacks", [])
set_invocation_id()
setup_event_logger(flags=flags, callbacks=callbacks)
# Tracking
initialize_from_flags(flags.SEND_ANONYMOUS_USAGE_STATS, flags.PROFILES_DIR)
ctx.with_resource(track_run(run_command=flags.WHICH))
# Now that we have our logger, fire away!
fire_event(MainReportVersion(version=str(installed_version), log_version=LOG_VERSION))
flags_dict_str = cast_dict_to_dict_of_strings(get_flag_dict())
fire_event(MainReportArgs(args=flags_dict_str))
# Deprecation warnings
flags.fire_deprecations()
if active_user is not None: # mypy appeasement, always true
fire_event(MainTrackingUserState(user_state=active_user.state()))
# Profiling
if flags.RECORD_TIMING_INFO:
ctx.with_resource(profiler(enable=True, outfile=flags.RECORD_TIMING_INFO))
# Adapter management
ctx.with_resource(adapter_management())
return func(*args, **kwargs)
return update_wrapper(wrapper, func)
def postflight(func):
"""The decorator that handles all exception handling for the click commands.
This decorator must be used before any other decorators that may throw an exception."""
def wrapper(*args, **kwargs):
ctx = args[0]
start_func = time.perf_counter()
success = False
try:
result, success = func(*args, **kwargs)
except FailFastError as e:
fire_event(MainEncounteredError(exc=str(e)))
raise ResultExit(e.result)
except DbtException as e:
fire_event(MainEncounteredError(exc=str(e)))
raise ExceptionExit(e)
except BaseException as e:
fire_event(MainEncounteredError(exc=str(e)))
fire_event(MainStackTrace(stack_trace=traceback.format_exc()))
raise ExceptionExit(e)
finally:
fire_event(
CommandCompleted(
command=ctx.command_path,
success=success,
completed_at=get_json_string_utcnow(),
elapsed=time.perf_counter() - start_func,
)
)
if not success:
raise ResultExit(result)
return (result, success)
return update_wrapper(wrapper, func)
# TODO: UnsetProfile is necessary for deps and clean to load a project.
# This decorator and its usage can be removed once https://github.com/dbt-labs/dbt-core/issues/6257 is closed.
def unset_profile(func):
def wrapper(*args, **kwargs):
ctx = args[0]
assert isinstance(ctx, Context)
profile = UnsetProfile()
ctx.obj["profile"] = profile
return func(*args, **kwargs)
return update_wrapper(wrapper, func)
def profile(func):
def wrapper(*args, **kwargs):
ctx = args[0]
assert isinstance(ctx, Context)
flags = ctx.obj["flags"]
# TODO: Generalize safe access to flags.THREADS:
# https://github.com/dbt-labs/dbt-core/issues/6259
threads = getattr(flags, "THREADS", None)
profile = load_profile(flags.PROJECT_DIR, flags.VARS, flags.PROFILE, flags.TARGET, threads)
ctx.obj["profile"] = profile
return func(*args, **kwargs)
return update_wrapper(wrapper, func)
def project(func):
def wrapper(*args, **kwargs):
ctx = args[0]
assert isinstance(ctx, Context)
# TODO: Decouple target from profile, and remove the need for profile here:
# https://github.com/dbt-labs/dbt-core/issues/6257
if not ctx.obj.get("profile"):
raise DbtProjectError("profile required for project")
flags = ctx.obj["flags"]
project = load_project(
flags.PROJECT_DIR, flags.VERSION_CHECK, ctx.obj["profile"], flags.VARS
)
ctx.obj["project"] = project
# Plugins
set_up_plugin_manager(project_name=project.project_name)
if dbt.tracking.active_user is not None:
project_id = None if project is None else project.hashed_name()
dbt.tracking.track_project_id({"project_id": project_id})
return func(*args, **kwargs)
return update_wrapper(wrapper, func)
def runtime_config(func):
"""A decorator used by click command functions for generating a runtime
config given a profile and project.
"""
def wrapper(*args, **kwargs):
ctx = args[0]
assert isinstance(ctx, Context)
req_strs = ["profile", "project"]
reqs = [ctx.obj.get(req_str) for req_str in req_strs]
if None in reqs:
raise DbtProjectError("profile and project required for runtime_config")
config = RuntimeConfig.from_parts(
ctx.obj["project"],
ctx.obj["profile"],
ctx.obj["flags"],
)
ctx.obj["runtime_config"] = config
if dbt.tracking.active_user is not None:
adapter_type = (
getattr(config.credentials, "type", None)
if hasattr(config, "credentials")
else None
)
adapter_unique_id = (
config.credentials.hashed_unique_field()
if hasattr(config, "credentials")
else None
)
dbt.tracking.track_adapter_info(
{
"adapter_type": adapter_type,
"adapter_unique_id": adapter_unique_id,
}
)
return func(*args, **kwargs)
return update_wrapper(wrapper, func)
def manifest(*args0, write=True, write_perf_info=False):
"""A decorator used by click command functions for generating a manifest
given a profile, project, and runtime config. This also registers the adapter
from the runtime config and conditionally writes the manifest to disk.
"""
def outer_wrapper(func):
def wrapper(*args, **kwargs):
ctx = args[0]
assert isinstance(ctx, Context)
req_strs = ["profile", "project", "runtime_config"]
reqs = [ctx.obj.get(dep) for dep in req_strs]
if None in reqs:
raise DbtProjectError("profile, project, and runtime_config required for manifest")
runtime_config = ctx.obj["runtime_config"]
register_adapter(runtime_config)
# a manifest has already been set on the context, so don't overwrite it
if ctx.obj.get("manifest") is None:
manifest = ManifestLoader.get_full_manifest(
runtime_config,
write_perf_info=write_perf_info,
)
ctx.obj["manifest"] = manifest
if write and ctx.obj["flags"].write_json:
write_manifest(manifest, runtime_config.project_target_path)
pm = get_plugin_manager(runtime_config.project_name)
plugin_artifacts = pm.get_manifest_artifacts(manifest)
for path, plugin_artifact in plugin_artifacts.items():
plugin_artifact.write(path)
return func(*args, **kwargs)
return update_wrapper(wrapper, func)
# if there are no args, the decorator was used without params @decorator
# otherwise, the decorator was called with params @decorator(arg)
if len(args0) == 0:
return outer_wrapper
return outer_wrapper(args0[0])
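
For reference, a hedged sketch (not in the diff) of the two invocation styles that the final `len(args0)` check above distinguishes; the module path `dbt.cli.requires` and the stub commands are assumptions.

```python
# Hypothetical sketch: the two ways the manifest decorator above can be applied.
# Assumes this module is importable as dbt.cli.requires; the stub commands below
# would still need preflight/profile/project/runtime_config to populate ctx.obj
# before they could actually run.
from dbt.cli import requires


@requires.manifest                # bare form: args0 == (func,), so outer_wrapper(func) is returned
def parse_cmd(ctx, **kwargs):
    return None, True


@requires.manifest(write=False)   # parameterized form: args0 == (), so outer_wrapper is returned
def compile_cmd(ctx, **kwargs):
    return None, True
```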


@@ -1,31 +0,0 @@
from pathlib import Path
from dbt.config.project import PartialProject
from dbt.exceptions import DbtProjectError
def default_project_dir() -> Path:
paths = list(Path.cwd().parents)
paths.insert(0, Path.cwd())
return next((x for x in paths if (x / "dbt_project.yml").exists()), Path.cwd())
def default_profiles_dir() -> Path:
return Path.cwd() if (Path.cwd() / "profiles.yml").exists() else Path.home() / ".dbt"
def default_log_path(project_dir: Path, verify_version: bool = False) -> Path:
"""If available, derive a default log path from dbt_project.yml. Otherwise, default to "logs".
Known limitations:
1. Using PartialProject here, so no jinja rendering of log-path.
2. Programmatic invocations of the cli via dbtRunner may pass a Project object directly,
which is not being taken into consideration here to extract a log-path.
"""
default_log_path = Path("logs")
try:
partial = PartialProject.from_project_root(str(project_dir), verify_version=verify_version)
partial_log_path = partial.project_dict.get("log-path") or default_log_path
default_log_path = Path(project_dir) / partial_log_path
except DbtProjectError:
pass
return default_log_path
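
A quick usage sketch (not in the diff) of the resolvers above; `default_project_dir` and `default_profiles_dir` are the same callables params.py passes as click defaults, and the presence of `default_log_path` in the same module is assumed from this listing.

```python
# Hypothetical usage of the resolvers defined above (module path as imported in params.py).
from dbt.cli.resolvers import default_log_path, default_profiles_dir, default_project_dir

project_dir = default_project_dir()   # nearest parent containing dbt_project.yml, else cwd
print(default_profiles_dir())         # cwd if it holds profiles.yml, else ~/.dbt
print(default_log_path(project_dir))  # project_dir / <log-path from dbt_project.yml>, else "logs"
```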


@@ -1,40 +0,0 @@
from enum import Enum
from typing import List
from dbt.exceptions import DbtInternalError
class Command(Enum):
BUILD = "build"
CLEAN = "clean"
COMPILE = "compile"
CLONE = "clone"
DOCS_GENERATE = "generate"
DOCS_SERVE = "serve"
DEBUG = "debug"
DEPS = "deps"
INIT = "init"
LIST = "list"
PARSE = "parse"
RUN = "run"
RUN_OPERATION = "run-operation"
SEED = "seed"
SHOW = "show"
SNAPSHOT = "snapshot"
SOURCE_FRESHNESS = "freshness"
TEST = "test"
RETRY = "retry"
@classmethod
def from_str(cls, s: str) -> "Command":
try:
return cls(s)
except ValueError:
raise DbtInternalError(f"No value '{s}' exists in Command enum")
def to_list(self) -> List[str]:
return {
Command.DOCS_GENERATE: ["docs", "generate"],
Command.DOCS_SERVE: ["docs", "serve"],
Command.SOURCE_FRESHNESS: ["source", "freshness"],
}.get(self, [self.value])
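
A small usage sketch (not in the diff; the module path `dbt.cli.types` is an assumption) of the round trip these two helpers provide:

```python
# Hypothetical usage of Command.from_str / Command.to_list; the module path is assumed.
from dbt.cli.types import Command
from dbt.exceptions import DbtInternalError

assert Command.from_str("run") is Command.RUN
assert Command.RUN.to_list() == ["run"]

# Multi-word commands are special-cased by to_list():
assert Command.from_str("generate") is Command.DOCS_GENERATE
assert Command.DOCS_GENERATE.to_list() == ["docs", "generate"]

# Unknown strings raise DbtInternalError instead of a bare ValueError:
try:
    Command.from_str("not-a-command")
except DbtInternalError as exc:
    print(exc)
```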


@@ -1,19 +1 @@
# Clients README
### Jinja
#### How are materializations defined
Model materializations are kept in `core/dbt/include/global_project/macros/materializations/models/`. Materializations are defined using syntax that isn't part of the Jinja standard library. These tags are referenced internally, and materializations can be overridden in user projects when users have specific needs.
```
-- Pseudocode for arguments
{% materialization <name>, <target name := one_of{default, adapter}> %}
{% endmaterialization %}
```
These blocks are referred to as Jinja extensions. Extensions are defined as part of the Jinja code accepted within a dbt project, which includes both system code used internally by dbt and user-space (i.e. user-defined) macros. Extensions exist to help Jinja users create reusable code blocks or abstract objects--for us, materializations are a great use case, since we pass them around as arguments within dbt system code.
The code that defines this extension is a `MaterializationExtension` class and a `parse` routine; both live in [clients/jinja.py](https://github.com/dbt-labs/dbt-core/blob/main/core/dbt/clients/jinja.py). The routine enables Jinja to parse (i.e. recognize) the comma-separated argument structure our `materialization` tags use (e.g. `table, default`, in the shape of the pseudocode above).


@@ -1,15 +1,7 @@
import re
from collections import namedtuple
from dbt.exceptions import (
BlockDefinitionNotAtTopError,
DbtInternalError,
MissingCloseTagError,
MissingControlFlowStartTagError,
NestedTagsError,
UnexpectedControlFlowEndTagError,
UnexpectedMacroEOFError,
)
import dbt.exceptions
def regex(pat):
@@ -18,83 +10,79 @@ def regex(pat):
class BlockData:
"""raw plaintext data from the top level of the file."""
def __init__(self, contents):
self.block_type_name = "__dbt__data"
self.block_type_name = '__dbt__data'
self.contents = contents
self.full_block = contents
class BlockTag:
def __init__(self, block_type_name, block_name, contents=None, full_block=None, **kw):
def __init__(self, block_type_name, block_name, contents=None,
full_block=None, **kw):
self.block_type_name = block_type_name
self.block_name = block_name
self.contents = contents
self.full_block = full_block
def __str__(self):
return "BlockTag({!r}, {!r})".format(self.block_type_name, self.block_name)
return 'BlockTag({!r}, {!r})'.format(self.block_type_name,
self.block_name)
def __repr__(self):
return str(self)
@property
def end_block_type_name(self):
return "end{}".format(self.block_type_name)
return 'end{}'.format(self.block_type_name)
def end_pat(self):
# we don't want to use string formatting here because jinja uses most
# of the string formatting operators in its syntax...
pattern = "".join(
(
r"(?P<endblock>((?:\s*\{\%\-|\{\%)\s*",
self.end_block_type_name,
r"\s*(?:\-\%\}\s*|\%\})))",
)
)
pattern = ''.join((
r'(?P<endblock>((?:\s*\{\%\-|\{\%)\s*',
self.end_block_type_name,
r'\s*(?:\-\%\}\s*|\%\})))',
))
return regex(pattern)
Tag = namedtuple("Tag", "block_type_name block_name start end")
Tag = namedtuple('Tag', 'block_type_name block_name start end')
_NAME_PATTERN = r"[A-Za-z_][A-Za-z_0-9]*"
_NAME_PATTERN = r'[A-Za-z_][A-Za-z_0-9]*'
COMMENT_START_PATTERN = regex(r"(?:(?P<comment_start>(\s*\{\#)))")
COMMENT_END_PATTERN = regex(r"(.*?)(\s*\#\})")
RAW_START_PATTERN = regex(r"(?:\s*\{\%\-|\{\%)\s*(?P<raw_start>(raw))\s*(?:\-\%\}\s*|\%\})")
EXPR_START_PATTERN = regex(r"(?P<expr_start>(\{\{\s*))")
EXPR_END_PATTERN = regex(r"(?P<expr_end>(\s*\}\}))")
BLOCK_START_PATTERN = regex(
"".join(
(
r"(?:\s*\{\%\-|\{\%)\s*",
r"(?P<block_type_name>({}))".format(_NAME_PATTERN),
# some blocks have a 'block name'.
r"(?:\s+(?P<block_name>({})))?".format(_NAME_PATTERN),
)
)
COMMENT_START_PATTERN = regex(r'(?:(?P<comment_start>(\s*\{\#)))')
COMMENT_END_PATTERN = regex(r'(.*?)(\s*\#\})')
RAW_START_PATTERN = regex(
r'(?:\s*\{\%\-|\{\%)\s*(?P<raw_start>(raw))\s*(?:\-\%\}\s*|\%\})'
)
EXPR_START_PATTERN = regex(r'(?P<expr_start>(\{\{\s*))')
EXPR_END_PATTERN = regex(r'(?P<expr_end>(\s*\}\}))')
BLOCK_START_PATTERN = regex(''.join((
r'(?:\s*\{\%\-|\{\%)\s*',
r'(?P<block_type_name>({}))'.format(_NAME_PATTERN),
# some blocks have a 'block name'.
r'(?:\s+(?P<block_name>({})))?'.format(_NAME_PATTERN),
)))
RAW_BLOCK_PATTERN = regex(
"".join(
(
r"(?:\s*\{\%\-|\{\%)\s*raw\s*(?:\-\%\}\s*|\%\})",
r"(?:.*?)",
r"(?:\s*\{\%\-|\{\%)\s*endraw\s*(?:\-\%\}\s*|\%\})",
)
)
)
RAW_BLOCK_PATTERN = regex(''.join((
r'(?:\s*\{\%\-|\{\%)\s*raw\s*(?:\-\%\}\s*|\%\})',
r'(?:.*?)',
r'(?:\s*\{\%\-|\{\%)\s*endraw\s*(?:\-\%\}\s*|\%\})',
)))
TAG_CLOSE_PATTERN = regex(r"(?:(?P<tag_close>(\-\%\}\s*|\%\})))")
TAG_CLOSE_PATTERN = regex(r'(?:(?P<tag_close>(\-\%\}\s*|\%\})))')
# stolen from jinja's lexer. Note that we've consumed all prefix whitespace by
# the time we want to use this.
STRING_PATTERN = regex(r"(?P<string>('([^'\\]*(?:\\.[^'\\]*)*)'|" r'"([^"\\]*(?:\\.[^"\\]*)*)"))')
STRING_PATTERN = regex(
r"(?P<string>('([^'\\]*(?:\\.[^'\\]*)*)'|"
r'"([^"\\]*(?:\\.[^"\\]*)*)"))'
)
QUOTE_START_PATTERN = regex(r"""(?P<quote>(['"]))""")
QUOTE_START_PATTERN = regex(r'''(?P<quote>(['"]))''')
class TagIterator:
@@ -111,10 +99,10 @@ class TagIterator:
end_val: int = self.pos if end is None else end
data = self.data[:end_val]
# if not found, rfind returns -1, and -1+1=0, which is perfect!
last_line_start = data.rfind("\n") + 1
last_line_start = data.rfind('\n') + 1
# it's easy to forget this, but line numbers are 1-indexed
line_number = data.count("\n") + 1
return f"{line_number}:{end_val - last_line_start}"
line_number = data.count('\n') + 1
return f'{line_number}:{end_val - last_line_start}'
def advance(self, new_position):
self.pos = new_position
@@ -132,7 +120,7 @@ class TagIterator:
matches = []
for pattern in patterns:
# default to 'search', but sometimes we want to 'match'.
if kwargs.get("method", "search") == "search":
if kwargs.get('method', 'search') == 'search':
match = self._search(pattern)
else:
match = self._match(pattern)
@@ -147,7 +135,10 @@ class TagIterator:
def _expect_match(self, expected_name, *patterns, **kwargs):
match = self._first_match(*patterns, **kwargs)
if match is None:
raise UnexpectedMacroEOFError(expected_name, self.data[self.pos :])
msg = 'unexpected EOF, expected {}, got "{}"'.format(
expected_name, self.data[self.pos:]
)
dbt.exceptions.raise_compiler_error(msg)
return match
def handle_expr(self, match):
@@ -165,20 +156,22 @@ class TagIterator:
"""
self.advance(match.end())
while True:
match = self._expect_match("}}", EXPR_END_PATTERN, QUOTE_START_PATTERN)
if match.groupdict().get("expr_end") is not None:
match = self._expect_match('}}',
EXPR_END_PATTERN,
QUOTE_START_PATTERN)
if match.groupdict().get('expr_end') is not None:
break
else:
# it's a quote. we haven't advanced for this match yet, so
# just slurp up the whole string, no need to rewind.
match = self._expect_match("string", STRING_PATTERN)
match = self._expect_match('string', STRING_PATTERN)
self.advance(match.end())
self.advance(match.end())
def handle_comment(self, match):
self.advance(match.end())
match = self._expect_match("#}", COMMENT_END_PATTERN)
match = self._expect_match('#}', COMMENT_END_PATTERN)
self.advance(match.end())
def _expect_block_close(self):
@@ -195,19 +188,22 @@ class TagIterator:
"""
while True:
end_match = self._expect_match(
'tag close ("%}")', QUOTE_START_PATTERN, TAG_CLOSE_PATTERN
'tag close ("%}")',
QUOTE_START_PATTERN,
TAG_CLOSE_PATTERN
)
self.advance(end_match.end())
if end_match.groupdict().get("tag_close") is not None:
if end_match.groupdict().get('tag_close') is not None:
return
# must be a string. Rewind to its start and advance past it.
self.rewind()
string_match = self._expect_match("string", STRING_PATTERN)
string_match = self._expect_match('string', STRING_PATTERN)
self.advance(string_match.end())
def handle_raw(self):
# raw blocks are super special, they are a single complete regex
match = self._expect_match("{% raw %}...{% endraw %}", RAW_BLOCK_PATTERN)
match = self._expect_match('{% raw %}...{% endraw %}',
RAW_BLOCK_PATTERN)
self.advance(match.end())
return match.end()
@@ -224,24 +220,30 @@ class TagIterator:
"""
groups = match.groupdict()
# always a value
block_type_name = groups["block_type_name"]
block_type_name = groups['block_type_name']
# might be None
block_name = groups.get("block_name")
block_name = groups.get('block_name')
start_pos = self.pos
if block_type_name == "raw":
match = self._expect_match("{% raw %}...{% endraw %}", RAW_BLOCK_PATTERN)
if block_type_name == 'raw':
match = self._expect_match('{% raw %}...{% endraw %}',
RAW_BLOCK_PATTERN)
self.advance(match.end())
else:
self.advance(match.end())
self._expect_block_close()
return Tag(
block_type_name=block_type_name, block_name=block_name, start=start_pos, end=self.pos
block_type_name=block_type_name,
block_name=block_name,
start=start_pos,
end=self.pos
)
def find_tags(self):
while True:
match = self._first_match(
BLOCK_START_PATTERN, COMMENT_START_PATTERN, EXPR_START_PATTERN
BLOCK_START_PATTERN,
COMMENT_START_PATTERN,
EXPR_START_PATTERN
)
if match is None:
break
@@ -250,9 +252,9 @@ class TagIterator:
# start = self.pos
groups = match.groupdict()
comment_start = groups.get("comment_start")
expr_start = groups.get("expr_start")
block_type_name = groups.get("block_type_name")
comment_start = groups.get('comment_start')
expr_start = groups.get('expr_start')
block_type_name = groups.get('block_type_name')
if comment_start is not None:
self.handle_comment(match)
@@ -261,21 +263,31 @@ class TagIterator:
elif block_type_name is not None:
yield self.handle_tag(match)
else:
raise DbtInternalError(
"Invalid regex match in next_block, expected block start, "
"expr start, or comment start"
raise dbt.exceptions.InternalException(
'Invalid regex match in next_block, expected block start, '
'expr start, or comment start'
)
def __iter__(self):
return self.find_tags()
duplicate_tags = (
'Got nested tags: {outer.block_type_name} (started at {outer.start}) did '
'not have a matching {{% end{outer.block_type_name} %}} before a '
'subsequent {inner.block_type_name} was found (started at {inner.start})'
)
_CONTROL_FLOW_TAGS = {
"if": "endif",
"for": "endfor",
'if': 'endif',
'for': 'endfor',
}
_CONTROL_FLOW_END_TAGS = {v: k for k, v in _CONTROL_FLOW_TAGS.items()}
_CONTROL_FLOW_END_TAGS = {
v: k
for k, v in _CONTROL_FLOW_TAGS.items()
}
class BlockIterator:
@@ -298,15 +310,15 @@ class BlockIterator:
def is_current_end(self, tag):
return (
tag.block_type_name.startswith("end")
and self.current is not None
and tag.block_type_name[3:] == self.current.block_type_name
tag.block_type_name.startswith('end') and
self.current is not None and
tag.block_type_name[3:] == self.current.block_type_name
)
def find_blocks(self, allowed_blocks=None, collect_raw_data=True):
"""Find all top-level blocks in the data."""
if allowed_blocks is None:
allowed_blocks = {"snapshot", "macro", "materialization", "docs"}
allowed_blocks = {'snapshot', 'macro', 'materialization', 'docs'}
for tag in self.tag_parser.find_tags():
if tag.block_type_name in _CONTROL_FLOW_TAGS:
@@ -317,18 +329,37 @@ class BlockIterator:
found = self.stack.pop()
else:
expected = _CONTROL_FLOW_END_TAGS[tag.block_type_name]
raise UnexpectedControlFlowEndTagError(tag, expected, self.tag_parser)
dbt.exceptions.raise_compiler_error((
'Got an unexpected control flow end tag, got {} but '
'never saw a preceding {} (@ {})'
).format(
tag.block_type_name,
expected,
self.tag_parser.linepos(tag.start)
))
expected = _CONTROL_FLOW_TAGS[found]
if expected != tag.block_type_name:
raise MissingControlFlowStartTagError(tag, expected, self.tag_parser)
dbt.exceptions.raise_compiler_error((
'Got an unexpected control flow end tag, got {} but '
'expected {} next (@ {})'
).format(
tag.block_type_name,
expected,
self.tag_parser.linepos(tag.start)
))
if tag.block_type_name in allowed_blocks:
if self.stack:
raise BlockDefinitionNotAtTopError(self.tag_parser, tag.start)
dbt.exceptions.raise_compiler_error((
'Got a block definition inside control flow at {}. '
'All dbt block definitions must be at the top level'
).format(self.tag_parser.linepos(tag.start)))
if self.current is not None:
raise NestedTagsError(outer=self.current, inner=tag)
dbt.exceptions.raise_compiler_error(
duplicate_tags.format(outer=self.current, inner=tag)
)
if collect_raw_data:
raw_data = self.data[self.last_position : tag.start]
raw_data = self.data[self.last_position:tag.start]
self.last_position = tag.start
if raw_data:
yield BlockData(raw_data)
@@ -340,21 +371,23 @@ class BlockIterator:
yield BlockTag(
block_type_name=self.current.block_type_name,
block_name=self.current.block_name,
contents=self.data[self.current.end : tag.start],
full_block=self.data[self.current.start : tag.end],
contents=self.data[self.current.end:tag.start],
full_block=self.data[self.current.start:tag.end]
)
self.current = None
if self.current:
linecount = self.data[: self.current.end].count("\n") + 1
raise MissingCloseTagError(self.current.block_type_name, linecount)
linecount = self.data[:self.current.end].count('\n') + 1
dbt.exceptions.raise_compiler_error((
'Reached EOF without finding a close tag for '
'{} (searched from line {})'
).format(self.current.block_type_name, linecount))
if collect_raw_data:
raw_data = self.data[self.last_position :]
raw_data = self.data[self.last_position:]
if raw_data:
yield BlockData(raw_data)
def lex_for_blocks(self, allowed_blocks=None, collect_raw_data=True):
return list(
self.find_blocks(allowed_blocks=allowed_blocks, collect_raw_data=collect_raw_data)
)
return list(self.find_blocks(allowed_blocks=allowed_blocks,
collect_raw_data=collect_raw_data))
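
As a usage sketch (not part of the diff), the lexer above can be pointed at raw template text to pull out dbt block definitions such as the materializations described in the clients README earlier; the module path and the assumption that `BlockIterator` accepts a raw string (rather than a `TagIterator`) are unverified.

```python
# Hypothetical sketch of driving the block lexer above; the import path and the
# string-accepting BlockIterator constructor are assumptions, not facts from the diff.
from dbt.clients._jinja_blocks import BlockIterator

source = "{% materialization my_view, default %} select 1 as id {% endmaterialization %}"

for block in BlockIterator(source).lex_for_blocks(allowed_blocks={"materialization"}):
    # BlockTag carries the block_type_name / block_name / contents fields defined near
    # the top of this file; BlockData would carry any raw top-level text.
    print(block.block_type_name, getattr(block, "block_name", None))
```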


@@ -7,10 +7,10 @@ import json
import dbt.utils
from typing import Iterable, List, Dict, Union, Optional, Any
from dbt.exceptions import DbtRuntimeError
from dbt.exceptions import RuntimeException
BOM = BOM_UTF8.decode("utf-8") # '\ufeff'
BOM = BOM_UTF8.decode('utf-8') # '\ufeff'
class Number(agate.data_types.Number):
@@ -18,7 +18,9 @@ class Number(agate.data_types.Number):
# i.e. do not cast True and False to numeric 1 and 0
def cast(self, d):
if type(d) == bool:
raise agate.exceptions.CastError("Do not cast True to 1 or False to 0.")
raise agate.exceptions.CastError(
'Do not cast True to 1 or False to 0.'
)
else:
return super().cast(d)
@@ -40,24 +42,32 @@ class ISODateTime(agate.data_types.DateTime):
except: # noqa
pass
raise agate.exceptions.CastError('Can not parse value "%s" as datetime.' % d)
raise agate.exceptions.CastError(
'Can not parse value "%s" as datetime.' % d
)
def build_type_tester(
text_columns: Iterable[str], string_null_values: Optional[Iterable[str]] = ("null", "")
text_columns: Iterable[str],
string_null_values: Optional[Iterable[str]] = ('null', '')
) -> agate.TypeTester:
types = [
Number(null_values=("null", "")),
agate.data_types.Date(null_values=("null", ""), date_format="%Y-%m-%d"),
agate.data_types.DateTime(null_values=("null", ""), datetime_format="%Y-%m-%d %H:%M:%S"),
ISODateTime(null_values=("null", "")),
agate.data_types.Boolean(
true_values=("true",), false_values=("false",), null_values=("null", "")
),
agate.data_types.Text(null_values=string_null_values),
Number(null_values=('null', '')),
agate.data_types.Date(null_values=('null', ''),
date_format='%Y-%m-%d'),
agate.data_types.DateTime(null_values=('null', ''),
datetime_format='%Y-%m-%d %H:%M:%S'),
ISODateTime(null_values=('null', '')),
agate.data_types.Boolean(true_values=('true',),
false_values=('false',),
null_values=('null', '')),
agate.data_types.Text(null_values=string_null_values)
]
force = {k: agate.data_types.Text(null_values=string_null_values) for k in text_columns}
force = {
k: agate.data_types.Text(null_values=string_null_values)
for k in text_columns
}
return agate.TypeTester(force=force, types=types)
@@ -74,13 +84,16 @@ def table_from_rows(
else:
# If text_only_columns are present, prevent coercing empty string or
# literal 'null' strings to a None representation.
column_types = build_type_tester(text_only_columns, string_null_values=())
column_types = build_type_tester(
text_only_columns,
string_null_values=()
)
return agate.Table(rows, column_names, column_types=column_types)
def table_from_data(data, column_names: Iterable[str]) -> agate.Table:
"Convert a list of dictionaries into an Agate table"
"Convert list of dictionaries into an Agate table"
# The agate table is generated from a list of dicts, so the column order
# from `data` is not preserved. We can use `select` to reorder the columns
@@ -119,7 +132,9 @@ def table_from_data_flat(data, column_names: Iterable[str]) -> agate.Table:
rows.append(row)
return table_from_rows(
rows=rows, column_names=column_names, text_only_columns=text_only_columns
rows=rows,
column_names=column_names,
text_only_columns=text_only_columns
)
@@ -137,7 +152,7 @@ def as_matrix(table):
def from_csv(abspath, text_columns):
type_tester = build_type_tester(text_columns=text_columns)
with open(abspath, encoding="utf-8") as fp:
with open(abspath, encoding='utf-8') as fp:
if fp.read(1) != BOM:
fp.seek(0)
return agate.Table.from_csv(fp, column_types=type_tester)
@@ -168,9 +183,9 @@ class ColumnTypeBuilder(Dict[str, NullableAgateType]):
return
elif not isinstance(value, type(existing_type)):
# actual type mismatch!
raise DbtRuntimeError(
f"Tables contain columns with the same names ({key}), "
f"but different types ({value} vs {existing_type})"
raise RuntimeException(
f'Tables contain columns with the same names ({key}), '
f'but different types ({value} vs {existing_type})'
)
def finalize(self) -> Dict[str, agate.data_types.DataType]:
@@ -184,7 +199,9 @@ class ColumnTypeBuilder(Dict[str, NullableAgateType]):
return result
def _merged_column_types(tables: List[agate.Table]) -> Dict[str, agate.data_types.DataType]:
def _merged_column_types(
tables: List[agate.Table]
) -> Dict[str, agate.data_types.DataType]:
# this is a lot like agate.Table.merge, but with handling for all-null
# rows being "any type".
new_columns: ColumnTypeBuilder = ColumnTypeBuilder()
@@ -210,7 +227,10 @@ def merge_tables(tables: List[agate.Table]) -> agate.Table:
rows: List[agate.Row] = []
for table in tables:
if table.column_names == column_names and table.column_types == column_types:
if (
table.column_names == column_names and
table.column_types == column_types
):
rows.extend(table.rows)
else:
for row in table.rows:
