mirror of https://github.com/dbt-labs/dbt-core
synced 2025-12-19 20:01:29 +00:00

Compare commits (1 commit): jerco/upda...feature/re

| Author | SHA1 | Date |
|---|---|---|
|  | 2d69a9b114 |  |
@@ -1,36 +1,26 @@
[bumpversion]
current_version = 1.7.0a1
parse = (?P<major>[\d]+) # major version number
\.(?P<minor>[\d]+) # minor version number
\.(?P<patch>[\d]+) # patch version number
(?P<prerelease> # optional pre-release - ex: a1, b2, rc25
(?P<prekind>a|b|rc) # pre-release type
(?P<num>[\d]+) # pre-release version number
)?
( # optional nightly release indicator
\.(?P<nightly>dev[0-9]+) # ex: .dev02142023
)? # expected matches: `1.15.0`, `1.5.0a11`, `1.5.0a1.dev123`, `1.5.0.dev123457`, expected failures: `1`, `1.5`, `1.5.2-a1`, `text1.5.0`
serialize =
{major}.{minor}.{patch}{prekind}{num}.{nightly}
{major}.{minor}.{patch}.{nightly}
{major}.{minor}.{patch}{prekind}{num}
current_version = 0.19.0a1
parse = (?P<major>\d+)
\.(?P<minor>\d+)
\.(?P<patch>\d+)
((?P<prerelease>[a-z]+)(?P<num>\d+))?
serialize =
{major}.{minor}.{patch}{prerelease}{num}
{major}.{minor}.{patch}
commit = False
tag = False

[bumpversion:part:prekind]
[bumpversion:part:prerelease]
first_value = a
optional_value = final
values =
values =
a
b
rc
final

[bumpversion:part:num]
first_value = 1

[bumpversion:part:nightly]
[bumpversion:file:setup.py]

[bumpversion:file:core/setup.py]

@@ -38,10 +28,17 @@ first_value = 1

[bumpversion:file:plugins/postgres/setup.py]

[bumpversion:file:plugins/redshift/setup.py]

[bumpversion:file:plugins/snowflake/setup.py]

[bumpversion:file:plugins/bigquery/setup.py]

[bumpversion:file:plugins/postgres/dbt/adapters/postgres/__version__.py]

[bumpversion:file:docker/Dockerfile]
[bumpversion:file:plugins/redshift/dbt/adapters/redshift/__version__.py]

[bumpversion:file:tests/adapter/setup.py]
[bumpversion:file:plugins/snowflake/dbt/adapters/snowflake/__version__.py]

[bumpversion:file:plugins/bigquery/dbt/adapters/bigquery/__version__.py]

[bumpversion:file:tests/adapter/dbt/tests/adapter/__version__.py]
@@ -1,23 +0,0 @@
## Previous Releases

For information on prior major and minor releases, see their changelogs:

* [1.6](https://github.com/dbt-labs/dbt-core/blob/1.6.latest/CHANGELOG.md)
* [1.5](https://github.com/dbt-labs/dbt-core/blob/1.5.latest/CHANGELOG.md)
* [1.4](https://github.com/dbt-labs/dbt-core/blob/1.4.latest/CHANGELOG.md)
* [1.3](https://github.com/dbt-labs/dbt-core/blob/1.3.latest/CHANGELOG.md)
* [1.2](https://github.com/dbt-labs/dbt-core/blob/1.2.latest/CHANGELOG.md)
* [1.1](https://github.com/dbt-labs/dbt-core/blob/1.1.latest/CHANGELOG.md)
* [1.0](https://github.com/dbt-labs/dbt-core/blob/1.0.latest/CHANGELOG.md)
* [0.21](https://github.com/dbt-labs/dbt-core/blob/0.21.latest/CHANGELOG.md)
* [0.20](https://github.com/dbt-labs/dbt-core/blob/0.20.latest/CHANGELOG.md)
* [0.19](https://github.com/dbt-labs/dbt-core/blob/0.19.latest/CHANGELOG.md)
* [0.18](https://github.com/dbt-labs/dbt-core/blob/0.18.latest/CHANGELOG.md)
* [0.17](https://github.com/dbt-labs/dbt-core/blob/0.17.latest/CHANGELOG.md)
* [0.16](https://github.com/dbt-labs/dbt-core/blob/0.16.latest/CHANGELOG.md)
* [0.15](https://github.com/dbt-labs/dbt-core/blob/0.15.latest/CHANGELOG.md)
* [0.14](https://github.com/dbt-labs/dbt-core/blob/0.14.latest/CHANGELOG.md)
* [0.13](https://github.com/dbt-labs/dbt-core/blob/0.13.latest/CHANGELOG.md)
* [0.12](https://github.com/dbt-labs/dbt-core/blob/0.12.latest/CHANGELOG.md)
* [0.11 and earlier](https://github.com/dbt-labs/dbt-core/blob/0.11.latest/CHANGELOG.md)
@@ -1,53 +0,0 @@
# CHANGELOG Automation

We use [changie](https://changie.dev/) to automate `CHANGELOG` generation. For installation and format/command specifics, see the changie documentation.

### Quick Tour

- All new change entries get generated under `/.changes/unreleased` as a yaml file
- `header.tpl.md` contains the contents of the header for the entire CHANGELOG file
- `0.0.0.md` contains the contents of the footer for the entire CHANGELOG file. changie looks to be in the process of supporting a footer file the same way it supports a header file; switch to that when it becomes available. For now, the 0.0.0 in the file name forces it to the bottom of the changelog no matter what version we are releasing.
- `.changie.yaml` contains the fields in a change, the format of a single change, as well as the format of the Contributors section for each version.

### Workflow

#### Daily workflow
Almost every code change we make associated with an issue will require a `CHANGELOG` entry. After you have created the PR in GitHub, run `changie new` and follow the command prompts to generate a yaml file with your change details. This only needs to be done once per PR.

The `changie new` command will ensure the correct file format and file name. There is a one-to-one mapping of issues to changes: multiple issues cannot be lumped into a single entry. If you make a mistake, the yaml file may be directly modified and saved as long as the format is preserved. A sample generated entry is shown below.
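For illustration, a generated entry is a small yaml file like the following (the file name and field values here are examples; the prompted fields are defined in `.changie.yaml`):

```yaml
# .changes/unreleased/Fixes-20230718-125518.yaml (example file name)
kind: Fixes
body: Enable converting deprecation warnings to errors
time: 2023-07-18T12:55:18.03914-04:00
custom:
  Author: michelleark
  Issue: "8130"
```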
Note: If your PR has been cleared by the Core Team as not needing a changelog entry, the `Skip Changelog` label may be put on the PR to bypass the GitHub action that blocks PRs from being merged when they are missing a `CHANGELOG` entry.

#### Prerelease Workflow
These commands batch up changes in `/.changes/unreleased` to be included in this prerelease and move those files to a directory named for the release version. The `--move-dir` will be created if it does not exist and is created in `/.changes`.

```
changie batch <version> --move-dir '<version>' --prerelease 'rc1'
changie merge
```

Example:
```
changie batch 1.0.5 --move-dir '1.0.5' --prerelease 'rc1'
changie merge
```

#### Final Release Workflow
These commands batch up changes in `/.changes/unreleased` as well as `/.changes/<version>` to be included in this final release and delete all prereleases. This rolls all prereleases up into a single final release. All `yaml` files in `/unreleased` and `<version>` will be deleted at this point.

```
changie batch <version> --include '<version>' --remove-prereleases
changie merge
```

Example:
```
changie batch 1.0.5 --include '1.0.5' --remove-prereleases
changie merge
```

### A Note on Manual Edits & Gotchas
- Changie generates markdown files in the `.changes` directory that are parsed together with the `changie merge` command. Every time `changie merge` is run, it regenerates the entire file. For this reason, any changes made directly to `CHANGELOG.md` will be overwritten on the next run of `changie merge`.
- If changes need to be made to the `CHANGELOG.md`, make the changes to the relevant `<version>.md` file located in the `/.changes` directory. Then run `changie merge` to regenerate the `CHANGELOG.md`.
- Do not run `changie batch` again on released versions. Our final release workflow deletes all of the yaml files associated with individual changes. If for some reason modifications to the `CHANGELOG.md` are required after we've generated the final release `CHANGELOG.md`, the modifications need to be made manually to the `<version>.md` file in the `/.changes` directory.
- changie can modify, create, and delete files depending on the command you run. This is expected. Be sure to commit everything that has been modified and deleted.
@@ -1,6 +0,0 @@
# dbt Core Changelog

- This file provides a full account of all changes to `dbt-core` and `dbt-postgres`
- Changes are listed under the (pre)release in which they first appear. Subsequent releases include changes from previous releases.
- "Breaking changes" listed under a version may require action from end users or external maintainers when upgrading to that version.
- Do not edit this file directly. This file is auto-generated using [changie](https://github.com/miniscruff/changie). For details on how to document a change, see [the contributing guide](https://github.com/dbt-labs/dbt-core/blob/main/CONTRIBUTING.md#adding-changelog-entry)
@@ -1,6 +0,0 @@
kind: Dependencies
body: Pin click<9 + sqlparse<0.5
time: 2023-07-19T12:37:43.716495+02:00
custom:
  Author: jtcohen6
  PR: "8146"

@@ -1,6 +0,0 @@
kind: Docs
body: Fix for column tests not rendering on quoted columns
time: 2023-05-31T11:54:19.687363-04:00
custom:
  Author: drewbanin
  Issue: "201"

@@ -1,6 +0,0 @@
kind: Docs
body: Remove static SQL codeblock for metrics
time: 2023-07-18T19:24:22.155323+02:00
custom:
  Author: marcodamore
  Issue: "436"

@@ -1,6 +0,0 @@
kind: Fixes
body: Enable converting deprecation warnings to errors
time: 2023-07-18T12:55:18.03914-04:00
custom:
  Author: michelleark
  Issue: "8130"
140
.changie.yaml
140
.changie.yaml
@@ -1,140 +0,0 @@
|
||||
changesDir: .changes
|
||||
unreleasedDir: unreleased
|
||||
headerPath: header.tpl.md
|
||||
versionHeaderPath: ""
|
||||
changelogPath: CHANGELOG.md
|
||||
versionExt: md
|
||||
envPrefix: "CHANGIE_"
|
||||
versionFormat: '## dbt-core {{.Version}} - {{.Time.Format "January 02, 2006"}}'
|
||||
kindFormat: '### {{.Kind}}'
|
||||
changeFormat: |-
|
||||
{{- $IssueList := list }}
|
||||
{{- $changes := splitList " " $.Custom.Issue }}
|
||||
{{- range $issueNbr := $changes }}
|
||||
{{- $changeLink := "[#nbr](https://github.com/dbt-labs/dbt-core/issues/nbr)" | replace "nbr" $issueNbr }}
|
||||
{{- $IssueList = append $IssueList $changeLink }}
|
||||
{{- end -}}
|
||||
- {{.Body}} ({{ range $index, $element := $IssueList }}{{if $index}}, {{end}}{{$element}}{{end}})
|
||||
|
||||
kinds:
|
||||
- label: Breaking Changes
|
||||
- label: Features
|
||||
- label: Fixes
|
||||
- label: Docs
|
||||
changeFormat: |-
|
||||
{{- $IssueList := list }}
|
||||
{{- $changes := splitList " " $.Custom.Issue }}
|
||||
{{- range $issueNbr := $changes }}
|
||||
{{- $changeLink := "[dbt-docs/#nbr](https://github.com/dbt-labs/dbt-docs/issues/nbr)" | replace "nbr" $issueNbr }}
|
||||
{{- $IssueList = append $IssueList $changeLink }}
|
||||
{{- end -}}
|
||||
- {{.Body}} ({{ range $index, $element := $IssueList }}{{if $index}}, {{end}}{{$element}}{{end}})
|
||||
- label: Under the Hood
|
||||
- label: Dependencies
|
||||
changeFormat: |-
|
||||
{{- $PRList := list }}
|
||||
{{- $changes := splitList " " $.Custom.PR }}
|
||||
{{- range $pullrequest := $changes }}
|
||||
{{- $changeLink := "[#nbr](https://github.com/dbt-labs/dbt-core/pull/nbr)" | replace "nbr" $pullrequest }}
|
||||
{{- $PRList = append $PRList $changeLink }}
|
||||
{{- end -}}
|
||||
- {{.Body}} ({{ range $index, $element := $PRList }}{{if $index}}, {{end}}{{$element}}{{end}})
|
||||
skipGlobalChoices: true
|
||||
additionalChoices:
|
||||
- key: Author
|
||||
label: GitHub Username(s) (separated by a single space if multiple)
|
||||
type: string
|
||||
minLength: 3
|
||||
- key: PR
|
||||
label: GitHub Pull Request Number (separated by a single space if multiple)
|
||||
type: string
|
||||
minLength: 1
|
||||
- label: Security
|
||||
changeFormat: |-
|
||||
{{- $PRList := list }}
|
||||
{{- $changes := splitList " " $.Custom.PR }}
|
||||
{{- range $pullrequest := $changes }}
|
||||
{{- $changeLink := "[#nbr](https://github.com/dbt-labs/dbt-core/pull/nbr)" | replace "nbr" $pullrequest }}
|
||||
{{- $PRList = append $PRList $changeLink }}
|
||||
{{- end -}}
|
||||
- {{.Body}} ({{ range $index, $element := $PRList }}{{if $index}}, {{end}}{{$element}}{{end}})
|
||||
skipGlobalChoices: true
|
||||
additionalChoices:
|
||||
- key: Author
|
||||
label: GitHub Username(s) (separated by a single space if multiple)
|
||||
type: string
|
||||
minLength: 3
|
||||
- key: PR
|
||||
label: GitHub Pull Request Number (separated by a single space if multiple)
|
||||
type: string
|
||||
minLength: 1
|
||||
|
||||
newlines:
|
||||
afterChangelogHeader: 1
|
||||
afterKind: 1
|
||||
afterChangelogVersion: 1
|
||||
beforeKind: 1
|
||||
endOfVersion: 1
|
||||
|
||||
custom:
|
||||
- key: Author
|
||||
label: GitHub Username(s) (separated by a single space if multiple)
|
||||
type: string
|
||||
minLength: 3
|
||||
- key: Issue
|
||||
label: GitHub Issue Number (separated by a single space if multiple)
|
||||
type: string
|
||||
minLength: 1
|
||||
|
||||
footerFormat: |
|
||||
{{- $contributorDict := dict }}
|
||||
{{- /* ensure all names in this list are all lowercase for later matching purposes */}}
|
||||
{{- $core_team := splitList " " .Env.CORE_TEAM }}
|
||||
{{- /* ensure we always skip snyk and dependabot in addition to the core team */}}
|
||||
{{- $maintainers := list "dependabot[bot]" "snyk-bot"}}
|
||||
{{- range $team_member := $core_team }}
|
||||
{{- $team_member_lower := lower $team_member }}
|
||||
{{- $maintainers = append $maintainers $team_member_lower }}
|
||||
{{- end }}
|
||||
{{- range $change := .Changes }}
|
||||
{{- $authorList := splitList " " $change.Custom.Author }}
|
||||
{{- /* loop through all authors for a single changelog */}}
|
||||
{{- range $author := $authorList }}
|
||||
{{- $authorLower := lower $author }}
|
||||
{{- /* we only want to include non-core team contributors */}}
|
||||
{{- if not (has $authorLower $maintainers)}}
|
||||
{{- $changeList := splitList " " $change.Custom.Author }}
|
||||
{{- $IssueList := list }}
|
||||
{{- $changeLink := $change.Kind }}
|
||||
{{- if or (eq $change.Kind "Dependencies") (eq $change.Kind "Security") }}
|
||||
{{- $changes := splitList " " $change.Custom.PR }}
|
||||
{{- range $issueNbr := $changes }}
|
||||
{{- $changeLink := "[#nbr](https://github.com/dbt-labs/dbt-core/pull/nbr)" | replace "nbr" $issueNbr }}
|
||||
{{- $IssueList = append $IssueList $changeLink }}
|
||||
{{- end -}}
|
||||
{{- else }}
|
||||
{{- $changes := splitList " " $change.Custom.Issue }}
|
||||
{{- range $issueNbr := $changes }}
|
||||
{{- $changeLink := "[#nbr](https://github.com/dbt-labs/dbt-core/issues/nbr)" | replace "nbr" $issueNbr }}
|
||||
{{- $IssueList = append $IssueList $changeLink }}
|
||||
{{- end -}}
|
||||
{{- end }}
|
||||
{{- /* check if this contributor has other changes associated with them already */}}
|
||||
{{- if hasKey $contributorDict $author }}
|
||||
{{- $contributionList := get $contributorDict $author }}
|
||||
{{- $contributionList = concat $contributionList $IssueList }}
|
||||
{{- $contributorDict := set $contributorDict $author $contributionList }}
|
||||
{{- else }}
|
||||
{{- $contributionList := $IssueList }}
|
||||
{{- $contributorDict := set $contributorDict $author $contributionList }}
|
||||
{{- end }}
|
||||
{{- end}}
|
||||
{{- end}}
|
||||
{{- end }}
|
||||
{{- /* no indentation here for formatting so the final markdown doesn't have unneeded indentations */}}
|
||||
{{- if $contributorDict}}
|
||||
### Contributors
|
||||
{{- range $k,$v := $contributorDict }}
|
||||
- [@{{$k}}](https://github.com/{{$k}}) ({{ range $index, $element := $v }}{{if $index}}, {{end}}{{$element}}{{end}})
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
163
.circleci/config.yml
Normal file
163
.circleci/config.yml
Normal file
@@ -0,0 +1,163 @@
|
||||
version: 2.1
|
||||
jobs:
|
||||
unit:
|
||||
docker: &test_only
|
||||
- image: fishtownanalytics/test-container:9
|
||||
environment:
|
||||
DBT_INVOCATION_ENV: circle
|
||||
steps:
|
||||
- checkout
|
||||
- run: tox -e flake8,mypy,unit-py36,unit-py38
|
||||
build-wheels:
|
||||
docker: *test_only
|
||||
steps:
|
||||
- checkout
|
||||
- run:
|
||||
name: Build wheels
|
||||
command: |
|
||||
python3.8 -m venv "${PYTHON_ENV}"
|
||||
export PYTHON_BIN="${PYTHON_ENV}/bin/python"
|
||||
$PYTHON_BIN -m pip install -U pip setuptools
|
||||
$PYTHON_BIN -m pip install -r requirements.txt
|
||||
$PYTHON_BIN -m pip install -r dev_requirements.txt
|
||||
/bin/bash ./scripts/build-wheels.sh
|
||||
$PYTHON_BIN ./scripts/collect-dbt-contexts.py > ./dist/context_metadata.json
|
||||
$PYTHON_BIN ./scripts/collect-artifact-schema.py > ./dist/artifact_schemas.json
|
||||
environment:
|
||||
PYTHON_ENV: /home/tox/build_venv/
|
||||
- store_artifacts:
|
||||
path: ./dist
|
||||
destination: dist
|
||||
integration-postgres-py36:
|
||||
docker: &test_and_postgres
|
||||
- image: fishtownanalytics/test-container:9
|
||||
environment:
|
||||
DBT_INVOCATION_ENV: circle
|
||||
- image: postgres
|
||||
name: database
|
||||
environment: &pgenv
|
||||
POSTGRES_USER: "root"
|
||||
POSTGRES_PASSWORD: "password"
|
||||
POSTGRES_DB: "dbt"
|
||||
steps:
|
||||
- checkout
|
||||
- run: &setupdb
|
||||
name: Setup postgres
|
||||
command: bash test/setup_db.sh
|
||||
environment:
|
||||
PGHOST: database
|
||||
PGUSER: root
|
||||
PGPASSWORD: password
|
||||
PGDATABASE: postgres
|
||||
- run:
|
||||
name: Run tests
|
||||
command: tox -e integration-postgres-py36
|
||||
- store_artifacts:
|
||||
path: ./logs
|
||||
integration-snowflake-py36:
|
||||
docker: *test_only
|
||||
steps:
|
||||
- checkout
|
||||
- run:
|
||||
name: Run tests
|
||||
command: tox -e integration-snowflake-py36
|
||||
no_output_timeout: 1h
|
||||
- store_artifacts:
|
||||
path: ./logs
|
||||
integration-redshift-py36:
|
||||
docker: *test_only
|
||||
steps:
|
||||
- checkout
|
||||
- run:
|
||||
name: Run tests
|
||||
command: tox -e integration-redshift-py36
|
||||
- store_artifacts:
|
||||
path: ./logs
|
||||
integration-bigquery-py36:
|
||||
docker: *test_only
|
||||
steps:
|
||||
- checkout
|
||||
- run:
|
||||
name: Run tests
|
||||
command: tox -e integration-bigquery-py36
|
||||
- store_artifacts:
|
||||
path: ./logs
|
||||
integration-postgres-py38:
|
||||
docker: *test_and_postgres
|
||||
steps:
|
||||
- checkout
|
||||
- run: *setupdb
|
||||
- run:
|
||||
name: Run tests
|
||||
command: tox -e integration-postgres-py38
|
||||
- store_artifacts:
|
||||
path: ./logs
|
||||
integration-snowflake-py38:
|
||||
docker: *test_only
|
||||
steps:
|
||||
- checkout
|
||||
- run:
|
||||
name: Run tests
|
||||
command: tox -e integration-snowflake-py38
|
||||
no_output_timeout: 1h
|
||||
- store_artifacts:
|
||||
path: ./logs
|
||||
integration-redshift-py38:
|
||||
docker: *test_only
|
||||
steps:
|
||||
- checkout
|
||||
- run:
|
||||
name: Run tests
|
||||
command: tox -e integration-redshift-py38
|
||||
- store_artifacts:
|
||||
path: ./logs
|
||||
integration-bigquery-py38:
|
||||
docker: *test_only
|
||||
steps:
|
||||
- checkout
|
||||
- run:
|
||||
name: Run tests
|
||||
command: tox -e integration-bigquery-py38
|
||||
- store_artifacts:
|
||||
path: ./logs
|
||||
|
||||
workflows:
|
||||
version: 2
|
||||
test-everything:
|
||||
jobs:
|
||||
- unit
|
||||
- integration-postgres-py36:
|
||||
requires:
|
||||
- unit
|
||||
- integration-redshift-py36:
|
||||
requires:
|
||||
- integration-postgres-py36
|
||||
- integration-bigquery-py36:
|
||||
requires:
|
||||
- integration-postgres-py36
|
||||
- integration-snowflake-py36:
|
||||
requires:
|
||||
- integration-postgres-py36
|
||||
- integration-postgres-py38:
|
||||
requires:
|
||||
- unit
|
||||
- integration-redshift-py38:
|
||||
requires:
|
||||
- integration-postgres-py38
|
||||
- integration-bigquery-py38:
|
||||
requires:
|
||||
- integration-postgres-py38
|
||||
- integration-snowflake-py38:
|
||||
requires:
|
||||
- integration-postgres-py38
|
||||
- build-wheels:
|
||||
requires:
|
||||
- unit
|
||||
- integration-postgres-py36
|
||||
- integration-redshift-py36
|
||||
- integration-bigquery-py36
|
||||
- integration-snowflake-py36
|
||||
- integration-postgres-py38
|
||||
- integration-redshift-py38
|
||||
- integration-bigquery-py38
|
||||
- integration-snowflake-py38
|
||||
12
.flake8
12
.flake8
@@ -1,12 +0,0 @@
|
||||
[flake8]
|
||||
select =
|
||||
E
|
||||
W
|
||||
F
|
||||
ignore =
|
||||
W503 # makes Flake8 work like black
|
||||
W504
|
||||
E203 # makes Flake8 work like black
|
||||
E741
|
||||
E501 # long line checking is done in black
|
||||
exclude = test/
|
||||
@@ -1,2 +0,0 @@
|
||||
# Reformatting dbt-core via black, flake8, mypy, and assorted pre-commit hooks.
|
||||
43e3fc22c4eae4d3d901faba05e33c40f1f1dc5a
|
||||
6
.gitattributes
vendored
6
.gitattributes
vendored
@@ -1,6 +0,0 @@
|
||||
core/dbt/include/index.html binary
|
||||
tests/functional/artifacts/data/state/*/manifest.json binary
|
||||
core/dbt/docs/build/html/searchindex.js binary
|
||||
core/dbt/docs/build/html/index.html binary
|
||||
performance/runner/Cargo.lock binary
|
||||
core/dbt/events/types_pb2.py binary
|
||||
60
.github/CODEOWNERS
vendored
60
.github/CODEOWNERS
vendored
@@ -1,60 +0,0 @@
|
||||
# This file contains the code owners for the dbt-core repo.
|
||||
# PRs will be automatically assigned for review to the associated
|
||||
# team(s) or person(s) that touches any files that are mapped to them.
|
||||
#
|
||||
# A statement takes precedence over the statements above it so more general
|
||||
# assignments are found at the top with specific assignments being lower in
|
||||
# the ordering (i.e. catch all assignment should be the first item)
|
||||
#
|
||||
# Consult GitHub documentation for formatting guidelines:
|
||||
# https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners#example-of-a-codeowners-file
|
||||
|
||||
# As a default for areas with no assignment,
|
||||
# the core team as a whole will be assigned
|
||||
* @dbt-labs/core-team
|
||||
|
||||
### OSS Tooling Guild
|
||||
|
||||
/.github/ @dbt-labs/guild-oss-tooling
|
||||
.bumpversion.cfg @dbt-labs/guild-oss-tooling
|
||||
|
||||
.changie.yaml @dbt-labs/guild-oss-tooling
|
||||
|
||||
pre-commit-config.yaml @dbt-labs/guild-oss-tooling
|
||||
pytest.ini @dbt-labs/guild-oss-tooling
|
||||
tox.ini @dbt-labs/guild-oss-tooling
|
||||
|
||||
pyproject.toml @dbt-labs/guild-oss-tooling
|
||||
requirements.txt @dbt-labs/guild-oss-tooling
|
||||
dev_requirements.txt @dbt-labs/guild-oss-tooling
|
||||
/core/setup.py @dbt-labs/guild-oss-tooling
|
||||
/core/MANIFEST.in @dbt-labs/guild-oss-tooling
|
||||
|
||||
### ADAPTERS
|
||||
|
||||
# Adapter interface ("base" + "sql" adapter defaults, cache)
|
||||
/core/dbt/adapters @dbt-labs/core-adapters
|
||||
|
||||
# Global project (default macros + materializations), starter project
|
||||
/core/dbt/include @dbt-labs/core-adapters
|
||||
|
||||
# Postgres plugin
|
||||
/plugins/ @dbt-labs/core-adapters
|
||||
/plugins/postgres/setup.py @dbt-labs/core-adapters @dbt-labs/guild-oss-tooling
|
||||
|
||||
# Functional tests for adapter plugins
|
||||
/tests/adapter @dbt-labs/core-adapters
|
||||
|
||||
### TESTS
|
||||
|
||||
# Overlapping ownership for vast majority of unit + functional tests
|
||||
|
||||
# Perf regression testing framework
|
||||
# This excludes the test project files itself since those aren't specific
|
||||
# framework changes (excluded by not setting an owner next to it- no owner)
|
||||
/performance @nathaniel-may
|
||||
/performance/projects
|
||||
|
||||
### ARTIFACTS
|
||||
|
||||
/schemas/dbt @dbt-labs/cloud-artifacts
|
||||
97
.github/ISSUE_TEMPLATE/bug-report.yml
vendored
97
.github/ISSUE_TEMPLATE/bug-report.yml
vendored
@@ -1,97 +0,0 @@
|
||||
name: 🐞 Bug
|
||||
description: Report a bug or an issue you've found with dbt
|
||||
title: "[Bug] <title>"
|
||||
labels: ["bug", "triage"]
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: |
|
||||
Thanks for taking the time to fill out this bug report!
|
||||
- type: checkboxes
|
||||
attributes:
|
||||
label: Is this a new bug in dbt-core?
|
||||
description: >
|
||||
In other words, is this an error, flaw, failure or fault in our software?
|
||||
|
||||
If this is a bug that broke existing functionality that used to work, please open a regression issue.
|
||||
If this is a bug in an adapter plugin, please open an issue in the adapter's repository.
|
||||
If this is a bug experienced while using dbt Cloud, please report to [support](mailto:support@getdbt.com).
|
||||
If this is a request for help or troubleshooting code in your own dbt project, please join our [dbt Community Slack](https://www.getdbt.com/community/join-the-community/) or open a [Discussion question](https://github.com/dbt-labs/docs.getdbt.com/discussions).
|
||||
|
||||
Please search to see if an issue already exists for the bug you encountered.
|
||||
options:
|
||||
- label: I believe this is a new bug in dbt-core
|
||||
required: true
|
||||
- label: I have searched the existing issues, and I could not find an existing issue for this bug
|
||||
required: true
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Current Behavior
|
||||
description: A concise description of what you're experiencing.
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Expected Behavior
|
||||
description: A concise description of what you expected to happen.
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Steps To Reproduce
|
||||
description: Steps to reproduce the behavior.
|
||||
placeholder: |
|
||||
1. In this environment...
|
||||
2. With this config...
|
||||
3. Run '...'
|
||||
4. See error...
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: logs
|
||||
attributes:
|
||||
label: Relevant log output
|
||||
description: |
|
||||
If applicable, log output to help explain your problem.
|
||||
render: shell
|
||||
validations:
|
||||
required: false
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Environment
|
||||
description: |
|
||||
examples:
|
||||
- **OS**: Ubuntu 20.04
|
||||
- **Python**: 3.9.12 (`python3 --version`)
|
||||
- **dbt-core**: 1.1.1 (`dbt --version`)
|
||||
value: |
|
||||
- OS:
|
||||
- Python:
|
||||
- dbt:
|
||||
render: markdown
|
||||
validations:
|
||||
required: false
|
||||
- type: dropdown
|
||||
id: database
|
||||
attributes:
|
||||
label: Which database adapter are you using with dbt?
|
||||
description: If the bug is specific to the database or adapter, please open the issue in that adapter's repository instead
|
||||
multiple: true
|
||||
options:
|
||||
- postgres
|
||||
- redshift
|
||||
- snowflake
|
||||
- bigquery
|
||||
- spark
|
||||
- other (mention it in "Additional Context")
|
||||
validations:
|
||||
required: false
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Additional Context
|
||||
description: |
|
||||
Links? References? Anything that will give us more context about the issue you are encountering!
|
||||
|
||||
Tip: You can attach images or log files by clicking this area to highlight it and then dragging files in.
|
||||
validations:
|
||||
required: false
|
||||
41
.github/ISSUE_TEMPLATE/bug_report.md
vendored
Normal file
41
.github/ISSUE_TEMPLATE/bug_report.md
vendored
Normal file
@@ -0,0 +1,41 @@
|
||||
---
|
||||
name: Bug report
|
||||
about: Report a bug or an issue you've found with dbt
|
||||
title: ''
|
||||
labels: bug, triage
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
### Describe the bug
|
||||
A clear and concise description of what the bug is. What command did you run? What happened?
|
||||
|
||||
### Steps To Reproduce
|
||||
In as much detail as possible, please provide steps to reproduce the issue. Sample data that triggers the issue, example model code, etc is all very helpful here.
|
||||
|
||||
### Expected behavior
|
||||
A clear and concise description of what you expected to happen.
|
||||
|
||||
### Screenshots and log output
|
||||
If applicable, add screenshots or log output to help explain your problem.
|
||||
|
||||
### System information
|
||||
**Which database are you using dbt with?**
|
||||
- [ ] postgres
|
||||
- [ ] redshift
|
||||
- [ ] bigquery
|
||||
- [ ] snowflake
|
||||
- [ ] other (specify: ____________)
|
||||
|
||||
|
||||
**The output of `dbt --version`:**
|
||||
```
|
||||
<output goes here>
|
||||
```
|
||||
|
||||
**The operating system you're using:**
|
||||
|
||||
**The output of `python --version`:**
|
||||
|
||||
### Additional context
|
||||
Add any other context about the problem here.
|
||||
23
.github/ISSUE_TEMPLATE/config.yml
vendored
23
.github/ISSUE_TEMPLATE/config.yml
vendored
@@ -1,23 +0,0 @@
|
||||
blank_issues_enabled: false
|
||||
contact_links:
|
||||
- name: Ask the community for help
|
||||
url: https://github.com/dbt-labs/docs.getdbt.com/discussions
|
||||
about: Need help troubleshooting? Check out our guide on how to ask
|
||||
- name: Contact dbt Cloud support
|
||||
url: mailto:support@getdbt.com
|
||||
about: Are you using dbt Cloud? Contact our support team for help!
|
||||
- name: Participate in Discussions
|
||||
url: https://github.com/dbt-labs/dbt-core/discussions
|
||||
about: Do you have a Big Idea for dbt? Read open discussions, or start a new one
|
||||
- name: Create an issue for dbt-redshift
|
||||
url: https://github.com/dbt-labs/dbt-redshift/issues/new/choose
|
||||
about: Report a bug or request a feature for dbt-redshift
|
||||
- name: Create an issue for dbt-bigquery
|
||||
url: https://github.com/dbt-labs/dbt-bigquery/issues/new/choose
|
||||
about: Report a bug or request a feature for dbt-bigquery
|
||||
- name: Create an issue for dbt-snowflake
|
||||
url: https://github.com/dbt-labs/dbt-snowflake/issues/new/choose
|
||||
about: Report a bug or request a feature for dbt-snowflake
|
||||
- name: Create an issue for dbt-spark
|
||||
url: https://github.com/dbt-labs/dbt-spark/issues/new/choose
|
||||
about: Report a bug or request a feature for dbt-spark
|
||||
59
.github/ISSUE_TEMPLATE/feature-request.yml
vendored
59
.github/ISSUE_TEMPLATE/feature-request.yml
vendored
@@ -1,59 +0,0 @@
|
||||
name: ✨ Feature
|
||||
description: Propose a straightforward extension of dbt functionality
|
||||
title: "[Feature] <title>"
|
||||
labels: ["enhancement", "triage"]
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: |
|
||||
Thanks for taking the time to fill out this feature request!
|
||||
- type: checkboxes
|
||||
attributes:
|
||||
label: Is this your first time submitting a feature request?
|
||||
description: >
|
||||
We want to make sure that features are distinct and discoverable,
|
||||
so that other members of the community can find them and offer their thoughts.
|
||||
|
||||
Issues are the right place to request straightforward extensions of existing dbt functionality.
|
||||
For "big ideas" about future capabilities of dbt, we ask that you open a
|
||||
[discussion](https://github.com/dbt-labs/dbt-core/discussions) in the "Ideas" category instead.
|
||||
options:
|
||||
- label: I have read the [expectations for open source contributors](https://docs.getdbt.com/docs/contributing/oss-expectations)
|
||||
required: true
|
||||
- label: I have searched the existing issues, and I could not find an existing issue for this feature
|
||||
required: true
|
||||
- label: I am requesting a straightforward extension of existing dbt functionality, rather than a Big Idea better suited to a discussion
|
||||
required: true
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Describe the feature
|
||||
description: A clear and concise description of what you want to happen.
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Describe alternatives you've considered
|
||||
description: |
|
||||
A clear and concise description of any alternative solutions or features you've considered.
|
||||
validations:
|
||||
required: false
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Who will this benefit?
|
||||
description: |
|
||||
What kind of use case will this feature be useful for? Please be specific and provide examples, this will help us prioritize properly.
|
||||
validations:
|
||||
required: false
|
||||
- type: input
|
||||
attributes:
|
||||
label: Are you interested in contributing this feature?
|
||||
description: Let us know if you want to write some code, and how we can help.
|
||||
validations:
|
||||
required: false
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Anything else?
|
||||
description: |
|
||||
Links? References? Anything that will give us more context about the feature you are suggesting!
|
||||
validations:
|
||||
required: false
|
||||
23
.github/ISSUE_TEMPLATE/feature_request.md
vendored
Normal file
23
.github/ISSUE_TEMPLATE/feature_request.md
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
---
|
||||
name: Feature request
|
||||
about: Suggest an idea for dbt
|
||||
title: ''
|
||||
labels: enhancement, triage
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
### Describe the feature
|
||||
A clear and concise description of what you want to happen.
|
||||
|
||||
### Describe alternatives you've considered
|
||||
A clear and concise description of any alternative solutions or features you've considered.
|
||||
|
||||
### Additional context
|
||||
Is this feature database-specific? Which database(s) is/are relevant? Please include any other relevant context here.
|
||||
|
||||
### Who will this benefit?
|
||||
What kind of use case will this feature be useful for? Please be specific and provide examples, this will help us prioritize properly.
|
||||
|
||||
### Are you interested in contributing this feature?
|
||||
Let us know if you want to write some code, and how we can help.
|
||||
93
.github/ISSUE_TEMPLATE/regression-report.yml
vendored
93
.github/ISSUE_TEMPLATE/regression-report.yml
vendored
@@ -1,93 +0,0 @@
|
||||
name: ☣️ Regression
|
||||
description: Report a regression you've observed in a newer version of dbt
|
||||
title: "[Regression] <title>"
|
||||
labels: ["bug", "regression", "triage"]
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: |
|
||||
Thanks for taking the time to fill out this regression report!
|
||||
- type: checkboxes
|
||||
attributes:
|
||||
label: Is this a regression in a recent version of dbt-core?
|
||||
description: >
|
||||
A regression is when documented functionality works as expected in an older version of dbt-core,
|
||||
and no longer works after upgrading to a newer version of dbt-core
|
||||
options:
|
||||
- label: I believe this is a regression in dbt-core functionality
|
||||
required: true
|
||||
- label: I have searched the existing issues, and I could not find an existing issue for this regression
|
||||
required: true
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Current Behavior
|
||||
description: A concise description of what you're experiencing.
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Expected/Previous Behavior
|
||||
description: A concise description of what you expected to happen.
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Steps To Reproduce
|
||||
description: Steps to reproduce the behavior.
|
||||
placeholder: |
|
||||
1. In this environment...
|
||||
2. With this config...
|
||||
3. Run '...'
|
||||
4. See error...
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: logs
|
||||
attributes:
|
||||
label: Relevant log output
|
||||
description: |
|
||||
If applicable, log output to help explain your problem.
|
||||
render: shell
|
||||
validations:
|
||||
required: false
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Environment
|
||||
description: |
|
||||
examples:
|
||||
- **OS**: Ubuntu 20.04
|
||||
- **Python**: 3.9.12 (`python3 --version`)
|
||||
- **dbt-core (working version)**: 1.1.1 (`dbt --version`)
|
||||
- **dbt-core (regression version)**: 1.2.0 (`dbt --version`)
|
||||
value: |
|
||||
- OS:
|
||||
- Python:
|
||||
- dbt (working version):
|
||||
- dbt (regression version):
|
||||
render: markdown
|
||||
validations:
|
||||
required: true
|
||||
- type: dropdown
|
||||
id: database
|
||||
attributes:
|
||||
label: Which database adapter are you using with dbt?
|
||||
description: If the regression is specific to the database or adapter, please open the issue in that adapter's repository instead
|
||||
multiple: true
|
||||
options:
|
||||
- postgres
|
||||
- redshift
|
||||
- snowflake
|
||||
- bigquery
|
||||
- spark
|
||||
- other (mention it in "Additional Context")
|
||||
validations:
|
||||
required: false
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Additional Context
|
||||
description: |
|
||||
Links? References? Anything that will give us more context about the issue you are encountering!
|
||||
|
||||
Tip: You can attach images or log files by clicking this area to highlight it and then dragging files in.
|
||||
validations:
|
||||
required: false
|
||||
216
.github/_README.md
vendored
216
.github/_README.md
vendored
@@ -1,216 +0,0 @@
|
||||
<!-- GitHub will publish this readme on the main repo page if the name is `README.md` so we've added the leading underscore to prevent this -->
|
||||
<!-- Do not rename this file `README.md` -->
|
||||
<!-- See https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-readmes -->
|
||||
|
||||
## What are GitHub Actions?
|
||||
|
||||
GitHub Actions are used for many different purposes. We use them to run tests in CI, validate PRs are in an expected state, and automate processes.
|
||||
|
||||
- [Overview of GitHub Actions](https://docs.github.com/en/actions/learn-github-actions/understanding-github-actions)
|
||||
- [What's a workflow?](https://docs.github.com/en/actions/using-workflows/about-workflows)
|
||||
- [GitHub Actions guides](https://docs.github.com/en/actions/guides)
|
||||
|
||||
___
|
||||
|
||||
## Where do actions and workflows live
|
||||
|
||||
We try to maintain actions that are shared across repositories in a single place so that any necessary changes only need to be made once.
|
||||
|
||||
[dbt-labs/actions](https://github.com/dbt-labs/actions/) is the central repository of actions and workflows we use across repositories.
|
||||
|
||||
GitHub Actions also live locally within a repository. The workflows can be found at `.github/workflows` from the root of the repository. These should be specific to that code base.
|
||||
|
||||
Note: We are actively moving actions into the central Action repository so there is currently some duplication across repositories.
|
||||
|
||||
___
|
||||
|
||||
## Basics of Using Actions
|
||||
|
||||
### Viewing Output
|
||||
|
||||
- View the detailed action output for your PR in the **Checks** tab of the PR. This only shows the most recent run. You can also view high-level **Checks** output at the bottom of the PR.
|
||||
|
||||
- View _all_ action output for a repository from the [**Actions**](https://github.com/dbt-labs/dbt-core/actions) tab. Workflow results last 1 year. Artifacts last 90 days, unless specified otherwise in individual workflows.
|
||||
|
||||
This view often shows what seem like duplicates of the same workflow. This occurs when files are renamed but the workflow name has not changed. These are in fact _not_ duplicates.
|
||||
|
||||
You can see the branch the workflow runs from in this view. It is listed in the table between the workflow name and the time/duration of the run. When blank, the workflow is running in the context of the `main` branch.
|
||||
|
||||
### How to view what workflow file is being referenced from a run
|
||||
|
||||
- When viewing the output of a specific workflow run, click the 3 dots at the top right of the display. There will be an option to `View workflow file`.
|
||||
|
||||
### How to manually run a workflow
|
||||
|
||||
- If a workflow has the `on: workflow_dispatch` trigger, it can be manually triggered
|
||||
- From the [**Actions**](https://github.com/dbt-labs/dbt-core/actions) tab, find the workflow you want to run, select it, and fill in any inputs required. That's it! (A minimal sketch of such a workflow follows below.)
|
||||
|
||||
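As a rough illustration (this is a hypothetical workflow, not one that exists in this repo), a manually triggerable workflow only needs the `workflow_dispatch` trigger and, optionally, some inputs:

```yaml
name: Example manual workflow
on:
  workflow_dispatch:
    inputs:
      version_number:
        description: The version number
        required: true

jobs:
  show-inputs:
    runs-on: ubuntu-latest
    steps:
      - name: "[DEBUG] Print Variables"
        run: |
          echo "The release version number: ${{ inputs.version_number }}"
```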
### How to re-run jobs
|
||||
|
||||
- Some actions cannot be rerun in the GitHub UI, namely the Snyk checks and the CLA check. Snyk checks are rerun by closing and reopening the PR. You can retrigger the CLA check by commenting on the PR with `@cla-bot check`
|
||||
|
||||
___
|
||||
|
||||
## General Standards
|
||||
|
||||
### Permissions
|
||||
- By default, workflows have read permissions in the repository for the contents scope only when no permissions are explicitly set.
|
||||
- It is best practice to always define the permissions explicitly. This will allow actions to continue to work when the default permissions on the repository are changed. It also allows explicit grants of the least permissions possible.
|
||||
- There are a lot of permissions available. [Read up on them](https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs) if you're unsure what to use.
|
||||
|
||||
```yaml
|
||||
permissions:
|
||||
contents: read
|
||||
pull-requests: write
|
||||
```
|
||||
|
||||
### Secrets
|
||||
- When to use a [Personal Access Token (PAT)](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token) vs the [GITHUB_TOKEN](https://docs.github.com/en/actions/security-guides/automatic-token-authentication) generated for the action?
|
||||
|
||||
The `GITHUB_TOKEN` is used by default. In most cases it is sufficient for what you need.
|
||||
|
||||
If you expect the workflow to result in a commit that should retrigger workflows, you will need to use a Personal Access Token for the bot to commit the file. When using the GITHUB_TOKEN, the resulting commit will not trigger another GitHub Actions Workflow run. This is due to limitations set by GitHub. See [the docs](https://docs.github.com/en/actions/security-guides/automatic-token-authentication#using-the-github_token-in-a-workflow) for a more detailed explanation.
|
||||
|
||||
For example, we must use a PAT in our workflow to commit a new changelog yaml file for bot PRs. Once the file has been committed to the branch, it should retrigger the check to validate that a changelog exists on the PR. Otherwise, it would stay in a failed state since the check would never retrigger.
|
||||
|
||||
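A rough sketch of what that looks like in a workflow step (the secret name below is hypothetical, not a secret defined in this repo); commits pushed with a PAT checked out this way can retrigger other workflows, such as the changelog check:

```yaml
- name: Check out with a PAT instead of the default GITHUB_TOKEN
  uses: actions/checkout@v3
  with:
    token: ${{ secrets.BOT_PAT }}  # hypothetical PAT secret name
```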
### Triggers
|
||||
You can configure your workflows to run when specific activity on GitHub happens, at a scheduled time, or when an event outside of GitHub occurs. Read more details in the [GitHub docs](https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows).
|
||||
|
||||
These triggers are under the `on` key of the workflow and more than one can be listed.
|
||||
|
||||
```yaml
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- "main"
|
||||
- "*.latest"
|
||||
- "releases/*"
|
||||
pull_request:
|
||||
# catch when the PR is opened with the label or when the label is added
|
||||
types: [opened, labeled]
|
||||
workflow_dispatch:
|
||||
```
|
||||
|
||||
Some triggers of note that we use:
|
||||
|
||||
- `push` - Runs your workflow when you push a commit or tag.
|
||||
- `pull_request` - Runs your workflow when activity on a pull request in the workflow's repository occurs. Takes in a list of activity types (opened, labeled, etc) if appropriate.
|
||||
- `pull_request_target` - Same as `pull_request` but runs in the context of the PR target branch.
|
||||
- `workflow_call` - used with reusable workflows. Triggered by another workflow calling it.
|
||||
- `workflow_dispatch` - Gives the ability to manually trigger a workflow from the GitHub API, GitHub CLI, or GitHub browser interface.
|
||||
|
||||
|
||||
### Basic Formatting
|
||||
- Add a description of what your workflow does at the top in this format
|
||||
|
||||
```
|
||||
# **what?**
|
||||
# Describe what the action does.
|
||||
|
||||
# **why?**
|
||||
# Why does this action exist?
|
||||
|
||||
# **when?**
|
||||
# How/when will it be triggered?
|
||||
```
|
||||
|
||||
- Leave blank lines between steps and jobs
|
||||
|
||||
```yaml
|
||||
jobs:
|
||||
dependency_changelog:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Get File Name Timestamp
|
||||
id: filename_time
|
||||
uses: nanzm/get-time-action@v1.1
|
||||
with:
|
||||
format: 'YYYYMMDD-HHmmss'
|
||||
|
||||
- name: Get File Content Timestamp
|
||||
id: file_content_time
|
||||
uses: nanzm/get-time-action@v1.1
|
||||
with:
|
||||
format: 'YYYY-MM-DDTHH:mm:ss.000000-05:00'
|
||||
|
||||
- name: Generate Filepath
|
||||
id: fp
|
||||
run: |
|
||||
FILEPATH=.changes/unreleased/Dependencies-${{ steps.filename_time.outputs.time }}.yaml
|
||||
echo "FILEPATH=$FILEPATH" >> $GITHUB_OUTPUT
|
||||
```
|
||||
|
||||
- Print out all variables you will reference as the first step of a job. This allows for easier debugging. The first job should log all inputs. Subsequent jobs should reference outputs of other jobs, if present.
|
||||
|
||||
When possible, generate variables at the top of your workflow in a single place to reference later. This is not always strictly possible since you may generate a value to be used later mid-workflow.
|
||||
|
||||
Be sure to use quotes around these logs so special characters are not interpreted.
|
||||
|
||||
```yaml
|
||||
job1:
|
||||
- name: "[DEBUG] Print Variables"
|
||||
run: |
|
||||
echo "all variables defined as inputs"
|
||||
echo "The last commit sha in the release: ${{ inputs.sha }}"
|
||||
echo "The release version number: ${{ inputs.version_number }}"
|
||||
echo "The changelog_path: ${{ inputs.changelog_path }}"
|
||||
echo "The build_script_path: ${{ inputs.build_script_path }}"
|
||||
echo "The s3_bucket_name: ${{ inputs.s3_bucket_name }}"
|
||||
echo "The package_test_command: ${{ inputs.package_test_command }}"
|
||||
|
||||
# collect all the variables that need to be used in subsequent jobs
|
||||
- name: Set Variables
|
||||
id: variables
|
||||
run: |
|
||||
echo "important_path='performance/runner/Cargo.toml'" >> $GITHUB_OUTPUT
|
||||
echo "release_id=${{github.event.inputs.release_id}}" >> $GITHUB_OUTPUT
|
||||
echo "open_prs=${{github.event.inputs.open_prs}}" >> $GITHUB_OUTPUT
|
||||
|
||||
job2:
|
||||
needs: [job1]
|
||||
- name: "[DEBUG] Print Variables"
|
||||
run: |
|
||||
echo "all variables defined in job1 > Set Variables > outputs"
|
||||
echo "important_path: ${{ needs.job1.outputs.important_path }}"
|
||||
echo "release_id: ${{ needs.job1.outputs.release_id }}"
|
||||
echo "open_prs: ${{ needs.job1.outputs.open_prs }}"
|
||||
```
|
||||
|
||||
- When it's not obvious what something does, add a comment!
|
||||
|
||||
___
|
||||
|
||||
## Tips
|
||||
|
||||
### Context
|
||||
- The [GitHub CLI](https://cli.github.com/) is available in the default runners
|
||||
- Actions run in your context, i.e. an action from the marketplace that uses the GITHUB_TOKEN will use the GITHUB_TOKEN generated by your workflow run.
|
||||
|
||||
### Actions from the Marketplace
|
||||
- Don’t use external actions for things that can easily be accomplished manually.
|
||||
- Always read through what an external action does before using it! Often an action in the GitHub Actions Marketplace can be replaced with a few lines of bash. This is much more maintainable (and won't change under us) and makes it clear what's actually happening. It also prevents running third-party code that we haven't reviewed.
|
||||
- Pin actions _we don't control_ to tags.
|
||||
|
||||
### Connecting to AWS
|
||||
- Authenticate with the aws managed workflow
|
||||
|
||||
```yaml
|
||||
- name: Configure AWS credentials from Test account
|
||||
uses: aws-actions/configure-aws-credentials@v2
|
||||
with:
|
||||
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
aws-region: us-east-1
|
||||
```
|
||||
|
||||
- Then access with the aws command that comes installed on the action runner machines
|
||||
|
||||
```yaml
|
||||
- name: Copy Artifacts from S3 via CLI
|
||||
run: aws s3 cp ${{ env.s3_bucket }} . --recursive
|
||||
```
|
||||
|
||||
### Testing
|
||||
|
||||
- Depending on what your action does, you may be able to use [`act`](https://github.com/nektos/act) to test the action locally. Some features of GitHub Actions do not work with `act`, among those are reusable workflows. If you can't use `act`, you'll have to push your changes up before being able to test. This can be slow.
|
||||
14
.github/actions/latest-wrangler/Dockerfile
vendored
14
.github/actions/latest-wrangler/Dockerfile
vendored
@@ -1,14 +0,0 @@
|
||||
FROM python:3-slim AS builder
|
||||
ADD . /app
|
||||
WORKDIR /app
|
||||
|
||||
# We are installing a dependency here directly into our app source dir
|
||||
RUN pip install --target=/app requests packaging
|
||||
|
||||
# A distroless container image with Python and some basics like SSL certificates
|
||||
# https://github.com/GoogleContainerTools/distroless
|
||||
FROM gcr.io/distroless/python3-debian10
|
||||
COPY --from=builder /app /app
|
||||
WORKDIR /app
|
||||
ENV PYTHONPATH /app
|
||||
CMD ["/app/main.py"]
|
||||
50
.github/actions/latest-wrangler/README.md
vendored
50
.github/actions/latest-wrangler/README.md
vendored
@@ -1,50 +0,0 @@
|
||||
# Github package 'latest' tag wrangler for containers
|
||||
## Usage
|
||||
|
||||
Plug in the necessary inputs to determine if the container being built should be tagged 'latest' at the package level, for example `dbt-redshift:latest`.
|
||||
|
||||
## Inputs
|
||||
| Input | Description |
|
||||
| - | - |
|
||||
| `package` | Name of the GH package to check against |
|
||||
| `new_version` | Semver of new container |
|
||||
| `gh_token` | GH token with package read scope|
|
||||
| `halt_on_missing` | Return non-zero exit code if requested package does not exist. (defaults to false)|
|
||||
|
||||
|
||||
## Outputs
|
||||
| Output | Description |
|
||||
| - | - |
|
||||
| `latest` | Whether or not the new container should be tagged 'latest' |
| `minor_latest` | Whether or not the new container should be tagged major.minor.latest |
|
||||
|
||||
## Example workflow
|
||||
```yaml
|
||||
name: Ship it!
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
package:
|
||||
description: The package to publish
|
||||
required: true
|
||||
version_number:
|
||||
description: The version number
|
||||
required: true
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: Wrangle latest tag
|
||||
id: is_latest
|
||||
uses: ./.github/actions/latest-wrangler
|
||||
with:
|
||||
package: ${{ github.event.inputs.package }}
|
||||
new_version: ${{ github.event.inputs.new_version }}
|
||||
gh_token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- name: Print the results
|
||||
run: |
|
||||
echo "Is it latest? Survey says: ${{ steps.is_latest.outputs.latest }} !"
|
||||
echo "Is it minor.latest? Survey says: ${{ steps.is_latest.outputs.minor_latest }} !"
|
||||
```
|
||||
20
.github/actions/latest-wrangler/action.yml
vendored
20
.github/actions/latest-wrangler/action.yml
vendored
@@ -1,20 +0,0 @@
|
||||
name: "Github package 'latest' tag wrangler for containers"
|
||||
description: "Determines whether or not a given dbt container should be given a bare 'latest' tag (i.e. dbt-core:latest)"
|
||||
inputs:
|
||||
package_name:
|
||||
description: "Package to check (e.g. dbt-core, dbt-redshift, etc.)"
|
||||
required: true
|
||||
new_version:
|
||||
description: "Semver of the container being built (e.g. 1.0.4)"
|
||||
required: true
|
||||
gh_token:
|
||||
description: "Auth token for github (must have view packages scope)"
|
||||
required: true
|
||||
outputs:
|
||||
latest:
|
||||
description: "Whether or not built container should be tagged latest (bool)"
|
||||
minor_latest:
|
||||
description: "Whether or not built container should be tagged minor.latest (bool)"
|
||||
runs:
|
||||
using: "docker"
|
||||
image: "Dockerfile"
|
||||
@@ -1,26 +0,0 @@
|
||||
name: Ship it!
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
package:
|
||||
description: The package to publish
|
||||
required: true
|
||||
version_number:
|
||||
description: The version number
|
||||
required: true
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: Wrangle latest tag
|
||||
id: is_latest
|
||||
uses: ./.github/actions/latest-wrangler
|
||||
with:
|
||||
package: ${{ github.event.inputs.package }}
|
||||
new_version: ${{ github.event.inputs.new_version }}
|
||||
gh_token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- name: Print the results
|
||||
run: |
|
||||
echo "Is it latest? Survey says: ${{ steps.is_latest.outputs.latest }} !"
|
||||
@@ -1,6 +0,0 @@
|
||||
{
|
||||
"inputs": {
|
||||
"version_number": "1.0.1",
|
||||
"package": "dbt-redshift"
|
||||
}
|
||||
}
|
||||
98
.github/actions/latest-wrangler/main.py
vendored
98
.github/actions/latest-wrangler/main.py
vendored
@@ -1,98 +0,0 @@
|
||||
import os
|
||||
import sys
|
||||
import requests
|
||||
from distutils.util import strtobool
|
||||
from typing import Union
|
||||
from packaging.version import parse, Version
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
# get inputs
|
||||
package = os.environ["INPUT_PACKAGE"]
|
||||
new_version = parse(os.environ["INPUT_NEW_VERSION"])
|
||||
gh_token = os.environ["INPUT_GH_TOKEN"]
|
||||
halt_on_missing = strtobool(os.environ.get("INPUT_HALT_ON_MISSING", "False"))
|
||||
|
||||
# get package metadata from github
|
||||
package_request = requests.get(
|
||||
f"https://api.github.com/orgs/dbt-labs/packages/container/{package}/versions",
|
||||
auth=("", gh_token),
|
||||
)
|
||||
package_meta = package_request.json()
|
||||
|
||||
# Log info if we don't get a 200
|
||||
if package_request.status_code != 200:
|
||||
print(f"Call to GH API failed: {package_request.status_code} {package_meta['message']}")
|
||||
|
||||
# Make an early exit if there is no matching package in github
|
||||
if package_request.status_code == 404:
|
||||
if halt_on_missing:
|
||||
sys.exit(1)
|
||||
# everything is the latest if the package doesn't exist
|
||||
github_output = os.environ.get("GITHUB_OUTPUT")
|
||||
with open(github_output, "at", encoding="utf-8") as gh_output:
|
||||
gh_output.write("latest=True")
|
||||
gh_output.write("minor_latest=True")
|
||||
sys.exit(0)
|
||||
|
||||
# TODO: verify package meta is "correct"
|
||||
# https://github.com/dbt-labs/dbt-core/issues/4640
|
||||
|
||||
# map versions and tags
|
||||
version_tag_map = {
|
||||
version["id"]: version["metadata"]["container"]["tags"] for version in package_meta
|
||||
}
|
||||
|
||||
# is pre-release
|
||||
pre_rel = True if any(x in str(new_version) for x in ["a", "b", "rc"]) else False
|
||||
|
||||
# semver of current latest
|
||||
for version, tags in version_tag_map.items():
|
||||
if "latest" in tags:
|
||||
# N.B. This seems counterintuitive, but we expect any version tagged
|
||||
# 'latest' to have exactly three associated tags:
|
||||
# latest, major.minor.latest, and major.minor.patch.
|
||||
# Subtracting everything that contains the string 'latest' gets us
|
||||
# the major.minor.patch which is what's needed for comparison.
|
||||
current_latest = parse([tag for tag in tags if "latest" not in tag][0])
|
||||
else:
|
||||
current_latest = False
|
||||
|
||||
# semver of current_minor_latest
|
||||
for version, tags in version_tag_map.items():
|
||||
if f"{new_version.major}.{new_version.minor}.latest" in tags:
|
||||
# Similar to above, only now we expect exactly two tags:
|
||||
# major.minor.patch and major.minor.latest
|
||||
current_minor_latest = parse([tag for tag in tags if "latest" not in tag][0])
|
||||
else:
|
||||
current_minor_latest = False
|
||||
|
||||
def is_latest(
pre_rel: bool, new_version: Version, remote_latest: Union[bool, Version]
) -> bool:
"""Determine if a given container should be tagged 'latest' based on:
- its pre-release status
- its version
- the version of a previously identified container tagged 'latest'

:param pre_rel: Whether or not the version of the new container is a pre-release
:param new_version: The version of the new container
:param remote_latest: The version of the previously identified container that's
already tagged latest or False
"""
|
||||
# is a pre-release = not latest
|
||||
if pre_rel:
|
||||
return False
|
||||
# + no latest tag found = is latest
|
||||
if not remote_latest:
|
||||
return True
|
||||
# + if remote version is lower than current = is latest, else not latest
|
||||
return True if remote_latest <= new_version else False
|
||||
|
||||
latest = is_latest(pre_rel, new_version, current_latest)
|
||||
minor_latest = is_latest(pre_rel, new_version, current_minor_latest)
|
||||
|
||||
github_output = os.environ.get("GITHUB_OUTPUT")
|
||||
with open(github_output, "at", encoding="utf-8") as gh_output:
|
||||
gh_output.write(f"latest={latest}")
|
||||
gh_output.write(f"minor_latest={minor_latest}")
|
||||
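To illustrate the tagging rule implemented by is_latest above (a sketch only; the function is defined under the __main__ guard, so assume it has been lifted to module scope to be importable): pre-releases never get the tag, a missing remote 'latest' always yields the tag, and otherwise the new version must be at least the currently tagged one.

from packaging.version import parse

# expected behaviour of is_latest(), assuming it is available at module scope
assert not is_latest(pre_rel=True, new_version=parse("1.5.0"), remote_latest=parse("1.4.1"))
assert is_latest(pre_rel=False, new_version=parse("1.5.0"), remote_latest=False)
assert is_latest(pre_rel=False, new_version=parse("1.5.0"), remote_latest=parse("1.4.1"))
assert not is_latest(pre_rel=False, new_version=parse("1.5.0"), remote_latest=parse("1.5.1"))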
.github/actions/setup-postgres-linux/action.yml (10 lines, vendored)
@@ -1,10 +0,0 @@
name: "Set up postgres (linux)"
description: "Set up postgres service on linux vm for dbt integration tests"
runs:
  using: "composite"
  steps:
    - shell: bash
      run: |
        sudo systemctl start postgresql.service
        pg_isready
        sudo -u postgres bash ${{ github.action_path }}/setup_db.sh
@@ -1 +0,0 @@
../../../test/setup_db.sh
.github/actions/setup-postgres-macos/action.yml (24 lines, vendored)
@@ -1,24 +0,0 @@
name: "Set up postgres (macos)"
description: "Set up postgres service on macos vm for dbt integration tests"
runs:
  using: "composite"
  steps:
    - shell: bash
      run: |
        brew services start postgresql
        echo "Check PostgreSQL service is running"
        i=10
        COMMAND='pg_isready'
        while [ $i -gt -1 ]; do
          if [ $i == 0 ]; then
            echo "PostgreSQL service not ready, all attempts exhausted"
            exit 1
          fi
          echo "Check PostgreSQL service status"
          eval $COMMAND && break
          echo "PostgreSQL service not ready, wait 10 more sec, attempts left: $i"
          sleep 10
          ((i--))
        done
        createuser -s postgres
        bash ${{ github.action_path }}/setup_db.sh
@@ -1 +0,0 @@
../../../test/setup_db.sh
@@ -1,12 +0,0 @@
name: "Set up postgres (windows)"
description: "Set up postgres service on windows vm for dbt integration tests"
runs:
  using: "composite"
  steps:
    - shell: pwsh
      run: |
        $pgService = Get-Service -Name postgresql*
        Set-Service -InputObject $pgService -Status running -StartupType automatic
        Start-Process -FilePath "$env:PGBIN\pg_isready" -Wait -PassThru
        $env:Path += ";$env:PGBIN"
        bash ${{ github.action_path }}/setup_db.sh
@@ -1 +0,0 @@
../../../test/setup_db.sh
.github/dependabot.yml (30 lines, vendored)
@@ -1,30 +0,0 @@
version: 2
updates:
  # python dependencies
  - package-ecosystem: "pip"
    directory: "/"
    schedule:
      interval: "daily"
    rebase-strategy: "disabled"
  - package-ecosystem: "pip"
    directory: "/core"
    schedule:
      interval: "daily"
    rebase-strategy: "disabled"
  - package-ecosystem: "pip"
    directory: "/plugins/postgres"
    schedule:
      interval: "daily"
    rebase-strategy: "disabled"

  # docker dependencies
  - package-ecosystem: "docker"
    directory: "/"
    schedule:
      interval: "weekly"
    rebase-strategy: "disabled"
  - package-ecosystem: "docker"
    directory: "/docker"
    schedule:
      interval: "weekly"
    rebase-strategy: "disabled"
.github/pull_request_template.md (31 lines, vendored)
@@ -1,35 +1,22 @@
|
||||
resolves #
|
||||
[docs](https://github.com/dbt-labs/docs.getdbt.com/issues/new/choose) dbt-labs/docs.getdbt.com/#
|
||||
resolves #
|
||||
|
||||
<!---
|
||||
Include the number of the issue addressed by this PR above if applicable.
|
||||
PRs for code changes without an associated issue *will not be merged*.
|
||||
See CONTRIBUTING.md for more information.
|
||||
|
||||
Include the number of the docs issue that was opened for this PR. If
|
||||
this change has no user-facing implications, "N/A" suffices instead. New
|
||||
docs tickets can be created by clicking the link above or by going to
|
||||
https://github.com/dbt-labs/docs.getdbt.com/issues/new/choose.
|
||||
Example:
|
||||
resolves #1234
|
||||
-->
|
||||
|
||||
### Problem
|
||||
|
||||
<!---
|
||||
Describe the problem this PR is solving. What is the application state
|
||||
before this PR is merged?
|
||||
-->
|
||||
### Description
|
||||
|
||||
### Solution
|
||||
<!--- Describe the Pull Request here -->
|
||||
|
||||
<!---
|
||||
Describe the way this PR solves the above problem. Add as much detail as you
|
||||
can to help reviewers understand your changes. Include any alternatives and
|
||||
tradeoffs you considered.
|
||||
-->
|
||||
|
||||
### Checklist
|
||||
|
||||
- [ ] I have read [the contributing guide](https://github.com/dbt-labs/dbt-core/blob/main/CONTRIBUTING.md) and understand what's expected of me
|
||||
- [ ] I have run this code in development and it appears to resolve the stated issue
|
||||
- [ ] This PR includes tests, or tests are not required/relevant for this PR
|
||||
- [ ] This PR has no interface changes (e.g. macros, cli, logs, json artifacts, config files, adapter interface, etc) or this PR has already received feedback and approval from Product or DX
|
||||
- [ ] I have signed the [CLA](https://docs.getdbt.com/docs/contributor-license-agreements)
|
||||
- [ ] I have run this code in development and it appears to resolve the stated issue
|
||||
- [ ] This PR includes tests, or tests are not required/relevant for this PR
|
||||
- [ ] I have updated the `CHANGELOG.md` and added information about my change to the "dbt next" section.
|
||||
|
||||
.github/workflows/backport.yml (40 lines, vendored)
@@ -1,40 +0,0 @@
|
||||
# **what?**
|
||||
# When a PR is merged, if it has the backport label, it will create
|
||||
# a new PR to backport those changes to the given branch. If it can't
|
||||
# cleanly do a backport, it will comment on the merged PR of the failure.
|
||||
#
|
||||
# Label naming convention: "backport <branch name to backport to>"
|
||||
# Example: backport 1.0.latest
|
||||
#
|
||||
# You MUST "Squash and merge" the original PR or this won't work.
|
||||
|
||||
# **why?**
|
||||
# Changes sometimes need to be backported to release branches.
|
||||
# This automates the backporting process
|
||||
|
||||
# **when?**
|
||||
# Once a PR is "Squash and merge"'d, by adding a backport label, this is triggered
|
||||
|
||||
name: Backport
|
||||
on:
|
||||
pull_request:
|
||||
types:
|
||||
- labeled
|
||||
|
||||
permissions:
|
||||
contents: write
|
||||
pull-requests: write
|
||||
|
||||
jobs:
|
||||
backport:
|
||||
name: Backport
|
||||
runs-on: ubuntu-latest
|
||||
# Only react to merged PRs for security reasons.
|
||||
# See https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#pull_request_target.
|
||||
if: >
|
||||
github.event.pull_request.merged
|
||||
&& contains(github.event.label.name, 'backport')
|
||||
steps:
|
||||
- uses: tibdex/backport@v2.0.3
|
||||
with:
|
||||
github_token: ${{ secrets.GITHUB_TOKEN }}
|
||||
.github/workflows/bot-changelog.yml (61 lines, vendored)
@@ -1,61 +0,0 @@
|
||||
# **what?**
|
||||
# When bots create a PR, this action will add a corresponding changie yaml file to that
|
||||
# PR when a specific label is added.
|
||||
#
|
||||
# The file is created off a template:
|
||||
#
|
||||
# kind: <per action matrix>
|
||||
# body: <PR title>
|
||||
# time: <current timestamp>
|
||||
# custom:
|
||||
# Author: <PR User Login (generally the bot)>
|
||||
# Issue: 4904
|
||||
# PR: <PR number>
|
||||
#
|
||||
# **why?**
|
||||
# Automate changelog generation for more visibility with automated bot PRs.
|
||||
#
|
||||
# **when?**
|
||||
# Once a PR is created, label should be added to PR before or after creation. You can also
|
||||
# manually trigger this by adding the appropriate label at any time.
|
||||
#
|
||||
# **how to add another bot?**
|
||||
# Add the label and changie kind to the include matrix. That's it!
|
||||
#
|
||||
|
||||
name: Bot Changelog
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
# catch when the PR is opened with the label or when the label is added
|
||||
types: [labeled]
|
||||
|
||||
permissions:
|
||||
contents: write
|
||||
pull-requests: read
|
||||
|
||||
jobs:
|
||||
generate_changelog:
|
||||
strategy:
|
||||
matrix:
|
||||
include:
|
||||
- label: "dependencies"
|
||||
changie_kind: "Dependencies"
|
||||
- label: "snyk"
|
||||
changie_kind: "Security"
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
|
||||
- name: Create and commit changelog on bot PR
|
||||
if: ${{ contains(github.event.pull_request.labels.*.name, matrix.label) }}
|
||||
id: bot_changelog
|
||||
uses: emmyoop/changie_bot@v1.1.0
|
||||
with:
|
||||
GITHUB_TOKEN: ${{ secrets.FISHTOWN_BOT_PAT }}
|
||||
commit_author_name: "Github Build Bot"
|
||||
commit_author_email: "<buildbot@fishtownanalytics.com>"
|
||||
commit_message: "Add automated changelog yaml from template for bot PR"
|
||||
changie_kind: ${{ matrix.changie_kind }}
|
||||
label: ${{ matrix.label }}
|
||||
custom_changelog_string: "custom:\n Author: ${{ github.event.pull_request.user.login }}\n PR: ${{ github.event.pull_request.number }}"
|
||||
.github/workflows/changelog-existence.yml (40 lines, vendored)
@@ -1,40 +0,0 @@
|
||||
# **what?**
|
||||
# Checks that a file has been committed under the /.changes directory
|
||||
# as a new CHANGELOG entry. Cannot check for a specific filename as
|
||||
# it is dynamically generated by change type and timestamp.
|
||||
# This workflow should not require any secrets since it runs for PRs
|
||||
# from forked repos.
|
||||
# By default, secrets are not passed to workflows running from
|
||||
# a forked repo.
|
||||
|
||||
# **why?**
|
||||
# Ensure code change gets reflected in the CHANGELOG.
|
||||
|
||||
# **when?**
|
||||
# This will run for all PRs going into main and *.latest. It will
|
||||
# run when they are opened, reopened, when any label is added or removed
|
||||
# and when new code is pushed to the branch. The action will then get
|
||||
# skipped if the 'Skip Changelog' label is present in any of the labels.
|
||||
|
||||
name: Check Changelog Entry
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
types: [opened, reopened, labeled, unlabeled, synchronize]
|
||||
workflow_dispatch:
|
||||
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
pull-requests: write
|
||||
|
||||
jobs:
|
||||
changelog:
|
||||
uses: dbt-labs/actions/.github/workflows/changelog-existence.yml@main
|
||||
with:
|
||||
changelog_comment: 'Thank you for your pull request! We could not find a changelog entry for this change. For details on how to document a change, see [the contributing guide](https://github.com/dbt-labs/dbt-core/blob/main/CONTRIBUTING.md#adding-changelog-entry).'
|
||||
skip_label: 'Skip Changelog'
|
||||
secrets: inherit
|
||||
.github/workflows/cut-release-branch.yml (41 lines, vendored)
@@ -1,41 +0,0 @@
|
||||
# **what?**
|
||||
# Cuts a new `*.latest` branch
|
||||
# Also cleans up all files in `.changes/unreleased` and `.changes/previous version` on
|
||||
# `main` and bumps `main` to the input version.
|
||||
|
||||
# **why?**
|
||||
# Generally reduces the workload of engineers and reduces error. Allow automation.
|
||||
|
||||
# **when?**
|
||||
# This will run when called manually.
|
||||
|
||||
name: Cut new release branch
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
version_to_bump_main:
|
||||
description: 'The alpha version main should bump to (ex. 1.6.0a1)'
|
||||
required: true
|
||||
new_branch_name:
|
||||
description: 'The full name of the new branch (ex. 1.5.latest)'
|
||||
required: true
|
||||
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
|
||||
permissions:
|
||||
contents: write
|
||||
|
||||
jobs:
|
||||
cut_branch:
|
||||
name: "Cut branch and clean up main for dbt-core"
|
||||
uses: dbt-labs/actions/.github/workflows/cut-release-branch.yml@main
|
||||
with:
|
||||
version_to_bump_main: ${{ inputs.version_to_bump_main }}
|
||||
new_branch_name: ${{ inputs.new_branch_name }}
|
||||
PR_title: "Cleanup main after cutting new ${{ inputs.new_branch_name }} branch"
|
||||
PR_body: "All adapter PRs will fail CI until the dbt-core PR has been merged due to release version conflicts."
|
||||
secrets:
|
||||
FISHTOWN_BOT_PAT: ${{ secrets.FISHTOWN_BOT_PAT }}
|
||||
.github/workflows/jira-creation.yml (26 lines, vendored)
@@ -1,26 +0,0 @@
|
||||
# **what?**
|
||||
# Mirrors issues into Jira. Includes the information: title,
|
||||
# GitHub Issue ID and URL
|
||||
|
||||
# **why?**
|
||||
# Jira is our tool for tracking and we need to see these issues in there
|
||||
|
||||
# **when?**
|
||||
# On issue creation or when an issue is labeled `Jira`
|
||||
|
||||
name: Jira Issue Creation
|
||||
|
||||
on:
|
||||
issues:
|
||||
types: [opened, labeled]
|
||||
|
||||
permissions:
|
||||
issues: write
|
||||
|
||||
jobs:
|
||||
call-creation-action:
|
||||
uses: dbt-labs/actions/.github/workflows/jira-creation-actions.yml@main
|
||||
secrets:
|
||||
JIRA_BASE_URL: ${{ secrets.JIRA_BASE_URL }}
|
||||
JIRA_USER_EMAIL: ${{ secrets.JIRA_USER_EMAIL }}
|
||||
JIRA_API_TOKEN: ${{ secrets.JIRA_API_TOKEN }}
|
||||
.github/workflows/jira-label.yml (26 lines, vendored)
@@ -1,26 +0,0 @@
|
||||
# **what?**
|
||||
# Calls mirroring Jira label Action. Includes adding a new label
|
||||
# to an existing issue or removing a label as well
|
||||
|
||||
# **why?**
|
||||
# Jira is our tool for tracking and we need to see these labels in there
|
||||
|
||||
# **when?**
|
||||
# On labels being added or removed from issues
|
||||
|
||||
name: Jira Label Mirroring
|
||||
|
||||
on:
|
||||
issues:
|
||||
types: [labeled, unlabeled]
|
||||
|
||||
permissions:
|
||||
issues: read
|
||||
|
||||
jobs:
|
||||
call-label-action:
|
||||
uses: dbt-labs/actions/.github/workflows/jira-label-actions.yml@main
|
||||
secrets:
|
||||
JIRA_BASE_URL: ${{ secrets.JIRA_BASE_URL }}
|
||||
JIRA_USER_EMAIL: ${{ secrets.JIRA_USER_EMAIL }}
|
||||
JIRA_API_TOKEN: ${{ secrets.JIRA_API_TOKEN }}
|
||||
.github/workflows/jira-transition.yml (27 lines, vendored)
@@ -1,27 +0,0 @@
|
||||
# **what?**
|
||||
# Transition a Jira issue to a new state
|
||||
# Only supports these GitHub Issue transitions:
|
||||
# closed, deleted, reopened
|
||||
|
||||
# **why?**
|
||||
# Jira needs to be kept up-to-date
|
||||
|
||||
# **when?**
|
||||
# On issue closing, deletion, reopened
|
||||
|
||||
name: Jira Issue Transition
|
||||
|
||||
on:
|
||||
issues:
|
||||
types: [closed, deleted, reopened]
|
||||
|
||||
# no special access is needed
|
||||
permissions: read-all
|
||||
|
||||
jobs:
|
||||
call-transition-action:
|
||||
uses: dbt-labs/actions/.github/workflows/jira-transition-actions.yml@main
|
||||
secrets:
|
||||
JIRA_BASE_URL: ${{ secrets.JIRA_BASE_URL }}
|
||||
JIRA_USER_EMAIL: ${{ secrets.JIRA_USER_EMAIL }}
|
||||
JIRA_API_TOKEN: ${{ secrets.JIRA_API_TOKEN }}
|
||||
.github/workflows/main.yml (237 lines, vendored)
@@ -1,237 +0,0 @@
|
||||
# **what?**
|
||||
# Runs code quality checks, unit tests, integration tests and
|
||||
# verifies python build on all code committed to the repository. This workflow
|
||||
# should not require any secrets since it runs for PRs from forked repos. By
|
||||
# default, secrets are not passed to workflows running from forked repos.
|
||||
|
||||
# **why?**
|
||||
# Ensure code for dbt meets a certain quality standard.
|
||||
|
||||
# **when?**
|
||||
# This will run for all PRs, when code is pushed to a release
|
||||
# branch, and when manually triggered.
|
||||
|
||||
name: Tests and Code Checks
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- "main"
|
||||
- "*.latest"
|
||||
- "releases/*"
|
||||
pull_request:
|
||||
workflow_dispatch:
|
||||
|
||||
permissions: read-all
|
||||
|
||||
# will cancel previous workflows triggered by the same event and for the same ref for PRs or same SHA otherwise
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event_name }}-${{ contains(github.event_name, 'pull_request') && github.event.pull_request.head.ref || github.sha }}
|
||||
cancel-in-progress: true
|
||||
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
|
||||
jobs:
|
||||
code-quality:
|
||||
name: code-quality
|
||||
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 10
|
||||
|
||||
steps:
|
||||
- name: Check out the repository
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: '3.8'
|
||||
|
||||
- name: Install python dependencies
|
||||
run: |
|
||||
python -m pip install --user --upgrade pip
|
||||
python -m pip --version
|
||||
make dev
|
||||
mypy --version
|
||||
dbt --version
|
||||
|
||||
- name: Run pre-commit hooks
|
||||
run: pre-commit run --all-files --show-diff-on-failure
|
||||
|
||||
unit:
|
||||
name: unit test / python ${{ matrix.python-version }}
|
||||
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 10
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
python-version: ["3.8", "3.9", "3.10", "3.11"]
|
||||
|
||||
env:
|
||||
TOXENV: "unit"
|
||||
|
||||
steps:
|
||||
- name: Check out the repository
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
|
||||
- name: Install python dependencies
|
||||
run: |
|
||||
python -m pip install --user --upgrade pip
|
||||
python -m pip --version
|
||||
python -m pip install tox
|
||||
tox --version
|
||||
|
||||
- name: Run tox
|
||||
run: tox
|
||||
|
||||
- name: Get current date
|
||||
if: always()
|
||||
id: date
|
||||
run: |
|
||||
CURRENT_DATE=$(date +'%Y-%m-%dT%H_%M_%S') # no colons allowed for artifacts
|
||||
echo "date=$CURRENT_DATE" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Upload Unit Test Coverage to Codecov
|
||||
if: ${{ matrix.python-version == '3.11' }}
|
||||
uses: codecov/codecov-action@v3
|
||||
env:
|
||||
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
|
||||
|
||||
integration:
|
||||
name: integration test / python ${{ matrix.python-version }} / ${{ matrix.os }}
|
||||
|
||||
runs-on: ${{ matrix.os }}
|
||||
timeout-minutes: 60
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
python-version: ["3.8", "3.9", "3.10", "3.11"]
|
||||
os: [ubuntu-20.04]
|
||||
include:
|
||||
- python-version: 3.8
|
||||
os: windows-latest
|
||||
- python-version: 3.8
|
||||
os: macos-latest
|
||||
|
||||
env:
|
||||
TOXENV: integration
|
||||
DBT_INVOCATION_ENV: github-actions
|
||||
DBT_TEST_USER_1: dbt_test_user_1
|
||||
DBT_TEST_USER_2: dbt_test_user_2
|
||||
DBT_TEST_USER_3: dbt_test_user_3
|
||||
DD_CIVISIBILITY_AGENTLESS_ENABLED: true
|
||||
DD_API_KEY: ${{ secrets.DATADOG_API_KEY }}
|
||||
DD_SITE: datadoghq.com
|
||||
DD_ENV: ci
|
||||
DD_SERVICE: ${{ github.event.repository.name }}
|
||||
|
||||
steps:
|
||||
- name: Check out the repository
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
|
||||
- name: Set up postgres (linux)
|
||||
if: runner.os == 'Linux'
|
||||
uses: ./.github/actions/setup-postgres-linux
|
||||
|
||||
- name: Set up postgres (macos)
|
||||
if: runner.os == 'macOS'
|
||||
uses: ./.github/actions/setup-postgres-macos
|
||||
|
||||
- name: Set up postgres (windows)
|
||||
if: runner.os == 'Windows'
|
||||
uses: ./.github/actions/setup-postgres-windows
|
||||
|
||||
- name: Install python tools
|
||||
run: |
|
||||
python -m pip install --user --upgrade pip
|
||||
python -m pip --version
|
||||
python -m pip install tox
|
||||
tox --version
|
||||
|
||||
- name: Run tests
|
||||
run: tox -- --ddtrace
|
||||
|
||||
- name: Get current date
|
||||
if: always()
|
||||
id: date
|
||||
run: |
|
||||
CURRENT_DATE=$(date +'%Y-%m-%dT%H_%M_%S') # no colons allowed for artifacts
|
||||
echo "date=$CURRENT_DATE" >> $GITHUB_OUTPUT
|
||||
|
||||
- uses: actions/upload-artifact@v3
|
||||
if: always()
|
||||
with:
|
||||
name: logs_${{ matrix.python-version }}_${{ matrix.os }}_${{ steps.date.outputs.date }}
|
||||
path: ./logs
|
||||
|
||||
- name: Upload Integration Test Coverage to Codecov
|
||||
if: ${{ matrix.python-version == '3.11' }}
|
||||
uses: codecov/codecov-action@v3
|
||||
env:
|
||||
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
|
||||
|
||||
build:
|
||||
name: build packages
|
||||
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Check out the repository
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: '3.8'
|
||||
|
||||
- name: Install python dependencies
|
||||
run: |
|
||||
python -m pip install --user --upgrade pip
|
||||
python -m pip install --upgrade setuptools wheel twine check-wheel-contents
|
||||
python -m pip --version
|
||||
|
||||
- name: Build distributions
|
||||
run: ./scripts/build-dist.sh
|
||||
|
||||
- name: Show distributions
|
||||
run: ls -lh dist/
|
||||
|
||||
- name: Check distribution descriptions
|
||||
run: |
|
||||
twine check dist/*
|
||||
|
||||
- name: Check wheel contents
|
||||
run: |
|
||||
check-wheel-contents dist/*.whl --ignore W007,W008
|
||||
|
||||
- name: Install wheel distributions
|
||||
run: |
|
||||
find ./dist/*.whl -maxdepth 1 -type f | xargs python -m pip install --force-reinstall --find-links=dist/
|
||||
|
||||
- name: Check wheel distributions
|
||||
run: |
|
||||
dbt --version
|
||||
|
||||
- name: Install source distributions
|
||||
# ignore dbt-1.0.0, which intentionally raises an error when installed from source
|
||||
run: |
|
||||
find ./dist/dbt-[a-z]*.gz -maxdepth 1 -type f | xargs python -m pip install --force-reinstall --find-links=dist/
|
||||
|
||||
- name: Check source distributions
|
||||
run: |
|
||||
dbt --version
|
||||
.github/workflows/model_performance.yml (265 lines, vendored)
@@ -1,265 +0,0 @@
|
||||
# **what?**
|
||||
# This workflow models the performance characteristics of a point in time in dbt.
|
||||
# It runs specific dbt commands on committed projects multiple times to create and
|
||||
# commit information about the distribution to the current branch. For more information
|
||||
# see the readme in the performance module at /performance/README.md.
|
||||
#
|
||||
# **why?**
|
||||
# When developing new features, we can take quick performance samples and compare
|
||||
# them against the committed baseline measurements produced by this workflow to detect
|
||||
# some performance regressions at development time before they reach users.
|
||||
#
|
||||
# **when?**
|
||||
# This is only run once directly after each release (for non-prereleases). If for some
|
||||
# reason the results of a run are not satisfactory, it can also be triggered manually.
|
||||
|
||||
name: Model Performance Characteristics
|
||||
|
||||
on:
|
||||
# runs after non-prereleases are published.
|
||||
release:
|
||||
types: [released]
|
||||
# run manually from the actions tab
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
release_id:
|
||||
description: 'dbt version to model (must be non-prerelease in Pypi)'
|
||||
type: string
|
||||
required: true
|
||||
|
||||
env:
|
||||
RUNNER_CACHE_PATH: performance/runner/target/release/runner
|
||||
|
||||
# both jobs need to write
|
||||
permissions:
|
||||
contents: write
|
||||
pull-requests: write
|
||||
|
||||
jobs:
|
||||
set-variables:
|
||||
name: Setting Variables
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
cache_key: ${{ steps.variables.outputs.cache_key }}
|
||||
release_id: ${{ steps.semver.outputs.base-version }}
|
||||
release_branch: ${{ steps.variables.outputs.release_branch }}
|
||||
steps:
|
||||
|
||||
# explicitly checkout the performance runner from main regardless of which
|
||||
# version we are modeling.
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
ref: main
|
||||
|
||||
- name: Parse version into parts
|
||||
id: semver
|
||||
uses: dbt-labs/actions/parse-semver@v1
|
||||
with:
|
||||
version: ${{ github.event.inputs.release_id || github.event.release.tag_name }}
|
||||
|
||||
# collect all the variables that need to be used in subsequent jobs
|
||||
- name: Set variables
|
||||
id: variables
|
||||
run: |
|
||||
# create a cache key that will be used in the next job. without this the
|
||||
# next job would have to checkout from main and hash the files itself.
|
||||
echo "cache_key=${{ runner.os }}-${{ hashFiles('performance/runner/Cargo.toml')}}-${{ hashFiles('performance/runner/src/*') }}" >> $GITHUB_OUTPUT
|
||||
|
||||
branch_name="${{steps.semver.outputs.major}}.${{steps.semver.outputs.minor}}.latest"
|
||||
echo "release_branch=$branch_name" >> $GITHUB_OUTPUT
|
||||
echo "release branch is inferred to be ${branch_name}"
|
||||
|
||||
latest-runner:
|
||||
name: Build or Fetch Runner
|
||||
runs-on: ubuntu-latest
|
||||
needs: [set-variables]
|
||||
env:
|
||||
RUSTFLAGS: "-D warnings"
|
||||
steps:
|
||||
- name: '[DEBUG] print variables'
|
||||
run: |
|
||||
echo "all variables defined in set-variables"
|
||||
echo "cache_key: ${{ needs.set-variables.outputs.cache_key }}"
|
||||
echo "release_id: ${{ needs.set-variables.outputs.release_id }}"
|
||||
echo "release_branch: ${{ needs.set-variables.outputs.release_branch }}"
|
||||
|
||||
# explicitly checkout the performance runner from main regardless of which
|
||||
# version we are modeling.
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
ref: main
|
||||
|
||||
# attempts to access a previously cached runner
|
||||
- uses: actions/cache@v3
|
||||
id: cache
|
||||
with:
|
||||
path: ${{ env.RUNNER_CACHE_PATH }}
|
||||
key: ${{ needs.set-variables.outputs.cache_key }}
|
||||
|
||||
- name: Fetch Rust Toolchain
|
||||
if: steps.cache.outputs.cache-hit != 'true'
|
||||
uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
profile: minimal
|
||||
toolchain: stable
|
||||
override: true
|
||||
|
||||
- name: Add fmt
|
||||
if: steps.cache.outputs.cache-hit != 'true'
|
||||
run: rustup component add rustfmt
|
||||
|
||||
- name: Cargo fmt
|
||||
if: steps.cache.outputs.cache-hit != 'true'
|
||||
uses: actions-rs/cargo@v1
|
||||
with:
|
||||
command: fmt
|
||||
args: --manifest-path performance/runner/Cargo.toml --all -- --check
|
||||
|
||||
- name: Test
|
||||
if: steps.cache.outputs.cache-hit != 'true'
|
||||
uses: actions-rs/cargo@v1
|
||||
with:
|
||||
command: test
|
||||
args: --manifest-path performance/runner/Cargo.toml
|
||||
|
||||
- name: Build (optimized)
|
||||
if: steps.cache.outputs.cache-hit != 'true'
|
||||
uses: actions-rs/cargo@v1
|
||||
with:
|
||||
command: build
|
||||
args: --release --manifest-path performance/runner/Cargo.toml
|
||||
# the cache action automatically caches this binary at the end of the job
|
||||
|
||||
model:
|
||||
# depends on `latest-runner` as a separate job so that failures in this job do not prevent
|
||||
# a successfully tested and built binary from being cached.
|
||||
needs: [set-variables, latest-runner]
|
||||
name: Model a release
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
|
||||
- name: '[DEBUG] print variables'
|
||||
run: |
|
||||
echo "all variables defined in set-variables"
|
||||
echo "cache_key: ${{ needs.set-variables.outputs.cache_key }}"
|
||||
echo "release_id: ${{ needs.set-variables.outputs.release_id }}"
|
||||
echo "release_branch: ${{ needs.set-variables.outputs.release_branch }}"
|
||||
|
||||
- name: Setup Python
|
||||
uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: "3.8"
|
||||
|
||||
- name: Install dbt
|
||||
run: pip install dbt-postgres==${{ needs.set-variables.outputs.release_id }}
|
||||
|
||||
- name: Install Hyperfine
|
||||
run: wget https://github.com/sharkdp/hyperfine/releases/download/v1.11.0/hyperfine_1.11.0_amd64.deb && sudo dpkg -i hyperfine_1.11.0_amd64.deb
|
||||
|
||||
# explicitly checkout main to get the latest project definitions
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
ref: main
|
||||
|
||||
# this was built in the previous job so it will be there.
|
||||
- name: Fetch Runner
|
||||
uses: actions/cache@v3
|
||||
id: cache
|
||||
with:
|
||||
path: ${{ env.RUNNER_CACHE_PATH }}
|
||||
key: ${{ needs.set-variables.outputs.cache_key }}
|
||||
|
||||
- name: Move Runner
|
||||
run: mv performance/runner/target/release/runner performance/app
|
||||
|
||||
- name: Change Runner Permissions
|
||||
run: chmod +x ./performance/app
|
||||
|
||||
- name: '[DEBUG] ls baseline directory before run'
|
||||
run: ls -R performance/baselines/
|
||||
|
||||
# `${{ github.workspace }}` is used to pass the absolute path
|
||||
- name: Create directories
|
||||
run: |
|
||||
mkdir ${{ github.workspace }}/performance/tmp/
|
||||
mkdir -p performance/baselines/${{ needs.set-variables.outputs.release_id }}/
|
||||
|
||||
# Run modeling with taking 20 samples
|
||||
- name: Run Measurement
|
||||
run: |
|
||||
performance/app model -v ${{ needs.set-variables.outputs.release_id }} -b ${{ github.workspace }}/performance/baselines/ -p ${{ github.workspace }}/performance/projects/ -t ${{ github.workspace }}/performance/tmp/ -n 20
|
||||
|
||||
- name: '[DEBUG] ls baseline directory after run'
|
||||
run: ls -R performance/baselines/
|
||||
|
||||
- uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: baseline
|
||||
path: performance/baselines/${{ needs.set-variables.outputs.release_id }}/
|
||||
|
||||
create-pr:
|
||||
name: Open PR for ${{ matrix.base-branch }}
|
||||
|
||||
# depends on `model` as a separate job so that the baseline can be committed to more than one branch
|
||||
# i.e. release branch and main
|
||||
needs: [set-variables, latest-runner, model]
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
strategy:
|
||||
matrix:
|
||||
include:
|
||||
- base-branch: refs/heads/main
|
||||
target-branch: performance-bot/main_${{ needs.set-variables.outputs.release_id }}_${{GITHUB.RUN_ID}}
|
||||
- base-branch: refs/heads/${{ needs.set-variables.outputs.release_branch }}
|
||||
target-branch: performance-bot/release_${{ needs.set-variables.outputs.release_id }}_${{GITHUB.RUN_ID}}
|
||||
|
||||
steps:
|
||||
- name: '[DEBUG] print variables'
|
||||
run: |
|
||||
echo "all variables defined in set-variables"
|
||||
echo "cache_key: ${{ needs.set-variables.outputs.cache_key }}"
|
||||
echo "release_id: ${{ needs.set-variables.outputs.release_id }}"
|
||||
echo "release_branch: ${{ needs.set-variables.outputs.release_branch }}"
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
ref: ${{ matrix.base-branch }}
|
||||
|
||||
- name: Create PR branch
|
||||
run: |
|
||||
git checkout -b ${{ matrix.target-branch }}
|
||||
git push origin ${{ matrix.target-branch }}
|
||||
git branch --set-upstream-to=origin/${{ matrix.target-branch }} ${{ matrix.target-branch }}
|
||||
|
||||
- uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: baseline
|
||||
path: performance/baselines/${{ needs.set-variables.outputs.release_id }}
|
||||
|
||||
- name: '[DEBUG] ls baselines after artifact download'
|
||||
run: ls -R performance/baselines/
|
||||
|
||||
- name: Commit baseline
|
||||
uses: EndBug/add-and-commit@v9
|
||||
with:
|
||||
add: 'performance/baselines/*'
|
||||
author_name: 'Github Build Bot'
|
||||
author_email: 'buildbot@fishtownanalytics.com'
|
||||
message: 'adding performance baseline for ${{ needs.set-variables.outputs.release_id }}'
|
||||
push: 'origin origin/${{ matrix.target-branch }}'
|
||||
|
||||
- name: Create Pull Request
|
||||
uses: peter-evans/create-pull-request@v5
|
||||
with:
|
||||
author: 'Github Build Bot <buildbot@fishtownanalytics.com>'
|
||||
base: ${{ matrix.base-branch }}
|
||||
branch: '${{ matrix.target-branch }}'
|
||||
title: 'Adding performance modeling for ${{needs.set-variables.outputs.release_id}} to ${{ matrix.base-branch }}'
|
||||
body: 'Committing perf results for tracking for the ${{needs.set-variables.outputs.release_id}}'
|
||||
labels: |
|
||||
Skip Changelog
|
||||
Performance
|
||||
.github/workflows/nightly-release.yml (109 lines, vendored)
@@ -1,109 +0,0 @@
|
||||
# **what?**
|
||||
# Nightly releases to GitHub and PyPI. This workflow produces the following outcome:
|
||||
# - generate and validate data for the nightly release (commit SHA, version number, release branch);
|
||||
# - pass data to release workflow;
|
||||
# - nightly release will be pushed to GitHub as a draft release;
|
||||
# - nightly build will be pushed to test PyPI;
|
||||
#
|
||||
# **why?**
|
||||
# Ensure an automated and tested release process for nightly builds
|
||||
#
|
||||
# **when?**
|
||||
# This workflow runs on schedule or can be run manually on demand.
|
||||
|
||||
name: Nightly Test Release to GitHub and PyPI
|
||||
|
||||
on:
|
||||
workflow_dispatch: # for manual triggering
|
||||
schedule:
|
||||
- cron: 0 9 * * *
|
||||
|
||||
permissions:
|
||||
contents: write # this is the permission that allows creating a new release
|
||||
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
|
||||
env:
|
||||
RELEASE_BRANCH: "main"
|
||||
|
||||
jobs:
|
||||
aggregate-release-data:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
outputs:
|
||||
commit_sha: ${{ steps.resolve-commit-sha.outputs.release_commit }}
|
||||
version_number: ${{ steps.nightly-release-version.outputs.number }}
|
||||
release_branch: ${{ steps.release-branch.outputs.name }}
|
||||
|
||||
steps:
|
||||
- name: "Checkout ${{ github.repository }} Branch ${{ env.RELEASE_BRANCH }}"
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
ref: ${{ env.RELEASE_BRANCH }}
|
||||
|
||||
- name: "Resolve Commit To Release"
|
||||
id: resolve-commit-sha
|
||||
run: |
|
||||
commit_sha=$(git rev-parse HEAD)
|
||||
echo "release_commit=$commit_sha" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: "Get Current Version Number"
|
||||
id: version-number-sources
|
||||
run: |
|
||||
current_version=`awk -F"current_version = " '{print $2}' .bumpversion.cfg | tr '\n' ' '`
|
||||
echo "current_version=$current_version" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: "Audit Version And Parse Into Parts"
|
||||
id: semver
|
||||
uses: dbt-labs/actions/parse-semver@v1.1.0
|
||||
with:
|
||||
version: ${{ steps.version-number-sources.outputs.current_version }}
|
||||
|
||||
- name: "Get Current Date"
|
||||
id: current-date
|
||||
run: echo "date=$(date +'%m%d%Y')" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: "Generate Nightly Release Version Number"
|
||||
id: nightly-release-version
|
||||
run: |
|
||||
number="${{ steps.semver.outputs.version }}.dev${{ steps.current-date.outputs.date }}"
|
||||
echo "number=$number" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: "Audit Nightly Release Version And Parse Into Parts"
|
||||
uses: dbt-labs/actions/parse-semver@v1.1.0
|
||||
with:
|
||||
version: ${{ steps.nightly-release-version.outputs.number }}
|
||||
|
||||
- name: "Set Release Branch"
|
||||
id: release-branch
|
||||
run: |
|
||||
echo "name=${{ env.RELEASE_BRANCH }}" >> $GITHUB_OUTPUT
|
||||
|
||||
log-outputs-aggregate-release-data:
|
||||
runs-on: ubuntu-latest
|
||||
needs: [aggregate-release-data]
|
||||
|
||||
steps:
|
||||
- name: "[DEBUG] Log Outputs"
|
||||
run: |
|
||||
echo commit_sha : ${{ needs.aggregate-release-data.outputs.commit_sha }}
|
||||
echo version_number: ${{ needs.aggregate-release-data.outputs.version_number }}
|
||||
echo release_branch: ${{ needs.aggregate-release-data.outputs.release_branch }}
|
||||
|
||||
release-github-pypi:
|
||||
needs: [aggregate-release-data]
|
||||
|
||||
uses: ./.github/workflows/release.yml
|
||||
with:
|
||||
sha: ${{ needs.aggregate-release-data.outputs.commit_sha }}
|
||||
target_branch: ${{ needs.aggregate-release-data.outputs.release_branch }}
|
||||
version_number: ${{ needs.aggregate-release-data.outputs.version_number }}
|
||||
build_script_path: "scripts/build-dist.sh"
|
||||
env_setup_script_path: "scripts/env-setup.sh"
|
||||
s3_bucket_name: "core-team-artifacts"
|
||||
package_test_command: "dbt --version"
|
||||
test_run: true
|
||||
nightly_release: true
|
||||
secrets: inherit
|
||||
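For concreteness, the nightly version number assembled above is just the current semver with a .dev<MMDDYYYY> suffix. A small sketch with a hypothetical base version:

# illustrative only: how the nightly release number is assembled (base version is hypothetical)
from datetime import date

current_version = "1.6.0a1"  # e.g. what parse-semver reports from .bumpversion.cfg
nightly_number = f"{current_version}.dev{date.today().strftime('%m%d%Y')}"
print(nightly_number)  # -> something like 1.6.0a1.dev02142023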
.github/workflows/release-branch-tests.yml (31 lines, vendored)
@@ -1,31 +0,0 @@
|
||||
# **what?**
|
||||
# The purpose of this workflow is to trigger CI to run for each
|
||||
# release branch and main branch on a regular cadence. If the CI workflow
|
||||
# fails for a branch, it will post to #dev-core-alerts to raise awareness.
|
||||
|
||||
# **why?**
|
||||
# Ensures release branches and main are always shippable and not broken.
|
||||
# Also, can catch any dependencies shifting beneath us that might
|
||||
# introduce breaking changes (could also impact Cloud).
|
||||
|
||||
# **when?**
|
||||
# Mainly on a schedule of 9:00, 13:00, 18:00 UTC every day.
|
||||
# Manual trigger can also test on demand
|
||||
|
||||
name: Release branch scheduled testing
|
||||
|
||||
on:
|
||||
schedule:
|
||||
- cron: '0 9,13,18 * * *' # 9:00, 13:00, 18:00 UTC
|
||||
|
||||
workflow_dispatch: # for manual triggering
|
||||
|
||||
# no special access is needed
|
||||
permissions: read-all
|
||||
|
||||
jobs:
|
||||
run_tests:
|
||||
uses: dbt-labs/actions/.github/workflows/release-branch-tests.yml@main
|
||||
with:
|
||||
workflows_to_run: '["main.yml"]'
|
||||
secrets: inherit
|
||||
.github/workflows/release-docker.yml (118 lines, vendored)
@@ -1,118 +0,0 @@
|
||||
# **what?**
|
||||
# This workflow will generate a series of docker images for dbt and push them to the github container registry
|
||||
|
||||
# **why?**
|
||||
# Docker images for dbt are used in a number of important places throughout the dbt ecosystem. This is how we keep those images up-to-date.
|
||||
|
||||
# **when?**
|
||||
# This is triggered manually
|
||||
|
||||
# **next steps**
|
||||
# - build this into the release workflow (or conversely, break out the different release methods into their own workflow files)
|
||||
|
||||
name: Docker release
|
||||
|
||||
permissions:
|
||||
packages: write
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
package:
|
||||
description: The package to release. _One_ of [dbt-core, dbt-redshift, dbt-bigquery, dbt-snowflake, dbt-spark, dbt-postgres]
|
||||
required: true
|
||||
version_number:
|
||||
description: The release version number (i.e. 1.0.0b1). Do not include `latest` tags or a leading `v`!
|
||||
required: true
|
||||
|
||||
jobs:
|
||||
get_version_meta:
|
||||
name: Get version meta
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
major: ${{ steps.version.outputs.major }}
|
||||
minor: ${{ steps.version.outputs.minor }}
|
||||
patch: ${{ steps.version.outputs.patch }}
|
||||
latest: ${{ steps.latest.outputs.latest }}
|
||||
minor_latest: ${{ steps.latest.outputs.minor_latest }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: Split version
|
||||
id: version
|
||||
run: |
|
||||
IFS="." read -r MAJOR MINOR PATCH <<< ${{ github.event.inputs.version_number }}
|
||||
echo "major=$MAJOR" >> $GITHUB_OUTPUT
|
||||
echo "minor=$MINOR" >> $GITHUB_OUTPUT
|
||||
echo "patch=$PATCH" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Is pkg 'latest'
|
||||
id: latest
|
||||
uses: ./.github/actions/latest-wrangler
|
||||
with:
|
||||
package: ${{ github.event.inputs.package }}
|
||||
new_version: ${{ github.event.inputs.version_number }}
|
||||
gh_token: ${{ secrets.GITHUB_TOKEN }}
|
||||
halt_on_missing: False
|
||||
|
||||
setup_image_builder:
|
||||
name: Set up docker image builder
|
||||
runs-on: ubuntu-latest
|
||||
needs: [get_version_meta]
|
||||
steps:
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
|
||||
build_and_push:
|
||||
name: Build images and push to GHCR
|
||||
runs-on: ubuntu-latest
|
||||
needs: [setup_image_builder, get_version_meta]
|
||||
steps:
|
||||
- name: Get docker build arg
|
||||
id: build_arg
|
||||
run: |
|
||||
BUILD_ARG_NAME=$(echo ${{ github.event.inputs.package }} | sed 's/\-/_/g')
|
||||
BUILD_ARG_VALUE=$(echo ${{ github.event.inputs.package }} | sed 's/postgres/core/g')
|
||||
echo "build_arg_name=$BUILD_ARG_NAME" >> $GITHUB_OUTPUT
|
||||
echo "build_arg_value=$BUILD_ARG_VALUE" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Log in to the GHCR
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Build and push MAJOR.MINOR.PATCH tag
|
||||
uses: docker/build-push-action@v4
|
||||
with:
|
||||
file: docker/Dockerfile
|
||||
push: True
|
||||
target: ${{ github.event.inputs.package }}
|
||||
build-args: |
|
||||
${{ steps.build_arg.outputs.build_arg_name }}_ref=${{ steps.build_arg.outputs.build_arg_value }}@v${{ github.event.inputs.version_number }}
|
||||
tags: |
|
||||
ghcr.io/dbt-labs/${{ github.event.inputs.package }}:${{ github.event.inputs.version_number }}
|
||||
|
||||
- name: Build and push MINOR.latest tag
|
||||
uses: docker/build-push-action@v4
|
||||
if: ${{ needs.get_version_meta.outputs.minor_latest == 'True' }}
|
||||
with:
|
||||
file: docker/Dockerfile
|
||||
push: True
|
||||
target: ${{ github.event.inputs.package }}
|
||||
build-args: |
|
||||
${{ steps.build_arg.outputs.build_arg_name }}_ref=${{ steps.build_arg.outputs.build_arg_value }}@v${{ github.event.inputs.version_number }}
|
||||
tags: |
|
||||
ghcr.io/dbt-labs/${{ github.event.inputs.package }}:${{ needs.get_version_meta.outputs.major }}.${{ needs.get_version_meta.outputs.minor }}.latest
|
||||
|
||||
- name: Build and push latest tag
|
||||
uses: docker/build-push-action@v4
|
||||
if: ${{ needs.get_version_meta.outputs.latest == 'True' }}
|
||||
with:
|
||||
file: docker/Dockerfile
|
||||
push: True
|
||||
target: ${{ github.event.inputs.package }}
|
||||
build-args: |
|
||||
${{ steps.build_arg.outputs.build_arg_name }}_ref=${{ steps.build_arg.outputs.build_arg_value }}@v${{ github.event.inputs.version_number }}
|
||||
tags: |
|
||||
ghcr.io/dbt-labs/${{ github.event.inputs.package }}:latest
|
||||
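To make the Docker tag fan-out above concrete, here is a small sketch (mirroring the "Split version" step and the three build-and-push steps) of the tags pushed for a hypothetical dbt-postgres 1.5.2 release that the latest-wrangler marks as both minor-latest and latest:

# hypothetical example of the image tags produced by the release-docker workflow above
package, version_number = "dbt-postgres", "1.5.2"
major, minor, patch = version_number.split(".")
minor_latest, latest = True, True  # as reported by the latest-wrangler action

tags = [f"ghcr.io/dbt-labs/{package}:{version_number}"]  # always pushed
if minor_latest:
    tags.append(f"ghcr.io/dbt-labs/{package}:{major}.{minor}.latest")
if latest:
    tags.append(f"ghcr.io/dbt-labs/{package}:latest")
print(tags)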
.github/workflows/release.yml (229 lines, vendored)
@@ -1,229 +0,0 @@
|
||||
# **what?**
|
||||
# Release workflow provides the following steps:
|
||||
# - checkout the given commit;
|
||||
# - validate version in sources and changelog file for given version;
|
||||
# - bump the version and generate a changelog if needed;
|
||||
# - merge all changes to the target branch if needed;
|
||||
# - run unit and integration tests against given commit;
|
||||
# - build and package that SHA;
|
||||
# - release it to GitHub and PyPI with that specific build;
|
||||
#
|
||||
# **why?**
|
||||
# Ensure an automated and tested release process
|
||||
#
|
||||
# **when?**
|
||||
# This workflow can be run manually on demand or can be called by other workflows
|
||||
|
||||
name: Release to GitHub and PyPI
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
sha:
|
||||
description: "The last commit sha in the release"
|
||||
type: string
|
||||
required: true
|
||||
target_branch:
|
||||
description: "The branch to release from"
|
||||
type: string
|
||||
required: true
|
||||
version_number:
|
||||
description: "The release version number (i.e. 1.0.0b1)"
|
||||
type: string
|
||||
required: true
|
||||
build_script_path:
|
||||
description: "Build script path"
|
||||
type: string
|
||||
default: "scripts/build-dist.sh"
|
||||
required: true
|
||||
env_setup_script_path:
|
||||
description: "Environment setup script path"
|
||||
type: string
|
||||
default: "scripts/env-setup.sh"
|
||||
required: false
|
||||
s3_bucket_name:
|
||||
description: "AWS S3 bucket name"
|
||||
type: string
|
||||
default: "core-team-artifacts"
|
||||
required: true
|
||||
package_test_command:
|
||||
description: "Package test command"
|
||||
type: string
|
||||
default: "dbt --version"
|
||||
required: true
|
||||
test_run:
|
||||
description: "Test run (Publish release as draft)"
|
||||
type: boolean
|
||||
default: true
|
||||
required: false
|
||||
nightly_release:
|
||||
description: "Nightly release to dev environment"
|
||||
type: boolean
|
||||
default: false
|
||||
required: false
|
||||
workflow_call:
|
||||
inputs:
|
||||
sha:
|
||||
description: "The last commit sha in the release"
|
||||
type: string
|
||||
required: true
|
||||
target_branch:
|
||||
description: "The branch to release from"
|
||||
type: string
|
||||
required: true
|
||||
version_number:
|
||||
description: "The release version number (i.e. 1.0.0b1)"
|
||||
type: string
|
||||
required: true
|
||||
build_script_path:
|
||||
description: "Build script path"
|
||||
type: string
|
||||
default: "scripts/build-dist.sh"
|
||||
required: true
|
||||
env_setup_script_path:
|
||||
description: "Environment setup script path"
|
||||
type: string
|
||||
default: "scripts/env-setup.sh"
|
||||
required: false
|
||||
s3_bucket_name:
|
||||
description: "AWS S3 bucket name"
|
||||
type: string
|
||||
default: "core-team-artifacts"
|
||||
required: true
|
||||
package_test_command:
|
||||
description: "Package test command"
|
||||
type: string
|
||||
default: "dbt --version"
|
||||
required: true
|
||||
test_run:
|
||||
description: "Test run (Publish release as draft)"
|
||||
type: boolean
|
||||
default: true
|
||||
required: false
|
||||
nightly_release:
|
||||
description: "Nightly release to dev environment"
|
||||
type: boolean
|
||||
default: false
|
||||
required: false
|
||||
|
||||
permissions:
|
||||
contents: write # this is the permission that allows creating a new release
|
||||
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
|
||||
jobs:
|
||||
log-inputs:
|
||||
name: Log Inputs
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: "[DEBUG] Print Variables"
|
||||
run: |
|
||||
echo The last commit sha in the release: ${{ inputs.sha }}
|
||||
echo The branch to release from: ${{ inputs.target_branch }}
|
||||
echo The release version number: ${{ inputs.version_number }}
|
||||
echo Build script path: ${{ inputs.build_script_path }}
|
||||
echo Environment setup script path: ${{ inputs.env_setup_script_path }}
|
||||
echo AWS S3 bucket name: ${{ inputs.s3_bucket_name }}
|
||||
echo Package test command: ${{ inputs.package_test_command }}
|
||||
echo Test run: ${{ inputs.test_run }}
|
||||
echo Nightly release: ${{ inputs.nightly_release }}
|
||||
|
||||
bump-version-generate-changelog:
|
||||
name: Bump package version, Generate changelog
|
||||
|
||||
uses: dbt-labs/dbt-release/.github/workflows/release-prep.yml@main
|
||||
|
||||
with:
|
||||
sha: ${{ inputs.sha }}
|
||||
version_number: ${{ inputs.version_number }}
|
||||
target_branch: ${{ inputs.target_branch }}
|
||||
env_setup_script_path: ${{ inputs.env_setup_script_path }}
|
||||
test_run: ${{ inputs.test_run }}
|
||||
nightly_release: ${{ inputs.nightly_release }}
|
||||
|
||||
secrets: inherit
|
||||
|
||||
log-outputs-bump-version-generate-changelog:
|
||||
name: "[Log output] Bump package version, Generate changelog"
|
||||
if: ${{ !failure() && !cancelled() }}
|
||||
|
||||
needs: [bump-version-generate-changelog]
|
||||
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Print variables
|
||||
run: |
|
||||
echo Final SHA : ${{ needs.bump-version-generate-changelog.outputs.final_sha }}
|
||||
echo Changelog path: ${{ needs.bump-version-generate-changelog.outputs.changelog_path }}
|
||||
|
||||
build-test-package:
|
||||
name: Build, Test, Package
|
||||
if: ${{ !failure() && !cancelled() }}
|
||||
needs: [bump-version-generate-changelog]
|
||||
|
||||
uses: dbt-labs/dbt-release/.github/workflows/build.yml@main
|
||||
|
||||
with:
|
||||
sha: ${{ needs.bump-version-generate-changelog.outputs.final_sha }}
|
||||
version_number: ${{ inputs.version_number }}
|
||||
changelog_path: ${{ needs.bump-version-generate-changelog.outputs.changelog_path }}
|
||||
build_script_path: ${{ inputs.build_script_path }}
|
||||
s3_bucket_name: ${{ inputs.s3_bucket_name }}
|
||||
package_test_command: ${{ inputs.package_test_command }}
|
||||
test_run: ${{ inputs.test_run }}
|
||||
nightly_release: ${{ inputs.nightly_release }}
|
||||
|
||||
secrets:
|
||||
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
|
||||
github-release:
|
||||
name: GitHub Release
|
||||
if: ${{ !failure() && !cancelled() }}
|
||||
|
||||
needs: [bump-version-generate-changelog, build-test-package]
|
||||
|
||||
uses: dbt-labs/dbt-release/.github/workflows/github-release.yml@main
|
||||
|
||||
with:
|
||||
sha: ${{ needs.bump-version-generate-changelog.outputs.final_sha }}
|
||||
version_number: ${{ inputs.version_number }}
|
||||
changelog_path: ${{ needs.bump-version-generate-changelog.outputs.changelog_path }}
|
||||
test_run: ${{ inputs.test_run }}
|
||||
|
||||
pypi-release:
|
||||
name: PyPI Release
|
||||
|
||||
needs: [github-release]
|
||||
|
||||
uses: dbt-labs/dbt-release/.github/workflows/pypi-release.yml@main
|
||||
|
||||
with:
|
||||
version_number: ${{ inputs.version_number }}
|
||||
test_run: ${{ inputs.test_run }}
|
||||
|
||||
secrets:
|
||||
PYPI_API_TOKEN: ${{ secrets.PYPI_API_TOKEN }}
|
||||
TEST_PYPI_API_TOKEN: ${{ secrets.TEST_PYPI_API_TOKEN }}
|
||||
|
||||
slack-notification:
|
||||
name: Slack Notification
|
||||
if: ${{ failure() && (!inputs.test_run || inputs.nightly_release) }}
|
||||
|
||||
needs:
|
||||
[
|
||||
bump-version-generate-changelog,
|
||||
build-test-package,
|
||||
github-release,
|
||||
pypi-release,
|
||||
]
|
||||
|
||||
uses: dbt-labs/dbt-release/.github/workflows/slack-post-notification.yml@main
|
||||
with:
|
||||
status: "failure"
|
||||
|
||||
secrets:
|
||||
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_DEV_CORE_ALERTS }}
|
||||
.github/workflows/schema-check.yml (90 lines, vendored)
@@ -1,90 +0,0 @@
|
||||
# **what?**
|
||||
# Compares the schema of the dbt version of the given ref vs
|
||||
# the latest official schema releases found in schemas.getdbt.com.
|
||||
# If there are differences, the workflow will fail and upload the
|
||||
# diff as an artifact. The metadata team should be alerted to the change.
|
||||
#
|
||||
# **why?**
|
||||
# Reaction work may need to be done if artifact schema changes
|
||||
# occur so we want to proactively alert to it.
|
||||
#
|
||||
# **when?**
|
||||
# On pushes to `develop` and release branches. Manual runs are also enabled.
|
||||
name: Artifact Schema Check
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
pull_request: #TODO: remove before merging
|
||||
push:
|
||||
branches:
|
||||
- "develop"
|
||||
- "*.latest"
|
||||
- "releases/*"
|
||||
|
||||
# no special access is needed
|
||||
permissions: read-all
|
||||
|
||||
env:
|
||||
LATEST_SCHEMA_PATH: ${{ github.workspace }}/new_schemas
|
||||
SCHEMA_DIFF_ARTIFACT: ${{ github.workspace }}//schema_schanges.txt
|
||||
DBT_REPO_DIRECTORY: ${{ github.workspace }}/dbt
|
||||
SCHEMA_REPO_DIRECTORY: ${{ github.workspace }}/schemas.getdbt.com
|
||||
|
||||
jobs:
|
||||
checking-schemas:
|
||||
name: "Checking schemas"
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: 3.8
|
||||
|
||||
- name: Checkout dbt repo
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
path: ${{ env.DBT_REPO_DIRECTORY }}
|
||||
|
||||
- name: Checkout schemas.getdbt.com repo
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
repository: dbt-labs/schemas.getdbt.com
|
||||
ref: 'main'
|
||||
ssh-key: ${{ secrets.SCHEMA_SSH_PRIVATE_KEY }}
|
||||
path: ${{ env.SCHEMA_REPO_DIRECTORY }}
|
||||
|
||||
- name: Generate current schema
|
||||
run: |
|
||||
cd ${{ env.DBT_REPO_DIRECTORY }}
|
||||
python3 -m venv env
|
||||
source env/bin/activate
|
||||
pip install --upgrade pip
|
||||
pip install -r dev-requirements.txt -r editable-requirements.txt
|
||||
python scripts/collect-artifact-schema.py --path ${{ env.LATEST_SCHEMA_PATH }}
|
||||
|
||||
# Copy generated schema files into the schemas.getdbt.com repo
|
||||
# Do a git diff to find any changes
|
||||
# Ignore any date or version changes though
|
||||
- name: Compare schemas
|
||||
run: |
|
||||
cp -r ${{ env.LATEST_SCHEMA_PATH }}/dbt ${{ env.SCHEMA_REPO_DIRECTORY }}
|
||||
cd ${{ env.SCHEMA_REPO_DIRECTORY }}
|
||||
diff_results=$(git diff -I='*[0-9]{4}-(0[1-9]|1[0-2])-(0[1-9]|[1-2][0-9]|3[0-1])T' \
|
||||
-I='*[0-9]{1}.[0-9]{2}.[0-9]{1}(rc[0-9]|b[0-9]| )' --compact-summary)
|
||||
if [[ -n "$diff_results" ]]; then
|
||||
echo $diff_results
|
||||
echo "Schema changes detected!"
|
||||
git diff -I='*[0-9]{4}-(0[1-9]|1[0-2])-(0[1-9]|[1-2][0-9]|3[0-1])T' \
|
||||
-I='*[0-9]{1}.[0-9]{2}.[0-9]{1}(rc[0-9]|b[0-9]| )' > ${{ env.SCHEMA_DIFF_ARTIFACT }}
|
||||
exit 1
|
||||
else
|
||||
echo "No schema changes detected"
|
||||
fi
|
||||
|
||||
- name: Upload schema diff
|
||||
uses: actions/upload-artifact@v3
|
||||
if: ${{ failure() }}
|
||||
with:
|
||||
name: 'schema_schanges.txt'
|
||||
path: '${{ env.SCHEMA_DIFF_ARTIFACT }}'
|
||||
.github/workflows/stale.yml (12 lines, vendored)
@@ -1,12 +0,0 @@
|
||||
name: "Close stale issues and PRs"
|
||||
on:
|
||||
schedule:
|
||||
- cron: "30 1 * * *"
|
||||
|
||||
permissions:
|
||||
issues: write
|
||||
pull-requests: write
|
||||
|
||||
jobs:
|
||||
stale:
|
||||
uses: dbt-labs/actions/.github/workflows/stale-bot-matrix.yml@main
|
||||
@@ -1,67 +0,0 @@
|
||||
# This Action makes a dbt run to sample json structured logs
|
||||
# and checks that they conform to the currently documented schema.
|
||||
#
|
||||
# If this action fails it either means we have unintentionally deviated
|
||||
# from our documented structured logging schema, or we need to bump the
|
||||
# version of our structured logging and add new documentation to
|
||||
# communicate these changes.
|
||||
|
||||
name: Structured Logging Schema Check
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- "main"
|
||||
- "*.latest"
|
||||
- "releases/*"
|
||||
pull_request:
|
||||
workflow_dispatch:
|
||||
|
||||
permissions: read-all
|
||||
|
||||
jobs:
|
||||
# run the performance measurements on the current or default branch
|
||||
test-schema:
|
||||
name: Test Log Schema
|
||||
runs-on: ubuntu-20.04
|
||||
env:
|
||||
# turns warnings into errors
|
||||
RUSTFLAGS: "-D warnings"
|
||||
# points tests to the log file
|
||||
LOG_DIR: "/home/runner/work/dbt-core/dbt-core/logs"
|
||||
# tells integration tests to output into json format
|
||||
DBT_LOG_FORMAT: "json"
|
||||
# tell eventmgr to convert logging events into bytes
|
||||
DBT_TEST_BINARY_SERIALIZATION: "true"
|
||||
# Additional test users
|
||||
DBT_TEST_USER_1: dbt_test_user_1
|
||||
DBT_TEST_USER_2: dbt_test_user_2
|
||||
DBT_TEST_USER_3: dbt_test_user_3
|
||||
|
||||
steps:
|
||||
- name: checkout dev
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
- name: Setup Python
|
||||
uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: "3.8"
|
||||
|
||||
- name: Install python dependencies
|
||||
run: |
|
||||
pip install --user --upgrade pip
|
||||
pip --version
|
||||
pip install tox
|
||||
tox --version
|
||||
|
||||
- name: Set up postgres
|
||||
uses: ./.github/actions/setup-postgres-linux
|
||||
|
||||
- name: ls
|
||||
run: ls
|
||||
|
||||
# integration tests generate a ton of logs in different files. the next step will find them all.
|
||||
# we actually care if these pass, because the normal test run doesn't usually include many json log outputs
|
||||
- name: Run integration tests
|
||||
run: tox -e integration -- -nauto
|
||||
.github/workflows/test-repeater.yml
@@ -1,155 +0,0 @@
|
||||
# **what?**
|
||||
# This workflow will run the test(s) at the input path a given number of times to determine whether they are flaky. You can test with any supported OS/Python combination.
|
||||
# This is batched in 10 to allow more test iterations faster.
|
||||
|
||||
# **why?**
|
||||
# Testing if a test is flaky and if a previously flaky test has been fixed. This allows easy testing on supported python versions and OS combinations.
|
||||
|
||||
# **when?**
|
||||
# This is triggered manually from dbt-core.
|
||||
|
||||
name: Flaky Tester
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
branch:
|
||||
description: 'Branch to check out'
|
||||
type: string
|
||||
required: true
|
||||
default: 'main'
|
||||
test_path:
|
||||
description: 'Path to single test to run (ex: tests/functional/retry/test_retry.py::TestRetry::test_fail_fast)'
|
||||
type: string
|
||||
required: true
|
||||
default: 'tests/functional/...'
|
||||
python_version:
|
||||
description: 'Version of Python to Test Against'
|
||||
type: choice
|
||||
options:
|
||||
- '3.8'
|
||||
- '3.9'
|
||||
- '3.10'
|
||||
- '3.11'
|
||||
os:
|
||||
description: 'OS to run test in'
|
||||
type: choice
|
||||
options:
|
||||
- 'ubuntu-latest'
|
||||
- 'macos-latest'
|
||||
- 'windows-latest'
|
||||
num_runs_per_batch:
|
||||
description: 'Max number of times to run the test per batch. We always run 10 batches.'
|
||||
type: number
|
||||
required: true
|
||||
default: '50'
|
||||
|
||||
permissions: read-all
|
||||
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
|
||||
jobs:
|
||||
debug:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: "[DEBUG] Output Inputs"
|
||||
run: |
|
||||
echo "Branch: ${{ inputs.branch }}"
|
||||
echo "test_path: ${{ inputs.test_path }}"
|
||||
echo "python_version: ${{ inputs.python_version }}"
|
||||
echo "os: ${{ inputs.os }}"
|
||||
echo "num_runs_per_batch: ${{ inputs.num_runs_per_batch }}"
|
||||
|
||||
pytest:
|
||||
runs-on: ${{ inputs.os }}
|
||||
strategy:
|
||||
# run all batches, even if one fails. This informs how flaky the test may be.
|
||||
fail-fast: false
|
||||
# using a matrix to speed up the jobs since the matrix will run in parallel when runners are available
|
||||
matrix:
|
||||
batch: ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"]
|
||||
env:
|
||||
PYTEST_ADDOPTS: "-v --color=yes -n4 --csv integration_results.csv"
|
||||
DBT_TEST_USER_1: dbt_test_user_1
|
||||
DBT_TEST_USER_2: dbt_test_user_2
|
||||
DBT_TEST_USER_3: dbt_test_user_3
|
||||
DD_CIVISIBILITY_AGENTLESS_ENABLED: true
|
||||
DD_API_KEY: ${{ secrets.DATADOG_API_KEY }}
|
||||
DD_SITE: datadoghq.com
|
||||
DD_ENV: ci
|
||||
DD_SERVICE: ${{ github.event.repository.name }}
|
||||
|
||||
steps:
|
||||
- name: "Checkout code"
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
ref: ${{ inputs.branch }}
|
||||
|
||||
- name: "Setup Python"
|
||||
uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: "${{ inputs.python_version }}"
|
||||
|
||||
- name: "Setup Dev Environment"
|
||||
run: make dev
|
||||
|
||||
- name: "Set up postgres (linux)"
|
||||
if: inputs.os == 'ubuntu-latest'
|
||||
run: make setup-db
|
||||
|
||||
# mac and windows don't use make due to limitations with docker with those runners in GitHub
|
||||
- name: "Set up postgres (macos)"
|
||||
if: inputs.os == 'macos-latest'
|
||||
uses: ./.github/actions/setup-postgres-macos
|
||||
|
||||
- name: "Set up postgres (windows)"
|
||||
if: inputs.os == 'windows-latest'
|
||||
uses: ./.github/actions/setup-postgres-windows
|
||||
|
||||
- name: "Test Command"
|
||||
id: command
|
||||
run: |
|
||||
test_command="python -m pytest ${{ inputs.test_path }}"
|
||||
echo "test_command=$test_command" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: "Run test ${{ inputs.num_runs_per_batch }} times"
|
||||
id: pytest
|
||||
run: |
|
||||
set +e
|
||||
for ((i=1; i<=${{ inputs.num_runs_per_batch }}; i++))
|
||||
do
|
||||
echo "Running pytest iteration $i..."
|
||||
python -m pytest --ddtrace ${{ inputs.test_path }}
|
||||
exit_code=$?
|
||||
|
||||
if [[ $exit_code -eq 0 ]]; then
|
||||
success=$((success + 1))
|
||||
echo "Iteration $i: Success"
|
||||
else
|
||||
failure=$((failure + 1))
|
||||
echo "Iteration $i: Failure"
|
||||
fi
|
||||
|
||||
echo
|
||||
echo "==========================="
|
||||
echo "Successful runs: $success"
|
||||
echo "Failed runs: $failure"
|
||||
echo "==========================="
|
||||
echo
|
||||
done
|
||||
|
||||
echo "failure=$failure" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: "Success and Failure Summary: ${{ inputs.os }}/Python ${{ inputs.python_version }}"
|
||||
run: |
|
||||
echo "Batch: ${{ matrix.batch }}"
|
||||
echo "Successful runs: ${{ steps.pytest.outputs.success }}"
|
||||
echo "Failed runs: ${{ steps.pytest.outputs.failure }}"
|
||||
|
||||
- name: "Error for Failures"
|
||||
if: ${{ steps.pytest.outputs.failure }}
|
||||
run: |
|
||||
echo "Batch ${{ matrix.batch }} failed ${{ steps.pytest.outputs.failure }} of ${{ inputs.num_runs_per_batch }} tests"
|
||||
exit 1
|
||||
.github/workflows/test/.actrc
@@ -1 +0,0 @@
|
||||
-P ubuntu-latest=ghcr.io/catthehacker/ubuntu:act-latest
|
||||
.github/workflows/test/.gitignore
@@ -1 +0,0 @@
|
||||
.secrets
|
||||
.github/workflows/test/.secrets.EXAMPLE
@@ -1 +0,0 @@
|
||||
GITHUB_TOKEN=GH_PERSONAL_ACCESS_TOKEN_GOES_HERE
|
||||
@@ -1,6 +0,0 @@
|
||||
{
|
||||
"inputs": {
|
||||
"version_number": "1.0.1",
|
||||
"package": "dbt-postgres"
|
||||
}
|
||||
}
|
||||
.github/workflows/triage-labels.yml
@@ -1,31 +0,0 @@
|
||||
# **what?**
|
||||
# When the core team triages, we sometimes need more information from the issue creator. In
|
||||
# those cases we remove the `triage` label and add the `awaiting_response` label. Once we
|
||||
# receive a response in the form of a comment, we want the `awaiting_response` label removed
|
||||
# in favor of the `triage` label so we are aware that the issue needs action.
|
||||
|
||||
# **why?**
|
||||
# To help with our team's triage issue tracking
|
||||
|
||||
# **when?**
|
||||
# This will run when a comment is added to an issue and that issue has the `awaiting_response` label.
|
||||
|
||||
name: Update Triage Label
|
||||
|
||||
on: issue_comment
|
||||
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
|
||||
permissions:
|
||||
issues: write
|
||||
|
||||
jobs:
|
||||
triage_label:
|
||||
if: contains(github.event.issue.labels.*.name, 'awaiting_response')
|
||||
uses: dbt-labs/actions/.github/workflows/swap-labels.yml@main
|
||||
with:
|
||||
add_label: "triage"
|
||||
remove_label: "awaiting_response"
|
||||
secrets: inherit
|
||||
.github/workflows/version-bump.yml
@@ -1,28 +0,0 @@
|
||||
# **what?**
|
||||
# This workflow will take the new version number to bump to. With that
|
||||
# it will run versionbump to update the version number everywhere in the
|
||||
# code base and then run changie to create the corresponding changelog.
|
||||
# A PR will be created with the changes that can be reviewed before committing.
|
||||
|
||||
# **why?**
|
||||
# This is to aid in releasing dbt and making sure we have updated
|
||||
# the version in all places and generated the changelog.
|
||||
|
||||
# **when?**
|
||||
# This is triggered manually
|
||||
|
||||
name: Version Bump
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
version_number:
|
||||
description: 'The version number to bump to (ex. 1.2.0, 1.3.0b1)'
|
||||
required: true
|
||||
|
||||
jobs:
|
||||
version_bump_and_changie:
|
||||
uses: dbt-labs/actions/.github/workflows/version-bump.yml@main
|
||||
with:
|
||||
version_number: ${{ inputs.version_number }}
|
||||
secrets: inherit # ok since what we are calling is internally maintained
|
||||
.gitignore
@@ -8,11 +8,9 @@ __pycache__/
|
||||
|
||||
# Distribution / packaging
|
||||
.Python
|
||||
env*/
|
||||
env/
|
||||
dbt_env/
|
||||
build/
|
||||
!tests/functional/build
|
||||
!core/dbt/docs/build
|
||||
develop-eggs/
|
||||
dist/
|
||||
downloads/
|
||||
@@ -26,11 +24,8 @@ var/
|
||||
*.egg-info/
|
||||
.installed.cfg
|
||||
*.egg
|
||||
.mypy_cache/
|
||||
.dmypy.json
|
||||
*.mypy_cache/
|
||||
logs/
|
||||
.user.yml
|
||||
profiles.yml
|
||||
|
||||
# PyInstaller
|
||||
# Usually these files are written by a python script from a template
|
||||
@@ -54,9 +49,9 @@ coverage.xml
|
||||
*,cover
|
||||
.hypothesis/
|
||||
test.env
|
||||
makefile.test.env
|
||||
*.pytest_cache/
|
||||
|
||||
# Mypy
|
||||
.mypy_cache/
|
||||
|
||||
# Translations
|
||||
*.mo
|
||||
@@ -71,10 +66,10 @@ docs/_build/
|
||||
# PyBuilder
|
||||
target/
|
||||
|
||||
# Ipython Notebook
|
||||
#Ipython Notebook
|
||||
.ipynb_checkpoints
|
||||
|
||||
# Emacs
|
||||
#Emacs
|
||||
*~
|
||||
|
||||
# Sublime Text
|
||||
@@ -83,7 +78,6 @@ target/
|
||||
# Vim
|
||||
*.sw*
|
||||
|
||||
# Pyenv
|
||||
.python-version
|
||||
|
||||
# Vim
|
||||
@@ -91,17 +85,11 @@ target/
|
||||
|
||||
# pycharm
|
||||
.idea/
|
||||
venv/
|
||||
|
||||
# AWS credentials
|
||||
.aws/
|
||||
|
||||
# MacOS
|
||||
.DS_Store
|
||||
|
||||
# vscode
|
||||
.vscode/
|
||||
*.code-workspace
|
||||
|
||||
# poetry
|
||||
poetry.lock
|
||||
|
||||
@@ -1,62 +0,0 @@
|
||||
# Configuration for pre-commit hooks (see https://pre-commit.com/).
|
||||
# Eventually the hooks described here will be run as tests before merging each PR.
|
||||
|
||||
exclude: ^(core/dbt/docs/build/|core/dbt/events/types_pb2.py)
|
||||
|
||||
# Force all unspecified python hooks to run python 3.8
|
||||
default_language_version:
|
||||
python: python3
|
||||
|
||||
repos:
|
||||
- repo: https://github.com/pre-commit/pre-commit-hooks
|
||||
rev: v3.2.0
|
||||
hooks:
|
||||
- id: check-yaml
|
||||
args: [--unsafe]
|
||||
- id: check-json
|
||||
- id: end-of-file-fixer
|
||||
- id: trailing-whitespace
|
||||
exclude_types:
|
||||
- "markdown"
|
||||
- id: check-case-conflict
|
||||
- repo: https://github.com/psf/black
|
||||
rev: 22.3.0
|
||||
hooks:
|
||||
- id: black
|
||||
- id: black
|
||||
alias: black-check
|
||||
stages: [manual]
|
||||
args:
|
||||
- "--check"
|
||||
- "--diff"
|
||||
- repo: https://github.com/pycqa/flake8
|
||||
rev: 4.0.1
|
||||
hooks:
|
||||
- id: flake8
|
||||
- id: flake8
|
||||
alias: flake8-check
|
||||
stages: [manual]
|
||||
- repo: https://github.com/pre-commit/mirrors-mypy
|
||||
rev: v1.3.0
|
||||
hooks:
|
||||
- id: mypy
|
||||
# N.B.: Mypy is... a bit fragile.
|
||||
#
|
||||
# By using `language: system` we run this hook in the local
|
||||
# environment instead of a pre-commit isolated one. This is needed
|
||||
# to ensure mypy correctly parses the project.
|
||||
|
||||
# It may cause trouble
|
||||
# in that it adds environmental variables out of our control to the
|
||||
# mix. Unfortunately, there's nothing we can do about that, per pre-commit's
|
||||
# author.
|
||||
# See https://github.com/pre-commit/pre-commit/issues/730 for details.
|
||||
args: [--show-error-codes]
|
||||
files: ^core/dbt/
|
||||
language: system
|
||||
- id: mypy
|
||||
alias: mypy-check
|
||||
stages: [manual]
|
||||
args: [--show-error-codes, --pretty]
|
||||
files: ^core/dbt/
|
||||
language: system
|
||||
@@ -1,54 +0,0 @@
|
||||
The core function of dbt is SQL compilation and execution. Users create projects of dbt resources (models, tests, seeds, snapshots, ...), defined in SQL and YAML files, and they invoke dbt to create, update, or query associated views and tables. Today, dbt makes heavy use of Jinja2 to enable the templating of SQL, and to construct a DAG (Directed Acyclic Graph) from all of the resources in a project. Users can also extend their projects by installing resources (including Jinja macros) from other projects, called "packages."
|
||||
|
||||
## dbt-core
|
||||
|
||||
Most of the python code in the repository is within the `core/dbt` directory.
|
||||
- [`single python files`](core/dbt/README.md): A number of individual files, such as 'compilation.py' and 'exceptions.py'
|
||||
|
||||
The main subdirectories of core/dbt:
|
||||
- [`adapters`](core/dbt/adapters/README.md): Define base classes for behavior that is likely to differ across databases
|
||||
- [`clients`](core/dbt/clients/README.md): Interface with dependencies (agate, jinja) or across operating systems
|
||||
- [`config`](core/dbt/config/README.md): Reconcile user-supplied configuration from connection profiles, project files, and Jinja macros
|
||||
- [`context`](core/dbt/context/README.md): Build and expose dbt-specific Jinja functionality
|
||||
- [`contracts`](core/dbt/contracts/README.md): Define Python objects (dataclasses) that dbt expects to create and validate
|
||||
- [`deps`](core/dbt/deps/README.md): Package installation and dependency resolution
|
||||
- [`events`](core/dbt/events/README.md): Logging events
|
||||
- [`graph`](core/dbt/graph/README.md): Produce a `networkx` DAG of project resources, and select those resources given user-supplied criteria
|
||||
- [`include`](core/dbt/include/README.md): The dbt "global project," which defines default implementations of Jinja2 macros
|
||||
- [`parser`](core/dbt/parser/README.md): Read project files, validate, construct python objects
|
||||
- [`task`](core/dbt/task/README.md): Set forth the actions that dbt can perform when invoked
|
||||
|
||||
Legacy tests are found in the 'test' directory:
|
||||
- [`unit tests`](core/dbt/test/unit/README.md): Unit tests
|
||||
- [`integration tests`](core/dbt/test/integration/README.md): Integration tests
|
||||
|
||||
### Invoking dbt
|
||||
|
||||
The "tasks" map to top-level dbt commands. So `dbt run` => task.run.RunTask, etc. Some are more like abstract base classes (GraphRunnableTask, for example) but all the concrete types outside of task should map to tasks. Currently one executes at a time. The tasks kick off their “Runners” and those do execute in parallel. The parallelism is managed via a thread pool, in GraphRunnableTask.
|
||||
|
||||
core/dbt/include/index.html
|
||||
This is the docs website code. It comes from the dbt-docs repository, and is generated when a release is packaged.
|
||||
|
||||
## Adapters
|
||||
|
||||
dbt uses an adapter-plugin pattern to extend support to different databases, warehouses, query engines, etc. For testing and development purposes, the dbt-postgres plugin lives alongside the dbt-core codebase, in the [`plugins`](plugins) subdirectory. Like other adapter plugins, it is a self-contained codebase and package that builds on top of dbt-core.
|
||||
|
||||
Each adapter is a mix of python, Jinja2, and SQL. The adapter code also makes heavy use of Jinja2 to wrap modular chunks of SQL functionality, define default implementations, and allow plugins to override it.
|
||||
|
||||
Each adapter plugin is a standalone python package that includes:
|
||||
|
||||
- `dbt/include/[name]`: A "sub-global" dbt project, of YAML and SQL files, that reimplements Jinja macros to use the adapter's supported SQL syntax
|
||||
- `dbt/adapters/[name]`: Python modules that inherit, and optionally reimplement, the base adapter classes defined in dbt-core
|
||||
- `setup.py`
|
||||
|
||||
The Postgres adapter code is the most central, and many of its implementations are used as the default defined in the dbt-core global project. The greater the distance of a data technology from Postgres, the more its adapter plugin may need to reimplement.
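To make the package shape more concrete, here is a heavily simplified sketch of the Python side of a hypothetical adapter plugin; a real plugin also defines a connection manager, credentials, relation classes, and the accompanying Jinja macros, so treat the specifics below as an assumption rather than a template:

```python
# dbt/adapters/example/impl.py -- illustrative sketch of an adapter module.
# A real adapter implements many more methods and wires up a ConnectionManager.
from dbt.adapters.sql import SQLAdapter  # base class shipped with dbt-core


class ExampleAdapter(SQLAdapter):
    @classmethod
    def date_function(cls) -> str:
        # The SQL expression this warehouse uses for "current timestamp".
        return "now()"
```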
|
||||
|
||||
## Testing dbt
|
||||
|
||||
The [`test/`](test/) subdirectory includes unit and integration tests that run as continuous integration checks against open pull requests. Unit tests check mock inputs and outputs of specific python functions. Integration tests perform end-to-end dbt invocations against real adapters (Postgres, Redshift, Snowflake, BigQuery) and assert that the results match expectations. See [the contributing guide](CONTRIBUTING.md) for a step-by-step walkthrough of setting up a local development and testing environment.
|
||||
|
||||
## Everything else
|
||||
|
||||
- [docker](docker/): All dbt versions are published as Docker images on DockerHub. This subfolder contains the `Dockerfile` (constant) and `requirements.txt` (one for each version).
|
||||
- [etc](etc/): Images for README
|
||||
- [scripts](scripts/): Helper scripts for testing, releasing, and producing JSON schemas. These are not included in distributions of dbt, nor are they rigorously tested—they're just handy tools for the dbt maintainers :)
|
||||
CHANGELOG.md (file mode changed from executable to normal; diff suppressed because it is too large)
CONTRIBUTING.md
@@ -1,230 +1,222 @@
|
||||
# Contributing to `dbt-core`
|
||||
|
||||
`dbt-core` is open source software. It is what it is today because community members have opened issues, provided feedback, and [contributed to the knowledge loop](https://www.getdbt.com/dbt-labs/values/). Whether you are a seasoned open source contributor or a first-time committer, we welcome and encourage you to contribute code, documentation, ideas, or problem statements to this project.
|
||||
# Contributing to dbt
|
||||
|
||||
1. [About this document](#about-this-document)
|
||||
2. [Getting the code](#getting-the-code)
|
||||
3. [Setting up an environment](#setting-up-an-environment)
|
||||
4. [Running dbt-core in development](#running-dbt-core-in-development)
|
||||
5. [Testing dbt-core](#testing)
|
||||
6. [Debugging](#debugging)
|
||||
7. [Adding or modifying a changelog entry](#adding-or-modifying-a-changelog-entry)
|
||||
8. [Submitting a Pull Request](#submitting-a-pull-request)
|
||||
2. [Proposing a change](#proposing-a-change)
|
||||
3. [Getting the code](#getting-the-code)
|
||||
4. [Setting up an environment](#setting-up-an-environment)
|
||||
5. [Running dbt in development](#running-dbt-in-development)
|
||||
6. [Testing](#testing)
|
||||
7. [Submitting a Pull Request](#submitting-a-pull-request)
|
||||
|
||||
## About this document
|
||||
|
||||
There are many ways to contribute to the ongoing development of `dbt-core`, such as by participating in discussions and issues. We encourage you to first read our higher-level document: ["Expectations for Open Source Contributors"](https://docs.getdbt.com/docs/contributing/oss-expectations).
|
||||
This document is a guide intended for folks interested in contributing to dbt. Below, we document the process by which members of the community should create issues and submit pull requests (PRs) in this repository. It is not intended as a guide for using dbt, and it assumes a certain level of familiarity with Python concepts such as virtualenvs, `pip`, python modules, filesystems, and so on. This guide assumes you are using macOS or Linux and are comfortable with the command line.
|
||||
|
||||
The rest of this document serves as a more granular guide for contributing code changes to `dbt-core` (this repository). It is not intended as a guide for using `dbt-core`, and some pieces assume a level of familiarity with Python development (virtualenvs, `pip`, etc). Specific code snippets in this guide assume you are using macOS or Linux and are comfortable with the command line.
|
||||
If you're new to python development or contributing to open-source software, we encourage you to read this document from start to finish. If you get stuck, drop us a line in the #development channel on [slack](community.getdbt.com).
|
||||
|
||||
If you get stuck, we're happy to help! Drop us a line in the `#dbt-core-development` channel in the [dbt Community Slack](https://community.getdbt.com).
|
||||
### Signing the CLA
|
||||
|
||||
### Notes
|
||||
Please note that all contributors to dbt must sign the [Contributor License Agreement](https://docs.getdbt.com/docs/contributor-license-agreements) to have their Pull Request merged into the dbt codebase. If you are unable to sign the CLA, then the dbt maintainers will unfortunately be unable to merge your Pull Request. You are, however, welcome to open issues and comment on existing ones.
|
||||
|
||||
## Proposing a change
|
||||
|
||||
dbt is Apache 2.0-licensed open source software. dbt is what it is today because community members like you have opened issues, provided feedback, and contributed to the knowledge loop for the entire community. Whether you are a seasoned open source contributor or a first-time committer, we welcome and encourage you to contribute code, documentation, ideas, or problem statements to this project.
|
||||
|
||||
### Defining the problem
|
||||
|
||||
If you have an idea for a new feature or if you've discovered a bug in dbt, the first step is to open an issue. Please check the list of [open issues](https://github.com/fishtown-analytics/dbt/issues) before creating a new one. If you find a relevant issue, please add a comment to the open issue instead of creating a new one. There are hundreds of open issues in this repository and it can be hard to know where to look for a relevant open issue. **The dbt maintainers are always happy to point contributors in the right direction**, so please err on the side of documenting your idea in a new issue if you are unsure where a problem statement belongs.
|
||||
|
||||
**Note:** All community-contributed Pull Requests _must_ be associated with an open issue. If you submit a Pull Request that does not pertain to an open issue, you will be asked to create an issue describing the problem before the Pull Request can be reviewed.
|
||||
|
||||
### Discussing the idea
|
||||
|
||||
After you open an issue, a dbt maintainer will follow up by commenting on your issue (usually within 1-3 days) to explore your idea further and advise on how to implement the suggested changes. In many cases, community members will chime in with their own thoughts on the problem statement. If you as the issue creator are interested in submitting a Pull Request to address the issue, you should indicate this in the body of the issue. The dbt maintainers are _always_ happy to help contributors with the implementation of fixes and features, so please also indicate if there's anything you're unsure about or could use guidance around in the issue.
|
||||
|
||||
### Submitting a change
|
||||
|
||||
If an issue is appropriately well scoped and describes a beneficial change to the dbt codebase, then anyone may submit a Pull Request to implement the functionality described in the issue. See the sections below on how to do this.
|
||||
|
||||
The dbt maintainers will add a `good first issue` label if an issue is suitable for a first-time contributor. This label often means that the required code change is small, limited to one database adapter, or a net-new addition that does not impact existing functionality. You can see the list of currently open issues on the [Contribute](https://github.com/fishtown-analytics/dbt/contribute) page.
|
||||
|
||||
Here's a good workflow:
|
||||
- Comment on the open issue, expressing your interest in contributing the required code change
|
||||
- Outline your planned implementation. If you want help getting started, ask!
|
||||
- Follow the steps outlined below to develop locally. Once you have opened a PR, one of the dbt maintainers will work with you to review your code.
|
||||
- Add a test! Tests are crucial for both fixes and new features alike. We want to make sure that code works as intended, and that it avoids any bugs previously encountered. Currently, the best resource for understanding dbt's [unit](test/unit) and [integration](test/integration) tests is the tests themselves. One of the maintainers can help by pointing out relevant examples.
|
||||
|
||||
In some cases, the right resolution to an open issue might be tangential to the dbt codebase. The right path forward might be a documentation update or a change that can be made in user-space. In other cases, the issue might describe functionality that the dbt maintainers are unwilling or unable to incorporate into the dbt codebase. When it is determined that an open issue describes functionality that will not translate to a code change in the dbt repository, the issue will be tagged with the `wontfix` label (see below) and closed.
|
||||
|
||||
### Using issue labels
|
||||
|
||||
The dbt maintainers use labels to categorize open issues. Some labels indicate the databases impacted by the issue, while others describe the domain in the dbt codebase germane to the discussion. While most of these labels are self-explanatory (eg. `snowflake` or `bigquery`), there are others that are worth describing.
|
||||
|
||||
| tag | description |
|
||||
| --- | ----------- |
|
||||
| [triage](https://github.com/fishtown-analytics/dbt/labels/triage) | This is a new issue which has not yet been reviewed by a dbt maintainer. This label is removed when a maintainer reviews and responds to the issue. |
|
||||
| [bug](https://github.com/fishtown-analytics/dbt/labels/bug) | This issue represents a defect or regression in dbt |
|
||||
| [enhancement](https://github.com/fishtown-analytics/dbt/labels/enhancement) | This issue represents net-new functionality in dbt |
|
||||
| [good first issue](https://github.com/fishtown-analytics/dbt/labels/good%20first%20issue) | This issue does not require deep knowledge of the dbt codebase to implement. This issue is appropriate for a first-time contributor. |
|
||||
| [help wanted](https://github.com/fishtown-analytics/dbt/labels/help%20wanted) / [discussion](https://github.com/fishtown-analytics/dbt/labels/discussion) | Conversation around this issue is ongoing, and there isn't yet a clear path forward. Input from community members is most welcome. |
|
||||
| [duplicate](https://github.com/fishtown-analytics/dbt/issues/duplicate) | This issue is functionally identical to another open issue. The dbt maintainers will close this issue and encourage community members to focus conversation on the other one. |
|
||||
| [snoozed](https://github.com/fishtown-analytics/dbt/labels/snoozed) | This issue describes a good idea, but one which will probably not be addressed in a six-month time horizon. The dbt maintainers will revisit these issues periodically and re-prioritize them accordingly. |
|
||||
| [stale](https://github.com/fishtown-analytics/dbt/labels/stale) | This is an old issue which has not recently been updated. Stale issues will periodically be closed by dbt maintainers, but they can be re-opened if the discussion is restarted. |
|
||||
| [wontfix](https://github.com/fishtown-analytics/dbt/labels/wontfix) | This issue does not require a code change in the dbt repository, or the maintainers are unwilling/unable to merge a Pull Request which implements the behavior described in the issue. |
|
||||
|
||||
- **Adapters:** Is your issue or proposed code change related to a specific [database adapter](https://docs.getdbt.com/docs/available-adapters)? If so, please open issues, PRs, and discussions in that adapter's repository instead. The sole exception is Postgres; the `dbt-postgres` plugin lives in this repository (`dbt-core`).
|
||||
- **CLA:** Please note that anyone contributing code to `dbt-core` must sign the [Contributor License Agreement](https://docs.getdbt.com/docs/contributor-license-agreements). If you are unable to sign the CLA, the `dbt-core` maintainers will unfortunately be unable to merge any of your Pull Requests. We welcome you to participate in discussions, open issues, and comment on existing ones.
|
||||
- **Branches:** All pull requests from community contributors should target the `main` branch (default). If the change is needed as a patch for a minor version of dbt that has already been released (or is already a release candidate), a maintainer will backport the changes in your PR to the relevant "latest" release branch (`1.0.latest`, `1.1.latest`, ...). If an issue fix applies to a release branch, that fix should be first committed to the development branch and then to the release branch (rarely release-branch fixes may not apply to `main`).
|
||||
- **Releases**: Before releasing a new minor version of Core, we prepare a series of alphas and release candidates to allow users (especially employees of dbt Labs!) to test the new version in live environments. This is an important quality assurance step, as it exposes the new code to a wide variety of complicated deployments and can surface bugs before official release. Releases are accessible via pip, homebrew, and dbt Cloud.
|
||||
|
||||
## Getting the code
|
||||
|
||||
### Installing git
|
||||
|
||||
You will need `git` in order to download and modify the `dbt-core` source code. On macOS, the best way to download git is to just install [Xcode](https://developer.apple.com/support/xcode/).
|
||||
You will need `git` in order to download and modify the dbt source code. On macOS, the best way to download git is to just install [Xcode](https://developer.apple.com/support/xcode/).
|
||||
|
||||
### External contributors
|
||||
|
||||
If you are not a member of the `dbt-labs` GitHub organization, you can contribute to `dbt-core` by forking the `dbt-core` repository. For a detailed overview on forking, check out the [GitHub docs on forking](https://help.github.com/en/articles/fork-a-repo). In short, you will need to:
|
||||
If you are not a member of the `fishtown-analytics` GitHub organization, you can contribute to dbt by forking the dbt repository. For a detailed overview on forking, check out the [GitHub docs on forking](https://help.github.com/en/articles/fork-a-repo). In short, you will need to:
|
||||
|
||||
1. Fork the `dbt-core` repository
|
||||
2. Clone your fork locally
|
||||
3. Check out a new branch for your proposed changes
|
||||
4. Push changes to your fork
|
||||
5. Open a pull request against `dbt-labs/dbt-core` from your forked repository
|
||||
1. fork the dbt repository
|
||||
2. clone your fork locally
|
||||
3. check out a new branch for your proposed changes
|
||||
4. push changes to your fork
|
||||
5. open a pull request against `fishtown-analytics/dbt` from your forked repository
|
||||
|
||||
### dbt Labs contributors
|
||||
### Core contributors
|
||||
|
||||
If you are a member of the `fishtown-analytics` GitHub organization, you will have push access to the dbt repo. Rather than
|
||||
forking dbt to make your changes, just clone the repository, check out a new branch, and push directly to that branch.
|
||||
|
||||
If you are a member of the `dbt-labs` GitHub organization, you will have push access to the `dbt-core` repo. Rather than forking `dbt-core` to make your changes, just clone the repository, check out a new branch, and push directly to that branch. Branch names should be prefixed with `CT-XXX/`, where:
|
||||
* CT stands for 'core team'
|
||||
* XXX stands for a JIRA ticket number
|
||||
|
||||
## Setting up an environment
|
||||
|
||||
There are some tools that will be helpful to you in developing locally. While this is the list relevant for `dbt-core` development, many of these tools are used commonly across open-source python projects.
|
||||
There are some tools that will be helpful to you in developing locally. While this is the list relevant for dbt development, many of these tools are used commonly across open-source python projects.
|
||||
|
||||
### Tools
|
||||
|
||||
These are the tools used in `dbt-core` development and testing:
|
||||
A short list of tools used in dbt testing that will be helpful to your understanding:
|
||||
|
||||
- [`tox`](https://tox.readthedocs.io/en/latest/) to manage virtualenvs across python versions. We currently target the latest patch releases for Python 3.8, 3.9, 3.10 and 3.11
|
||||
- [`pytest`](https://docs.pytest.org/en/latest/) to define, discover, and run tests
|
||||
- [`flake8`](https://flake8.pycqa.org/en/latest/) for code linting
|
||||
- [`black`](https://github.com/psf/black) for code formatting
|
||||
- [`mypy`](https://mypy.readthedocs.io/en/stable/) for static type checking
|
||||
- [`pre-commit`](https://pre-commit.com) to easily run those checks
|
||||
- [`changie`](https://changie.dev/) to create changelog entries, without merge conflicts
|
||||
- [`make`](https://users.cs.duke.edu/~ola/courses/programming/Makefiles/Makefiles.html) to run multiple setup or test steps in combination. Don't worry too much, nobody _really_ understands how `make` works, and our Makefile aims to be super simple.
|
||||
- [GitHub Actions](https://github.com/features/actions) for automating tests and checks, once a PR is pushed to the `dbt-core` repository
|
||||
- [virtualenv](https://virtualenv.pypa.io/en/stable/) to manage dependencies
|
||||
- [tox](https://tox.readthedocs.io/en/latest/) to manage virtualenvs across python versions
|
||||
- [pytest](https://docs.pytest.org/en/latest/) to discover/run tests
|
||||
- [make](https://users.cs.duke.edu/~ola/courses/programming/Makefiles/Makefiles.html) - but don't worry too much, nobody _really_ understands how make works and our Makefile is super simple
|
||||
- [flake8](https://gitlab.com/pycqa/flake8) for code linting
|
||||
- [CircleCI](https://circleci.com/product/) and [Azure Pipelines](https://azure.microsoft.com/en-us/services/devops/pipelines/)
|
||||
|
||||
A deep understanding of these tools is not required to effectively contribute to `dbt-core`, but we recommend checking out the attached documentation if you're interested in learning more about each one.
|
||||
A deep understanding of these tools is not required to effectively contribute to dbt, but we recommend checking out the attached documentation if you're interested in learning more about them.
|
||||
|
||||
#### Virtual environments
|
||||
#### virtual environments
|
||||
|
||||
We strongly recommend using virtual environments when developing code in `dbt-core`. We recommend creating this virtualenv
|
||||
in the root of the `dbt-core` repository. To create a new virtualenv, run:
|
||||
```sh
|
||||
We strongly recommend using virtual environments when developing code in dbt. We recommend creating this virtualenv
|
||||
in the root of the dbt repository. To create a new virtualenv, run:
|
||||
```
|
||||
python3 -m venv env
|
||||
source env/bin/activate
|
||||
```
|
||||
|
||||
This will create and activate a new Python virtual environment.
|
||||
|
||||
#### Docker and `docker-compose`
|
||||
#### docker and docker-compose
|
||||
|
||||
Docker and `docker-compose` are both used in testing. Specific instructions for your OS can be found [here](https://docs.docker.com/get-docker/).
|
||||
Docker and docker-compose are both used in testing. For macOS, the easiest thing to do is to [download docker for mac](https://store.docker.com/editions/community/docker-ce-desktop-mac). You'll need to make an account. On Linux, you can use one of the packages [here](https://docs.docker.com/install/#server). We recommend installing from docker.com instead of from your package manager. On Linux you also have to install docker-compose separately, following [these instructions](https://docs.docker.com/compose/install/#install-compose).
|
||||
|
||||
|
||||
#### Postgres (optional)
|
||||
#### postgres (optional)
|
||||
|
||||
For testing, and later in the examples in this document, you may want to have `psql` available so you can poke around in the database and see what happened. We recommend that you use [homebrew](https://brew.sh/) for that on macOS, and your package manager on Linux. You can install any version of the postgres client that you'd like. On macOS, with homebrew setup, you can run:
|
||||
|
||||
```sh
|
||||
```
|
||||
brew install postgresql
|
||||
```
|
||||
|
||||
## Running `dbt-core` in development
|
||||
## Running dbt in development
|
||||
|
||||
### Installation
|
||||
|
||||
First make sure that you set up your `virtualenv` as described in [Setting up an environment](#setting-up-an-environment). Also ensure you have the latest version of pip installed with `pip install --upgrade pip`. Next, install `dbt-core` (and its dependencies):
|
||||
First make sure that you set up your `virtualenv` as described in section _Setting up an environment_. Next, install dbt (and its dependencies) with:
|
||||
|
||||
```sh
|
||||
make dev
|
||||
```
|
||||
or, alternatively:
|
||||
```sh
|
||||
pip install -r dev-requirements.txt -r editable-requirements.txt
|
||||
pre-commit install
|
||||
pip install -r editable_requirements.txt
|
||||
```
|
||||
|
||||
When installed in this way, any changes you make to your local copy of the source code will be reflected immediately in your next `dbt` run.
|
||||
When dbt is installed from source in this way, any changes you make to the dbt source code will be reflected immediately in your next `dbt` run.
|
||||
|
||||
### Running `dbt-core`
|
||||
### Running dbt
|
||||
|
||||
With your virtualenv activated, the `dbt` script should point back to the source code you've cloned on your machine. You can verify this by running `which dbt`. This command should show you a path to an executable in your virtualenv.
|
||||
|
||||
Configure your [profile](https://docs.getdbt.com/docs/configure-your-profile) as necessary to connect to your target databases. It may be a good idea to add a new profile pointing to a local Postgres instance, or a specific test sandbox within your data warehouse if appropriate. Make sure to create a profile before running integration tests.
|
||||
Configure your [profile](https://docs.getdbt.com/docs/configure-your-profile) as necessary to connect to your target databases. It may be a good idea to add a new profile pointing to a local postgres instance, or a specific test sandbox within your data warehouse if appropriate.
|
||||
|
||||
## Testing
|
||||
|
||||
Once you're able to manually test that your code change is working as expected, it's important to run existing automated tests, as well as adding some new ones. These tests will ensure that:
|
||||
- Your code changes do not unexpectedly break other established functionality
|
||||
- Your code changes can handle all known edge cases
|
||||
- The functionality you're adding will _keep_ working in the future
|
||||
Getting the dbt integration tests set up in your local environment will be very helpful as you start to make changes to your local version of dbt. The section that follows outlines some helpful tips for setting up the test environment.
|
||||
|
||||
Although `dbt-core` works with a number of different databases, you won't need to supply credentials for every one of these databases in your test environment. Instead, you can test most `dbt-core` code changes with Python and Postgres.
|
||||
### Running tests via Docker
|
||||
|
||||
### Initial setup
|
||||
dbt's unit and integration tests run in Docker. Because dbt works with a number of different databases, you will need to supply credentials for one or more of these databases in your test environment. Most organizations don't have access to BigQuery, Redshift, Snowflake, and Postgres databases all at once, so it's likely that you will be unable to run every integration test locally. Fortunately, Fishtown Analytics provides a CI environment with access to sandboxed Redshift, Snowflake, BigQuery, and Postgres databases. See the section on [_Submitting a Pull Request_](#submitting-a-pull-request) below for more information on this CI setup.
|
||||
|
||||
Postgres offers the easiest way to test most `dbt-core` functionality today. They are the fastest to run, and the easiest to set up. To run the Postgres integration tests, you'll have to do one extra step of setting up the test database:
|
||||
|
||||
```sh
|
||||
make setup-db
|
||||
### Specifying your test credentials
|
||||
|
||||
dbt uses test credentials specified in a `test.env` file in the root of the repository. This `test.env` file is git-ignored, but please be _extra_ careful to never check in credentials or other sensitive information when developing against dbt. To create your `test.env` file, copy the provided sample file, then supply your relevant credentials:
|
||||
|
||||
```
|
||||
cp test.env.sample test.env
|
||||
atom test.env # supply your credentials
|
||||
```
|
||||
|
||||
We recommend starting with dbt's Postgres tests. These tests cover most of the functionality in dbt, are the fastest to run, and are the easiest to set up. dbt's test suite runs Postgres in a Docker container, so no setup should be required to run these tests.
|
||||
|
||||
If you additionally want to test Snowflake, BigQuery, or Redshift locally, you'll need to get credentials and add them to the `test.env` file. In general, it's most important to have successful unit and Postgres tests. Once you open a PR, dbt will automatically run integration tests for the other three core database adapters. Of course, if you are a BigQuery user contributing a BigQuery-only feature, it's important to run BigQuery tests as well.
|
||||
|
||||
### Test commands
|
||||
|
||||
dbt's unit tests and Python linter can be run with:
|
||||
|
||||
```
|
||||
make test-unit
|
||||
```
|
||||
|
||||
To run the Postgres + Python 3.6 integration tests, you'll have to do one extra step of setting up the test database:
|
||||
|
||||
```
|
||||
or, alternatively:
|
||||
```sh
|
||||
docker-compose up -d database
|
||||
PGHOST=localhost PGUSER=root PGPASSWORD=password PGDATABASE=postgres bash test/setup_db.sh
|
||||
```
|
||||
|
||||
### Test commands
|
||||
To run a quick test for Python3 integration tests on Postgres, you can run:
|
||||
|
||||
There are a few methods for running tests locally.
|
||||
|
||||
#### Makefile
|
||||
|
||||
There are multiple targets in the Makefile to run common test suites and code
|
||||
checks, most notably:
|
||||
|
||||
```sh
|
||||
# Runs unit tests with py38 and code checks in parallel.
|
||||
make test
|
||||
# Runs postgres integration tests with py38 in "fail fast" mode.
|
||||
make integration
|
||||
```
|
||||
> These make targets assume you have a local installation of a recent version of [`tox`](https://tox.readthedocs.io/en/latest/) for unit/integration testing and pre-commit for code quality checks,
|
||||
> unless you choose to use a Docker container to run tests. Run `make help` for more info.
|
||||
|
||||
Check out the other targets in the Makefile to see other commonly used test
|
||||
suites.
|
||||
|
||||
#### `pre-commit`
|
||||
[`pre-commit`](https://pre-commit.com) takes care of running all code-checks for formatting and linting. Run `make dev` to install `pre-commit` in your local environment (we recommend running this command with a python virtual environment active). This command installs several pip executables including black, mypy, and flake8. Once this is done you can use any of the linter-based make targets as well as a git pre-commit hook that will ensure proper formatting and linting.
|
||||
|
||||
#### `tox`
|
||||
|
||||
[`tox`](https://tox.readthedocs.io/en/latest/) takes care of managing virtualenvs and installing dependencies in order to run tests. You can also run tests in parallel; for example, you can run unit tests for Python 3.8, 3.9, 3.10, and 3.11 in parallel with `tox -p`. You can also run unit tests for a specific python version with `tox -e py38`. The configuration for these tests is located in `tox.ini`.
|
||||
|
||||
#### `pytest`
|
||||
|
||||
Finally, you can also run a specific test or group of tests using [`pytest`](https://docs.pytest.org/en/latest/) directly. With a virtualenv active and dev dependencies installed you can do things like:
|
||||
|
||||
```sh
|
||||
# run all unit tests in a file
|
||||
python3 -m pytest tests/unit/test_graph.py
|
||||
# run a specific unit test
|
||||
python3 -m pytest tests/unit/test_graph.py::GraphTest::test__dependency_list
|
||||
# run specific Postgres functional tests
|
||||
python3 -m pytest tests/functional/sources
|
||||
make test-quick
|
||||
```
|
||||
|
||||
> See [pytest usage docs](https://docs.pytest.org/en/6.2.x/usage.html) for an overview of useful command-line options.
|
||||
To run tests for a specific database, invoke `tox` directly with the required flags:
|
||||
```
|
||||
# Run Postgres py36 tests
|
||||
docker-compose run test tox -e integration-postgres-py36 -- -x
|
||||
|
||||
### Unit, Integration, Functional?
|
||||
# Run Snowflake py36 tests
|
||||
docker-compose run test tox -e integration-snowflake-py36 -- -x
|
||||
|
||||
Here are some general rules for adding tests:
|
||||
* unit tests (`tests/unit`) don’t need to access a database; "pure Python" tests should be written as unit tests
|
||||
* functional tests (`tests/functional`) cover anything that interacts with a database, namely adapter
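For example, a "pure Python" unit test needs no database or profile at all (the function under test here is hypothetical, purely for illustration):

```python
# tests/unit style: exercises plain Python logic, no database connection.
def slugify(name: str) -> str:
    return name.strip().lower().replace(" ", "_")


def test_slugify() -> None:
    assert slugify(" My Model ") == "my_model"
```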
|
||||
# Run BigQuery py36 tests
|
||||
docker-compose run test tox -e integration-bigquery-py36 -- -x
|
||||
|
||||
## Debugging
|
||||
|
||||
1. The logs for a `dbt run` have stack traces and other information for debugging errors (in `logs/dbt.log` in your project directory).
|
||||
2. Try using a debugger, like `ipdb`. For pytest: `--pdb --pdbcls=IPython.terminal.debugger:pdb`
|
||||
3. Sometimes, it’s easier to debug on a single thread: `dbt --single-threaded run`
|
||||
4. To make print statements from Jinja macros: `{{ log(msg, info=true) }}`
|
||||
5. You can also add `{{ debug() }}` statements, which will drop you into some auto-generated code that the macro wrote.
|
||||
6. The dbt “artifacts” are written out to the ‘target’ directory of your dbt project. They are in unformatted json, which can be hard to read. Format them with:
|
||||
> python -m json.tool target/run_results.json > run_results.json
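If you'd rather inspect an artifact programmatically, a small script works too (this assumes the `results`, `unique_id`, and `status` keys present in current `run_results.json` files, and that you run it from your dbt project directory):

```python
# Print a one-line status summary per node from the last run's artifact.
import json

with open("target/run_results.json") as f:
    run_results = json.load(f)

for result in run_results["results"]:
    print(result["unique_id"], result["status"])
```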
|
||||
|
||||
### Assorted development tips
|
||||
* Append `# type: ignore` to the end of a line if you need to disable `mypy` on that line.
|
||||
* Sometimes flake8 complains about lines that are actually fine, in which case you can put a comment on the line such as: # noqa or # noqa: ANNN, where ANNN is the error code that flake8 issues.
|
||||
* To collect output for `CProfile`, run dbt with the `-r` option and the name of an output file, i.e. `dbt -r dbt.cprof run`. If you just want to profile parsing, you can do: `dbt -r dbt.cprof parse`. `pip` install `snakeviz` to view the output. Run `snakeviz dbt.cprof` and output will be rendered in a browser window.
|
||||
|
||||
## Adding or modifying a CHANGELOG Entry
|
||||
|
||||
We use [changie](https://changie.dev) to generate `CHANGELOG` entries. **Note:** Do not edit the `CHANGELOG.md` directly. Your modifications will be lost.
|
||||
|
||||
Follow the steps to [install `changie`](https://changie.dev/guide/installation/) for your system.
|
||||
|
||||
Once changie is installed and your PR is created for a new feature, simply run the following command and changie will walk you through the process of creating a changelog entry:
|
||||
|
||||
```shell
|
||||
changie new
|
||||
# Run Redshift py36 tests
|
||||
docker-compose run test tox -e integration-redshift-py36 -- -x
|
||||
```
|
||||
|
||||
Commit the file that's created and your changelog entry is complete!
|
||||
To run a specific test by itself:
|
||||
```
|
||||
docker-compose run test tox -e explicit-py36 -- -s -x -m profile_{adapter} {path_to_test_file_or_folder}
|
||||
```
|
||||
E.g.
|
||||
```
|
||||
docker-compose run test tox -e explicit-py36 -- -s -x -m profile_snowflake test/integration/001_simple_copy_test
|
||||
```
|
||||
|
||||
If you are contributing to a feature already in progress, you will modify the changie yaml file in dbt/.changes/unreleased/ related to your change. If you need help finding this file, please ask within the discussion for the pull request!
|
||||
|
||||
You don't need to worry about which `dbt-core` version your change will go into. Just create the changelog entry with `changie`, and open your PR against the `main` branch. All merged changes will be included in the next minor version of `dbt-core`. The Core maintainers _may_ choose to "backport" specific changes in order to patch older minor versions. In that case, a maintainer will take care of that backport after merging your PR, before releasing the new version of `dbt-core`.
|
||||
See the `Makefile` contents for more some other examples of ways to run `tox`.
|
||||
|
||||
## Submitting a Pull Request
|
||||
|
||||
Code can be merged into the current development branch `main` by opening a pull request. A `dbt-core` maintainer will review your PR. They may suggest code revision for style or clarity, or request that you add unit or integration test(s). These are good things! We believe that, with a little bit of help, anyone can contribute high-quality code.
|
||||
Fishtown Analytics provides a sandboxed Redshift, Snowflake, and BigQuery database for use in a CI environment. When pull requests are submitted to the `fishtown-analytics/dbt` repo, GitHub will trigger automated tests in CircleCI and Azure Pipelines.
|
||||
|
||||
Automated tests run via GitHub Actions. If you're a first-time contributor, all tests (including code checks and unit tests) will require a maintainer to approve. Changes in the `dbt-core` repository trigger integration tests against Postgres. dbt Labs also provides CI environments in which to test changes to other adapters, triggered by PRs in those adapters' repositories, as well as periodic maintenance checks of each adapter in concert with the latest `dbt-core` code changes.
|
||||
A dbt maintainer will review your PR. They may suggest code revision for style or clarity, or request that you add unit or integration test(s). These are good things! We believe that, with a little bit of help, anyone can contribute high-quality code.
|
||||
|
||||
Once all tests are passing and your PR has been approved, a `dbt-core` maintainer will merge your changes into the active development branch. And that's it! Happy developing :tada:
|
||||
|
||||
Sometimes, the contributor license agreement (CLA) auto-check bot doesn't find a user's entry in its roster. If you need to force a rerun, add `@cla-bot check` in a comment on the pull request.
|
||||
Once all tests are passing and your PR has been approved, a dbt maintainer will merge your changes into the active development branch. And that's it! Happy developing :tada:
|
||||
|
||||
Dockerfile
@@ -0,0 +1,51 @@
|
||||
FROM ubuntu:18.04
|
||||
|
||||
ENV DEBIAN_FRONTEND noninteractive
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get dist-upgrade -y && \
|
||||
apt-get install -y --no-install-recommends \
|
||||
netcat postgresql curl git ssh software-properties-common \
|
||||
make build-essential ca-certificates libpq-dev \
|
||||
libsasl2-dev libsasl2-2 libsasl2-modules-gssapi-mit libyaml-dev \
|
||||
&& \
|
||||
add-apt-repository ppa:deadsnakes/ppa && \
|
||||
apt-get install -y \
|
||||
python python-dev python-pip \
|
||||
python3.6 python3.6-dev python3-pip python3.6-venv \
|
||||
python3.7 python3.7-dev python3.7-venv \
|
||||
python3.8 python3.8-dev python3.8-venv \
|
||||
python3.9 python3.9-dev python3.9-venv && \
|
||||
apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
|
||||
|
||||
ARG DOCKERIZE_VERSION=v0.6.1
|
||||
RUN curl -LO https://github.com/jwilder/dockerize/releases/download/$DOCKERIZE_VERSION/dockerize-linux-amd64-$DOCKERIZE_VERSION.tar.gz && \
|
||||
tar -C /usr/local/bin -xzvf dockerize-linux-amd64-$DOCKERIZE_VERSION.tar.gz && \
|
||||
rm dockerize-linux-amd64-$DOCKERIZE_VERSION.tar.gz
|
||||
|
||||
RUN pip3 install -U "tox==3.14.4" wheel "six>=1.14.0,<1.15.0" "virtualenv==20.0.3" setuptools
|
||||
# tox fails if the 'python' interpreter (python2) doesn't have `tox` installed
|
||||
RUN pip install -U "tox==3.14.4" "six>=1.14.0,<1.15.0" "virtualenv==20.0.3" setuptools
|
||||
|
||||
# These args are passed in via docker-compose, which reads them from the .env file.
|
||||
# On Linux, run `make .env` to create the .env file for the current user.
|
||||
# On MacOS and Windows, these can stay unset.
|
||||
ARG USER_ID
|
||||
ARG GROUP_ID
|
||||
|
||||
RUN if [ ${USER_ID:-0} -ne 0 ] && [ ${GROUP_ID:-0} -ne 0 ]; then \
|
||||
groupadd -g ${GROUP_ID} dbt_test_user && \
|
||||
useradd -m -l -u ${USER_ID} -g ${GROUP_ID} dbt_test_user; \
|
||||
else \
|
||||
useradd -mU -l dbt_test_user; \
|
||||
fi
|
||||
RUN mkdir /usr/app && chown dbt_test_user /usr/app
|
||||
RUN mkdir /home/tox && chown dbt_test_user /home/tox
|
||||
|
||||
WORKDIR /usr/app
|
||||
VOLUME /usr/app
|
||||
|
||||
USER dbt_test_user
|
||||
|
||||
ENV PYTHONIOENCODING=utf-8
|
||||
ENV LANG C.UTF-8
|
||||
@@ -1,79 +0,0 @@
|
||||
##
|
||||
# This dockerfile is used for local development and adapter testing only.
|
||||
# See `/docker` for a generic and production-ready docker file
|
||||
##
|
||||
|
||||
FROM ubuntu:22.04
|
||||
|
||||
ENV DEBIAN_FRONTEND noninteractive
|
||||
|
||||
RUN apt-get update \
|
||||
&& apt-get install -y --no-install-recommends \
|
||||
software-properties-common gpg-agent \
|
||||
&& add-apt-repository ppa:git-core/ppa -y \
|
||||
&& apt-get dist-upgrade -y \
|
||||
&& apt-get install -y --no-install-recommends \
|
||||
netcat \
|
||||
postgresql \
|
||||
curl \
|
||||
git \
|
||||
ssh \
|
||||
software-properties-common \
|
||||
make \
|
||||
build-essential \
|
||||
ca-certificates \
|
||||
libpq-dev \
|
||||
libsasl2-dev \
|
||||
libsasl2-2 \
|
||||
libsasl2-modules-gssapi-mit \
|
||||
libyaml-dev \
|
||||
unixodbc-dev \
|
||||
&& add-apt-repository ppa:deadsnakes/ppa \
|
||||
&& apt-get install -y \
|
||||
python-is-python3 \
|
||||
python-dev-is-python3 \
|
||||
python3-pip \
|
||||
python3.8 \
|
||||
python3.8-dev \
|
||||
python3.8-venv \
|
||||
python3.9 \
|
||||
python3.9-dev \
|
||||
python3.9-venv \
|
||||
python3.10 \
|
||||
python3.10-dev \
|
||||
python3.10-venv \
|
||||
python3.11 \
|
||||
python3.11-dev \
|
||||
python3.11-venv \
|
||||
&& apt-get clean \
|
||||
&& rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
|
||||
|
||||
ARG DOCKERIZE_VERSION=v0.6.1
|
||||
RUN curl -LO https://github.com/jwilder/dockerize/releases/download/$DOCKERIZE_VERSION/dockerize-linux-amd64-$DOCKERIZE_VERSION.tar.gz \
|
||||
&& tar -C /usr/local/bin -xzvf dockerize-linux-amd64-$DOCKERIZE_VERSION.tar.gz \
|
||||
&& rm dockerize-linux-amd64-$DOCKERIZE_VERSION.tar.gz
|
||||
|
||||
RUN pip3 install -U tox wheel six setuptools
|
||||
|
||||
# These args are passed in via docker-compose, which reads them from the .env file.
|
||||
# On Linux, run `make .env` to create the .env file for the current user.
|
||||
# On MacOS and Windows, these can stay unset.
|
||||
ARG USER_ID
|
||||
ARG GROUP_ID
|
||||
|
||||
RUN if [ ${USER_ID:-0} -ne 0 ] && [ ${GROUP_ID:-0} -ne 0 ]; then \
|
||||
groupadd -g ${GROUP_ID} dbt_test_user && \
|
||||
useradd -m -l -u ${USER_ID} -g ${GROUP_ID} dbt_test_user; \
|
||||
else \
|
||||
useradd -mU -l dbt_test_user; \
|
||||
fi
|
||||
RUN mkdir /usr/app && chown dbt_test_user /usr/app
|
||||
RUN mkdir /home/tox && chown dbt_test_user /home/tox
|
||||
|
||||
WORKDIR /usr/app
|
||||
VOLUME /usr/app
|
||||
|
||||
USER dbt_test_user
|
||||
|
||||
ENV PYTHONIOENCODING=utf-8
|
||||
ENV LANG C.UTF-8
|
||||
@@ -186,7 +186,7 @@
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright 2021 dbt Labs, Inc.
|
||||
Copyright {yyyy} {name of copyright owner}
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
||||
Makefile
@@ -1,141 +1,44 @@
|
||||
.DEFAULT_GOAL:=help
|
||||
.PHONY: install test test-unit test-integration
|
||||
|
||||
# Optional flag to run target in a docker container.
|
||||
# (example `make test USE_DOCKER=true`)
|
||||
ifeq ($(USE_DOCKER),true)
|
||||
DOCKER_CMD := docker-compose run --rm test
|
||||
endif
|
||||
changed_tests := `git status --porcelain | grep '^\(M\| M\|A\| A\)' | awk '{ print $$2 }' | grep '\/test_[a-zA-Z_\-\.]\+.py'`
|
||||
|
||||
#
|
||||
# To override CI_FLAGS, create a file at this repo's root dir named `makefile.test.env`. Fill it
|
||||
# with any ENV_VAR overrides required by your test environment, e.g.
|
||||
# DBT_TEST_USER_1=user
|
||||
# LOG_DIR="dir with a space in it"
|
||||
#
|
||||
# Warn: Restrict each line to one variable only.
|
||||
#
|
||||
ifeq (./makefile.test.env,$(wildcard ./makefile.test.env))
|
||||
include ./makefile.test.env
|
||||
endif
|
||||
install:
|
||||
pip install -e .
|
||||
|
||||
CI_FLAGS =\
|
||||
DBT_TEST_USER_1=$(if $(DBT_TEST_USER_1),$(DBT_TEST_USER_1),dbt_test_user_1)\
|
||||
DBT_TEST_USER_2=$(if $(DBT_TEST_USER_2),$(DBT_TEST_USER_2),dbt_test_user_2)\
|
||||
DBT_TEST_USER_3=$(if $(DBT_TEST_USER_3),$(DBT_TEST_USER_3),dbt_test_user_3)\
|
||||
RUSTFLAGS=$(if $(RUSTFLAGS),$(RUSTFLAGS),"-D warnings")\
|
||||
LOG_DIR=$(if $(LOG_DIR),$(LOG_DIR),./logs)\
|
||||
DBT_LOG_FORMAT=$(if $(DBT_LOG_FORMAT),$(DBT_LOG_FORMAT),json)
|
||||
test: .env
|
||||
@echo "Full test run starting..."
|
||||
@time docker-compose run test tox
|
||||
|
||||
test-unit: .env
|
||||
@echo "Unit test run starting..."
|
||||
@time docker-compose run test tox -e unit-py36,flake8
|
||||
|
||||
.PHONY: dev_req
|
||||
dev_req: ## Installs dbt-* packages in develop mode along with only development dependencies.
|
||||
@\
|
||||
pip install -r dev-requirements.txt
|
||||
pip install -r editable-requirements.txt
|
||||
test-integration: .env
|
||||
@echo "Integration test run starting..."
|
||||
@time docker-compose run test tox -e integration-postgres-py36,integration-redshift-py36,integration-snowflake-py36,integration-bigquery-py36
|
||||
|
||||
.PHONY: dev
|
||||
dev: dev_req ## Installs dbt-* packages in develop mode along with development dependencies and pre-commit.
|
||||
@\
|
||||
pre-commit install
|
||||
|
||||
.PHONY: proto_types
|
||||
proto_types: ## generates google protobuf python file from types.proto
|
||||
protoc -I=./core/dbt/events --python_out=./core/dbt/events ./core/dbt/events/types.proto
|
||||
|
||||
.PHONY: mypy
|
||||
mypy: .env ## Runs mypy against staged changes for static type checking.
|
||||
@\
|
||||
$(DOCKER_CMD) pre-commit run --hook-stage manual mypy-check | grep -v "INFO"
|
||||
|
||||
.PHONY: flake8
|
||||
flake8: .env ## Runs flake8 against staged changes to enforce style guide.
|
||||
@\
|
||||
$(DOCKER_CMD) pre-commit run --hook-stage manual flake8-check | grep -v "INFO"
|
||||
|
||||
.PHONY: black
|
||||
black: .env ## Runs black against staged changes to enforce style guide.
|
||||
@\
|
||||
$(DOCKER_CMD) pre-commit run --hook-stage manual black-check -v | grep -v "INFO"
|
||||
|
||||
.PHONY: lint
|
||||
lint: .env ## Runs flake8 and mypy code checks against staged changes.
|
||||
@\
|
||||
$(DOCKER_CMD) pre-commit run flake8-check --hook-stage manual | grep -v "INFO"; \
|
||||
$(DOCKER_CMD) pre-commit run mypy-check --hook-stage manual | grep -v "INFO"
|
||||
|
||||
.PHONY: unit
|
||||
unit: .env ## Runs unit tests with py
|
||||
@\
|
||||
$(DOCKER_CMD) tox -e py
|
||||
|
||||
.PHONY: test
|
||||
test: .env ## Runs unit tests with py and code checks against staged changes.
|
||||
@\
|
||||
$(DOCKER_CMD) tox -e py; \
|
||||
$(DOCKER_CMD) pre-commit run black-check --hook-stage manual | grep -v "INFO"; \
|
||||
$(DOCKER_CMD) pre-commit run flake8-check --hook-stage manual | grep -v "INFO"; \
|
||||
$(DOCKER_CMD) pre-commit run mypy-check --hook-stage manual | grep -v "INFO"
|
||||
|
||||
.PHONY: integration
|
||||
integration: .env ## Runs postgres integration tests with py-integration
|
||||
@\
|
||||
$(CI_FLAGS) $(DOCKER_CMD) tox -e py-integration -- -nauto
|
||||
|
||||
.PHONY: integration-fail-fast
|
||||
integration-fail-fast: .env ## Runs postgres integration tests with py-integration in "fail fast" mode.
|
||||
@\
|
||||
$(DOCKER_CMD) tox -e py-integration -- -x -nauto
|
||||
|
||||
.PHONY: interop
|
||||
interop: clean
|
||||
@\
|
||||
mkdir $(LOG_DIR) && \
|
||||
$(CI_FLAGS) $(DOCKER_CMD) tox -e py-integration -- -nauto && \
|
||||
LOG_DIR=$(LOG_DIR) cargo run --manifest-path test/interop/log_parsing/Cargo.toml
|
||||
|
||||
.PHONY: setup-db
|
||||
setup-db: ## Setup Postgres database with docker-compose for system testing.
|
||||
@\
|
||||
docker-compose up -d database && \
|
||||
PGHOST=localhost PGUSER=root PGPASSWORD=password PGDATABASE=postgres bash test/setup_db.sh
|
||||
test-quick: .env
|
||||
@echo "Integration test run starting..."
|
||||
@time docker-compose run test tox -e integration-postgres-py36 -- -x
|
||||
|
||||
# This rule creates a file named .env that is used by docker-compose for passing
|
||||
# the USER_ID and GROUP_ID arguments to the Docker image.
|
||||
.env: ## Setup step for using docker-compose with make targets.
|
||||
@touch .env
|
||||
ifneq ($(OS),Windows_NT)
|
||||
ifneq ($(shell uname -s), Darwin)
|
||||
.env:
|
||||
@echo USER_ID=$(shell id -u) > .env
|
||||
@echo GROUP_ID=$(shell id -g) >> .env
|
||||
endif
|
||||
endif
|
||||
@time docker-compose build
|
||||
|
||||
.PHONY: clean
|
||||
clean: ## Resets development environment.
|
||||
@echo 'cleaning repo...'
|
||||
@rm -f .coverage
|
||||
@rm -f .coverage.*
|
||||
@rm -rf .eggs/
|
||||
@rm -f .env
|
||||
@rm -rf .tox/
|
||||
@rm -rf build/
|
||||
@rm -rf dbt.egg-info/
|
||||
@rm -f dbt_project.yml
|
||||
@rm -rf dist/
|
||||
@rm -f htmlcov/*.{css,html,js,json,png}
|
||||
@rm -rf logs/
|
||||
@rm -rf target/
|
||||
@find . -type f -name '*.pyc' -delete
|
||||
@find . -type d -name '__pycache__' -depth -delete
|
||||
@echo 'done.'
|
||||
|
||||
|
||||
.PHONY: help
|
||||
help: ## Show this help message.
|
||||
@echo 'usage: make [target] [USE_DOCKER=true]'
|
||||
@echo
|
||||
@echo 'targets:'
|
||||
@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
|
||||
@echo
|
||||
@echo 'options:'
|
||||
@echo 'use USE_DOCKER=true to run target in a docker container'
|
||||
clean:
|
||||
rm -f .coverage
|
||||
rm -rf .eggs/
|
||||
rm -f .env
|
||||
rm -rf .tox/
|
||||
rm -rf build/
|
||||
rm -rf dbt.egg-info/
|
||||
rm -f dbt_project.yml
|
||||
rm -rf dist/
|
||||
rm -f htmlcov/*.{css,html,js,json,png}
|
||||
rm -rf logs/
|
||||
rm -rf target/
|
||||
find . -type f -name '*.pyc' -delete
|
||||
find . -type d -name '__pycache__' -depth -delete
|
||||
|
||||
README.md (45 lines changed)
@@ -1,15 +1,28 @@
|
||||
<p align="center">
|
||||
<img src="https://raw.githubusercontent.com/dbt-labs/dbt-core/fa1ea14ddfb1d5ae319d5141844910dd53ab2834/etc/dbt-core.svg" alt="dbt logo" width="750"/>
|
||||
<img src="/etc/dbt-logo-full.svg" alt="dbt logo" width="500"/>
|
||||
</p>
|
||||
<p align="center">
|
||||
<a href="https://github.com/dbt-labs/dbt-core/actions/workflows/main.yml">
|
||||
<img src="https://github.com/dbt-labs/dbt-core/actions/workflows/main.yml/badge.svg?event=push" alt="CI Badge"/>
|
||||
<a href="https://codeclimate.com/github/fishtown-analytics/dbt">
|
||||
<img src="https://codeclimate.com/github/fishtown-analytics/dbt/badges/gpa.svg" alt="Code Climate"/>
|
||||
</a>
|
||||
<a href="https://circleci.com/gh/fishtown-analytics/dbt/tree/master">
|
||||
<img src="https://circleci.com/gh/fishtown-analytics/dbt/tree/master.svg?style=svg" alt="CircleCI" />
|
||||
</a>
|
||||
<a href="https://ci.appveyor.com/project/DrewBanin/dbt/branch/development">
|
||||
<img src="https://ci.appveyor.com/api/projects/status/v01rwd3q91jnwp9m/branch/development?svg=true" alt="AppVeyor" />
|
||||
</a>
|
||||
<a href="https://community.getdbt.com">
|
||||
<img src="https://community.getdbt.com/badge.svg" alt="Slack" />
|
||||
</a>
|
||||
</p>
|
||||
|
||||
**[dbt](https://www.getdbt.com/)** enables data analysts and engineers to transform their data using the same practices that software engineers use to build applications.
|
||||
**[dbt](https://www.getdbt.com/)** (data build tool) enables data analysts and engineers to transform their data using the same practices that software engineers use to build applications.
|
||||
|
||||

|
||||
dbt is the T in ELT. Organize, cleanse, denormalize, filter, rename, and pre-aggregate the raw data in your warehouse so that it's ready for analysis.
|
||||
|
||||

|
||||
|
||||
dbt can be used to [aggregate pageviews into sessions](https://github.com/fishtown-analytics/snowplow), calculate [ad spend ROI](https://github.com/fishtown-analytics/facebook-ads), or report on [email campaign performance](https://github.com/fishtown-analytics/mailchimp).
|
||||
|
||||
## Understanding dbt
|
||||
|
||||
@@ -17,22 +30,28 @@ Analysts using dbt can transform their data by simply writing select statements,
|
||||
|
||||
These select statements, or "models", form a dbt project. Models frequently build on top of one another – dbt makes it easy to [manage relationships](https://docs.getdbt.com/docs/ref) between models, and [visualize these relationships](https://docs.getdbt.com/docs/documentation), as well as assure the quality of your transformations through [testing](https://docs.getdbt.com/docs/testing).
|
||||
|
||||

|
||||

|
||||
|
||||
## Getting started
|
||||
|
||||
- [Install dbt](https://docs.getdbt.com/docs/get-started/installation)
|
||||
- Read the [introduction](https://docs.getdbt.com/docs/introduction/) and [viewpoint](https://docs.getdbt.com/docs/about/viewpoint/)
|
||||
- [Install dbt](https://docs.getdbt.com/docs/installation)
|
||||
- Read the [documentation](https://docs.getdbt.com/).
|
||||
- Productionize your dbt project with [dbt Cloud](https://www.getdbt.com)
|
||||
|
||||
## Join the dbt Community
|
||||
## Find out more
|
||||
|
||||
- Be part of the conversation in the [dbt Community Slack](http://community.getdbt.com/)
|
||||
- Read more on the [dbt Community Discourse](https://discourse.getdbt.com)
|
||||
- Check out the [Introduction to dbt](https://docs.getdbt.com/docs/introduction/).
|
||||
- Read the [dbt Viewpoint](https://docs.getdbt.com/docs/about/viewpoint/).
|
||||
|
||||
## Join thousands of analysts in the dbt community
|
||||
|
||||
- Join the [chat](http://community.getdbt.com/) on Slack.
|
||||
- Find community posts on [dbt Discourse](https://discourse.getdbt.com).
|
||||
|
||||
## Reporting bugs and contributing code
|
||||
|
||||
- Want to report a bug or request a feature? Let us know on [Slack](http://community.getdbt.com/), or open [an issue](https://github.com/dbt-labs/dbt-core/issues/new)
|
||||
- Want to help us build dbt? Check out the [Contributing Guide](https://github.com/dbt-labs/dbt-core/blob/HEAD/CONTRIBUTING.md)
|
||||
- Want to report a bug or request a feature? Let us know on [Slack](http://community.getdbt.com/), or open [an issue](https://github.com/fishtown-analytics/dbt/issues/new).
|
||||
- Want to help us build dbt? Check out the [Contributing Getting Started Guide](/CONTRIBUTING.md)
|
||||
|
||||
## Code of Conduct
|
||||
|
||||
|
||||
RELEASE.md (new file, 92 lines)
@@ -0,0 +1,92 @@
|
||||
### Release Procedure :shipit:

#### Branching Strategy

dbt has three types of branches:

- **Trunks** track the latest release of a minor version of dbt. Historically, we used the `master` branch as the trunk. Each minor version release has a corresponding trunk. For example, the `0.11.x` series of releases has a branch called `0.11.latest`. This allows us to release new patch versions under `0.11` without necessarily needing to pull them into the latest version of dbt.
- **Release Branches** track a specific, not yet complete release of dbt. These releases are codenamed since we don't always know what their semantic version will be. Example: `dev/lucretia-mott` became `0.11.1`.
- **Feature Branches** track individual features and fixes. On completion they should be merged into a release branch.

#### Git & PyPI

1. Update CHANGELOG.md with the most recent changes.
2. If this is a release candidate, create it off of your release branch. If it's an actual release, you must first merge to a master branch. Open a Pull Request in GitHub to merge it into the appropriate trunk (`X.X.latest`).
3. Bump the version using `bumpversion`:
   - Dry run first by running `bumpversion --new-version <desired-version> <part>` and checking the diff. If it looks correct, clean up the changes and move on:
   - Alpha releases: `bumpversion --commit --no-tag --new-version 0.10.2a1 num`
   - Patch releases: `bumpversion --commit --no-tag --new-version 0.10.2 patch`
   - Minor releases: `bumpversion --commit --no-tag --new-version 0.11.0 minor`
   - Major releases: `bumpversion --commit --no-tag --new-version 1.0.0 major`
4. (If this is not a release candidate) Merge to `x.x.latest` and (optionally) `master`.
5. Update the default branch to the next dev release branch.
6. Build source distributions for all packages by running `./scripts/build-sdists.sh`. Note that this will clean out your `dist/` folder, so if you have important stuff in there, don't run it!
7. Deploy to PyPI:
   - `twine upload dist/*`
8. Deploy to Homebrew (see below).
9. Deploy to conda-forge (see below).
10. Create the GitHub release notes (pointing to the changelog).
11. Post to Slack (pointing to the changelog).

After releasing a new version, it's important to merge the changes back into the other outstanding release branches. This avoids merge conflicts moving forward.

In some cases, where the branches have diverged wildly, it's OK to skip this step. But this means that the changes you just released won't be included in future releases.

#### Homebrew Release Process

1. Clone the `homebrew-dbt` repository:

```
git clone git@github.com:fishtown-analytics/homebrew-dbt.git
```

2. For ALL releases (prereleases and version releases), copy the relevant formula. To copy from the latest version release of dbt, do:

```bash
cp Formula/dbt.rb Formula/dbt@{NEW-VERSION}.rb
```

To copy from a different version, simply copy the corresponding file.

3. Open the file, and edit the following:
   - the name of the Ruby class: this is important, since Homebrew won't function properly if the class name is wrong. Check historical versions to figure out the right name.
   - under the `bottle` section, remove all of the hashes (lines starting with `sha256`).

4. Create a **Python 3.7** virtualenv, activate it, and then install two packages: `homebrew-pypi-poet`, and the version of dbt you are preparing. I use:

```
pyenv virtualenv 3.7.0 homebrew-dbt-{VERSION}
pyenv activate homebrew-dbt-{VERSION}
pip install dbt=={VERSION} homebrew-pypi-poet
```

homebrew-pypi-poet is a program that generates a valid Homebrew formula for an installed pip package. Use it to generate a diff against the existing formula, then apply the diff for the dependency packages only -- e.g. it will tell you that `google-api-core` has been updated and that you need to use the latest version.

5. Reinstall, test, and audit dbt. If the test or audit fails, fix the formula (see step 3) and try again.

```bash
brew uninstall --force Formula/{YOUR-FILE}.rb
brew install Formula/{YOUR-FILE}.rb
brew test dbt
brew audit --strict dbt
```

6. Ask Connor to bottle the change (only his laptop can do it!)

#### Conda Forge Release Process

1. Clone the fork of `conda-forge/dbt-feedstock` [here](https://github.com/fishtown-analytics/dbt-feedstock):

```bash
git clone git@github.com:fishtown-analytics/dbt-feedstock.git
```

2. Update the version and sha256 in `recipe/meta.yml`. To calculate the sha256, run:

```bash
wget https://github.com/fishtown-analytics/dbt/archive/v{version}.tar.gz
openssl sha256 v{version}.tar.gz
```

3. Push the changes and create a PR against `conda-forge/dbt-feedstock`.

4. Confirm that all automated conda-forge tests are passing.
azure-pipelines.yml (new file, 154 lines)
@@ -0,0 +1,154 @@
|
||||
# Python package
|
||||
# Create and test a Python package on multiple Python versions.
|
||||
# Add steps that analyze code, save the dist with the build record, publish to a PyPI-compatible index, and more:
|
||||
# https://docs.microsoft.com/azure/devops/pipelines/languages/python
|
||||
|
||||
trigger:
|
||||
branches:
|
||||
include:
|
||||
- master
|
||||
- dev/*
|
||||
- pr/*
|
||||
|
||||
jobs:
|
||||
- job: UnitTest
|
||||
pool:
|
||||
vmImage: 'vs2017-win2016'
|
||||
steps:
|
||||
- task: UsePythonVersion@0
|
||||
inputs:
|
||||
versionSpec: '3.7'
|
||||
architecture: 'x64'
|
||||
|
||||
- script: python -m pip install --upgrade pip && pip install tox
|
||||
displayName: 'Install dependencies'
|
||||
|
||||
- script: python -m tox -e pywin-unit
|
||||
displayName: Run unit tests
|
||||
|
||||
- job: PostgresIntegrationTest
|
||||
pool:
|
||||
vmImage: 'vs2017-win2016'
|
||||
dependsOn: UnitTest
|
||||
|
||||
steps:
|
||||
- pwsh: |
|
||||
$serviceName = Get-Service -Name postgresql*
|
||||
Set-Service -InputObject $serviceName -StartupType Automatic
|
||||
Start-Service -InputObject $serviceName
|
||||
|
||||
& $env:PGBIN\createdb.exe -U postgres dbt
|
||||
& $env:PGBIN\psql.exe -U postgres -c "CREATE ROLE root WITH PASSWORD 'password';"
|
||||
& $env:PGBIN\psql.exe -U postgres -c "ALTER ROLE root WITH LOGIN;"
|
||||
& $env:PGBIN\psql.exe -U postgres -c "GRANT CREATE, CONNECT ON DATABASE dbt TO root WITH GRANT OPTION;"
|
||||
& $env:PGBIN\psql.exe -U postgres -c "CREATE ROLE noaccess WITH PASSWORD 'password' NOSUPERUSER;"
|
||||
& $env:PGBIN\psql.exe -U postgres -c "ALTER ROLE noaccess WITH LOGIN;"
|
||||
& $env:PGBIN\psql.exe -U postgres -c "GRANT CONNECT ON DATABASE dbt TO noaccess;"
|
||||
displayName: Install postgresql and set up database
|
||||
|
||||
- task: UsePythonVersion@0
|
||||
inputs:
|
||||
versionSpec: '3.7'
|
||||
architecture: 'x64'
|
||||
|
||||
- script: python -m pip install --upgrade pip && pip install tox
|
||||
displayName: 'Install dependencies'
|
||||
|
||||
- script: python -m tox -e pywin-postgres
|
||||
displayName: Run integration tests
|
||||
|
||||
# These three are all similar except secure environment variables, which MUST be passed along to their tasks,
|
||||
# but there's probably a better way to do this!
|
||||
- job: SnowflakeIntegrationTest
|
||||
pool:
|
||||
vmImage: 'vs2017-win2016'
|
||||
dependsOn: PostgresIntegrationTest
|
||||
condition: succeeded()
|
||||
steps:
|
||||
- task: UsePythonVersion@0
|
||||
inputs:
|
||||
versionSpec: '3.7'
|
||||
architecture: 'x64'
|
||||
|
||||
- script: python -m pip install --upgrade pip && pip install tox
|
||||
displayName: 'Install dependencies'
|
||||
|
||||
- script: python -m tox -e pywin-snowflake
|
||||
env:
|
||||
SNOWFLAKE_TEST_ACCOUNT: $(SNOWFLAKE_TEST_ACCOUNT)
|
||||
SNOWFLAKE_TEST_PASSWORD: $(SNOWFLAKE_TEST_PASSWORD)
|
||||
SNOWFLAKE_TEST_USER: $(SNOWFLAKE_TEST_USER)
|
||||
SNOWFLAKE_TEST_WAREHOUSE: $(SNOWFLAKE_TEST_WAREHOUSE)
|
||||
SNOWFLAKE_TEST_OAUTH_REFRESH_TOKEN: $(SNOWFLAKE_TEST_OAUTH_REFRESH_TOKEN)
|
||||
SNOWFLAKE_TEST_OAUTH_CLIENT_ID: $(SNOWFLAKE_TEST_OAUTH_CLIENT_ID)
|
||||
SNOWFLAKE_TEST_OAUTH_CLIENT_SECRET: $(SNOWFLAKE_TEST_OAUTH_CLIENT_SECRET)
|
||||
displayName: Run integration tests
|
||||
|
||||
- job: BigQueryIntegrationTest
|
||||
pool:
|
||||
vmImage: 'vs2017-win2016'
|
||||
dependsOn: PostgresIntegrationTest
|
||||
condition: succeeded()
|
||||
steps:
|
||||
- task: UsePythonVersion@0
|
||||
inputs:
|
||||
versionSpec: '3.7'
|
||||
architecture: 'x64'
|
||||
- script: python -m pip install --upgrade pip && pip install tox
|
||||
displayName: 'Install dependencies'
|
||||
- script: python -m tox -e pywin-bigquery
|
||||
env:
|
||||
BIGQUERY_SERVICE_ACCOUNT_JSON: $(BIGQUERY_SERVICE_ACCOUNT_JSON)
|
||||
displayName: Run integration tests
|
||||
|
||||
- job: RedshiftIntegrationTest
|
||||
pool:
|
||||
vmImage: 'vs2017-win2016'
|
||||
dependsOn: PostgresIntegrationTest
|
||||
condition: succeeded()
|
||||
steps:
|
||||
- task: UsePythonVersion@0
|
||||
inputs:
|
||||
versionSpec: '3.7'
|
||||
architecture: 'x64'
|
||||
|
||||
- script: python -m pip install --upgrade pip && pip install tox
|
||||
displayName: 'Install dependencies'
|
||||
|
||||
- script: python -m tox -e pywin-redshift
|
||||
env:
|
||||
REDSHIFT_TEST_DBNAME: $(REDSHIFT_TEST_DBNAME)
|
||||
REDSHIFT_TEST_PASS: $(REDSHIFT_TEST_PASS)
|
||||
REDSHIFT_TEST_USER: $(REDSHIFT_TEST_USER)
|
||||
REDSHIFT_TEST_PORT: $(REDSHIFT_TEST_PORT)
|
||||
REDSHIFT_TEST_HOST: $(REDSHIFT_TEST_HOST)
|
||||
displayName: Run integration tests
|
||||
|
||||
- job: BuildWheel
|
||||
pool:
|
||||
vmImage: 'vs2017-win2016'
|
||||
dependsOn:
|
||||
- UnitTest
|
||||
- PostgresIntegrationTest
|
||||
- RedshiftIntegrationTest
|
||||
- SnowflakeIntegrationTest
|
||||
- BigQueryIntegrationTest
|
||||
condition: succeeded()
|
||||
steps:
|
||||
- task: UsePythonVersion@0
|
||||
inputs:
|
||||
versionSpec: '3.7'
|
||||
architecture: 'x64'
|
||||
- script: python -m pip install --upgrade pip setuptools && python -m pip install -r requirements.txt && python -m pip install -r dev_requirements.txt
|
||||
displayName: Install dependencies
|
||||
- task: ShellScript@2
|
||||
inputs:
|
||||
scriptPath: scripts/build-wheels.sh
|
||||
- task: CopyFiles@2
|
||||
inputs:
|
||||
contents: 'dist\?(*.whl|*.tar.gz)'
|
||||
TargetFolder: '$(Build.ArtifactStagingDirectory)'
|
||||
- task: PublishBuildArtifacts@1
|
||||
inputs:
|
||||
pathtoPublish: '$(Build.ArtifactStagingDirectory)'
|
||||
artifactName: dists
|
||||
converter.py (new executable file, 73 lines)
@@ -0,0 +1,73 @@
|
||||
#!/usr/bin/env python
# Merges a new package version into the dbt registry's packages JSON and
# prints the updated index to stdout.
import json
import yaml
import sys
import argparse
from datetime import datetime, timezone
import dbt.clients.registry as registry


def yaml_type(fname):
    with open(fname) as f:
        return yaml.safe_load(f)


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--project", type=yaml_type, default="dbt_project.yml")
    parser.add_argument("--namespace", required=True)
    return parser.parse_args()


def get_full_name(args):
    return "{}/{}".format(args.namespace, args.project["name"])


def init_project_in_packages(args, packages):
    # Create a skeleton entry for this package if the registry doesn't know it yet.
    full_name = get_full_name(args)
    if full_name not in packages:
        packages[full_name] = {
            "name": args.project["name"],
            "namespace": args.namespace,
            "latest": args.project["version"],
            "assets": {},
            "versions": {},
        }
    return packages[full_name]


def add_version_to_package(args, project_json):
    project_json["versions"][args.project["version"]] = {
        "id": "{}/{}".format(get_full_name(args), args.project["version"]),
        "name": args.project["name"],
        "version": args.project["version"],
        "description": "",
        "published_at": datetime.now(timezone.utc).astimezone().isoformat(),
        "packages": args.project.get("packages") or [],
        "works_with": [],
        "_source": {
            "type": "github",
            "url": "",
            "readme": "",
        },
        "downloads": {
            "tarball": "",
            "format": "tgz",
            "sha1": "",
        },
    }


def main():
    args = parse_args()
    packages = registry.packages()
    project_json = init_project_in_packages(args, packages)
    if args.project["version"] in project_json["versions"]:
        print(
            "Version {} already in packages JSON".format(args.project["version"]),
            file=sys.stderr,
        )
        raise SystemExit(1)
    add_version_to_package(args, project_json)
    print(json.dumps(packages, indent=2))


if __name__ == "__main__":
    main()
|
||||
@@ -1,2 +1 @@
|
||||
recursive-include dbt/include *.py *.sql *.yml *.html *.md .gitkeep .gitignore
|
||||
include dbt/py.typed
|
||||
recursive-include dbt/include *.py *.sql *.yml *.html *.md
|
||||
|
||||
@@ -1,39 +0,0 @@
|
||||
<p align="center">
|
||||
<img src="https://raw.githubusercontent.com/dbt-labs/dbt-core/fa1ea14ddfb1d5ae319d5141844910dd53ab2834/etc/dbt-core.svg" alt="dbt logo" width="750"/>
|
||||
</p>
|
||||
<p align="center">
|
||||
<a href="https://github.com/dbt-labs/dbt-core/actions/workflows/main.yml">
|
||||
<img src="https://github.com/dbt-labs/dbt-core/actions/workflows/main.yml/badge.svg?event=push" alt="CI Badge"/>
|
||||
</a>
|
||||
</p>
|
||||
|
||||
**[dbt](https://www.getdbt.com/)** enables data analysts and engineers to transform their data using the same practices that software engineers use to build applications.
|
||||
|
||||

|
||||
|
||||
## Understanding dbt
|
||||
|
||||
Analysts using dbt can transform their data by simply writing select statements, while dbt handles turning these statements into tables and views in a data warehouse.
|
||||
|
||||
These select statements, or "models", form a dbt project. Models frequently build on top of one another – dbt makes it easy to [manage relationships](https://docs.getdbt.com/docs/ref) between models, and [visualize these relationships](https://docs.getdbt.com/docs/documentation), as well as assure the quality of your transformations through [testing](https://docs.getdbt.com/docs/testing).
|
||||
|
||||

|
||||
|
||||
## Getting started
|
||||
|
||||
- [Install dbt](https://docs.getdbt.com/docs/installation)
|
||||
- Read the [introduction](https://docs.getdbt.com/docs/introduction/) and [viewpoint](https://docs.getdbt.com/docs/about/viewpoint/)
|
||||
|
||||
## Join the dbt Community
|
||||
|
||||
- Be part of the conversation in the [dbt Community Slack](http://community.getdbt.com/)
|
||||
- Read more on the [dbt Community Discourse](https://discourse.getdbt.com)
|
||||
|
||||
## Reporting bugs and contributing code
|
||||
|
||||
- Want to report a bug or request a feature? Let us know on [Slack](http://community.getdbt.com/), or open [an issue](https://github.com/dbt-labs/dbt-core/issues/new)
|
||||
- Want to help us build dbt? Check out the [Contributing Guide](https://github.com/dbt-labs/dbt-core/blob/HEAD/CONTRIBUTING.md)
|
||||
|
||||
## Code of Conduct
|
||||
|
||||
Everyone interacting in the dbt project's codebases, issue trackers, chat rooms, and mailing lists is expected to follow the [dbt Code of Conduct](https://community.getdbt.com/code-of-conduct).
|
||||
@@ -1,60 +0,0 @@
|
||||
# core/dbt directory README
|
||||
|
||||
## The following are individual files in this directory.
|
||||
|
||||
### compilation.py
|
||||
|
||||
### constants.py
|
||||
|
||||
### dataclass_schema.py
|
||||
|
||||
### deprecations.py
|
||||
|
||||
### exceptions.py
|
||||
|
||||
### flags.py
|
||||
|
||||
### helper_types.py
|
||||
|
||||
### hooks.py
|
||||
|
||||
### lib.py
|
||||
|
||||
### links.py
|
||||
|
||||
### logger.py
|
||||
|
||||
### main.py
|
||||
|
||||
### node_types.py
|
||||
|
||||
### profiler.py
|
||||
|
||||
### selected_resources.py
|
||||
|
||||
### semver.py
|
||||
|
||||
### tracking.py
|
||||
|
||||
### ui.py
|
||||
|
||||
### utils.py
|
||||
|
||||
### version.py
|
||||
|
||||
|
||||
## The subdirectories will be documented in a README in the subdirectory
|
||||
* adapters
|
||||
* cli
|
||||
* clients
|
||||
* config
|
||||
* context
|
||||
* contracts
|
||||
* deps
|
||||
* docs
|
||||
* events
|
||||
* graph
|
||||
* include
|
||||
* parser
|
||||
* task
|
||||
* tests
|
||||
@@ -1,7 +0,0 @@
|
||||
# N.B.
# This will add to the package’s __path__ all subdirectories of directories on sys.path named after the package which effectively combines both modules into a single namespace (dbt.adapters)
# The matching statement is in plugins/postgres/dbt/__init__.py

from pkgutil import extend_path

__path__ = extend_path(__path__, __name__)
|
||||
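To make the comment above concrete, here is a minimal, hypothetical illustration of the namespace merge at runtime. It assumes both dbt-core and the postgres plugin (which ships `dbt.adapters.postgres`) are installed; the printed paths are examples, not literal output.

```python
# Hypothetical illustration of the extend_path() trick described above.
import dbt
import dbt.adapters

# Because core and the postgres plugin each run extend_path() in their own
# dbt/__init__.py, the package __path__ ends up listing one directory per
# distribution, and submodules from either tree import under a single namespace.
print(dbt.__path__)           # e.g. ['.../core/dbt', '.../plugins/postgres/dbt']
print(dbt.adapters.__path__)  # likewise, one entry per installed distribution

import dbt.adapters.postgres  # resolves even though it ships in the plugin tree
```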
@@ -1,30 +0,0 @@
|
||||
# Adapters README

The Adapters module is responsible for defining database connection methods, caching information from databases, how relations are defined, and the two major connection types we have: base and sql.

# Directories

## `base`

Defines the base implementation Adapters can use to build out full functionality.

## `sql`

Defines a sql implementation for adapters that initially inherits the above base implementation, and comes with some premade methods and macros that can be overwritten as needed per adapter. (This is the most common type of adapter.)

# Files

## `cache.py`

Caches information from the database.

## `factory.py`

Defines how we generate adapter objects.

## `protocol.py`

Defines various interfaces for various adapter objects. Helps mypy correctly resolve methods.

## `reference_keys.py`

Configures a naming scheme for cache elements to be universal.
|
||||
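As a rough sketch of how the `base` and `sql` layers described above fit together: a new warehouse adapter typically subclasses the SQL implementation and overrides only what differs. This is a hedged illustration, not dbt-documented guidance; the `MyDB*` names are hypothetical, and a real adapter must implement more than is shown here.

```python
# Hedged sketch only: a hypothetical adapter built on the `sql` layer.
from dbt.adapters.sql import SQLAdapter, SQLConnectionManager


class MyDBConnectionManager(SQLConnectionManager):
    TYPE = "mydb"  # adapter type string used in profiles and logging


class MyDBAdapter(SQLAdapter):
    # Wire the adapter to its connection manager; most SQL behaviour is
    # inherited from SQLAdapter and the base implementation it builds on.
    ConnectionManager = MyDBConnectionManager

    @classmethod
    def date_function(cls) -> str:
        # SQL expression this warehouse uses for "current timestamp".
        return "now()"
```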
@@ -1,7 +0,0 @@
|
||||
# N.B.
# This will add to the package’s __path__ all subdirectories of directories on sys.path named after the package which effectively combines both modules into a single namespace (dbt.adapters)
# The matching statement is in plugins/postgres/dbt/adapters/__init__.py

from pkgutil import extend_path

__path__ = extend_path(__path__, __name__)
|
||||
@@ -1,10 +0,0 @@
|
||||
|
||||
## Base adapters

### impl.py

The class `SQLAdapter` in [base/impl.py](https://github.com/dbt-labs/dbt-core/blob/main/core/dbt/adapters/base/impl.py) is a (mostly) abstract object that adapter objects inherit from. The base class scaffolds out methods that every adapter project usually should implement for smooth communication between dbt and the database.

Some target databases require more or fewer methods -- it all depends on what the warehouse's feature set is.

Look into the class for function-level comments.
|
||||
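That scaffolding also includes the `@available` decorator (exported from `dbt.adapters.base`), which marks an adapter method as callable from Jinja at run time. A hedged sketch follows; `MyDBAdapter` and `get_partitions_metadata` are hypothetical names, and the abstract methods a real subclass must implement are omitted.

```python
# Hedged sketch: exposing an adapter method to Jinja via @available.
from dbt.adapters.base import BaseAdapter, available


class MyDBAdapter(BaseAdapter):  # hypothetical adapter; abstract methods omitted
    @available
    def get_partitions_metadata(self, table: str) -> dict:
        # Because of @available, a macro can call
        # {{ adapter.get_partitions_metadata('my_table') }} during a run.
        return {"table": table, "partitions": []}  # placeholder result
```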
@@ -1,19 +1,14 @@
|
||||
# these are all just exports, #noqa them so flake8 will be happy
|
||||
|
||||
# TODO: Should we still include this in the `adapters` namespace?
|
||||
from dbt.contracts.connection import Credentials # noqa: F401
|
||||
from dbt.adapters.base.meta import available # noqa: F401
|
||||
from dbt.adapters.base.connections import BaseConnectionManager # noqa: F401
|
||||
from dbt.adapters.base.relation import ( # noqa: F401
|
||||
from dbt.contracts.connection import Credentials # noqa
|
||||
from dbt.adapters.base.meta import available # noqa
|
||||
from dbt.adapters.base.connections import BaseConnectionManager # noqa
|
||||
from dbt.adapters.base.relation import ( # noqa
|
||||
BaseRelation,
|
||||
RelationType,
|
||||
SchemaSearchMap,
|
||||
)
|
||||
from dbt.adapters.base.column import Column # noqa: F401
|
||||
from dbt.adapters.base.impl import ( # noqa: F401
|
||||
AdapterConfig,
|
||||
BaseAdapter,
|
||||
PythonJobHelper,
|
||||
ConstraintSupport,
|
||||
)
|
||||
from dbt.adapters.base.plugin import AdapterPlugin # noqa: F401
|
||||
from dbt.adapters.base.column import Column # noqa
|
||||
from dbt.adapters.base.impl import AdapterConfig, BaseAdapter # noqa
|
||||
from dbt.adapters.base.plugin import AdapterPlugin # noqa
|
||||
|
||||
@@ -1,18 +1,19 @@
|
||||
from dataclasses import dataclass
|
||||
import re
|
||||
from typing import Dict, ClassVar, Any, Optional
|
||||
|
||||
from dbt.exceptions import DbtRuntimeError
|
||||
from hologram import JsonSchemaMixin
|
||||
from dbt.exceptions import RuntimeException
|
||||
|
||||
from typing import Dict, ClassVar, Any, Optional
|
||||
|
||||
|
||||
@dataclass
|
||||
class Column:
|
||||
class Column(JsonSchemaMixin):
|
||||
TYPE_LABELS: ClassVar[Dict[str, str]] = {
|
||||
"STRING": "TEXT",
|
||||
"TIMESTAMP": "TIMESTAMP",
|
||||
"FLOAT": "FLOAT",
|
||||
"INTEGER": "INT",
|
||||
"BOOLEAN": "BOOLEAN",
|
||||
'STRING': 'TEXT',
|
||||
'TIMESTAMP': 'TIMESTAMP',
|
||||
'FLOAT': 'FLOAT',
|
||||
'INTEGER': 'INT'
|
||||
}
|
||||
column: str
|
||||
dtype: str
|
||||
@@ -25,7 +26,7 @@ class Column:
|
||||
return cls.TYPE_LABELS.get(dtype.upper(), dtype)
|
||||
|
||||
@classmethod
|
||||
def create(cls, name, label_or_dtype: str) -> "Column":
|
||||
def create(cls, name, label_or_dtype: str) -> 'Column':
|
||||
column_type = cls.translate_type(label_or_dtype)
|
||||
return cls(name, column_type)
|
||||
|
||||
@@ -40,14 +41,16 @@ class Column:
|
||||
@property
|
||||
def data_type(self) -> str:
|
||||
if self.is_string():
|
||||
return self.string_type(self.string_size())
|
||||
return Column.string_type(self.string_size())
|
||||
elif self.is_numeric():
|
||||
return self.numeric_type(self.dtype, self.numeric_precision, self.numeric_scale)
|
||||
return Column.numeric_type(self.dtype, self.numeric_precision,
|
||||
self.numeric_scale)
|
||||
else:
|
||||
return self.dtype
|
||||
|
||||
def is_string(self) -> bool:
|
||||
return self.dtype.lower() in ["text", "character varying", "character", "varchar"]
|
||||
return self.dtype.lower() in ['text', 'character varying', 'character',
|
||||
'varchar']
|
||||
|
||||
def is_number(self):
|
||||
return any([self.is_integer(), self.is_numeric(), self.is_float()])
|
||||
@@ -55,46 +58,33 @@ class Column:
|
||||
def is_float(self):
|
||||
return self.dtype.lower() in [
|
||||
# floats
|
||||
"real",
|
||||
"float4",
|
||||
"float",
|
||||
"double precision",
|
||||
"float8",
|
||||
"double",
|
||||
'real', 'float4', 'float', 'double precision', 'float8'
|
||||
]
|
||||
|
||||
def is_integer(self) -> bool:
|
||||
return self.dtype.lower() in [
|
||||
# real types
|
||||
"smallint",
|
||||
"integer",
|
||||
"bigint",
|
||||
"smallserial",
|
||||
"serial",
|
||||
"bigserial",
|
||||
'smallint', 'integer', 'bigint',
|
||||
'smallserial', 'serial', 'bigserial',
|
||||
# aliases
|
||||
"int2",
|
||||
"int4",
|
||||
"int8",
|
||||
"serial2",
|
||||
"serial4",
|
||||
"serial8",
|
||||
'int2', 'int4', 'int8',
|
||||
'serial2', 'serial4', 'serial8',
|
||||
]
|
||||
|
||||
def is_numeric(self) -> bool:
|
||||
return self.dtype.lower() in ["numeric", "decimal"]
|
||||
return self.dtype.lower() in ['numeric', 'decimal']
|
||||
|
||||
def string_size(self) -> int:
|
||||
if not self.is_string():
|
||||
raise DbtRuntimeError("Called string_size() on non-string field!")
|
||||
raise RuntimeException("Called string_size() on non-string field!")
|
||||
|
||||
if self.dtype == "text" or self.char_size is None:
|
||||
if self.dtype == 'text' or self.char_size is None:
|
||||
# char_size should never be None. Handle it reasonably just in case
|
||||
return 256
|
||||
else:
|
||||
return int(self.char_size)
|
||||
|
||||
def can_expand_to(self, other_column: "Column") -> bool:
|
||||
def can_expand_to(self, other_column: 'Column') -> bool:
|
||||
"""returns True if this column can be expanded to the size of the
|
||||
other column"""
|
||||
if not self.is_string() or not other_column.is_string():
|
||||
@@ -122,10 +112,12 @@ class Column:
|
||||
return "<Column {} ({})>".format(self.name, self.data_type)
|
||||
|
||||
@classmethod
|
||||
def from_description(cls, name: str, raw_data_type: str) -> "Column":
|
||||
match = re.match(r"([^(]+)(\([^)]+\))?", raw_data_type)
|
||||
def from_description(cls, name: str, raw_data_type: str) -> 'Column':
|
||||
match = re.match(r'([^(]+)(\([^)]+\))?', raw_data_type)
|
||||
if match is None:
|
||||
raise DbtRuntimeError(f'Could not interpret data type "{raw_data_type}"')
|
||||
raise RuntimeException(
|
||||
f'Could not interpret data type "{raw_data_type}"'
|
||||
)
|
||||
data_type, size_info = match.groups()
|
||||
char_size = None
|
||||
numeric_precision = None
|
||||
@@ -133,12 +125,12 @@ class Column:
|
||||
if size_info is not None:
|
||||
# strip out the parentheses
|
||||
size_info = size_info[1:-1]
|
||||
parts = size_info.split(",")
|
||||
parts = size_info.split(',')
|
||||
if len(parts) == 1:
|
||||
try:
|
||||
char_size = int(parts[0])
|
||||
except ValueError:
|
||||
raise DbtRuntimeError(
|
||||
raise RuntimeException(
|
||||
f'Could not interpret data_type "{raw_data_type}": '
|
||||
f'could not convert "{parts[0]}" to an integer'
|
||||
)
|
||||
@@ -146,16 +138,18 @@ class Column:
|
||||
try:
|
||||
numeric_precision = int(parts[0])
|
||||
except ValueError:
|
||||
raise DbtRuntimeError(
|
||||
raise RuntimeException(
|
||||
f'Could not interpret data_type "{raw_data_type}": '
|
||||
f'could not convert "{parts[0]}" to an integer'
|
||||
)
|
||||
try:
|
||||
numeric_scale = int(parts[1])
|
||||
except ValueError:
|
||||
raise DbtRuntimeError(
|
||||
raise RuntimeException(
|
||||
f'Could not interpret data_type "{raw_data_type}": '
|
||||
f'could not convert "{parts[1]}" to an integer'
|
||||
)
|
||||
|
||||
return cls(name, data_type, char_size, numeric_precision, numeric_scale)
|
||||
return cls(
|
||||
name, data_type, char_size, numeric_precision, numeric_scale
|
||||
)
|
||||
|
||||
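For orientation, a small usage sketch of the `Column` helpers whose diff appears above. The exact rendered type strings depend on the adapter's `Column` subclass, so the inline comments show representative values only.

```python
# Hedged usage sketch for dbt.adapters.base.Column (values shown are indicative).
from dbt.adapters.base import Column

col = Column.from_description("email", "character varying(64)")
print(col.is_string())    # True
print(col.string_size())  # 64
print(col.data_type)      # a string type sized to 64, e.g. "character varying(64)"

amount = Column.from_description("amount", "numeric(12,2)")
print(amount.is_numeric())  # True
print(amount.data_type)     # e.g. "numeric(12,2)"
```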
@@ -1,59 +1,24 @@
|
||||
import abc
|
||||
import os
|
||||
from time import sleep
|
||||
import sys
|
||||
import traceback
|
||||
|
||||
# multiprocessing.RLock is a function returning this type
|
||||
from multiprocessing.synchronize import RLock
|
||||
from threading import get_ident
|
||||
from typing import (
|
||||
Any,
|
||||
Dict,
|
||||
Tuple,
|
||||
Hashable,
|
||||
Optional,
|
||||
ContextManager,
|
||||
List,
|
||||
Type,
|
||||
Union,
|
||||
Iterable,
|
||||
Callable,
|
||||
Dict, Tuple, Hashable, Optional, ContextManager, List
|
||||
)
|
||||
|
||||
import agate
|
||||
|
||||
import dbt.exceptions
|
||||
from dbt.contracts.connection import (
|
||||
Connection,
|
||||
Identifier,
|
||||
ConnectionState,
|
||||
AdapterRequiredConfig,
|
||||
LazyHandle,
|
||||
AdapterResponse,
|
||||
Connection, Identifier, ConnectionState, AdapterRequiredConfig, LazyHandle
|
||||
)
|
||||
from dbt.contracts.graph.manifest import Manifest
|
||||
from dbt.adapters.base.query_headers import (
|
||||
MacroQueryStringSetter,
|
||||
)
|
||||
from dbt.events import AdapterLogger
|
||||
from dbt.events.functions import fire_event
|
||||
from dbt.events.types import (
|
||||
NewConnection,
|
||||
ConnectionReused,
|
||||
ConnectionLeftOpenInCleanup,
|
||||
ConnectionLeftOpen,
|
||||
ConnectionClosedInCleanup,
|
||||
ConnectionClosed,
|
||||
Rollback,
|
||||
RollbackFailed,
|
||||
)
|
||||
from dbt.events.contextvars import get_node_info
|
||||
from dbt.logger import GLOBAL_LOGGER as logger
|
||||
from dbt import flags
|
||||
from dbt.utils import cast_to_str
|
||||
|
||||
SleepTime = Union[int, float] # As taken by time.sleep.
|
||||
AdapterHandle = Any # Adapter connection handle objects can be any class.
|
||||
|
||||
|
||||
class BaseConnectionManager(metaclass=abc.ABCMeta):
|
||||
@@ -69,7 +34,6 @@ class BaseConnectionManager(metaclass=abc.ABCMeta):
|
||||
You must also set the 'TYPE' class attribute with a class-unique constant
|
||||
string.
|
||||
"""
|
||||
|
||||
TYPE: str = NotImplemented
|
||||
|
||||
def __init__(self, profile: AdapterRequiredConfig):
|
||||
@@ -91,14 +55,16 @@ class BaseConnectionManager(metaclass=abc.ABCMeta):
|
||||
key = self.get_thread_identifier()
|
||||
with self.lock:
|
||||
if key not in self.thread_connections:
|
||||
raise dbt.exceptions.InvalidConnectionError(key, list(self.thread_connections))
|
||||
raise dbt.exceptions.InvalidConnectionException(
|
||||
key, list(self.thread_connections)
|
||||
)
|
||||
return self.thread_connections[key]
|
||||
|
||||
def set_thread_connection(self, conn: Connection) -> None:
|
||||
key = self.get_thread_identifier()
|
||||
if key in self.thread_connections:
|
||||
raise dbt.exceptions.DbtInternalError(
|
||||
"In set_thread_connection, existing connection exists for {}"
|
||||
raise dbt.exceptions.InternalException(
|
||||
'In set_thread_connection, existing connection exists for {}'
|
||||
)
|
||||
self.thread_connections[key] = conn
|
||||
|
||||
@@ -137,148 +103,60 @@ class BaseConnectionManager(metaclass=abc.ABCMeta):
|
||||
:return: A context manager that handles exceptions raised by the
|
||||
underlying database.
|
||||
"""
|
||||
raise dbt.exceptions.NotImplementedError(
|
||||
"`exception_handler` is not implemented for this adapter!"
|
||||
)
|
||||
raise dbt.exceptions.NotImplementedException(
|
||||
'`exception_handler` is not implemented for this adapter!')
|
||||
|
||||
def set_connection_name(self, name: Optional[str] = None) -> Connection:
|
||||
"""Called by 'acquire_connection' in BaseAdapter, which is called by
|
||||
'connection_named', called by 'connection_for(node)'.
|
||||
Creates a connection for this thread if one doesn't already
|
||||
exist, and will rename an existing connection."""
|
||||
conn_name: str
|
||||
if name is None:
|
||||
# if a name isn't specified, we'll re-use a single handle
|
||||
# named 'master'
|
||||
conn_name = 'master'
|
||||
else:
|
||||
if not isinstance(name, str):
|
||||
raise dbt.exceptions.CompilerException(
|
||||
f'For connection name, got {name} - not a string!'
|
||||
)
|
||||
assert isinstance(name, str)
|
||||
conn_name = name
|
||||
|
||||
conn_name: str = "master" if name is None else name
|
||||
|
||||
# Get a connection for this thread
|
||||
conn = self.get_if_exists()
|
||||
|
||||
if conn and conn.name == conn_name and conn.state == "open":
|
||||
# Found a connection and nothing to do, so just return it
|
||||
return conn
|
||||
|
||||
if conn is None:
|
||||
# Create a new connection
|
||||
conn = Connection(
|
||||
type=Identifier(self.TYPE),
|
||||
name=conn_name,
|
||||
name=None,
|
||||
state=ConnectionState.INIT,
|
||||
transaction_open=False,
|
||||
handle=None,
|
||||
credentials=self.profile.credentials,
|
||||
credentials=self.profile.credentials
|
||||
)
|
||||
conn.handle = LazyHandle(self.open)
|
||||
# Add the connection to thread_connections for this thread
|
||||
self.set_thread_connection(conn)
|
||||
fire_event(
|
||||
NewConnection(conn_name=conn_name, conn_type=self.TYPE, node_info=get_node_info())
|
||||
)
|
||||
else: # existing connection either wasn't open or didn't have the right name
|
||||
if conn.state != "open":
|
||||
conn.handle = LazyHandle(self.open)
|
||||
if conn.name != conn_name:
|
||||
orig_conn_name: str = conn.name or ""
|
||||
conn.name = conn_name
|
||||
fire_event(ConnectionReused(orig_conn_name=orig_conn_name, conn_name=conn_name))
|
||||
|
||||
return conn
|
||||
if conn.name == conn_name and conn.state == 'open':
|
||||
return conn
|
||||
|
||||
@classmethod
|
||||
def retry_connection(
|
||||
cls,
|
||||
connection: Connection,
|
||||
connect: Callable[[], AdapterHandle],
|
||||
logger: AdapterLogger,
|
||||
retryable_exceptions: Iterable[Type[Exception]],
|
||||
retry_limit: int = 1,
|
||||
retry_timeout: Union[Callable[[int], SleepTime], SleepTime] = 1,
|
||||
_attempts: int = 0,
|
||||
) -> Connection:
|
||||
"""Given a Connection, set its handle by calling connect.
|
||||
|
||||
The calls to connect will be retried up to retry_limit times to deal with transient
|
||||
connection errors. By default, one retry will be attempted if retryable_exceptions is set.
|
||||
|
||||
:param Connection connection: An instance of a Connection that needs a handle to be set,
|
||||
usually when attempting to open it.
|
||||
:param connect: A callable that returns the appropriate connection handle for a
|
||||
given adapter. This callable will be retried retry_limit times if a subclass of any
|
||||
Exception in retryable_exceptions is raised by connect.
|
||||
:type connect: Callable[[], AdapterHandle]
|
||||
:param AdapterLogger logger: A logger to emit messages on retry attempts or errors. When
|
||||
handling expected errors, we call debug, and call warning on unexpected errors or when
|
||||
all retry attempts have been exhausted.
|
||||
:param retryable_exceptions: An iterable of exception classes that if raised by
|
||||
connect should trigger a retry.
|
||||
:type retryable_exceptions: Iterable[Type[Exception]]
|
||||
:param int retry_limit: How many times to retry the call to connect. If this limit
|
||||
is exceeded before a successful call, a FailedToConnectError will be raised.
|
||||
Must be non-negative.
|
||||
:param retry_timeout: Time to wait between attempts to connect. Can also take a
|
||||
Callable that takes the number of attempts so far, beginning at 0, and returns an int
|
||||
or float to be passed to time.sleep.
|
||||
:type retry_timeout: Union[Callable[[int], SleepTime], SleepTime] = 1
|
||||
:param int _attempts: Parameter used to keep track of the number of attempts in calling the
|
||||
connect function across recursive calls. Passed as an argument to retry_timeout if it
|
||||
is a Callable. This parameter should not be set by the initial caller.
|
||||
:raises dbt.exceptions.FailedToConnectError: Upon exhausting all retry attempts without
|
||||
successfully acquiring a handle.
|
||||
:return: The given connection with its appropriate state and handle attributes set
|
||||
depending on whether we successfully acquired a handle or not.
|
||||
"""
|
||||
timeout = retry_timeout(_attempts) if callable(retry_timeout) else retry_timeout
|
||||
if timeout < 0:
|
||||
raise dbt.exceptions.FailedToConnectError(
|
||||
"retry_timeout cannot be negative or return a negative time."
|
||||
)
|
||||
|
||||
if retry_limit < 0 or retry_limit > sys.getrecursionlimit():
|
||||
# This guard is not perfect; others may add to the recursion limit (e.g. built-ins).
|
||||
connection.handle = None
|
||||
connection.state = ConnectionState.FAIL
|
||||
raise dbt.exceptions.FailedToConnectError("retry_limit cannot be negative")
|
||||
|
||||
try:
|
||||
connection.handle = connect()
|
||||
connection.state = ConnectionState.OPEN
|
||||
return connection
|
||||
|
||||
except tuple(retryable_exceptions) as e:
|
||||
if retry_limit <= 0:
|
||||
connection.handle = None
|
||||
connection.state = ConnectionState.FAIL
|
||||
raise dbt.exceptions.FailedToConnectError(str(e))
|
||||
logger.debug(
|
||||
'Acquiring new {} connection "{}".'.format(self.TYPE, conn_name))
|
||||
|
||||
if conn.state == 'open':
|
||||
logger.debug(
|
||||
f"Got a retryable error when attempting to open a {cls.TYPE} connection.\n"
|
||||
f"{retry_limit} attempts remaining. Retrying in {timeout} seconds.\n"
|
||||
f"Error:\n{e}"
|
||||
'Re-using an available connection from the pool (formerly {}).'
|
||||
.format(conn.name)
|
||||
)
|
||||
else:
|
||||
conn.handle = LazyHandle(self.open)
|
||||
|
||||
sleep(timeout)
|
||||
return cls.retry_connection(
|
||||
connection=connection,
|
||||
connect=connect,
|
||||
logger=logger,
|
||||
retry_limit=retry_limit - 1,
|
||||
retry_timeout=retry_timeout,
|
||||
retryable_exceptions=retryable_exceptions,
|
||||
_attempts=_attempts + 1,
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
connection.handle = None
|
||||
connection.state = ConnectionState.FAIL
|
||||
raise dbt.exceptions.FailedToConnectError(str(e))
|
||||
conn.name = conn_name
|
||||
return conn
|
||||
|
||||
@abc.abstractmethod
|
||||
def cancel_open(self) -> Optional[List[str]]:
|
||||
"""Cancel all open connections on the adapter. (passable)"""
|
||||
raise dbt.exceptions.NotImplementedError(
|
||||
"`cancel_open` is not implemented for this adapter!"
|
||||
raise dbt.exceptions.NotImplementedException(
|
||||
'`cancel_open` is not implemented for this adapter!'
|
||||
)
|
||||
|
||||
@classmethod
|
||||
@abc.abstractmethod
|
||||
@abc.abstractclassmethod
|
||||
def open(cls, connection: Connection) -> Connection:
|
||||
"""Open the given connection on the adapter and return it.
|
||||
|
||||
@@ -288,7 +166,9 @@ class BaseConnectionManager(metaclass=abc.ABCMeta):
|
||||
This should be thread-safe, or hold the lock if necessary. The given
|
||||
connection should not be in either in_use or available.
|
||||
"""
|
||||
raise dbt.exceptions.NotImplementedError("`open` is not implemented for this adapter!")
|
||||
raise dbt.exceptions.NotImplementedException(
|
||||
'`open` is not implemented for this adapter!'
|
||||
)
|
||||
|
||||
def release(self) -> None:
|
||||
with self.lock:
|
||||
@@ -308,10 +188,12 @@ class BaseConnectionManager(metaclass=abc.ABCMeta):
|
||||
def cleanup_all(self) -> None:
|
||||
with self.lock:
|
||||
for connection in self.thread_connections.values():
|
||||
if connection.state not in {"closed", "init"}:
|
||||
fire_event(ConnectionLeftOpenInCleanup(conn_name=cast_to_str(connection.name)))
|
||||
if connection.state not in {'closed', 'init'}:
|
||||
logger.debug("Connection '{}' was left open."
|
||||
.format(connection.name))
|
||||
else:
|
||||
fire_event(ConnectionClosedInCleanup(conn_name=cast_to_str(connection.name)))
|
||||
logger.debug("Connection '{}' was properly closed."
|
||||
.format(connection.name))
|
||||
self.close(connection)
|
||||
|
||||
# garbage collect these connections
|
||||
@@ -320,12 +202,16 @@ class BaseConnectionManager(metaclass=abc.ABCMeta):
|
||||
@abc.abstractmethod
|
||||
def begin(self) -> None:
|
||||
"""Begin a transaction. (passable)"""
|
||||
raise dbt.exceptions.NotImplementedError("`begin` is not implemented for this adapter!")
|
||||
raise dbt.exceptions.NotImplementedException(
|
||||
'`begin` is not implemented for this adapter!'
|
||||
)
|
||||
|
||||
@abc.abstractmethod
|
||||
def commit(self) -> None:
|
||||
"""Commit a transaction. (passable)"""
|
||||
raise dbt.exceptions.NotImplementedError("`commit` is not implemented for this adapter!")
|
||||
raise dbt.exceptions.NotImplementedException(
|
||||
'`commit` is not implemented for this adapter!'
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def _rollback_handle(cls, connection: Connection) -> None:
|
||||
@@ -333,52 +219,55 @@ class BaseConnectionManager(metaclass=abc.ABCMeta):
|
||||
try:
|
||||
connection.handle.rollback()
|
||||
except Exception:
|
||||
fire_event(
|
||||
RollbackFailed(
|
||||
conn_name=cast_to_str(connection.name),
|
||||
exc_info=traceback.format_exc(),
|
||||
node_info=get_node_info(),
|
||||
)
|
||||
logger.debug(
|
||||
'Failed to rollback {}'.format(connection.name),
|
||||
exc_info=True
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def _close_handle(cls, connection: Connection) -> None:
|
||||
"""Perform the actual close operation."""
|
||||
# On windows, sometimes connection handles don't have a close() attr.
|
||||
if hasattr(connection.handle, "close"):
|
||||
fire_event(
|
||||
ConnectionClosed(conn_name=cast_to_str(connection.name), node_info=get_node_info())
|
||||
)
|
||||
if hasattr(connection.handle, 'close'):
|
||||
logger.debug(f'On {connection.name}: Close')
|
||||
connection.handle.close()
|
||||
else:
|
||||
fire_event(
|
||||
ConnectionLeftOpen(
|
||||
conn_name=cast_to_str(connection.name), node_info=get_node_info()
|
||||
)
|
||||
)
|
||||
logger.debug(f'On {connection.name}: No close available on handle')
|
||||
|
||||
@classmethod
|
||||
def _rollback(cls, connection: Connection) -> None:
|
||||
"""Roll back the given connection."""
|
||||
if flags.STRICT_MODE:
|
||||
if not isinstance(connection, Connection):
|
||||
raise dbt.exceptions.CompilerException(
|
||||
f'In _rollback, got {connection} - not a Connection!'
|
||||
)
|
||||
|
||||
if connection.transaction_open is False:
|
||||
raise dbt.exceptions.DbtInternalError(
|
||||
f"Tried to rollback transaction on connection "
|
||||
raise dbt.exceptions.InternalException(
|
||||
f'Tried to rollback transaction on connection '
|
||||
f'"{connection.name}", but it does not have one open!'
|
||||
)
|
||||
|
||||
fire_event(Rollback(conn_name=cast_to_str(connection.name), node_info=get_node_info()))
|
||||
logger.debug(f'On {connection.name}: ROLLBACK')
|
||||
cls._rollback_handle(connection)
|
||||
|
||||
connection.transaction_open = False
|
||||
|
||||
@classmethod
|
||||
def close(cls, connection: Connection) -> Connection:
|
||||
if flags.STRICT_MODE:
|
||||
if not isinstance(connection, Connection):
|
||||
raise dbt.exceptions.CompilerException(
|
||||
f'In close, got {connection} - not a Connection!'
|
||||
)
|
||||
|
||||
# if the connection is in closed or init, there's nothing to do
|
||||
if connection.state in {ConnectionState.CLOSED, ConnectionState.INIT}:
|
||||
return connection
|
||||
|
||||
if connection.transaction_open and connection.handle:
|
||||
fire_event(Rollback(conn_name=cast_to_str(connection.name), node_info=get_node_info()))
|
||||
logger.debug('On {}: ROLLBACK'.format(connection.name))
|
||||
cls._rollback_handle(connection)
|
||||
connection.transaction_open = False
|
||||
|
||||
@@ -401,14 +290,16 @@ class BaseConnectionManager(metaclass=abc.ABCMeta):
|
||||
@abc.abstractmethod
|
||||
def execute(
|
||||
self, sql: str, auto_begin: bool = False, fetch: bool = False
|
||||
) -> Tuple[AdapterResponse, agate.Table]:
|
||||
) -> Tuple[str, agate.Table]:
|
||||
"""Execute the given SQL.
|
||||
|
||||
:param str sql: The sql to execute.
|
||||
:param bool auto_begin: If set, and dbt is not currently inside a
|
||||
transaction, automatically begin one.
|
||||
:param bool fetch: If set, fetch results.
|
||||
:return: A tuple of the query status and results (empty if fetch=False).
|
||||
:rtype: Tuple[AdapterResponse, agate.Table]
|
||||
:return: A tuple of the status and the results (empty if fetch=False).
|
||||
:rtype: Tuple[str, agate.Table]
|
||||
"""
|
||||
raise dbt.exceptions.NotImplementedError("`execute` is not implemented for this adapter!")
|
||||
raise dbt.exceptions.NotImplementedException(
|
||||
'`execute` is not implemented for this adapter!'
|
||||
)
|
||||
|
||||
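To ground the `retry_connection` docstring shown earlier in this file's diff, here is a hedged sketch of how an adapter's `open()` might call it. `FakeDriverError`, `MyDBConnectionManager`, and the body of `connect_fn` are stand-ins for a real database driver, not dbt APIs.

```python
# Hedged sketch of retry_connection usage; everything not imported from dbt
# below is a placeholder for a real database driver.
from dbt.adapters.base.connections import BaseConnectionManager
from dbt.events import AdapterLogger

logger = AdapterLogger("MyDB")


class FakeDriverError(Exception):
    """Stand-in for a driver's transient, retryable error."""


class MyDBConnectionManager(BaseConnectionManager):
    TYPE = "mydb"

    @classmethod
    def open(cls, connection):
        def connect_fn():
            # Replace with the real driver call, e.g.
            # driver.connect(**connection.credentials.to_dict())
            raise FakeDriverError("pretend the network blipped")

        return cls.retry_connection(
            connection,
            connect=connect_fn,
            logger=logger,
            retryable_exceptions=(FakeDriverError,),
            retry_limit=3,
            # Callable form: sleep 1s, 2s, 4s between successive attempts.
            retry_timeout=lambda attempt: 2 ** attempt,
        )
```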
File diff suppressed because it is too large
@@ -30,11 +30,9 @@ class _Available:
|
||||
x.update(big_expensive_db_query())
|
||||
return x
|
||||
"""
|
||||
|
||||
def inner(func):
|
||||
func._parse_replacement_ = parse_replacement
|
||||
return self(func)
|
||||
|
||||
return inner
|
||||
|
||||
def deprecated(
|
||||
@@ -59,14 +57,13 @@ class _Available:
|
||||
The optional parse_replacement, if provided, will provide a parse-time
|
||||
replacement for the actual method (see `available.parse`).
|
||||
"""
|
||||
|
||||
def wrapper(func):
|
||||
func_name = func.__name__
|
||||
renamed_method(func_name, supported_name)
|
||||
|
||||
@wraps(func)
|
||||
def inner(*args, **kwargs):
|
||||
warn("adapter:{}".format(func_name))
|
||||
warn('adapter:{}'.format(func_name))
|
||||
return func(*args, **kwargs)
|
||||
|
||||
if parse_replacement:
|
||||
@@ -74,7 +71,6 @@ class _Available:
|
||||
else:
|
||||
available_function = self
|
||||
return available_function(inner)
|
||||
|
||||
return wrapper
|
||||
|
||||
def parse_none(self, func: Callable) -> Callable:
|
||||
@@ -99,7 +95,9 @@ class AdapterMeta(abc.ABCMeta):
|
||||
# I'm not sure there is any benefit to it after poking around a bit,
|
||||
# but having it doesn't hurt on the python side (and omitting it could
|
||||
# hurt for obscure metaclass reasons, for all I know)
|
||||
cls = abc.ABCMeta.__new__(mcls, name, bases, namespace, **kwargs) # type: ignore
|
||||
cls = abc.ABCMeta.__new__( # type: ignore
|
||||
mcls, name, bases, namespace, **kwargs
|
||||
)
|
||||
|
||||
# this is very much inspired by ABCMeta's own implementation
|
||||
|
||||
@@ -111,14 +109,14 @@ class AdapterMeta(abc.ABCMeta):
|
||||
|
||||
# collect base class data first
|
||||
for base in bases:
|
||||
available.update(getattr(base, "_available_", set()))
|
||||
replacements.update(getattr(base, "_parse_replacements_", set()))
|
||||
available.update(getattr(base, '_available_', set()))
|
||||
replacements.update(getattr(base, '_parse_replacements_', set()))
|
||||
|
||||
# override with local data if it exists
|
||||
for name, value in namespace.items():
|
||||
if getattr(value, "_is_available_", False):
|
||||
if getattr(value, '_is_available_', False):
|
||||
available.add(name)
|
||||
parse_replacement = getattr(value, "_parse_replacement_", None)
|
||||
parse_replacement = getattr(value, '_parse_replacement_', None)
|
||||
if parse_replacement is not None:
|
||||
replacements[name] = parse_replacement
|
||||
|
||||
|
||||
@@ -1,17 +1,18 @@
|
||||
from typing import List, Optional, Type
|
||||
|
||||
from dbt.adapters.base import Credentials
|
||||
from dbt.exceptions import CompilationError
|
||||
from dbt.exceptions import CompilationException
|
||||
from dbt.adapters.protocol import AdapterProtocol
|
||||
|
||||
|
||||
def project_name_from_path(include_path: str) -> str:
|
||||
# avoid an import cycle
|
||||
from dbt.config.project import PartialProject
|
||||
|
||||
partial = PartialProject.from_project_root(include_path)
|
||||
from dbt.config.project import Project
|
||||
partial = Project.partial_load(include_path)
|
||||
if partial.project_name is None:
|
||||
raise CompilationError(f"Invalid project at {include_path}: name not set!")
|
||||
raise CompilationException(
|
||||
f'Invalid project at {include_path}: name not set!'
|
||||
)
|
||||
return partial.project_name
|
||||
|
||||
|
||||
@@ -22,13 +23,12 @@ class AdapterPlugin:
|
||||
:param dependencies: A list of adapter names that this adapter depends
|
||||
upon.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
adapter: Type[AdapterProtocol],
|
||||
credentials: Type[Credentials],
|
||||
include_path: str,
|
||||
dependencies: Optional[List[str]] = None,
|
||||
dependencies: Optional[List[str]] = None
|
||||
):
|
||||
|
||||
self.adapter: Type[AdapterProtocol] = adapter
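
For context, `AdapterPlugin` is the object an adapter package exposes so the factory (further down in this diff) can discover it as `mod.Plugin` after importing `dbt.adapters.<name>`. A hedged sketch for a hypothetical adapter package — all `my_adapter.*` names are illustrative, not a real plugin:

```python
from dbt.adapters.base.plugin import AdapterPlugin

from my_adapter.connections import MyAdapterCredentials  # hypothetical module
from my_adapter.impl import MyAdapter                     # hypothetical module
from my_adapter.include import PACKAGE_PATH               # hypothetical module

# The factory imports dbt.adapters.<name> and reads this module-level attribute.
Plugin = AdapterPlugin(
    adapter=MyAdapter,
    credentials=MyAdapterCredentials,
    include_path=PACKAGE_PATH,
    dependencies=["postgres"],  # optional: reuse another adapter's macros
)
```
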
|
||||
|
||||
@@ -5,9 +5,9 @@ from dbt.clients.jinja import QueryStringGenerator
|
||||
|
||||
from dbt.context.manifest import generate_query_header_context
|
||||
from dbt.contracts.connection import AdapterRequiredConfig, QueryComment
|
||||
from dbt.contracts.graph.nodes import ResultNode
|
||||
from dbt.contracts.graph.compiled import CompileResultNode
|
||||
from dbt.contracts.graph.manifest import Manifest
|
||||
from dbt.exceptions import DbtRuntimeError
|
||||
from dbt.exceptions import RuntimeException
|
||||
|
||||
|
||||
class NodeWrapper:
|
||||
@@ -15,7 +15,7 @@ class NodeWrapper:
|
||||
self._inner_node = node
|
||||
|
||||
def __getattr__(self, name):
|
||||
return getattr(self._inner_node, name, "")
|
||||
return getattr(self._inner_node, name, '')
|
||||
|
||||
|
||||
class _QueryComment(local):
|
||||
@@ -24,7 +24,6 @@ class _QueryComment(local):
|
||||
- the current thread's query comment.
|
||||
- a source_name indicating what set the current thread's query comment
|
||||
"""
|
||||
|
||||
def __init__(self, initial):
|
||||
self.query_comment: Optional[str] = initial
|
||||
self.append = False
|
||||
@@ -36,19 +35,21 @@ class _QueryComment(local):
|
||||
if self.append:
|
||||
# replace last ';' with '<comment>;'
|
||||
sql = sql.rstrip()
|
||||
if sql[-1] == ";":
|
||||
if sql[-1] == ';':
|
||||
sql = sql[:-1]
|
||||
return "{}\n/* {} */;".format(sql, self.query_comment.strip())
|
||||
return '{}\n/* {} */;'.format(sql, self.query_comment.strip())
|
||||
|
||||
return "{}\n/* {} */".format(sql, self.query_comment.strip())
|
||||
return '{}\n/* {} */'.format(sql, self.query_comment.strip())
|
||||
|
||||
return "/* {} */\n{}".format(self.query_comment.strip(), sql)
|
||||
return '/* {} */\n{}'.format(self.query_comment.strip(), sql)
|
||||
|
||||
def set(self, comment: Optional[str], append: bool):
|
||||
if isinstance(comment, str) and "*/" in comment:
|
||||
if isinstance(comment, str) and '*/' in comment:
|
||||
# tell the user "no" so they don't hurt themselves by writing
|
||||
# garbage
|
||||
raise DbtRuntimeError(f'query comment contains illegal value "*/": {comment}')
|
||||
raise RuntimeException(
|
||||
f'query comment contains illegal value "*/": {comment}'
|
||||
)
|
||||
self.query_comment = comment
|
||||
self.append = append
|
||||
|
||||
@@ -62,17 +63,15 @@ class MacroQueryStringSetter:
|
||||
self.config = config
|
||||
|
||||
comment_macro = self._get_comment_macro()
|
||||
self.generator: QueryStringFunc = lambda name, model: ""
|
||||
self.generator: QueryStringFunc = lambda name, model: ''
|
||||
# if the comment value was None or the empty string, just skip it
|
||||
if comment_macro:
|
||||
assert isinstance(comment_macro, str)
|
||||
macro = "\n".join(
|
||||
(
|
||||
"{%- macro query_comment_macro(connection_name, node) -%}",
|
||||
comment_macro,
|
||||
"{% endmacro %}",
|
||||
)
|
||||
)
|
||||
macro = '\n'.join((
|
||||
'{%- macro query_comment_macro(connection_name, node) -%}',
|
||||
comment_macro,
|
||||
'{% endmacro %}'
|
||||
))
|
||||
ctx = self._get_context()
|
||||
self.generator = QueryStringGenerator(macro, ctx)
|
||||
self.comment = _QueryComment(None)
|
||||
@@ -88,9 +87,9 @@ class MacroQueryStringSetter:
|
||||
return self.comment.add(sql)
|
||||
|
||||
def reset(self):
|
||||
self.set("master", None)
|
||||
self.set('master', None)
|
||||
|
||||
def set(self, name: str, node: Optional[ResultNode]):
|
||||
def set(self, name: str, node: Optional[CompileResultNode]):
|
||||
wrapped: Optional[NodeWrapper] = None
|
||||
if node is not None:
|
||||
wrapped = NodeWrapper(node)
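
The comment-placement logic in `_QueryComment.add` above reduces to a small pure function. Here is a minimal sketch of the same behavior (assumptions: the comment text is already rendered and there is no thread-local state):

```python
def add_query_comment(sql: str, comment: str, append: bool = False) -> str:
    if not comment:
        return sql
    if append:
        # replace the trailing ';' with '<comment>;' so the comment stays inside the statement
        sql = sql.rstrip()
        if sql.endswith(";"):
            sql = sql[:-1]
            return "{}\n/* {} */;".format(sql, comment.strip())
        return "{}\n/* {} */".format(sql, comment.strip())
    # default: prepend the comment as a leading block comment
    return "/* {} */\n{}".format(comment.strip(), sql)


print(add_query_comment("select 1", "run by dbt"))
# /* run by dbt */
# select 1
```
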
|
||||
|
||||
@@ -1,39 +1,31 @@
|
||||
from collections.abc import Hashable
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Optional, TypeVar, Any, Type, Dict, Iterator, Tuple, Set
|
||||
from dataclasses import dataclass
|
||||
from typing import (
|
||||
Optional, TypeVar, Any, Type, Dict, Union, Iterator, Tuple, Set
|
||||
)
|
||||
|
||||
from dbt.contracts.graph.nodes import SourceDefinition, ManifestNode, ResultNode, ParsedNode
|
||||
from dbt.contracts.graph.compiled import CompiledNode
|
||||
from dbt.contracts.graph.parsed import ParsedSourceDefinition, ParsedNode
|
||||
from dbt.contracts.relation import (
|
||||
RelationType,
|
||||
ComponentName,
|
||||
HasQuoting,
|
||||
FakeAPIObject,
|
||||
Policy,
|
||||
Path,
|
||||
)
|
||||
from dbt.exceptions import (
|
||||
ApproximateMatchError,
|
||||
DbtInternalError,
|
||||
MultipleDatabasesNotAllowedError,
|
||||
RelationType, ComponentName, HasQuoting, FakeAPIObject, Policy, Path
|
||||
)
|
||||
from dbt.exceptions import InternalException
|
||||
from dbt.node_types import NodeType
|
||||
from dbt.utils import filter_null_values, deep_merge, classproperty
|
||||
|
||||
import dbt.exceptions
|
||||
|
||||
|
||||
Self = TypeVar("Self", bound="BaseRelation")
|
||||
Self = TypeVar('Self', bound='BaseRelation')
|
||||
|
||||
|
||||
@dataclass(frozen=True, eq=False, repr=False)
|
||||
class BaseRelation(FakeAPIObject, Hashable):
|
||||
type: Optional[RelationType]
|
||||
path: Path
|
||||
type: Optional[RelationType] = None
|
||||
quote_character: str = '"'
|
||||
# Python 3.11 requires that these use default_factory instead of simple default
|
||||
# ValueError: mutable default <class 'dbt.contracts.relation.Policy'> for field include_policy is not allowed: use default_factory
|
||||
include_policy: Policy = field(default_factory=lambda: Policy())
|
||||
quote_policy: Policy = field(default_factory=lambda: Policy())
|
||||
include_policy: Policy = Policy()
|
||||
quote_policy: Policy = Policy()
|
||||
dbt_created: bool = False
|
||||
|
||||
def _is_exactish_match(self, field: ComponentName, value: str) -> bool:
|
||||
@@ -44,31 +36,33 @@ class BaseRelation(FakeAPIObject, Hashable):
|
||||
|
||||
@classmethod
|
||||
def _get_field_named(cls, field_name):
|
||||
for f, _ in cls._get_fields():
|
||||
if f.name == field_name:
|
||||
return f
|
||||
for field, _ in cls._get_fields():
|
||||
if field.name == field_name:
|
||||
return field
|
||||
# this should be unreachable
|
||||
raise ValueError(f"BaseRelation has no {field_name} field!")
|
||||
raise ValueError(f'BaseRelation has no {field_name} field!')
|
||||
|
||||
def __eq__(self, other):
|
||||
if not isinstance(other, self.__class__):
|
||||
return False
|
||||
return self.to_dict(omit_none=True) == other.to_dict(omit_none=True)
|
||||
return self.to_dict() == other.to_dict()
|
||||
|
||||
@classmethod
|
||||
def get_default_quote_policy(cls) -> Policy:
|
||||
return cls._get_field_named("quote_policy").default_factory()
|
||||
return cls._get_field_named('quote_policy').default
|
||||
|
||||
@classmethod
|
||||
def get_default_include_policy(cls) -> Policy:
|
||||
return cls._get_field_named("include_policy").default_factory()
|
||||
return cls._get_field_named('include_policy').default
|
||||
|
||||
def get(self, key, default=None):
|
||||
"""Override `.get` to return a metadata object so we don't break
|
||||
dbt_utils.
|
||||
"""
|
||||
if key == "metadata":
|
||||
return {"type": self.__class__.__name__}
|
||||
if key == 'metadata':
|
||||
return {
|
||||
'type': self.__class__.__name__
|
||||
}
|
||||
return super().get(key, default)
|
||||
|
||||
def matches(
|
||||
@@ -77,19 +71,16 @@ class BaseRelation(FakeAPIObject, Hashable):
|
||||
schema: Optional[str] = None,
|
||||
identifier: Optional[str] = None,
|
||||
) -> bool:
|
||||
search = filter_null_values(
|
||||
{
|
||||
ComponentName.Database: database,
|
||||
ComponentName.Schema: schema,
|
||||
ComponentName.Identifier: identifier,
|
||||
}
|
||||
)
|
||||
search = filter_null_values({
|
||||
ComponentName.Database: database,
|
||||
ComponentName.Schema: schema,
|
||||
ComponentName.Identifier: identifier
|
||||
})
|
||||
|
||||
if not search:
|
||||
# nothing was passed in
|
||||
raise dbt.exceptions.DbtRuntimeError(
|
||||
"Tried to match relation, but no search path was passed!"
|
||||
)
|
||||
raise dbt.exceptions.RuntimeException(
|
||||
"Tried to match relation, but no search path was passed!")
|
||||
|
||||
exact_match = True
|
||||
approximate_match = True
|
||||
@@ -97,14 +88,15 @@ class BaseRelation(FakeAPIObject, Hashable):
|
||||
for k, v in search.items():
|
||||
if not self._is_exactish_match(k, v):
|
||||
exact_match = False
|
||||
if str(self.path.get_lowered_part(k)).strip(self.quote_character) != v.lower().strip(
|
||||
self.quote_character
|
||||
):
|
||||
approximate_match = False # type: ignore[union-attr]
|
||||
|
||||
if self.path.get_lowered_part(k) != v.lower():
|
||||
approximate_match = False
|
||||
|
||||
if approximate_match and not exact_match:
|
||||
target = self.create(database=database, schema=schema, identifier=identifier)
|
||||
raise ApproximateMatchError(target, self)
|
||||
target = self.create(
|
||||
database=database, schema=schema, identifier=identifier
|
||||
)
|
||||
dbt.exceptions.approximate_relation_match(target, self)
|
||||
|
||||
return exact_match
|
||||
|
||||
@@ -117,13 +109,11 @@ class BaseRelation(FakeAPIObject, Hashable):
|
||||
schema: Optional[bool] = None,
|
||||
identifier: Optional[bool] = None,
|
||||
) -> Self:
|
||||
policy = filter_null_values(
|
||||
{
|
||||
ComponentName.Database: database,
|
||||
ComponentName.Schema: schema,
|
||||
ComponentName.Identifier: identifier,
|
||||
}
|
||||
)
|
||||
policy = filter_null_values({
|
||||
ComponentName.Database: database,
|
||||
ComponentName.Schema: schema,
|
||||
ComponentName.Identifier: identifier
|
||||
})
|
||||
|
||||
new_quote_policy = self.quote_policy.replace_dict(policy)
|
||||
return self.replace(quote_policy=new_quote_policy)
|
||||
@@ -134,18 +124,16 @@ class BaseRelation(FakeAPIObject, Hashable):
|
||||
schema: Optional[bool] = None,
|
||||
identifier: Optional[bool] = None,
|
||||
) -> Self:
|
||||
policy = filter_null_values(
|
||||
{
|
||||
ComponentName.Database: database,
|
||||
ComponentName.Schema: schema,
|
||||
ComponentName.Identifier: identifier,
|
||||
}
|
||||
)
|
||||
policy = filter_null_values({
|
||||
ComponentName.Database: database,
|
||||
ComponentName.Schema: schema,
|
||||
ComponentName.Identifier: identifier
|
||||
})
|
||||
|
||||
new_include_policy = self.include_policy.replace_dict(policy)
|
||||
return self.replace(include_policy=new_include_policy)
|
||||
|
||||
def information_schema(self, view_name=None) -> "InformationSchema":
|
||||
def information_schema(self, view_name=None) -> 'InformationSchema':
|
||||
# some of our data comes from jinja, where things can be `Undefined`.
|
||||
if not isinstance(view_name, str):
|
||||
view_name = None
|
||||
@@ -155,10 +143,10 @@ class BaseRelation(FakeAPIObject, Hashable):
|
||||
info_schema = InformationSchema.from_relation(self, view_name)
|
||||
return info_schema.incorporate(path={"schema": None})
|
||||
|
||||
def information_schema_only(self) -> "InformationSchema":
|
||||
def information_schema_only(self) -> 'InformationSchema':
|
||||
return self.information_schema()
|
||||
|
||||
def without_identifier(self) -> "BaseRelation":
|
||||
def without_identifier(self) -> 'BaseRelation':
|
||||
"""Return a form of this relation that only has the database and schema
|
||||
set to included. To get the appropriately-quoted form the schema out of
|
||||
the result (for use as part of a query), use `.render()`. To get the
|
||||
@@ -168,7 +156,9 @@ class BaseRelation(FakeAPIObject, Hashable):
|
||||
"""
|
||||
return self.include(identifier=False).replace_path(identifier=None)
|
||||
|
||||
def _render_iterator(self) -> Iterator[Tuple[Optional[ComponentName], Optional[str]]]:
|
||||
def _render_iterator(
|
||||
self
|
||||
) -> Iterator[Tuple[Optional[ComponentName], Optional[str]]]:
|
||||
|
||||
for key in ComponentName:
|
||||
path_part: Optional[str] = None
|
||||
@@ -180,22 +170,27 @@ class BaseRelation(FakeAPIObject, Hashable):
|
||||
|
||||
def render(self) -> str:
|
||||
# if there is nothing set, this will return the empty string.
|
||||
return ".".join(part for _, part in self._render_iterator() if part is not None)
|
||||
return '.'.join(
|
||||
part for _, part in self._render_iterator()
|
||||
if part is not None
|
||||
)
|
||||
|
||||
def quoted(self, identifier):
|
||||
return "{quote_char}{identifier}{quote_char}".format(
|
||||
return '{quote_char}{identifier}{quote_char}'.format(
|
||||
quote_char=self.quote_character,
|
||||
identifier=identifier,
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def create_from_source(cls: Type[Self], source: SourceDefinition, **kwargs: Any) -> Self:
|
||||
source_quoting = source.quoting.to_dict(omit_none=True)
|
||||
source_quoting.pop("column", None)
|
||||
def create_from_source(
|
||||
cls: Type[Self], source: ParsedSourceDefinition, **kwargs: Any
|
||||
) -> Self:
|
||||
source_quoting = source.quoting.to_dict()
|
||||
source_quoting.pop('column', None)
|
||||
quote_policy = deep_merge(
|
||||
cls.get_default_quote_policy().to_dict(omit_none=True),
|
||||
cls.get_default_quote_policy().to_dict(),
|
||||
source_quoting,
|
||||
kwargs.get("quote_policy", {}),
|
||||
kwargs.get('quote_policy', {}),
|
||||
)
|
||||
|
||||
return cls.create(
|
||||
@@ -203,18 +198,18 @@ class BaseRelation(FakeAPIObject, Hashable):
|
||||
schema=source.schema,
|
||||
identifier=source.identifier,
|
||||
quote_policy=quote_policy,
|
||||
**kwargs,
|
||||
**kwargs
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def add_ephemeral_prefix(name: str):
|
||||
return f"__dbt__cte__{name}"
|
||||
return f'__dbt__CTE__{name}'
|
||||
|
||||
@classmethod
|
||||
def create_ephemeral_from_node(
|
||||
cls: Type[Self],
|
||||
config: HasQuoting,
|
||||
node: ManifestNode,
|
||||
node: Union[ParsedNode, CompiledNode],
|
||||
) -> Self:
|
||||
# Note that ephemeral models are based on the name.
|
||||
identifier = cls.add_ephemeral_prefix(node.name)
|
||||
@@ -227,7 +222,7 @@ class BaseRelation(FakeAPIObject, Hashable):
|
||||
def create_from_node(
|
||||
cls: Type[Self],
|
||||
config: HasQuoting,
|
||||
node,
|
||||
node: Union[ParsedNode, CompiledNode],
|
||||
quote_policy: Optional[Dict[str, bool]] = None,
|
||||
**kwargs: Any,
|
||||
) -> Self:
|
||||
@@ -241,27 +236,27 @@ class BaseRelation(FakeAPIObject, Hashable):
|
||||
schema=node.schema,
|
||||
identifier=node.alias,
|
||||
quote_policy=quote_policy,
|
||||
**kwargs,
|
||||
)
|
||||
**kwargs)
|
||||
|
||||
@classmethod
|
||||
def create_from(
|
||||
cls: Type[Self],
|
||||
config: HasQuoting,
|
||||
node: ResultNode,
|
||||
node: Union[CompiledNode, ParsedNode, ParsedSourceDefinition],
|
||||
**kwargs: Any,
|
||||
) -> Self:
|
||||
if node.resource_type == NodeType.Source:
|
||||
if not isinstance(node, SourceDefinition):
|
||||
raise DbtInternalError(
|
||||
"type mismatch, expected SourceDefinition but got {}".format(type(node))
|
||||
if not isinstance(node, ParsedSourceDefinition):
|
||||
raise InternalException(
|
||||
'type mismatch, expected ParsedSourceDefinition but got {}'
|
||||
.format(type(node))
|
||||
)
|
||||
return cls.create_from_source(node, **kwargs)
|
||||
else:
|
||||
# Can't use ManifestNode here because of parameterized generics
|
||||
if not isinstance(node, (ParsedNode)):
|
||||
raise DbtInternalError(
|
||||
f"type mismatch, expected ManifestNode but got {type(node)}"
|
||||
if not isinstance(node, (ParsedNode, CompiledNode)):
|
||||
raise InternalException(
|
||||
'type mismatch, expected ParsedNode or CompiledNode but '
|
||||
'got {}'.format(type(node))
|
||||
)
|
||||
return cls.create_from_node(config, node, **kwargs)
|
||||
|
||||
@@ -274,16 +269,14 @@ class BaseRelation(FakeAPIObject, Hashable):
|
||||
type: Optional[RelationType] = None,
|
||||
**kwargs,
|
||||
) -> Self:
|
||||
kwargs.update(
|
||||
{
|
||||
"path": {
|
||||
"database": database,
|
||||
"schema": schema,
|
||||
"identifier": identifier,
|
||||
},
|
||||
"type": type,
|
||||
}
|
||||
)
|
||||
kwargs.update({
|
||||
'path': {
|
||||
'database': database,
|
||||
'schema': schema,
|
||||
'identifier': identifier,
|
||||
},
|
||||
'type': type,
|
||||
})
|
||||
return cls.from_dict(kwargs)
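
As a usage sketch of `create()` and `render()` (illustrative only; real code goes through the adapter's `Relation` subclass, and the rendered quoting depends on the active quote/include policies):

```python
from dbt.adapters.base.relation import BaseRelation

rel = BaseRelation.create(
    database="analytics",
    schema="dbt_jsmith",
    identifier="orders",
)
print(rel.render())                             # e.g. "analytics"."dbt_jsmith"."orders"
print(rel.include(identifier=False).render())   # e.g. "analytics"."dbt_jsmith"
print(rel.quoted("My Column"))                  # wraps with the quote character: "My Column"
```
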
|
||||
|
||||
def __repr__(self) -> str:
|
||||
@@ -328,10 +321,6 @@ class BaseRelation(FakeAPIObject, Hashable):
|
||||
def is_view(self) -> bool:
|
||||
return self.type == RelationType.View
|
||||
|
||||
@property
|
||||
def is_materialized_view(self) -> bool:
|
||||
return self.type == RelationType.MaterializedView
|
||||
|
||||
@classproperty
|
||||
def Table(cls) -> str:
|
||||
return str(RelationType.Table)
|
||||
@@ -348,16 +337,12 @@ class BaseRelation(FakeAPIObject, Hashable):
|
||||
def External(cls) -> str:
|
||||
return str(RelationType.External)
|
||||
|
||||
@classproperty
|
||||
def MaterializedView(cls) -> str:
|
||||
return str(RelationType.MaterializedView)
|
||||
|
||||
@classproperty
|
||||
def get_relation_type(cls) -> Type[RelationType]:
|
||||
return RelationType
|
||||
|
||||
|
||||
Info = TypeVar("Info", bound="InformationSchema")
|
||||
Info = TypeVar('Info', bound='InformationSchema')
|
||||
|
||||
|
||||
@dataclass(frozen=True, eq=False, repr=False)
|
||||
@@ -366,16 +351,18 @@ class InformationSchema(BaseRelation):
|
||||
|
||||
def __post_init__(self):
|
||||
if not isinstance(self.information_schema_view, (type(None), str)):
|
||||
raise dbt.exceptions.CompilationError(
|
||||
"Got an invalid name: {}".format(self.information_schema_view)
|
||||
raise dbt.exceptions.CompilationException(
|
||||
'Got an invalid name: {}'.format(self.information_schema_view)
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def get_path(cls, relation: BaseRelation, information_schema_view: Optional[str]) -> Path:
|
||||
def get_path(
|
||||
cls, relation: BaseRelation, information_schema_view: Optional[str]
|
||||
) -> Path:
|
||||
return Path(
|
||||
database=relation.database,
|
||||
schema=relation.schema,
|
||||
identifier="INFORMATION_SCHEMA",
|
||||
identifier='INFORMATION_SCHEMA',
|
||||
)
|
||||
|
||||
@classmethod
|
||||
@@ -406,7 +393,9 @@ class InformationSchema(BaseRelation):
|
||||
relation: BaseRelation,
|
||||
information_schema_view: Optional[str],
|
||||
) -> Info:
|
||||
include_policy = cls.get_include_policy(relation, information_schema_view)
|
||||
include_policy = cls.get_include_policy(
|
||||
relation, information_schema_view
|
||||
)
|
||||
quote_policy = cls.get_quote_policy(relation, information_schema_view)
|
||||
path = cls.get_path(relation, information_schema_view)
|
||||
return cls(
|
||||
@@ -428,7 +417,6 @@ class SchemaSearchMap(Dict[InformationSchema, Set[Optional[str]]]):
|
||||
search for what schemas. The schema values are all lowercased to avoid
|
||||
duplication.
|
||||
"""
|
||||
|
||||
def add(self, relation: BaseRelation):
|
||||
key = relation.information_schema_only()
|
||||
if key not in self:
|
||||
@@ -438,28 +426,30 @@ class SchemaSearchMap(Dict[InformationSchema, Set[Optional[str]]]):
|
||||
schema = relation.schema.lower()
|
||||
self[key].add(schema)
|
||||
|
||||
def search(self) -> Iterator[Tuple[InformationSchema, Optional[str]]]:
|
||||
def search(
|
||||
self
|
||||
) -> Iterator[Tuple[InformationSchema, Optional[str]]]:
|
||||
for information_schema_name, schemas in self.items():
|
||||
for schema in schemas:
|
||||
yield information_schema_name, schema
|
||||
|
||||
def flatten(self, allow_multiple_databases: bool = False):
|
||||
def flatten(self):
|
||||
new = self.__class__()
|
||||
|
||||
# make sure we don't have multiple databases if allow_multiple_databases is set to False
|
||||
if not allow_multiple_databases:
|
||||
seen = {r.database.lower() for r in self if r.database}
|
||||
if len(seen) > 1:
|
||||
raise MultipleDatabasesNotAllowedError(seen)
|
||||
# make sure we don't have duplicates
|
||||
seen = {r.database.lower() for r in self if r.database}
|
||||
if len(seen) > 1:
|
||||
dbt.exceptions.raise_compiler_error(str(seen))
|
||||
|
||||
for information_schema_name, schema in self.search():
|
||||
path = {"database": information_schema_name.database, "schema": schema}
|
||||
new.add(
|
||||
information_schema_name.incorporate(
|
||||
path=path,
|
||||
quote_policy={"database": False},
|
||||
include_policy={"database": False},
|
||||
)
|
||||
)
|
||||
path = {
|
||||
'database': information_schema_name.database,
|
||||
'schema': schema
|
||||
}
|
||||
new.add(information_schema_name.incorporate(
|
||||
path=path,
|
||||
quote_policy={'database': False},
|
||||
include_policy={'database': False},
|
||||
))
|
||||
|
||||
return new
|
||||
|
||||
@@ -1,23 +1,23 @@
|
||||
import threading
|
||||
from collections import namedtuple
|
||||
from copy import deepcopy
|
||||
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple
|
||||
from typing import List, Iterable, Optional, Dict, Set, Tuple, Any
|
||||
import threading
|
||||
|
||||
from dbt.adapters.reference_keys import (
|
||||
_make_ref_key,
|
||||
_make_ref_key_dict,
|
||||
_ReferenceKey,
|
||||
)
|
||||
from dbt.exceptions import (
|
||||
DependentLinkNotCachedError,
|
||||
NewNameAlreadyInCacheError,
|
||||
NoneRelationFoundError,
|
||||
ReferencedLinkNotCachedError,
|
||||
TruncatedModelNameCausedCollisionError,
|
||||
)
|
||||
from dbt.events.functions import fire_event, fire_event_if
|
||||
from dbt.events.types import CacheAction, CacheDumpGraph
|
||||
from dbt.flags import get_flags
|
||||
from dbt.logger import CACHE_LOGGER as logger
|
||||
from dbt.utils import lowercase
|
||||
import dbt.exceptions
|
||||
|
||||
_ReferenceKey = namedtuple('_ReferenceKey', 'database schema identifier')
|
||||
|
||||
|
||||
def _make_key(relation) -> _ReferenceKey:
|
||||
"""Make _ReferenceKeys with lowercase values for the cache so we don't have
|
||||
to keep track of quoting
|
||||
"""
|
||||
# databases and schemas can both be None
|
||||
return _ReferenceKey(lowercase(relation.database),
|
||||
lowercase(relation.schema),
|
||||
lowercase(relation.identifier))
|
||||
|
||||
|
||||
def dot_separated(key: _ReferenceKey) -> str:
|
||||
@@ -25,7 +25,7 @@ def dot_separated(key: _ReferenceKey) -> str:
|
||||
|
||||
:param _ReferenceKey key: The key to stringify.
|
||||
"""
|
||||
return ".".join(map(str, key))
|
||||
return '.'.join(map(str, key))
|
||||
|
||||
|
||||
class _CachedRelation:
|
||||
@@ -37,15 +37,14 @@ class _CachedRelation:
|
||||
that refer to this relation.
|
||||
:attr BaseRelation inner: The underlying dbt relation.
|
||||
"""
|
||||
|
||||
def __init__(self, inner):
|
||||
self.referenced_by = {}
|
||||
self.inner = inner
|
||||
|
||||
def __str__(self) -> str:
|
||||
return ("_CachedRelation(database={}, schema={}, identifier={}, inner={})").format(
|
||||
self.database, self.schema, self.identifier, self.inner
|
||||
)
|
||||
return (
|
||||
'_CachedRelation(database={}, schema={}, identifier={}, inner={})'
|
||||
).format(self.database, self.schema, self.identifier, self.inner)
|
||||
|
||||
@property
|
||||
def database(self) -> Optional[str]:
|
||||
@@ -77,9 +76,9 @@ class _CachedRelation:
|
||||
|
||||
:return _ReferenceKey: A key for this relation.
|
||||
"""
|
||||
return _make_ref_key(self)
|
||||
return _make_key(self)
|
||||
|
||||
def add_reference(self, referrer: "_CachedRelation"):
|
||||
def add_reference(self, referrer: '_CachedRelation'):
|
||||
"""Add a reference from referrer to self, indicating that if this node
|
||||
were drop...cascaded, the referrer would be dropped as well.
|
||||
|
||||
@@ -123,9 +122,9 @@ class _CachedRelation:
|
||||
# table_name is ever anything but the identifier (via .create())
|
||||
self.inner = self.inner.incorporate(
|
||||
path={
|
||||
"database": new_relation.inner.database,
|
||||
"schema": new_relation.inner.schema,
|
||||
"identifier": new_relation.inner.identifier,
|
||||
'database': new_relation.inner.database,
|
||||
'schema': new_relation.inner.schema,
|
||||
'identifier': new_relation.inner.identifier
|
||||
},
|
||||
)
|
||||
|
||||
@@ -140,7 +139,10 @@ class _CachedRelation:
|
||||
:raises InternalError: If the new key already exists.
|
||||
"""
|
||||
if new_key in self.referenced_by:
|
||||
raise NewNameAlreadyInCacheError(old_key, new_key)
|
||||
dbt.exceptions.raise_cache_inconsistent(
|
||||
'in rename of "{}" -> "{}", new name is in the cache already'
|
||||
.format(old_key, new_key)
|
||||
)
|
||||
|
||||
if old_key not in self.referenced_by:
|
||||
return
|
||||
@@ -155,6 +157,12 @@ class _CachedRelation:
|
||||
return [dot_separated(r) for r in self.referenced_by]
|
||||
|
||||
|
||||
def lazy_log(msg, func):
|
||||
if logger.disabled:
|
||||
return
|
||||
logger.debug(msg.format(func()))
|
||||
|
||||
|
||||
class RelationsCache:
|
||||
"""A cache of the relations known to dbt. Keeps track of relationships
|
||||
declared between tables and handles renames/drops as a real database would.
|
||||
@@ -164,16 +172,13 @@ class RelationsCache:
|
||||
The adapters also hold this lock while filling the cache.
|
||||
:attr Set[str] schemas: The set of known/cached schemas, all lowercased.
|
||||
"""
|
||||
|
||||
def __init__(self) -> None:
|
||||
self.relations: Dict[_ReferenceKey, _CachedRelation] = {}
|
||||
self.lock = threading.RLock()
|
||||
self.schemas: Set[Tuple[Optional[str], Optional[str]]] = set()
|
||||
|
||||
def add_schema(
|
||||
self,
|
||||
database: Optional[str],
|
||||
schema: Optional[str],
|
||||
self, database: Optional[str], schema: Optional[str],
|
||||
) -> None:
|
||||
"""Add a schema to the set of known schemas (case-insensitive)
|
||||
|
||||
@@ -183,9 +188,7 @@ class RelationsCache:
|
||||
self.schemas.add((lowercase(database), lowercase(schema)))
|
||||
|
||||
def drop_schema(
|
||||
self,
|
||||
database: Optional[str],
|
||||
schema: Optional[str],
|
||||
self, database: Optional[str], schema: Optional[str],
|
||||
) -> None:
|
||||
"""Drop the given schema and remove it from the set of known schemas.
|
||||
|
||||
@@ -229,7 +232,10 @@ class RelationsCache:
|
||||
# self.relations or any cache entry's referenced_by during iteration
|
||||
# it's a runtime error!
|
||||
with self.lock:
|
||||
return {dot_separated(k): str(v.dump_graph_entry()) for k, v in self.relations.items()}
|
||||
return {
|
||||
dot_separated(k): v.dump_graph_entry()
|
||||
for k, v in self.relations.items()
|
||||
}
|
||||
|
||||
def _setdefault(self, relation: _CachedRelation):
|
||||
"""Add a relation to the cache, or return it if it already exists.
|
||||
@@ -256,17 +262,22 @@ class RelationsCache:
|
||||
if referenced is None:
|
||||
return
|
||||
if referenced is None:
|
||||
raise ReferencedLinkNotCachedError(referenced_key)
|
||||
dbt.exceptions.raise_cache_inconsistent(
|
||||
'in add_link, referenced link key {} not in cache!'
|
||||
.format(referenced_key)
|
||||
)
|
||||
|
||||
dependent = self.relations.get(dependent_key)
|
||||
if dependent is None:
|
||||
raise DependentLinkNotCachedError(dependent_key)
|
||||
dbt.exceptions.raise_cache_inconsistent(
|
||||
'in add_link, dependent link key {} not in cache!'
|
||||
.format(dependent_key)
|
||||
)
|
||||
|
||||
assert dependent is not None # we just raised!
|
||||
|
||||
referenced.add_reference(dependent)
|
||||
|
||||
# This is called in plugins/postgres/dbt/adapters/postgres/impl.py
|
||||
def add_link(self, referenced, dependent):
|
||||
"""Add a link between two relations to the database. If either relation
|
||||
does not exist, it will be added as an "external" relation.
|
||||
@@ -281,33 +292,33 @@ class RelationsCache:
|
||||
:param BaseRelation dependent: The dependent model.
|
||||
:raises InternalError: If either entry does not exist.
|
||||
"""
|
||||
ref_key = _make_ref_key(referenced)
|
||||
dep_key = _make_ref_key(dependent)
|
||||
ref_key = _make_key(referenced)
|
||||
if (ref_key.database, ref_key.schema) not in self:
|
||||
# if we have not cached the referenced schema at all, we must be
|
||||
# referring to a table outside our control. There's no need to make
|
||||
# a link - we will never drop the referenced relation during a run.
|
||||
fire_event(
|
||||
CacheAction(
|
||||
ref_key=ref_key._asdict(),
|
||||
ref_key_2=dep_key._asdict(),
|
||||
)
|
||||
logger.debug(
|
||||
'{dep!s} references {ref!s} but {ref.database}.{ref.schema} '
|
||||
'is not in the cache, skipping assumed external relation'
|
||||
.format(dep=dependent, ref=ref_key)
|
||||
)
|
||||
return
|
||||
if ref_key not in self.relations:
|
||||
# Insert a dummy "external" relation.
|
||||
referenced = referenced.replace(type=referenced.External)
|
||||
referenced = referenced.replace(
|
||||
type=referenced.External
|
||||
)
|
||||
self.add(referenced)
|
||||
|
||||
dep_key = _make_key(dependent)
|
||||
if dep_key not in self.relations:
|
||||
# Insert a dummy "external" relation.
|
||||
dependent = dependent.replace(type=referenced.External)
|
||||
self.add(dependent)
|
||||
fire_event(
|
||||
CacheAction(
|
||||
action="add_link",
|
||||
ref_key=dep_key._asdict(),
|
||||
ref_key_2=ref_key._asdict(),
|
||||
dependent = dependent.replace(
|
||||
type=referenced.External
|
||||
)
|
||||
self.add(dependent)
|
||||
logger.debug(
|
||||
'adding link, {!s} references {!s}'.format(dep_key, ref_key)
|
||||
)
|
||||
with self.lock:
|
||||
self._add_link(ref_key, dep_key)
|
||||
@@ -318,20 +329,15 @@ class RelationsCache:
|
||||
|
||||
:param BaseRelation relation: The underlying relation.
|
||||
"""
|
||||
flags = get_flags()
|
||||
cached = _CachedRelation(relation)
|
||||
fire_event_if(
|
||||
flags.LOG_CACHE_EVENTS,
|
||||
lambda: CacheDumpGraph(before_after="before", action="adding", dump=self.dump_graph()),
|
||||
)
|
||||
fire_event(CacheAction(action="add_relation", ref_key=_make_ref_key_dict(cached)))
|
||||
logger.debug('Adding relation: {!s}'.format(cached))
|
||||
|
||||
lazy_log('before adding: {!s}', self.dump_graph)
|
||||
|
||||
with self.lock:
|
||||
self._setdefault(cached)
|
||||
fire_event_if(
|
||||
flags.LOG_CACHE_EVENTS,
|
||||
lambda: CacheDumpGraph(before_after="after", action="adding", dump=self.dump_graph()),
|
||||
)
|
||||
|
||||
lazy_log('after adding: {!s}', self.dump_graph)
|
||||
|
||||
def _remove_refs(self, keys):
|
||||
"""Removes all references to all entries in keys. This does not
|
||||
@@ -346,6 +352,22 @@ class RelationsCache:
|
||||
for cached in self.relations.values():
|
||||
cached.release_references(keys)
|
||||
|
||||
def _drop_cascade_relation(self, dropped):
|
||||
"""Drop the given relation and cascade it appropriately to all
|
||||
dependent relations.
|
||||
|
||||
:param _CachedRelation dropped: An existing _CachedRelation to drop.
|
||||
"""
|
||||
if dropped not in self.relations:
|
||||
logger.debug('dropped a nonexistent relationship: {!s}'
|
||||
.format(dropped))
|
||||
return
|
||||
consequences = self.relations[dropped].collect_consequences()
|
||||
logger.debug(
|
||||
'drop {} is cascading to {}'.format(dropped, consequences)
|
||||
)
|
||||
self._remove_refs(consequences)
|
||||
|
||||
def drop(self, relation):
|
||||
"""Drop the named relation and cascade it appropriately to all
|
||||
dependent relations.
|
||||
@@ -357,22 +379,10 @@ class RelationsCache:
|
||||
:param str schema: The schema of the relation to drop.
|
||||
:param str identifier: The identifier of the relation to drop.
|
||||
"""
|
||||
dropped_key = _make_ref_key(relation)
|
||||
dropped_key_msg = _make_ref_key_dict(relation)
|
||||
fire_event(CacheAction(action="drop_relation", ref_key=dropped_key_msg))
|
||||
dropped = _make_key(relation)
|
||||
logger.debug('Dropping relation: {!s}'.format(dropped))
|
||||
with self.lock:
|
||||
if dropped_key not in self.relations:
|
||||
fire_event(CacheAction(action="drop_missing_relation", ref_key=dropped_key_msg))
|
||||
return
|
||||
consequences = self.relations[dropped_key].collect_consequences()
|
||||
# convert from a list of _ReferenceKeys to a list of ReferenceKeyMsgs
|
||||
consequence_msgs = [key._asdict() for key in consequences]
|
||||
fire_event(
|
||||
CacheAction(
|
||||
action="drop_cascade", ref_key=dropped_key_msg, ref_list=consequence_msgs
|
||||
)
|
||||
)
|
||||
self._remove_refs(consequences)
|
||||
self._drop_cascade_relation(dropped)
|
||||
|
||||
def _rename_relation(self, old_key, new_relation):
|
||||
"""Rename a relation named old_key to new_key, updating references.
|
||||
@@ -388,20 +398,15 @@ class RelationsCache:
|
||||
relation = self.relations.pop(old_key)
|
||||
new_key = new_relation.key()
|
||||
|
||||
# relation has to rename its innards, so it needs the _CachedRelation.
|
||||
# relaton has to rename its innards, so it needs the _CachedRelation.
|
||||
relation.rename(new_relation)
|
||||
# update all the relations that refer to it
|
||||
for cached in self.relations.values():
|
||||
if cached.is_referenced_by(old_key):
|
||||
fire_event(
|
||||
CacheAction(
|
||||
action="update_reference",
|
||||
ref_key=_make_ref_key_dict(old_key),
|
||||
ref_key_2=_make_ref_key_dict(new_key),
|
||||
ref_key_3=_make_ref_key_dict(cached.key()),
|
||||
)
|
||||
logger.debug(
|
||||
'updated reference from {0} -> {2} to {1} -> {2}'
|
||||
.format(old_key, new_key, cached.key())
|
||||
)
|
||||
|
||||
cached.rename_key(old_key, new_key)
|
||||
|
||||
self.relations[new_key] = relation
|
||||
@@ -424,12 +429,16 @@ class RelationsCache:
|
||||
:raises InternalError: If the new key is already present.
|
||||
"""
|
||||
if new_key in self.relations:
|
||||
# Tell user when collision caused by model names truncated during
|
||||
# materialization.
|
||||
raise TruncatedModelNameCausedCollisionError(new_key, self.relations)
|
||||
dbt.exceptions.raise_cache_inconsistent(
|
||||
'in rename, new key {} already in cache: {}'
|
||||
.format(new_key, list(self.relations.keys()))
|
||||
)
|
||||
|
||||
if old_key not in self.relations:
|
||||
fire_event(CacheAction(action="temporary_relation", ref_key=old_key._asdict()))
|
||||
logger.debug(
|
||||
'old key {} not found in self.relations, assuming temporary'
|
||||
.format(old_key)
|
||||
)
|
||||
return False
|
||||
return True
|
||||
|
||||
@@ -445,20 +454,13 @@ class RelationsCache:
|
||||
:param BaseRelation new: The new relation name information.
|
||||
:raises InternalError: If the new key is already present.
|
||||
"""
|
||||
old_key = _make_ref_key(old)
|
||||
new_key = _make_ref_key(new)
|
||||
fire_event(
|
||||
CacheAction(
|
||||
action="rename_relation",
|
||||
ref_key=old_key._asdict(),
|
||||
ref_key_2=new_key._asdict(),
|
||||
)
|
||||
)
|
||||
flags = get_flags()
|
||||
fire_event_if(
|
||||
flags.LOG_CACHE_EVENTS,
|
||||
lambda: CacheDumpGraph(before_after="before", action="rename", dump=self.dump_graph()),
|
||||
)
|
||||
old_key = _make_key(old)
|
||||
new_key = _make_key(new)
|
||||
logger.debug('Renaming relation {!s} to {!s}'.format(
|
||||
old_key, new_key
|
||||
))
|
||||
|
||||
lazy_log('before rename: {!s}', self.dump_graph)
|
||||
|
||||
with self.lock:
|
||||
if self._check_rename_constraints(old_key, new_key):
|
||||
@@ -466,12 +468,11 @@ class RelationsCache:
|
||||
else:
|
||||
self._setdefault(_CachedRelation(new))
|
||||
|
||||
fire_event_if(
|
||||
flags.LOG_CACHE_EVENTS,
|
||||
lambda: CacheDumpGraph(before_after="after", action="rename", dump=self.dump_graph()),
|
||||
)
|
||||
lazy_log('after rename: {!s}', self.dump_graph)
|
||||
|
||||
def get_relations(self, database: Optional[str], schema: Optional[str]) -> List[Any]:
|
||||
def get_relations(
|
||||
self, database: Optional[str], schema: Optional[str]
|
||||
) -> List[Any]:
|
||||
"""Case-insensitively yield all relations matching the given schema.
|
||||
|
||||
:param str schema: The case-insensitive schema name to list from.
|
||||
@@ -482,13 +483,15 @@ class RelationsCache:
|
||||
schema = lowercase(schema)
|
||||
with self.lock:
|
||||
results = [
|
||||
r.inner
|
||||
for r in self.relations.values()
|
||||
if (lowercase(r.schema) == schema and lowercase(r.database) == database)
|
||||
r.inner for r in self.relations.values()
|
||||
if (lowercase(r.schema) == schema and
|
||||
lowercase(r.database) == database)
|
||||
]
|
||||
|
||||
if None in results:
|
||||
raise NoneRelationFoundError()
|
||||
dbt.exceptions.raise_cache_inconsistent(
|
||||
'in get_relations, a None relation was found in the cache!'
|
||||
)
|
||||
return results
|
||||
|
||||
def clear(self):
|
||||
@@ -515,6 +518,6 @@ class RelationsCache:
|
||||
"""
|
||||
for relation in to_remove:
|
||||
# it may have been cascaded out already
|
||||
drop_key = _make_ref_key(relation)
|
||||
drop_key = _make_key(relation)
|
||||
if drop_key in self.relations:
|
||||
self.drop(drop_key)
|
||||
|
||||
@@ -1,19 +1,24 @@
|
||||
import threading
|
||||
import traceback
|
||||
from contextlib import contextmanager
|
||||
from importlib import import_module
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional, Set, Type
|
||||
from importlib import import_module
|
||||
from typing import Type, Dict, Any, List, Optional, Set
|
||||
|
||||
from dbt.exceptions import RuntimeException, InternalException
|
||||
from dbt.include.global_project import (
|
||||
PACKAGE_PATH as GLOBAL_PROJECT_PATH,
|
||||
PROJECT_NAME as GLOBAL_PROJECT_NAME,
|
||||
)
|
||||
from dbt.logger import GLOBAL_LOGGER as logger
|
||||
from dbt.contracts.connection import Credentials, AdapterRequiredConfig
|
||||
|
||||
|
||||
from dbt.adapters.protocol import (
|
||||
AdapterProtocol,
|
||||
AdapterConfig,
|
||||
RelationProtocol,
|
||||
)
|
||||
from dbt.adapters.base.plugin import AdapterPlugin
|
||||
from dbt.adapters.protocol import AdapterConfig, AdapterProtocol, RelationProtocol
|
||||
from dbt.contracts.connection import AdapterRequiredConfig, Credentials
|
||||
from dbt.events.functions import fire_event
|
||||
from dbt.events.types import AdapterImportError, PluginLoadError, AdapterRegistered
|
||||
from dbt.exceptions import DbtInternalError, DbtRuntimeError
|
||||
from dbt.include.global_project import PACKAGE_PATH as GLOBAL_PROJECT_PATH
|
||||
from dbt.include.global_project import PROJECT_NAME as GLOBAL_PROJECT_NAME
|
||||
from dbt.semver import VersionSpecifier
|
||||
|
||||
|
||||
Adapter = AdapterProtocol
|
||||
|
||||
@@ -35,7 +40,7 @@ class AdapterContainer:
|
||||
names = ", ".join(self.plugins.keys())
|
||||
|
||||
message = f"Invalid adapter type {name}! Must be one of {names}"
|
||||
raise DbtRuntimeError(message)
|
||||
raise RuntimeException(message)
|
||||
|
||||
def get_adapter_class_by_name(self, name: str) -> Type[Adapter]:
|
||||
plugin = self.get_plugin_by_name(name)
|
||||
@@ -45,7 +50,9 @@ class AdapterContainer:
|
||||
adapter = self.get_adapter_class_by_name(name)
|
||||
return adapter.Relation
|
||||
|
||||
def get_config_class_by_name(self, name: str) -> Type[AdapterConfig]:
|
||||
def get_config_class_by_name(
|
||||
self, name: str
|
||||
) -> Type[AdapterConfig]:
|
||||
adapter = self.get_adapter_class_by_name(name)
|
||||
return adapter.AdapterSpecificConfigs
|
||||
|
||||
@@ -55,25 +62,24 @@ class AdapterContainer:
|
||||
# singletons
|
||||
try:
|
||||
# mypy doesn't think modules have any attributes.
|
||||
mod: Any = import_module("." + name, "dbt.adapters")
|
||||
mod: Any = import_module('.' + name, 'dbt.adapters')
|
||||
except ModuleNotFoundError as exc:
|
||||
# if we failed to import the target module in particular, inform
|
||||
# the user about it via a runtime error
|
||||
if exc.name == "dbt.adapters." + name:
|
||||
fire_event(AdapterImportError(exc=str(exc)))
|
||||
raise DbtRuntimeError(f"Could not find adapter type {name}!")
|
||||
if exc.name == 'dbt.adapters.' + name:
|
||||
raise RuntimeException(f'Could not find adapter type {name}!')
|
||||
logger.info(f'Error importing adapter: {exc}')
|
||||
# otherwise, the error had to have come from some underlying
|
||||
# library. Log the stack trace.
|
||||
|
||||
fire_event(PluginLoadError(exc_info=traceback.format_exc()))
|
||||
logger.debug('', exc_info=True)
|
||||
raise
|
||||
plugin: AdapterPlugin = mod.Plugin
|
||||
plugin_type = plugin.adapter.type()
|
||||
|
||||
if plugin_type != name:
|
||||
raise DbtRuntimeError(
|
||||
f"Expected to find adapter with type named {name}, got "
|
||||
f"adapter with type {plugin_type}"
|
||||
raise RuntimeException(
|
||||
f'Expected to find adapter with type named {name}, got '
|
||||
f'adapter with type {plugin_type}'
|
||||
)
|
||||
|
||||
with self.lock:
|
||||
@@ -90,13 +96,7 @@ class AdapterContainer:
|
||||
def register_adapter(self, config: AdapterRequiredConfig) -> None:
|
||||
adapter_name = config.credentials.type
|
||||
adapter_type = self.get_adapter_class_by_name(adapter_name)
|
||||
adapter_version = import_module(f".{adapter_name}.__version__", "dbt.adapters").version
|
||||
adapter_version_specifier = VersionSpecifier.from_version_string(
|
||||
adapter_version
|
||||
).to_version_string()
|
||||
fire_event(
|
||||
AdapterRegistered(adapter_name=adapter_name, adapter_version=adapter_version_specifier)
|
||||
)
|
||||
|
||||
with self.lock:
|
||||
if adapter_name in self.adapters:
|
||||
# this shouldn't really happen...
|
||||
@@ -109,7 +109,8 @@ class AdapterContainer:
|
||||
return self.adapters[adapter_name]
|
||||
|
||||
def reset_adapters(self):
|
||||
"""Clear the adapters. This is useful for tests, which change configs."""
|
||||
"""Clear the adapters. This is useful for tests, which change configs.
|
||||
"""
|
||||
with self.lock:
|
||||
for adapter in self.adapters.values():
|
||||
adapter.cleanup_connections()
|
||||
@@ -139,16 +140,22 @@ class AdapterContainer:
|
||||
try:
|
||||
plugin = self.plugins[plugin_name]
|
||||
except KeyError:
|
||||
raise DbtInternalError(f"No plugin found for {plugin_name}") from None
|
||||
raise InternalException(
|
||||
f'No plugin found for {plugin_name}'
|
||||
) from None
|
||||
plugins.append(plugin)
|
||||
seen.add(plugin_name)
|
||||
if plugin.dependencies is None:
|
||||
continue
|
||||
for dep in plugin.dependencies:
|
||||
if dep not in seen:
|
||||
plugin_names.append(dep)
|
||||
return plugins
|
||||
|
||||
def get_adapter_package_names(self, name: Optional[str]) -> List[str]:
|
||||
package_names: List[str] = [p.project_name for p in self.get_adapter_plugins(name)]
|
||||
package_names: List[str] = [
|
||||
p.project_name for p in self.get_adapter_plugins(name)
|
||||
]
|
||||
package_names.append(GLOBAL_PROJECT_NAME)
|
||||
return package_names
|
||||
|
||||
@@ -158,16 +165,15 @@ class AdapterContainer:
|
||||
try:
|
||||
path = self.packages[package_name]
|
||||
except KeyError:
|
||||
raise DbtInternalError(f"No internal package listing found for {package_name}")
|
||||
raise InternalException(
|
||||
f'No internal package listing found for {package_name}'
|
||||
)
|
||||
paths.append(path)
|
||||
return paths
|
||||
|
||||
def get_adapter_type_names(self, name: Optional[str]) -> List[str]:
|
||||
return [p.adapter.type() for p in self.get_adapter_plugins(name)]
|
||||
|
||||
def get_adapter_constraint_support(self, name: Optional[str]) -> List[str]:
|
||||
return self.lookup_adapter(name).CONSTRAINT_SUPPORT # type: ignore
|
||||
|
||||
|
||||
FACTORY: AdapterContainer = AdapterContainer()
|
||||
|
||||
@@ -180,12 +186,9 @@ def get_adapter(config: AdapterRequiredConfig):
|
||||
return FACTORY.lookup_adapter(config.credentials.type)
|
||||
|
||||
|
||||
def get_adapter_by_type(adapter_type):
|
||||
return FACTORY.lookup_adapter(adapter_type)
|
||||
|
||||
|
||||
def reset_adapters():
|
||||
"""Clear the adapters. This is useful for tests, which change configs."""
|
||||
"""Clear the adapters. This is useful for tests, which change configs.
|
||||
"""
|
||||
FACTORY.reset_adapters()
|
||||
|
||||
|
||||
@@ -222,16 +225,3 @@ def get_adapter_package_names(name: Optional[str]) -> List[str]:
|
||||
|
||||
def get_adapter_type_names(name: Optional[str]) -> List[str]:
|
||||
return FACTORY.get_adapter_type_names(name)
|
||||
|
||||
|
||||
def get_adapter_constraint_support(name: Optional[str]) -> List[str]:
|
||||
return FACTORY.get_adapter_constraint_support(name)
|
||||
|
||||
|
||||
@contextmanager
|
||||
def adapter_management():
|
||||
reset_adapters()
|
||||
try:
|
||||
yield
|
||||
finally:
|
||||
cleanup_connections()
|
||||
|
||||
@@ -1,22 +1,17 @@
|
||||
from dataclasses import dataclass
|
||||
from typing import (
|
||||
Type,
|
||||
Hashable,
|
||||
Optional,
|
||||
ContextManager,
|
||||
List,
|
||||
Generic,
|
||||
TypeVar,
|
||||
Tuple,
|
||||
Dict,
|
||||
Any,
|
||||
Type, Hashable, Optional, ContextManager, List, Generic, TypeVar, ClassVar,
|
||||
Tuple, Union, Dict, Any
|
||||
)
|
||||
from typing_extensions import Protocol
|
||||
|
||||
import agate
|
||||
|
||||
from dbt.contracts.connection import Connection, AdapterRequiredConfig, AdapterResponse
|
||||
from dbt.contracts.graph.nodes import ResultNode, ManifestNode
|
||||
from dbt.contracts.connection import Connection, AdapterRequiredConfig
|
||||
from dbt.contracts.graph.compiled import (
|
||||
CompiledNode, ManifestNode, NonSourceCompiledNode
|
||||
)
|
||||
from dbt.contracts.graph.parsed import ParsedNode, ParsedSourceDefinition
|
||||
from dbt.contracts.graph.model_config import BaseConfig
|
||||
from dbt.contracts.graph.manifest import Manifest
|
||||
from dbt.contracts.relation import Policy, HasQuoting
|
||||
@@ -37,7 +32,7 @@ class ColumnProtocol(Protocol):
|
||||
pass
|
||||
|
||||
|
||||
Self = TypeVar("Self", bound="RelationProtocol")
|
||||
Self = TypeVar('Self', bound='RelationProtocol')
|
||||
|
||||
|
||||
class RelationProtocol(Protocol):
|
||||
@@ -46,7 +41,11 @@ class RelationProtocol(Protocol):
|
||||
...
|
||||
|
||||
@classmethod
|
||||
def create_from(cls: Type[Self], config: HasQuoting, node: ResultNode) -> Self:
|
||||
def create_from(
|
||||
cls: Type[Self],
|
||||
config: HasQuoting,
|
||||
node: Union[CompiledNode, ParsedNode, ParsedSourceDefinition],
|
||||
) -> Self:
|
||||
...
|
||||
|
||||
|
||||
@@ -59,19 +58,26 @@ class CompilerProtocol(Protocol):
|
||||
node: ManifestNode,
|
||||
manifest: Manifest,
|
||||
extra_context: Optional[Dict[str, Any]] = None,
|
||||
) -> ManifestNode:
|
||||
) -> NonSourceCompiledNode:
|
||||
...
|
||||
|
||||
|
||||
AdapterConfig_T = TypeVar("AdapterConfig_T", bound=AdapterConfig)
|
||||
ConnectionManager_T = TypeVar("ConnectionManager_T", bound=ConnectionManagerProtocol)
|
||||
Relation_T = TypeVar("Relation_T", bound=RelationProtocol)
|
||||
Column_T = TypeVar("Column_T", bound=ColumnProtocol)
|
||||
Compiler_T = TypeVar("Compiler_T", bound=CompilerProtocol)
|
||||
AdapterConfig_T = TypeVar(
|
||||
'AdapterConfig_T', bound=AdapterConfig
|
||||
)
|
||||
ConnectionManager_T = TypeVar(
|
||||
'ConnectionManager_T', bound=ConnectionManagerProtocol
|
||||
)
|
||||
Relation_T = TypeVar(
|
||||
'Relation_T', bound=RelationProtocol
|
||||
)
|
||||
Column_T = TypeVar(
|
||||
'Column_T', bound=ColumnProtocol
|
||||
)
|
||||
Compiler_T = TypeVar('Compiler_T', bound=CompilerProtocol)
|
||||
|
||||
|
||||
# TODO CT-211
|
||||
class AdapterProtocol( # type: ignore[misc]
|
||||
class AdapterProtocol(
|
||||
Protocol,
|
||||
Generic[
|
||||
AdapterConfig_T,
|
||||
@@ -79,15 +85,12 @@ class AdapterProtocol( # type: ignore[misc]
|
||||
Relation_T,
|
||||
Column_T,
|
||||
Compiler_T,
|
||||
],
|
||||
]
|
||||
):
|
||||
# N.B. Technically these are ClassVars, but mypy doesn't support putting type vars in a
|
||||
# ClassVar due to the restrictiveness of PEP-526
|
||||
# See: https://github.com/python/mypy/issues/5144
|
||||
AdapterSpecificConfigs: Type[AdapterConfig_T]
|
||||
Column: Type[Column_T]
|
||||
Relation: Type[Relation_T]
|
||||
ConnectionManager: Type[ConnectionManager_T]
|
||||
AdapterSpecificConfigs: ClassVar[Type[AdapterConfig_T]]
|
||||
Column: ClassVar[Type[Column_T]]
|
||||
Relation: ClassVar[Type[Relation_T]]
|
||||
ConnectionManager: ClassVar[Type[ConnectionManager_T]]
|
||||
connections: ConnectionManager_T
|
||||
|
||||
def __init__(self, config: AdapterRequiredConfig):
|
||||
@@ -151,7 +154,7 @@ class AdapterProtocol( # type: ignore[misc]
|
||||
|
||||
def execute(
|
||||
self, sql: str, auto_begin: bool = False, fetch: bool = False
|
||||
) -> Tuple[AdapterResponse, agate.Table]:
|
||||
) -> Tuple[str, agate.Table]:
|
||||
...
|
||||
|
||||
def get_compiler(self) -> Compiler_T:
|
||||
|
||||
@@ -1,37 +0,0 @@
|
||||
# this module exists to resolve circular imports with the events module

from collections import namedtuple
from typing import Any, Optional


_ReferenceKey = namedtuple("_ReferenceKey", "database schema identifier")


def lowercase(value: Optional[str]) -> Optional[str]:
    if value is None:
        return None
    else:
        return value.lower()


# For backwards compatibility. New code should use _make_ref_key
def _make_key(relation: Any) -> _ReferenceKey:
    return _make_ref_key(relation)


def _make_ref_key(relation: Any) -> _ReferenceKey:
    """Make _ReferenceKeys with lowercase values for the cache so we don't have
    to keep track of quoting
    """
    # databases and schemas can both be None
    return _ReferenceKey(
        lowercase(relation.database), lowercase(relation.schema), lowercase(relation.identifier)
    )


def _make_ref_key_dict(relation: Any):
    return {
        "database": relation.database,
        "schema": relation.schema,
        "identifier": relation.identifier,
    }
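
A tiny illustration of the helpers above (the relation object here is a stand-in; any object with `database`, `schema`, and `identifier` attributes works): `_make_ref_key` lowercases values for cache lookups, while `_make_ref_key_dict` keeps the original casing, e.g. for event payloads.

```python
from types import SimpleNamespace
from dbt.adapters.reference_keys import _make_ref_key, _make_ref_key_dict

relation = SimpleNamespace(database="Analytics", schema="Staging", identifier="STG_ORDERS")
print(_make_ref_key(relation))
# _ReferenceKey(database='analytics', schema='staging', identifier='stg_orders')
print(_make_ref_key_dict(relation))
# {'database': 'Analytics', 'schema': 'Staging', 'identifier': 'STG_ORDERS'}
```
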
|
||||
@@ -1,25 +0,0 @@
# RelationConfig

This package serves as an initial abstraction for managing the inspection of existing relations and determining
changes on those relations. It arose from the materialized view work and currently supports only
materialized views for Postgres and Redshift, as well as dynamic tables for Snowflake. There are three main
classes in this package.

## RelationConfigBase

This is a very small class that only has a `from_dict()` method and a default `NotImplementedError()`. At some
point this could be replaced by a more robust framework, like `mashumaro` or `pydantic`.

## RelationConfigChange

This class inherits from `RelationConfigBase`; however, it can be thought of as a separate class. The subclassing
merely points to the idea that both classes would likely inherit from the same class in a `mashumaro` or
`pydantic` implementation. This class is much more restricted in its attributes. It should really only
ever need an `action` and a `context`. It can be thought of as being analogous to a web request: you need to
know what you're doing (`action`: 'create' = GET, 'drop' = DELETE, etc.) and the information (`context`) needed
to make the change. In our scenarios, the context tends to be an instance of `RelationConfigBase` corresponding
to the new state.

## RelationConfigValidationMixin

This mixin provides optional validation mechanics that can be applied to either `RelationConfigBase` or
`RelationConfigChange` subclasses. A validation rule is a combination of a `validation_check`, something
that should evaluate to `True`, and an optional `validation_error`, an instance of `DbtRuntimeError`
that should be raised if the `validation_check` fails. While optional, it's recommended that
the `validation_error` be provided for clearer feedback to the end user.
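
To make the division of labor concrete, here is a hypothetical adapter-side example (illustrative names only, not part of this package) wiring the three classes together: a config with a validation rule, and a change describing an action against that config.

```python
from dataclasses import dataclass
from typing import Optional, Set

from dbt.adapters.relation_configs import (
    RelationConfigBase,
    RelationConfigChange,
    RelationConfigChangeAction,
    RelationConfigValidationMixin,
    RelationConfigValidationRule,
)
from dbt.exceptions import DbtRuntimeError


@dataclass(frozen=True, eq=True, unsafe_hash=True)
class MyIndexConfig(RelationConfigBase, RelationConfigValidationMixin):
    # hypothetical adapter-specific config
    column_name: Optional[str] = None
    method: str = "btree"

    @property
    def validation_rules(self) -> Set[RelationConfigValidationRule]:
        return {
            RelationConfigValidationRule(
                validation_check=self.column_name is not None,
                validation_error=DbtRuntimeError("An index must name a column."),
            ),
        }


@dataclass(frozen=True, eq=True, unsafe_hash=True)
class MyIndexConfigChange(RelationConfigChange, RelationConfigValidationMixin):
    @property
    def requires_full_refresh(self) -> bool:
        # recreating an index does not require rebuilding the relation itself
        return False


index = MyIndexConfig.from_dict({"column_name": "id", "method": "hash"})
change = MyIndexConfigChange(action=RelationConfigChangeAction.create, context=index)
```
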
@@ -1,12 +0,0 @@
|
||||
from dbt.adapters.relation_configs.config_base import ( # noqa: F401
|
||||
RelationConfigBase,
|
||||
RelationResults,
|
||||
)
|
||||
from dbt.adapters.relation_configs.config_change import ( # noqa: F401
|
||||
RelationConfigChangeAction,
|
||||
RelationConfigChange,
|
||||
)
|
||||
from dbt.adapters.relation_configs.config_validation import ( # noqa: F401
|
||||
RelationConfigValidationMixin,
|
||||
RelationConfigValidationRule,
|
||||
)
|
||||
@@ -1,44 +0,0 @@
|
||||
from dataclasses import dataclass
|
||||
from typing import Union, Dict
|
||||
|
||||
import agate
|
||||
from dbt.utils import filter_null_values
|
||||
|
||||
|
||||
"""
|
||||
This is what relation metadata from the database looks like. It's a dictionary because there will be
|
||||
multiple grains of data for a single object. For example, a materialized view in Postgres has base level information,
|
||||
like name. But it also can have multiple indexes, which needs to be a separate query. It might look like this:
|
||||
|
||||
{
|
||||
"base": agate.Row({"table_name": "table_abc", "query": "select * from table_def"})
|
||||
"indexes": agate.Table("rows": [
|
||||
agate.Row({"name": "index_a", "columns": ["column_a"], "type": "hash", "unique": False}),
|
||||
agate.Row({"name": "index_b", "columns": ["time_dim_a"], "type": "btree", "unique": False}),
|
||||
])
|
||||
}
|
||||
"""
|
||||
RelationResults = Dict[str, Union[agate.Row, agate.Table]]
|
||||
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class RelationConfigBase:
|
||||
@classmethod
|
||||
def from_dict(cls, kwargs_dict) -> "RelationConfigBase":
|
||||
"""
|
||||
This assumes the subclass of `RelationConfigBase` is flat, in the sense that no attribute is
|
||||
itself another subclass of `RelationConfigBase`. If that's not the case, this should be overriden
|
||||
to manually manage that complexity.
|
||||
|
||||
Args:
|
||||
kwargs_dict: the dict representation of this instance
|
||||
|
||||
Returns: the `RelationConfigBase` representation associated with the provided dict
|
||||
"""
|
||||
return cls(**filter_null_values(kwargs_dict)) # type: ignore
|
||||
|
||||
@classmethod
|
||||
def _not_implemented_error(cls) -> NotImplementedError:
|
||||
return NotImplementedError(
|
||||
"This relation type has not been fully configured for this adapter."
|
||||
)
|
||||
@@ -1,23 +0,0 @@
|
||||
from abc import ABC, abstractmethod
|
||||
from dataclasses import dataclass
|
||||
from typing import Hashable
|
||||
|
||||
from dbt.adapters.relation_configs.config_base import RelationConfigBase
|
||||
from dbt.dataclass_schema import StrEnum
|
||||
|
||||
|
||||
class RelationConfigChangeAction(StrEnum):
|
||||
alter = "alter"
|
||||
create = "create"
|
||||
drop = "drop"
|
||||
|
||||
|
||||
@dataclass(frozen=True, eq=True, unsafe_hash=True)
|
||||
class RelationConfigChange(RelationConfigBase, ABC):
|
||||
action: RelationConfigChangeAction
|
||||
context: Hashable # this is usually a RelationConfig, e.g. IndexConfig, but shouldn't be limited
|
||||
|
||||
@property
|
||||
@abstractmethod
|
||||
def requires_full_refresh(self) -> bool:
|
||||
raise self._not_implemented_error()
|
||||
@@ -1,57 +0,0 @@
from dataclasses import dataclass
from typing import Set, Optional

from dbt.exceptions import DbtRuntimeError


@dataclass(frozen=True, eq=True, unsafe_hash=True)
class RelationConfigValidationRule:
    validation_check: bool
    validation_error: Optional[DbtRuntimeError]

    @property
    def default_error(self):
        return DbtRuntimeError(
            "There was a validation error in preparing this relation config."
            "No additional context was provided by this adapter."
        )


@dataclass(frozen=True)
class RelationConfigValidationMixin:
    def __post_init__(self):
        self.run_validation_rules()

    @property
    def validation_rules(self) -> Set[RelationConfigValidationRule]:
        """
        A set of validation rules to run against the object upon creation.

        A validation rule is a combination of a validation check (bool) and an optional error message.

        This defaults to no validation rules if not implemented. It's recommended to override this with values,
        but that may not always be necessary.

        Returns: a set of validation rules
        """
        return set()

    def run_validation_rules(self):
        for validation_rule in self.validation_rules:
            try:
                assert validation_rule.validation_check
            except AssertionError:
                if validation_rule.validation_error:
                    raise validation_rule.validation_error
                else:
                    raise validation_rule.default_error
        self.run_child_validation_rules()

    def run_child_validation_rules(self):
        for attr_value in vars(self).values():
            if hasattr(attr_value, "validation_rules"):
                attr_value.run_validation_rules()
            if isinstance(attr_value, set):
                for member in attr_value:
                    if hasattr(member, "validation_rules"):
                        member.run_validation_rules()
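A hedged sketch of how a config class might attach rules via this mixin. The `MaterializedViewConfig` field and its rule are invented; the mixin, the rule class, and the `DbtRuntimeError` usage follow the file above, and the module path is an assumption.

```python
from dataclasses import dataclass
from typing import Set

from dbt.adapters.relation_configs.config_validation import (
    RelationConfigValidationMixin,
    RelationConfigValidationRule,
)
from dbt.exceptions import DbtRuntimeError


@dataclass(frozen=True)
class MaterializedViewConfig(RelationConfigValidationMixin):  # hypothetical config
    refresh_interval_seconds: int

    @property
    def validation_rules(self) -> Set[RelationConfigValidationRule]:
        return {
            RelationConfigValidationRule(
                validation_check=self.refresh_interval_seconds > 0,
                validation_error=DbtRuntimeError("refresh_interval_seconds must be positive."),
            )
        }


MaterializedViewConfig(refresh_interval_seconds=60)   # __post_init__ runs the rules
# MaterializedViewConfig(refresh_interval_seconds=0)  # would raise DbtRuntimeError
```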
@@ -1,17 +1,15 @@
import abc
import time
from typing import List, Optional, Tuple, Any, Iterable, Dict, Union
from typing import List, Optional, Tuple, Any, Iterable, Dict

import agate

import dbt.clients.agate_helper
import dbt.exceptions
from dbt.adapters.base import BaseConnectionManager
from dbt.contracts.connection import Connection, ConnectionState, AdapterResponse
from dbt.events.functions import fire_event
from dbt.events.types import ConnectionUsed, SQLQuery, SQLCommit, SQLQueryStatus
from dbt.events.contextvars import get_node_info
from dbt.utils import cast_to_str
from dbt.contracts.connection import Connection, ConnectionState
from dbt.logger import GLOBAL_LOGGER as logger
from dbt import flags


class SQLConnectionManager(BaseConnectionManager):
@@ -20,14 +18,15 @@ class SQLConnectionManager(BaseConnectionManager):
    Methods to implement:
        - exception_handler
        - cancel
        - get_response
        - get_status
        - open
    """

    @abc.abstractmethod
    def cancel(self, connection: Connection):
        """Cancel the given connection."""
        raise dbt.exceptions.NotImplementedError("`cancel` is not implemented for this adapter!")
        raise dbt.exceptions.NotImplementedException(
            '`cancel` is not implemented for this adapter!'
        )

    def cancel_open(self) -> List[str]:
        names = []
@@ -39,7 +38,10 @@ class SQLConnectionManager(BaseConnectionManager):

            # if the connection failed, the handle will be None so we have
            # nothing to cancel.
            if connection.handle is not None and connection.state == ConnectionState.OPEN:
            if (
                connection.handle is not None and
                connection.state == ConnectionState.OPEN
            ):
                self.cancel(connection)
                if connection.name is not None:
                    names.append(connection.name)
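To make the "methods to implement" list above concrete, here is a minimal sketch of a subclass using `sqlite3` as a stand-in PEP 249 driver. This is not any shipped adapter: the `AdapterResponse` field names are my assumption, while `Connection`, `ConnectionState`, and `DbtRuntimeError` follow the imports shown in this diff.

```python
import sqlite3
from contextlib import contextmanager
from typing import Any

import dbt.exceptions
from dbt.adapters.sql import SQLConnectionManager
from dbt.contracts.connection import AdapterResponse, Connection, ConnectionState


class SQLiteConnectionManager(SQLConnectionManager):
    TYPE = "sqlite"  # hypothetical adapter type

    @contextmanager
    def exception_handler(self, sql: str):
        try:
            yield
        except sqlite3.DatabaseError as exc:
            # Surface driver errors as dbt runtime errors.
            raise dbt.exceptions.DbtRuntimeError(str(exc)) from exc

    @classmethod
    def open(cls, connection: Connection) -> Connection:
        if connection.state == ConnectionState.OPEN:
            return connection
        connection.handle = sqlite3.connect(":memory:")
        connection.state = ConnectionState.OPEN
        return connection

    def cancel(self, connection: Connection) -> None:
        # sqlite has no server-side kill; interrupt the local handle instead.
        connection.handle.interrupt()

    @classmethod
    def get_response(cls, cursor: Any) -> AdapterResponse:
        # Field names assumed from dbt.contracts.connection.AdapterResponse.
        return AdapterResponse(_message="OK", rows_affected=cursor.rowcount)
```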
@@ -50,124 +52,101 @@
        sql: str,
        auto_begin: bool = True,
        bindings: Optional[Any] = None,
        abridge_sql_log: bool = False,
        abridge_sql_log: bool = False
    ) -> Tuple[Connection, Any]:
        connection = self.get_thread_connection()
        if auto_begin and connection.transaction_open is False:
            self.begin()
        fire_event(
            ConnectionUsed(
                conn_type=self.TYPE,
                conn_name=cast_to_str(connection.name),
                node_info=get_node_info(),
            )
        )

        logger.debug('Using {} connection "{}".'
                     .format(self.TYPE, connection.name))

        with self.exception_handler(sql):
            if abridge_sql_log:
                log_sql = "{}...".format(sql[:512])
                log_sql = '{}...'.format(sql[:512])
            else:
                log_sql = sql

            fire_event(
                SQLQuery(
                    conn_name=cast_to_str(connection.name), sql=log_sql, node_info=get_node_info()
                )
            logger.debug(
                'On {connection_name}: {sql}',
                connection_name=connection.name,
                sql=log_sql,
            )
            pre = time.time()

            cursor = connection.handle.cursor()
            cursor.execute(sql, bindings)

            fire_event(
                SQLQueryStatus(
                    status=str(self.get_response(cursor)),
                    elapsed=round((time.time() - pre)),
                    node_info=get_node_info(),
                )
            logger.debug(
                "SQL status: {status} in {elapsed:0.2f} seconds",
                status=self.get_status(cursor),
                elapsed=(time.time() - pre)
            )

            return connection, cursor

    @classmethod
    @abc.abstractmethod
    def get_response(cls, cursor: Any) -> AdapterResponse:
    @abc.abstractclassmethod
    def get_status(cls, cursor: Any) -> str:
        """Get the status of the cursor."""
        raise dbt.exceptions.NotImplementedError(
            "`get_response` is not implemented for this adapter!"
        raise dbt.exceptions.NotImplementedException(
            '`get_status` is not implemented for this adapter!'
        )

    @classmethod
    def process_results(
        cls, column_names: Iterable[str], rows: Iterable[Any]
        cls,
        column_names: Iterable[str],
        rows: Iterable[Any]
    ) -> List[Dict[str, Any]]:
        # TODO CT-211
        unique_col_names = dict()  # type: ignore[var-annotated]
        # TODO CT-211
        for idx in range(len(column_names)):  # type: ignore[arg-type]
            # TODO CT-211
            col_name = column_names[idx]  # type: ignore[index]
            if col_name in unique_col_names:
                unique_col_names[col_name] += 1
                # TODO CT-211
                column_names[idx] = f"{col_name}_{unique_col_names[col_name]}"  # type: ignore[index] # noqa
            else:
                # TODO CT-211
                unique_col_names[column_names[idx]] = 1  # type: ignore[index]

        return [dict(zip(column_names, row)) for row in rows]

    @classmethod
    def get_result_from_cursor(cls, cursor: Any, limit: Optional[int]) -> agate.Table:
    def get_result_from_cursor(cls, cursor: Any) -> agate.Table:
        data: List[Any] = []
        column_names: List[str] = []

        if cursor.description is not None:
            column_names = [col[0] for col in cursor.description]
            if limit:
                rows = cursor.fetchmany(limit)
            else:
                rows = cursor.fetchall()
            rows = cursor.fetchall()
            data = cls.process_results(column_names, rows)

        return dbt.clients.agate_helper.table_from_data_flat(data, column_names)

    @classmethod
    def data_type_code_to_name(cls, type_code: Union[int, str]) -> str:
        """Get the string representation of the data type from the type_code."""
        # https://peps.python.org/pep-0249/#type-objects
        raise dbt.exceptions.NotImplementedError(
            "`data_type_code_to_name` is not implemented for this adapter!"
        return dbt.clients.agate_helper.table_from_data_flat(
            data,
            column_names
        )

    def execute(
        self, sql: str, auto_begin: bool = False, fetch: bool = False, limit: Optional[int] = None
    ) -> Tuple[AdapterResponse, agate.Table]:
        self, sql: str, auto_begin: bool = False, fetch: bool = False
    ) -> Tuple[str, agate.Table]:
        sql = self._add_query_comment(sql)
        _, cursor = self.add_query(sql, auto_begin)
        response = self.get_response(cursor)
        status = self.get_status(cursor)
        if fetch:
            table = self.get_result_from_cursor(cursor, limit)
            table = self.get_result_from_cursor(cursor)
        else:
            table = dbt.clients.agate_helper.empty_table()
        return response, table
        return status, table

    def add_begin_query(self):
        return self.add_query("BEGIN", auto_begin=False)
        return self.add_query('BEGIN', auto_begin=False)

    def add_commit_query(self):
        return self.add_query("COMMIT", auto_begin=False)

    def add_select_query(self, sql: str) -> Tuple[Connection, Any]:
        sql = self._add_query_comment(sql)
        return self.add_query(sql, auto_begin=False)
        return self.add_query('COMMIT', auto_begin=False)

    def begin(self):
        connection = self.get_thread_connection()

        if flags.STRICT_MODE:
            if not isinstance(connection, Connection):
                raise dbt.exceptions.CompilerException(
                    f'In begin, got {connection} - not a Connection!'
                )

        if connection.transaction_open is True:
            raise dbt.exceptions.DbtInternalError(
            raise dbt.exceptions.InternalException(
                'Tried to begin a new transaction on connection "{}", but '
                "it already had one open!".format(connection.name)
            )
                'it already had one open!'.format(connection.name))

        self.add_begin_query()
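For the `data_type_code_to_name` hook introduced on the new side of the hunk above, an adapter typically maps its driver's PEP 249 `type_code` values to readable names. A sketch, using Postgres-style OIDs purely as example codes:

```python
from typing import Union

# Postgres-style OIDs, used here only as example type codes.
_TYPE_CODE_NAMES = {16: "boolean", 23: "integer", 25: "text", 1114: "timestamp without time zone"}


def data_type_code_to_name(type_code: Union[int, str]) -> str:
    # Fall back to the raw code when the driver reports something unmapped.
    return _TYPE_CODE_NAMES.get(int(type_code), str(type_code))


print(data_type_code_to_name(25))  # text
```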
@@ -176,13 +155,18 @@ class SQLConnectionManager(BaseConnectionManager):

    def commit(self):
        connection = self.get_thread_connection()
        if connection.transaction_open is False:
            raise dbt.exceptions.DbtInternalError(
                'Tried to commit transaction on connection "{}", but '
                "it does not have one open!".format(connection.name)
            )
        if flags.STRICT_MODE:
            if not isinstance(connection, Connection):
                raise dbt.exceptions.CompilerException(
                    f'In commit, got {connection} - not a Connection!'
                )

        fire_event(SQLCommit(conn_name=connection.name, node_info=get_node_info()))
        if connection.transaction_open is False:
            raise dbt.exceptions.InternalException(
                'Tried to commit transaction on connection "{}", but '
                'it does not have one open!'.format(connection.name))

        logger.debug('On {}: COMMIT'.format(connection.name))
        self.add_commit_query()

        connection.transaction_open = False
@@ -1,33 +1,30 @@
import agate
from typing import Any, Optional, Tuple, Type, List

from dbt.contracts.connection import Connection, AdapterResponse
from dbt.exceptions import RelationTypeNullError
import dbt.clients.agate_helper
from dbt.contracts.connection import Connection
import dbt.exceptions
from dbt.adapters.base import BaseAdapter, available
from dbt.adapters.cache import _make_ref_key_dict
from dbt.adapters.sql import SQLConnectionManager
from dbt.events.functions import fire_event
from dbt.events.types import ColTypeChange, SchemaCreation, SchemaDrop

from dbt.logger import GLOBAL_LOGGER as logger

from dbt.adapters.base.relation import BaseRelation

LIST_RELATIONS_MACRO_NAME = "list_relations_without_caching"
GET_COLUMNS_IN_RELATION_MACRO_NAME = "get_columns_in_relation"
LIST_SCHEMAS_MACRO_NAME = "list_schemas"
CHECK_SCHEMA_EXISTS_MACRO_NAME = "check_schema_exists"
CREATE_SCHEMA_MACRO_NAME = "create_schema"
DROP_SCHEMA_MACRO_NAME = "drop_schema"
RENAME_RELATION_MACRO_NAME = "rename_relation"
TRUNCATE_RELATION_MACRO_NAME = "truncate_relation"
DROP_RELATION_MACRO_NAME = "drop_relation"
ALTER_COLUMN_TYPE_MACRO_NAME = "alter_column_type"
VALIDATE_SQL_MACRO_NAME = "validate_sql"
LIST_RELATIONS_MACRO_NAME = 'list_relations_without_caching'
GET_COLUMNS_IN_RELATION_MACRO_NAME = 'get_columns_in_relation'
LIST_SCHEMAS_MACRO_NAME = 'list_schemas'
CHECK_SCHEMA_EXISTS_MACRO_NAME = 'check_schema_exists'
CREATE_SCHEMA_MACRO_NAME = 'create_schema'
DROP_SCHEMA_MACRO_NAME = 'drop_schema'
RENAME_RELATION_MACRO_NAME = 'rename_relation'
TRUNCATE_RELATION_MACRO_NAME = 'truncate_relation'
DROP_RELATION_MACRO_NAME = 'drop_relation'
ALTER_COLUMN_TYPE_MACRO_NAME = 'alter_column_type'


class SQLAdapter(BaseAdapter):
    """The default adapter with the common agate conversions and some SQL
    methods was implemented. This adapter has a different much shorter list of
    methods implemented. This adapter has a different much shorter list of
    methods to implement, but some more macros that must be implemented.

    To implement a macro, implement "${adapter_type}__${macro_name}". in the
@@ -63,24 +60,30 @@ class SQLAdapter(BaseAdapter):
        :param abridge_sql_log: If set, limit the raw sql logged to 512
            characters
        """
        return self.connections.add_query(sql, auto_begin, bindings, abridge_sql_log)
        return self.connections.add_query(sql, auto_begin, bindings,
                                          abridge_sql_log)

    @classmethod
    def convert_text_type(cls, agate_table: agate.Table, col_idx: int) -> str:
        return "text"

    @classmethod
    def convert_number_type(cls, agate_table: agate.Table, col_idx: int) -> str:
        # TODO CT-211
        decimals = agate_table.aggregate(agate.MaxPrecision(col_idx))  # type: ignore[attr-defined]
    def convert_number_type(
        cls, agate_table: agate.Table, col_idx: int
    ) -> str:
        decimals = agate_table.aggregate(agate.MaxPrecision(col_idx))
        return "float8" if decimals else "integer"

    @classmethod
    def convert_boolean_type(cls, agate_table: agate.Table, col_idx: int) -> str:
    def convert_boolean_type(
        cls, agate_table: agate.Table, col_idx: int
    ) -> str:
        return "boolean"

    @classmethod
    def convert_datetime_type(cls, agate_table: agate.Table, col_idx: int) -> str:
    def convert_datetime_type(
        cls, agate_table: agate.Table, col_idx: int
    ) -> str:
        return "timestamp without time zone"

    @classmethod
@@ -96,27 +99,31 @@ class SQLAdapter(BaseAdapter):
        return True

    def expand_column_types(self, goal, current):
        reference_columns = {c.name: c for c in self.get_columns_in_relation(goal)}
        reference_columns = {
            c.name: c for c in
            self.get_columns_in_relation(goal)
        }

        target_columns = {c.name: c for c in self.get_columns_in_relation(current)}
        target_columns = {
            c.name: c for c
            in self.get_columns_in_relation(current)
        }

        for column_name, reference_column in reference_columns.items():
            target_column = target_columns.get(column_name)

            if target_column is not None and target_column.can_expand_to(reference_column):
            if target_column is not None and \
                    target_column.can_expand_to(reference_column):
                col_string_size = reference_column.string_size()
                new_type = self.Column.string_type(col_string_size)
                fire_event(
                    ColTypeChange(
                        orig_type=target_column.data_type,
                        new_type=new_type,
                        table=_make_ref_key_dict(current),
                    )
                )
                logger.debug("Changing col type from {} to {} in table {}",
                             target_column.data_type, new_type, current)

                self.alter_column_type(current, column_name, new_type)

    def alter_column_type(self, relation, column_name, new_column_type) -> None:
    def alter_column_type(
        self, relation, column_name, new_column_type
    ) -> None:
        """
        1. Create a new column (w/ temp name and correct type)
        2. Copy data over to it
@@ -124,38 +131,53 @@ class SQLAdapter(BaseAdapter):
        4. Rename the new column to existing column
        """
        kwargs = {
            "relation": relation,
            "column_name": column_name,
            "new_column_type": new_column_type,
            'relation': relation,
            'column_name': column_name,
            'new_column_type': new_column_type,
        }
        self.execute_macro(ALTER_COLUMN_TYPE_MACRO_NAME, kwargs=kwargs)
        self.execute_macro(
            ALTER_COLUMN_TYPE_MACRO_NAME,
            kwargs=kwargs
        )

    def drop_relation(self, relation):
        if relation.type is None:
            raise RelationTypeNullError(relation)
            dbt.exceptions.raise_compiler_error(
                'Tried to drop relation {}, but its type is null.'
                .format(relation))

        self.cache_dropped(relation)
        self.execute_macro(DROP_RELATION_MACRO_NAME, kwargs={"relation": relation})
        self.execute_macro(
            DROP_RELATION_MACRO_NAME,
            kwargs={'relation': relation}
        )

    def truncate_relation(self, relation):
        self.execute_macro(TRUNCATE_RELATION_MACRO_NAME, kwargs={"relation": relation})
        self.execute_macro(
            TRUNCATE_RELATION_MACRO_NAME,
            kwargs={'relation': relation}
        )

    def rename_relation(self, from_relation, to_relation):
        self.cache_renamed(from_relation, to_relation)

        kwargs = {"from_relation": from_relation, "to_relation": to_relation}
        self.execute_macro(RENAME_RELATION_MACRO_NAME, kwargs=kwargs)
        kwargs = {'from_relation': from_relation, 'to_relation': to_relation}
        self.execute_macro(
            RENAME_RELATION_MACRO_NAME,
            kwargs=kwargs
        )

    def get_columns_in_relation(self, relation):
        return self.execute_macro(
            GET_COLUMNS_IN_RELATION_MACRO_NAME, kwargs={"relation": relation}
            GET_COLUMNS_IN_RELATION_MACRO_NAME,
            kwargs={'relation': relation}
        )

    def create_schema(self, relation: BaseRelation) -> None:
        relation = relation.without_identifier()
        fire_event(SchemaCreation(relation=_make_ref_key_dict(relation)))
        logger.debug('Creating schema "{}"', relation)
        kwargs = {
            "relation": relation,
            'relation': relation,
        }
        self.execute_macro(CREATE_SCHEMA_MACRO_NAME, kwargs=kwargs)
        self.commit_if_has_connection()
@@ -164,46 +186,51 @@ class SQLAdapter(BaseAdapter):

    def drop_schema(self, relation: BaseRelation) -> None:
        relation = relation.without_identifier()
        fire_event(SchemaDrop(relation=_make_ref_key_dict(relation)))
        logger.debug('Dropping schema "{}".', relation)
        kwargs = {
            "relation": relation,
            'relation': relation,
        }
        self.execute_macro(DROP_SCHEMA_MACRO_NAME, kwargs=kwargs)
        self.commit_if_has_connection()
        # we can update the cache here
        self.cache.drop_schema(relation.database, relation.schema)

    def list_relations_without_caching(
        self,
        schema_relation: BaseRelation,
        self, schema_relation: BaseRelation,
    ) -> List[BaseRelation]:
        kwargs = {"schema_relation": schema_relation}
        results = self.execute_macro(LIST_RELATIONS_MACRO_NAME, kwargs=kwargs)
        kwargs = {'schema_relation': schema_relation}
        results = self.execute_macro(
            LIST_RELATIONS_MACRO_NAME,
            kwargs=kwargs
        )

        relations = []
        quote_policy = {"database": True, "schema": True, "identifier": True}
        quote_policy = {
            'database': True,
            'schema': True,
            'identifier': True
        }
        for _database, name, _schema, _type in results:
            try:
                _type = self.Relation.get_relation_type(_type)
            except ValueError:
                _type = self.Relation.External
            relations.append(
                self.Relation.create(
                    database=_database,
                    schema=_schema,
                    identifier=name,
                    quote_policy=quote_policy,
                    type=_type,
                )
            )
            relations.append(self.Relation.create(
                database=_database,
                schema=_schema,
                identifier=name,
                quote_policy=quote_policy,
                type=_type
            ))
        return relations

    @classmethod
    def quote(self, identifier):
        return '"{}"'.format(identifier)

    def list_schemas(self, database: str) -> List[str]:
        results = self.execute_macro(LIST_SCHEMAS_MACRO_NAME, kwargs={"database": database})
        results = self.execute_macro(
            LIST_SCHEMAS_MACRO_NAME,
            kwargs={'database': database}
        )

        return [row[0] for row in results]
@@ -211,60 +238,13 @@ class SQLAdapter(BaseAdapter):
        information_schema = self.Relation.create(
            database=database,
            schema=schema,
            identifier="INFORMATION_SCHEMA",
            quote_policy=self.config.quoting,
            identifier='INFORMATION_SCHEMA',
            quote_policy=self.config.quoting
        ).information_schema()

        kwargs = {"information_schema": information_schema, "schema": schema}
        results = self.execute_macro(CHECK_SCHEMA_EXISTS_MACRO_NAME, kwargs=kwargs)
        return results[0][0] > 0

    def validate_sql(self, sql: str) -> AdapterResponse:
        """Submit the given SQL to the engine for validation, but not execution.

        By default we simply prefix the query with the explain keyword and allow the
        exceptions thrown by the underlying engine on invalid SQL inputs to bubble up
        to the exception handler. For adjustments to the explain statement - such as
        for adapters that have different mechanisms for hinting at query validation
        or dry-run - callers may be able to override the validate_sql_query macro with
        the addition of an <adapter>__validate_sql implementation.

        :param sql str: The sql to validate
        """
        kwargs = {
            "sql": sql,
        }
        result = self.execute_macro(VALIDATE_SQL_MACRO_NAME, kwargs=kwargs)
        # The statement macro always returns an AdapterResponse in the output AttrDict's
        # `response` property, and we preserve the full payload in case we want to
        # return fetched output for engines where explain plans are emitted as columnar
        # results. Any macro override that deviates from this behavior may encounter an
        # assertion error in the runtime.
        adapter_response = result.response  # type: ignore[attr-defined]
        assert isinstance(adapter_response, AdapterResponse), (
            f"Expected AdapterResponse from validate_sql macro execution, "
            f"got {type(adapter_response)}."
        kwargs = {'information_schema': information_schema, 'schema': schema}
        results = self.execute_macro(
            CHECK_SCHEMA_EXISTS_MACRO_NAME,
            kwargs=kwargs
        )
        return adapter_response

    # This is for use in the test suite
    def run_sql_for_tests(self, sql, fetch, conn):
        cursor = conn.handle.cursor()
        try:
            cursor.execute(sql)
            if hasattr(conn.handle, "commit"):
                conn.handle.commit()
            if fetch == "one":
                return cursor.fetchone()
            elif fetch == "all":
                return cursor.fetchall()
            else:
                return
        except BaseException as e:
            if conn.handle and not getattr(conn.handle, "closed", True):
                conn.handle.rollback()
            print(sql)
            print(e)
            raise
        finally:
            conn.transaction_open = False
        return results[0][0] > 0
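The dry-run idea in the `validate_sql` docstring above, prefixing the statement with EXPLAIN and letting the engine reject invalid SQL, can be sketched outside dbt like this, with `sqlite3` standing in for the warehouse (the real path goes through the `validate_sql` macro):

```python
import sqlite3


def validate_sql(connection: sqlite3.Connection, sql: str) -> None:
    # Raises sqlite3.OperationalError for statements that do not compile,
    # analogous to letting warehouse errors bubble up to the exception handler.
    connection.execute(f"EXPLAIN {sql}")


conn = sqlite3.connect(":memory:")
validate_sql(conn, "select 1")           # fine
# validate_sql(conn, "select * from")    # would raise OperationalError
```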
@@ -1,71 +0,0 @@
# Adding a new command

## `main.py`
Add the new command with all necessary decorators. Every command will need at minimum:
- a decorator for the click group it belongs to which also names the command
- the postflight decorator (must come before other decorators from the `requires` module for error handling)
- the preflight decorator
```py
@cli.command("my-new-command")
@requires.postflight
@requires.preflight
def my_new_command(ctx, **kwargs):
    ...
```

## `types.py`
Add an entry to the `Command` enum with your new command. Commands that are sub-commands should have entries
that represent their full command path (e.g. `source freshness -> SOURCE_FRESHNESS`, `docs serve -> DOCS_SERVE`).

## `flags.py`
Add the new command to the dictionary within the `command_args` function.
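A hedged sketch of the two registration points just described. The real `Command` enum and `command_args` mapping live in dbt's CLI modules; the values and dictionary layout below are illustrative only, not the exact shape in dbt-core.

```python
from enum import Enum


class Command(Enum):
    RUN = "run"
    SOURCE_FRESHNESS = "source freshness"
    MY_NEW_COMMAND = "my-new-command"  # new entry for the added command


# flags.py keeps a Command -> params mapping inside command_args();
# the new command needs its own entry there as well (shape assumed here).
_COMMAND_PARAMS = {
    Command.RUN: ["select", "exclude", "threads"],
    Command.MY_NEW_COMMAND: ["project-dir", "profiles-dir"],
}


def command_args(command: Command):
    return _COMMAND_PARAMS[command]


print(command_args(Command.MY_NEW_COMMAND))
```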
# Exception Handling

## `requires.py`

### `postflight`
In the postflight decorator, the click command is invoked (i.e. `func(*args, **kwargs)`) and wrapped in a `try/except` block to handle any exceptions thrown.
Exceptions raised during the command invocation are wrapped in custom exceptions from the `dbt.cli.exceptions` module (i.e. `ResultExit`, `ExceptionExit`) that instruct click to complete execution with a particular exit code.

Some `dbt-core` handled exceptions have an attribute named `results` which contains results from running nodes (e.g. `FailFastError`). These are wrapped in the `ResultExit` exception to represent runs that have failed in a way that `dbt-core` expects.
If the invocation of the command does not throw any exceptions but still does not succeed, `postflight` raises `ResultExit` as well, so the same exit code applies.
These exceptions produce an exit code of `1`.

Exceptions wrapped with `ExceptionExit` may be thrown by `dbt-core` intentionally (i.e. an exception that inherits from `dbt.exceptions.Exception`) or unintentionally (i.e. exceptions thrown by the Python runtime). In either case these are errors that `dbt-core` did not expect, and they are treated as genuine exceptions.
These exceptions produce an exit code of `2`.

If no exceptions are thrown from invoking the command and the command succeeds, `postflight` does not raise anything.
When no exceptions are raised, an exit code of `0` is produced.
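Seen from outside the process, the exit-code mapping above looks like the following (this assumes the `dbt` CLI is installed and is run inside a configured project):

```python
import subprocess

completed = subprocess.run(["dbt", "run"])
if completed.returncode == 0:
    print("success")
elif completed.returncode == 1:
    print("handled failure (ResultExit)")
else:  # 2
    print("unhandled error (ExceptionExit)")
```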
## `main.py`

### `dbtRunner`
`dbtRunner` provides a programmatic interface for our click CLI and wraps the invocation of the click commands to handle any exceptions thrown.

`dbtRunner.invoke` should ideally only ever return an instantiated `dbtRunnerResult`, which contains the following fields:
- `success`: A boolean representing whether the command invocation was successful
- `result`: The optional result of the command invoked. This attribute can have many types; please see the definition of `dbtRunnerResult` for more information
- `exception`: If an exception was thrown during command invocation it will be saved here, otherwise it will be `None`. Note that the exceptions held in this attribute are not the `ResultExit`/`ExceptionExit` wrappers raised by `postflight`, but the underlying exceptions they wrap

Programmatic exception handling might look like the following:
```python
res = dbtRunner().invoke(["run"])
if not res.success:
    ...
    if type(res.exception) == SomeExceptionType:
        ...
```

## `dbt/tests/util.py`

### `run_dbt`
In many of our functional and integration tests, we want to be sure that an invocation of `dbt` raises a certain exception.
A common pattern for these assertions:
```python
class TestSomething:
    def test_something(self, project):
        with pytest.raises(SomeException):
            run_dbt(["run"])
```
To allow these tests to assert that exceptions have been thrown, the `run_dbt` function will raise any exceptions it receives from the invocation of a `dbt` command.
@@ -1 +0,0 @@
from .main import cli as dbt_cli # noqa
@@ -1,16 +0,0 @@
import click
from typing import Optional

from dbt.cli.main import cli as dbt


def make_context(args, command=dbt) -> Optional[click.Context]:
    try:
        ctx = command.make_context(command.name, args)
    except click.exceptions.Exit:
        return None

    ctx.invoked_subcommand = ctx.protected_args[0] if ctx.protected_args else None
    ctx.obj = {}

    return ctx
Some files were not shown because too many files have changed in this diff.