forked from repo-mirrors/dbt-core
Compare commits
1 commit: adding-sem...testing-pr

| Author | SHA1 | Date |
|---|---|---|
|  | 42058de028 |  |
.bumpversion.cfg
@@ -1,39 +1,43 @@
 [bumpversion]
-current_version = 1.5.0a1
+current_version = 0.19.0
 parse = (?P<major>\d+)
     \.(?P<minor>\d+)
     \.(?P<patch>\d+)
-    ((?P<prekind>a|b|rc)
-    (?P<pre>\d+)  # pre-release version num
-    )?
+    ((?P<prerelease>[a-z]+)(?P<num>\d+))?
 serialize =
-    {major}.{minor}.{patch}{prekind}{pre}
+    {major}.{minor}.{patch}{prerelease}{num}
     {major}.{minor}.{patch}
 commit = False
 tag = False

-[bumpversion:part:prekind]
+[bumpversion:part:prerelease]
 first_value = a
 optional_value = final
 values =
     a
     b
     rc
     final

-[bumpversion:part:pre]
+[bumpversion:part:num]
 first_value = 1

+[bumpversion:file:setup.py]

 [bumpversion:file:core/setup.py]

 [bumpversion:file:core/dbt/version.py]

 [bumpversion:file:plugins/postgres/setup.py]

+[bumpversion:file:plugins/redshift/setup.py]

+[bumpversion:file:plugins/snowflake/setup.py]

+[bumpversion:file:plugins/bigquery/setup.py]

 [bumpversion:file:plugins/postgres/dbt/adapters/postgres/__version__.py]

-[bumpversion:file:docker/Dockerfile]
+[bumpversion:file:plugins/redshift/dbt/adapters/redshift/__version__.py]

-[bumpversion:file:tests/adapter/setup.py]
+[bumpversion:file:plugins/snowflake/dbt/adapters/snowflake/__version__.py]

-[bumpversion:file:tests/adapter/dbt/tests/adapter/__version__.py]
+[bumpversion:file:plugins/bigquery/dbt/adapters/bigquery/__version__.py]
@@ -1,21 +0,0 @@
## Previous Releases

For information on prior major and minor releases, see their changelogs:

* [1.4](https://github.com/dbt-labs/dbt-core/blob/1.4.latest/CHANGELOG.md)
* [1.3](https://github.com/dbt-labs/dbt-core/blob/1.3.latest/CHANGELOG.md)
* [1.2](https://github.com/dbt-labs/dbt-core/blob/1.2.latest/CHANGELOG.md)
* [1.1](https://github.com/dbt-labs/dbt-core/blob/1.1.latest/CHANGELOG.md)
* [1.0](https://github.com/dbt-labs/dbt-core/blob/1.0.latest/CHANGELOG.md)
* [0.21](https://github.com/dbt-labs/dbt-core/blob/0.21.latest/CHANGELOG.md)
* [0.20](https://github.com/dbt-labs/dbt-core/blob/0.20.latest/CHANGELOG.md)
* [0.19](https://github.com/dbt-labs/dbt-core/blob/0.19.latest/CHANGELOG.md)
* [0.18](https://github.com/dbt-labs/dbt-core/blob/0.18.latest/CHANGELOG.md)
* [0.17](https://github.com/dbt-labs/dbt-core/blob/0.17.latest/CHANGELOG.md)
* [0.16](https://github.com/dbt-labs/dbt-core/blob/0.16.latest/CHANGELOG.md)
* [0.15](https://github.com/dbt-labs/dbt-core/blob/0.15.latest/CHANGELOG.md)
* [0.14](https://github.com/dbt-labs/dbt-core/blob/0.14.latest/CHANGELOG.md)
* [0.13](https://github.com/dbt-labs/dbt-core/blob/0.13.latest/CHANGELOG.md)
* [0.12](https://github.com/dbt-labs/dbt-core/blob/0.12.latest/CHANGELOG.md)
* [0.11 and earlier](https://github.com/dbt-labs/dbt-core/blob/0.11.latest/CHANGELOG.md)
@@ -1,53 +0,0 @@
# CHANGELOG Automation

We use [changie](https://changie.dev/) to automate `CHANGELOG` generation. For installation and format/command specifics, see the documentation.

### Quick Tour

- All new change entries are generated under `/.changes/unreleased` as yaml files
- `header.tpl.md` contains the header contents for the entire CHANGELOG file
- `0.0.0.md` contains the footer contents for the entire CHANGELOG file. changie appears to be adding support for a dedicated footer file, analogous to its header file support; switch to that once it is available. For now, the `0.0.0` in the file name forces this content to the bottom of the changelog no matter what version we are releasing.
- `.changie.yaml` defines the fields in a change, the format of a single change, and the format of the Contributors section for each version.

### Workflow

#### Daily workflow

Almost every code change associated with an issue requires a `CHANGELOG` entry. After you have created the PR in GitHub, run `changie new` and follow the command prompts to generate a yaml file with your change details. This only needs to be done once per PR.

The `changie new` command ensures the correct file format and file name. There is a one-to-one mapping of issues to changes; multiple issues cannot be lumped into a single entry. If you make a mistake, the yaml file may be edited directly and saved, as long as the format is preserved.
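For reference, a generated entry is a small yaml file; this one is taken verbatim from the unreleased entries shown later in this diff:

```yaml
kind: Features
body: Adding the entity node
time: 2023-01-18T13:48:04.487817-06:00
custom:
  Author: callum-mcdata
  Issue: "6627"
```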
Note: If your PR has been cleared by the Core Team as not needing a changelog entry, the `Skip Changelog` label may be put on the PR to bypass the GitHub action that blocks PRs from being merged when they are missing a `CHANGELOG` entry.

#### Prerelease Workflow

These commands batch up changes in `/.changes/unreleased` to be included in this prerelease and move those files to a directory named for the release version. The `--move-dir` directory is created under `/.changes` if it does not already exist.

```
changie batch <version> --move-dir '<version>' --prerelease 'rc1'
changie merge
```

Example:
```
changie batch 1.0.5 --move-dir '1.0.5' --prerelease 'rc1'
changie merge
```

#### Final Release Workflow

These commands batch up changes in `/.changes/unreleased` as well as `/.changes/<version>` to be included in this final release, and delete all prereleases. This rolls all prereleases up into a single final release. All `yaml` files in `/unreleased` and `<version>` are deleted at this point.

```
changie batch <version> --include '<version>' --remove-prereleases
changie merge
```

Example:
```
changie batch 1.0.5 --include '1.0.5' --remove-prereleases
changie merge
```

### A Note on Manual Edits & Gotchas

- changie generates markdown files in the `.changes` directory that are stitched together by the `changie merge` command. Every run of `changie merge` regenerates the entire file, so any changes made directly to `CHANGELOG.md` will be overwritten on the next run of `changie merge`.
- If changes need to be made to `CHANGELOG.md`, make them in the relevant `<version>.md` file in the `/.changes` directory, then run `changie merge` to regenerate `CHANGELOG.md`.
- Do not run `changie batch` again on released versions. Our final release workflow deletes all of the yaml files associated with individual changes. If for some reason modifications to `CHANGELOG.md` are required after we've generated the final release changelog, they must be made manually in the `<version>.md` file in the `/.changes` directory.
- changie can modify, create, and delete files depending on the command you run. This is expected. Be sure to commit everything that has been modified or deleted.
@@ -1,6 +0,0 @@
# dbt Core Changelog

- This file provides a full account of all changes to `dbt-core` and `dbt-postgres`
- Changes are listed under the (pre)release in which they first appear. Subsequent releases include changes from previous releases.
- "Breaking changes" listed under a version may require action from end users or external maintainers when upgrading to that version.
- Do not edit this file directly. This file is auto-generated using [changie](https://github.com/miniscruff/changie). For details on how to document a change, see [the contributing guide](https://github.com/dbt-labs/dbt-core/blob/main/CONTRIBUTING.md#adding-changelog-entry)
@@ -1,6 +0,0 @@
kind: Features
body: Adding the entity node
time: 2023-01-18T13:48:04.487817-06:00
custom:
  Author: callum-mcdata
  Issue: "6627"

@@ -1,6 +0,0 @@
kind: Under the Hood
body: Fix use of ConnectionReused logging event
time: 2023-01-13T13:25:13.023168-05:00
custom:
  Author: gshank
  Issue: "6168"

@@ -1,6 +0,0 @@
kind: Under the Hood
body: Update deprecated github action command
time: 2023-01-17T11:17:37.046095-06:00
custom:
  Author: davidbloss
  Issue: "6153"
.changie.yaml
@@ -1,127 +0,0 @@
changesDir: .changes
unreleasedDir: unreleased
headerPath: header.tpl.md
versionHeaderPath: ""
changelogPath: CHANGELOG.md
versionExt: md
versionFormat: '## dbt-core {{.Version}} - {{.Time.Format "January 02, 2006"}}'
kindFormat: '### {{.Kind}}'
changeFormat: |-
  {{- $IssueList := list }}
  {{- $changes := splitList " " $.Custom.Issue }}
  {{- range $issueNbr := $changes }}
  {{- $changeLink := "[#nbr](https://github.com/dbt-labs/dbt-core/issues/nbr)" | replace "nbr" $issueNbr }}
  {{- $IssueList = append $IssueList $changeLink }}
  {{- end -}}
  - {{.Body}} ({{ range $index, $element := $IssueList }}{{if $index}}, {{end}}{{$element}}{{end}})

kinds:
  - label: Breaking Changes
  - label: Features
  - label: Fixes
  - label: Docs
    changeFormat: |-
      {{- $IssueList := list }}
      {{- $changes := splitList " " $.Custom.Issue }}
      {{- range $issueNbr := $changes }}
      {{- $changeLink := "[dbt-docs/#nbr](https://github.com/dbt-labs/dbt-docs/issues/nbr)" | replace "nbr" $issueNbr }}
      {{- $IssueList = append $IssueList $changeLink }}
      {{- end -}}
      - {{.Body}} ({{ range $index, $element := $IssueList }}{{if $index}}, {{end}}{{$element}}{{end}})
  - label: Under the Hood
  - label: Dependencies
    changeFormat: |-
      {{- $PRList := list }}
      {{- $changes := splitList " " $.Custom.PR }}
      {{- range $pullrequest := $changes }}
      {{- $changeLink := "[#nbr](https://github.com/dbt-labs/dbt-core/pull/nbr)" | replace "nbr" $pullrequest }}
      {{- $PRList = append $PRList $changeLink }}
      {{- end -}}
      - {{.Body}} ({{ range $index, $element := $PRList }}{{if $index}}, {{end}}{{$element}}{{end}})
    skipGlobalChoices: true
    additionalChoices:
      - key: Author
        label: GitHub Username(s) (separated by a single space if multiple)
        type: string
        minLength: 3
      - key: PR
        label: GitHub Pull Request Number (separated by a single space if multiple)
        type: string
        minLength: 1
  - label: Security
    changeFormat: |-
      {{- $PRList := list }}
      {{- $changes := splitList " " $.Custom.PR }}
      {{- range $pullrequest := $changes }}
      {{- $changeLink := "[#nbr](https://github.com/dbt-labs/dbt-core/pull/nbr)" | replace "nbr" $pullrequest }}
      {{- $PRList = append $PRList $changeLink }}
      {{- end -}}
      - {{.Body}} ({{ range $index, $element := $PRList }}{{if $index}}, {{end}}{{$element}}{{end}})
    skipGlobalChoices: true
    additionalChoices:
      - key: Author
        label: GitHub Username(s) (separated by a single space if multiple)
        type: string
        minLength: 3
      - key: PR
        label: GitHub Pull Request Number (separated by a single space if multiple)
        type: string
        minLength: 1

newlines:
  afterChangelogHeader: 1
  afterKind: 1
  afterChangelogVersion: 1
  beforeKind: 1
  endOfVersion: 1

custom:
  - key: Author
    label: GitHub Username(s) (separated by a single space if multiple)
    type: string
    minLength: 3
  - key: Issue
    label: GitHub Issue Number (separated by a single space if multiple)
    type: string
    minLength: 1

footerFormat: |
  {{- $contributorDict := dict }}
  {{- /* any names added to this list should be all lowercase for later matching purposes */}}
  {{- $core_team := list "michelleark" "peterallenwebb" "emmyoop" "nathaniel-may" "gshank" "leahwicz" "chenyulinx" "stu-k" "iknox-fa" "versusfacit" "mcknight-42" "jtcohen6" "aranke" "dependabot[bot]" "snyk-bot" "colin-rogers-dbt" }}
  {{- range $change := .Changes }}
    {{- $authorList := splitList " " $change.Custom.Author }}
    {{- /* loop through all authors for a single changelog */}}
    {{- range $author := $authorList }}
      {{- $authorLower := lower $author }}
      {{- /* we only want to include non-core team contributors */}}
      {{- if not (has $authorLower $core_team)}}
        {{- $changeList := splitList " " $change.Custom.Author }}
        {{- /* Docs kind link back to dbt-docs instead of dbt-core issues */}}
        {{- $changeLink := $change.Kind }}
        {{- if or (eq $change.Kind "Dependencies") (eq $change.Kind "Security") }}
          {{- $changeLink = "[#nbr](https://github.com/dbt-labs/dbt-core/pull/nbr)" | replace "nbr" $change.Custom.PR }}
        {{- else if eq $change.Kind "Docs"}}
          {{- $changeLink = "[dbt-docs/#nbr](https://github.com/dbt-labs/dbt-docs/issues/nbr)" | replace "nbr" $change.Custom.Issue }}
        {{- else }}
          {{- $changeLink = "[#nbr](https://github.com/dbt-labs/dbt-core/issues/nbr)" | replace "nbr" $change.Custom.Issue }}
        {{- end }}
        {{- /* check if this contributor has other changes associated with them already */}}
        {{- if hasKey $contributorDict $author }}
          {{- $contributionList := get $contributorDict $author }}
          {{- $contributionList = append $contributionList $changeLink }}
          {{- $contributorDict := set $contributorDict $author $contributionList }}
        {{- else }}
          {{- $contributionList := list $changeLink }}
          {{- $contributorDict := set $contributorDict $author $contributionList }}
        {{- end }}
      {{- end}}
    {{- end}}
  {{- end }}
  {{- /* no indentation here for formatting so the final markdown doesn't have unneeded indentations */}}
  {{- if $contributorDict}}
  ### Contributors
  {{- range $k,$v := $contributorDict }}
  - [@{{$k}}](https://github.com/{{$k}}) ({{ range $index, $element := $v }}{{if $index}}, {{end}}{{$element}}{{end}})
  {{- end }}
  {{- end }}
.circleci/config.yml (new file)
@@ -0,0 +1,218 @@
version: 2.1
jobs:
  unit:
    docker: &test_only
      - image: fishtownanalytics/test-container:9
        environment:
          DBT_INVOCATION_ENV: circle
    steps:
      - checkout
      - run: tox -e flake8,mypy,unit-py36,unit-py38

  build-wheels:
    docker: *test_only
    steps:
      - checkout
      - run:
          name: Build wheels
          command: |
            python3.8 -m venv "${PYTHON_ENV}"
            export PYTHON_BIN="${PYTHON_ENV}/bin/python"
            $PYTHON_BIN -m pip install -U pip setuptools
            $PYTHON_BIN -m pip install -r requirements.txt
            $PYTHON_BIN -m pip install -r dev_requirements.txt
            /bin/bash ./scripts/build-wheels.sh
            $PYTHON_BIN ./scripts/collect-dbt-contexts.py > ./dist/context_metadata.json
            $PYTHON_BIN ./scripts/collect-artifact-schema.py > ./dist/artifact_schemas.json
          environment:
            PYTHON_ENV: /home/tox/build_venv/
      - store_artifacts:
          path: ./dist
          destination: dist

  integration-postgres-py36:
    docker: &test_and_postgres
      - image: fishtownanalytics/test-container:9
        environment:
          DBT_INVOCATION_ENV: circle
      - image: postgres
        name: database
        environment: &pgenv
          POSTGRES_USER: "root"
          POSTGRES_PASSWORD: "password"
          POSTGRES_DB: "dbt"
    steps:
      - checkout
      - run: &setupdb
          name: Setup postgres
          command: bash test/setup_db.sh
          environment:
            PGHOST: database
            PGUSER: root
            PGPASSWORD: password
            PGDATABASE: postgres
      - run:
          name: Run tests
          command: tox -e integration-postgres-py36
      - store_artifacts:
          path: ./logs

  integration-snowflake-py36:
    docker: *test_only
    steps:
      - checkout
      - run:
          name: Run tests
          command: tox -e integration-snowflake-py36
          no_output_timeout: 1h
      - store_artifacts:
          path: ./logs

  integration-redshift-py36:
    docker: *test_only
    steps:
      - checkout
      - run:
          name: Run tests
          command: tox -e integration-redshift-py36
      - store_artifacts:
          path: ./logs

  integration-bigquery-py36:
    docker: *test_only
    steps:
      - checkout
      - run:
          name: Run tests
          command: tox -e integration-bigquery-py36
      - store_artifacts:
          path: ./logs

  integration-postgres-py38:
    docker: *test_and_postgres
    steps:
      - checkout
      - run: *setupdb
      - run:
          name: Run tests
          command: tox -e integration-postgres-py38
      - store_artifacts:
          path: ./logs

  integration-snowflake-py38:
    docker: *test_only
    steps:
      - checkout
      - run:
          name: Run tests
          command: tox -e integration-snowflake-py38
          no_output_timeout: 1h
      - store_artifacts:
          path: ./logs

  integration-redshift-py38:
    docker: *test_only
    steps:
      - checkout
      - run:
          name: Run tests
          command: tox -e integration-redshift-py38
      - store_artifacts:
          path: ./logs

  integration-bigquery-py38:
    docker: *test_only
    steps:
      - checkout
      - run:
          name: Run tests
          command: tox -e integration-bigquery-py38
      - store_artifacts:
          path: ./logs

  integration-postgres-py39:
    docker: *test_and_postgres
    steps:
      - checkout
      - run: *setupdb
      - run:
          name: Run tests
          command: tox -e integration-postgres-py39
      - store_artifacts:
          path: ./logs

  integration-snowflake-py39:
    docker: *test_only
    steps:
      - checkout
      - run:
          name: Run tests
          command: tox -e integration-snowflake-py39
          no_output_timeout: 1h
      - store_artifacts:
          path: ./logs

  integration-redshift-py39:
    docker: *test_only
    steps:
      - checkout
      - run:
          name: Run tests
          command: tox -e integration-redshift-py39
      - store_artifacts:
          path: ./logs

  integration-bigquery-py39:
    docker: *test_only
    steps:
      - checkout
      - run:
          name: Run tests
          command: tox -e integration-bigquery-py39
      - store_artifacts:
          path: ./logs

workflows:
  version: 2
  test-everything:
    jobs:
      - unit
      - integration-postgres-py36:
          requires:
            - unit
      - integration-redshift-py36:
          requires:
            - integration-postgres-py36
      - integration-bigquery-py36:
          requires:
            - integration-postgres-py36
      - integration-snowflake-py36:
          requires:
            - integration-postgres-py36
      - integration-postgres-py38:
          requires:
            - unit
      - integration-redshift-py38:
          requires:
            - integration-postgres-py38
      - integration-bigquery-py38:
          requires:
            - integration-postgres-py38
      - integration-snowflake-py38:
          requires:
            - integration-postgres-py38
      - integration-postgres-py39:
          requires:
            - unit
      - integration-redshift-py39:
          requires:
            - integration-postgres-py39
      - integration-bigquery-py39:
          requires:
            - integration-postgres-py39
      # - integration-snowflake-py39:
      #     requires:
      #       - integration-postgres-py39
      - build-wheels:
          requires:
            - unit
            - integration-postgres-py36
            - integration-redshift-py36
            - integration-bigquery-py36
            - integration-snowflake-py36
            - integration-postgres-py38
            - integration-redshift-py38
            - integration-bigquery-py38
            - integration-snowflake-py38
            - integration-postgres-py39
            - integration-redshift-py39
            - integration-bigquery-py39
            # - integration-snowflake-py39
.flake8
@@ -1,12 +0,0 @@
[flake8]
select =
    E
    W
    F
ignore =
    W503 # makes Flake8 work like black
    W504
    E203 # makes Flake8 work like black
    E741
    E501 # long line checking is done in black
exclude = test/
@@ -1,2 +0,0 @@
# Reformatting dbt-core via black, flake8, mypy, and assorted pre-commit hooks.
43e3fc22c4eae4d3d901faba05e33c40f1f1dc5a
.gitattributes
@@ -1,2 +0,0 @@
core/dbt/include/index.html binary
tests/functional/artifacts/data/state/*/manifest.json binary
.github/CODEOWNERS
@@ -1,75 +0,0 @@
# This file contains the code owners for the dbt-core repo.
# PRs will be automatically assigned for review to the associated
# team(s) or person(s) that own any files the PR touches.
#
# A statement takes precedence over the statements above it, so more general
# assignments are found at the top, with specific assignments lower in
# the ordering (i.e. the catch-all assignment should be the first item).
#
# Consult GitHub documentation for formatting guidelines:
# https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners#example-of-a-codeowners-file

# As a default for areas with no assignment,
# the core team as a whole will be assigned
* @dbt-labs/core

# Changes to GitHub configurations including Actions
/.github/ @leahwicz

### LANGUAGE

# Language core modules
/core/dbt/config/    @dbt-labs/core-language
/core/dbt/context/   @dbt-labs/core-language
/core/dbt/contracts/ @dbt-labs/core-language
/core/dbt/deps/      @dbt-labs/core-language
/core/dbt/events/    @dbt-labs/core-language   # structured logging
/core/dbt/parser/    @dbt-labs/core-language

# Language misc files
/core/dbt/dataclass_schema.py @dbt-labs/core-language
/core/dbt/hooks.py            @dbt-labs/core-language
/core/dbt/node_types.py       @dbt-labs/core-language
/core/dbt/semver.py           @dbt-labs/core-language


### EXECUTION

# Execution core modules
/core/dbt/graph/ @dbt-labs/core-execution
/core/dbt/task/  @dbt-labs/core-execution

# Execution misc files
/core/dbt/compilation.py        @dbt-labs/core-execution
/core/dbt/flags.py              @dbt-labs/core-execution
/core/dbt/lib.py                @dbt-labs/core-execution
/core/dbt/main.py               @dbt-labs/core-execution
/core/dbt/profiler.py           @dbt-labs/core-execution
/core/dbt/selected_resources.py @dbt-labs/core-execution
/core/dbt/tracking.py           @dbt-labs/core-execution
/core/dbt/version.py            @dbt-labs/core-execution


### ADAPTERS

# Adapter interface ("base" + "sql" adapter defaults, cache)
/core/dbt/adapters @dbt-labs/core-adapters

# Global project (default macros + materializations), starter project
/core/dbt/include @dbt-labs/core-adapters

# Postgres plugin
/plugins/ @dbt-labs/core-adapters

# Functional tests for adapter plugins
/tests/adapter @dbt-labs/core-adapters

### TESTS

# Overlapping ownership for vast majority of unit + functional tests

# Perf regression testing framework
# This excludes the test project files themselves, since those aren't specific
# framework changes (excluded by not setting an owner next to the path: no owner)
/performance @nathaniel-may
/performance/projects
.github/ISSUE_TEMPLATE/bug-report.yml
@@ -1,97 +0,0 @@
name: 🐞 Bug
description: Report a bug or an issue you've found with dbt
title: "[Bug] <title>"
labels: ["bug", "triage"]
body:
  - type: markdown
    attributes:
      value: |
        Thanks for taking the time to fill out this bug report!
  - type: checkboxes
    attributes:
      label: Is this a new bug in dbt-core?
      description: >
        In other words, is this an error, flaw, failure or fault in our software?

        If this is a bug that broke existing functionality that used to work, please open a regression issue.
        If this is a bug in an adapter plugin, please open an issue in the adapter's repository.
        If this is a bug experienced while using dbt Cloud, please report to [support](mailto:support@getdbt.com).
        If this is a request for help or troubleshooting code in your own dbt project, please join our [dbt Community Slack](https://www.getdbt.com/community/join-the-community/) or open a [Discussion question](https://github.com/dbt-labs/docs.getdbt.com/discussions).

        Please search to see if an issue already exists for the bug you encountered.
      options:
        - label: I believe this is a new bug in dbt-core
          required: true
        - label: I have searched the existing issues, and I could not find an existing issue for this bug
          required: true
  - type: textarea
    attributes:
      label: Current Behavior
      description: A concise description of what you're experiencing.
    validations:
      required: true
  - type: textarea
    attributes:
      label: Expected Behavior
      description: A concise description of what you expected to happen.
    validations:
      required: true
  - type: textarea
    attributes:
      label: Steps To Reproduce
      description: Steps to reproduce the behavior.
      placeholder: |
        1. In this environment...
        2. With this config...
        3. Run '...'
        4. See error...
    validations:
      required: true
  - type: textarea
    id: logs
    attributes:
      label: Relevant log output
      description: |
        If applicable, log output to help explain your problem.
      render: shell
    validations:
      required: false
  - type: textarea
    attributes:
      label: Environment
      description: |
        examples:
          - **OS**: Ubuntu 20.04
          - **Python**: 3.9.12 (`python3 --version`)
          - **dbt-core**: 1.1.1 (`dbt --version`)
      value: |
        - OS:
        - Python:
        - dbt:
      render: markdown
    validations:
      required: false
  - type: dropdown
    id: database
    attributes:
      label: Which database adapter are you using with dbt?
      description: If the bug is specific to the database or adapter, please open the issue in that adapter's repository instead
      multiple: true
      options:
        - postgres
        - redshift
        - snowflake
        - bigquery
        - spark
        - other (mention it in "Additional Context")
    validations:
      required: false
  - type: textarea
    attributes:
      label: Additional Context
      description: |
        Links? References? Anything that will give us more context about the issue you are encountering!

        Tip: You can attach images or log files by clicking this area to highlight it and then dragging files in.
    validations:
      required: false
.github/ISSUE_TEMPLATE/bug_report.md (new file)
@@ -0,0 +1,41 @@
---
name: Bug report
about: Report a bug or an issue you've found with dbt
title: ''
labels: bug, triage
assignees: ''

---

### Describe the bug
A clear and concise description of what the bug is. What command did you run? What happened?

### Steps To Reproduce
In as much detail as possible, please provide steps to reproduce the issue. Sample data that triggers the issue, example model code, etc. are all very helpful here.

### Expected behavior
A clear and concise description of what you expected to happen.

### Screenshots and log output
If applicable, add screenshots or log output to help explain your problem.

### System information
**Which database are you using dbt with?**
- [ ] postgres
- [ ] redshift
- [ ] bigquery
- [ ] snowflake
- [ ] other (specify: ____________)

**The output of `dbt --version`:**
```
<output goes here>
```

**The operating system you're using:**

**The output of `python --version`:**

### Additional context
Add any other context about the problem here.
.github/ISSUE_TEMPLATE/config.yml
@@ -1,23 +0,0 @@
blank_issues_enabled: false
contact_links:
  - name: Ask the community for help
    url: https://github.com/dbt-labs/docs.getdbt.com/discussions
    about: Need help troubleshooting? Check out our guide on how to ask
  - name: Contact dbt Cloud support
    url: mailto:support@getdbt.com
    about: Are you using dbt Cloud? Contact our support team for help!
  - name: Participate in Discussions
    url: https://github.com/dbt-labs/dbt-core/discussions
    about: Do you have a Big Idea for dbt? Read open discussions, or start a new one
  - name: Create an issue for dbt-redshift
    url: https://github.com/dbt-labs/dbt-redshift/issues/new/choose
    about: Report a bug or request a feature for dbt-redshift
  - name: Create an issue for dbt-bigquery
    url: https://github.com/dbt-labs/dbt-bigquery/issues/new/choose
    about: Report a bug or request a feature for dbt-bigquery
  - name: Create an issue for dbt-snowflake
    url: https://github.com/dbt-labs/dbt-snowflake/issues/new/choose
    about: Report a bug or request a feature for dbt-snowflake
  - name: Create an issue for dbt-spark
    url: https://github.com/dbt-labs/dbt-spark/issues/new/choose
    about: Report a bug or request a feature for dbt-spark
.github/ISSUE_TEMPLATE/feature-request.yml
@@ -1,59 +0,0 @@
name: ✨ Feature
description: Propose a straightforward extension of dbt functionality
title: "[Feature] <title>"
labels: ["enhancement", "triage"]
body:
  - type: markdown
    attributes:
      value: |
        Thanks for taking the time to fill out this feature request!
  - type: checkboxes
    attributes:
      label: Is this your first time submitting a feature request?
      description: >
        We want to make sure that features are distinct and discoverable,
        so that other members of the community can find them and offer their thoughts.

        Issues are the right place to request straightforward extensions of existing dbt functionality.
        For "big ideas" about future capabilities of dbt, we ask that you open a
        [discussion](https://github.com/dbt-labs/dbt-core/discussions) in the "Ideas" category instead.
      options:
        - label: I have read the [expectations for open source contributors](https://docs.getdbt.com/docs/contributing/oss-expectations)
          required: true
        - label: I have searched the existing issues, and I could not find an existing issue for this feature
          required: true
        - label: I am requesting a straightforward extension of existing dbt functionality, rather than a Big Idea better suited to a discussion
          required: true
  - type: textarea
    attributes:
      label: Describe the feature
      description: A clear and concise description of what you want to happen.
    validations:
      required: true
  - type: textarea
    attributes:
      label: Describe alternatives you've considered
      description: |
        A clear and concise description of any alternative solutions or features you've considered.
    validations:
      required: false
  - type: textarea
    attributes:
      label: Who will this benefit?
      description: |
        What kind of use case will this feature be useful for? Please be specific and provide examples, this will help us prioritize properly.
    validations:
      required: false
  - type: input
    attributes:
      label: Are you interested in contributing this feature?
      description: Let us know if you want to write some code, and how we can help.
    validations:
      required: false
  - type: textarea
    attributes:
      label: Anything else?
      description: |
        Links? References? Anything that will give us more context about the feature you are suggesting!
    validations:
      required: false
.github/ISSUE_TEMPLATE/feature_request.md (new file)
@@ -0,0 +1,23 @@
---
name: Feature request
about: Suggest an idea for dbt
title: ''
labels: enhancement, triage
assignees: ''

---

### Describe the feature
A clear and concise description of what you want to happen.

### Describe alternatives you've considered
A clear and concise description of any alternative solutions or features you've considered.

### Additional context
Is this feature database-specific? Which database(s) is/are relevant? Please include any other relevant context here.

### Who will this benefit?
What kind of use case will this feature be useful for? Please be specific and provide examples, this will help us prioritize properly.

### Are you interested in contributing this feature?
Let us know if you want to write some code, and how we can help.
.github/ISSUE_TEMPLATE/regression-report.yml
@@ -1,93 +0,0 @@
name: ☣️ Regression
description: Report a regression you've observed in a newer version of dbt
title: "[Regression] <title>"
labels: ["bug", "regression", "triage"]
body:
  - type: markdown
    attributes:
      value: |
        Thanks for taking the time to fill out this regression report!
  - type: checkboxes
    attributes:
      label: Is this a regression in a recent version of dbt-core?
      description: >
        A regression is when documented functionality works as expected in an older version of dbt-core,
        and no longer works after upgrading to a newer version of dbt-core
      options:
        - label: I believe this is a regression in dbt-core functionality
          required: true
        - label: I have searched the existing issues, and I could not find an existing issue for this regression
          required: true
  - type: textarea
    attributes:
      label: Current Behavior
      description: A concise description of what you're experiencing.
    validations:
      required: true
  - type: textarea
    attributes:
      label: Expected/Previous Behavior
      description: A concise description of what you expected to happen.
    validations:
      required: true
  - type: textarea
    attributes:
      label: Steps To Reproduce
      description: Steps to reproduce the behavior.
      placeholder: |
        1. In this environment...
        2. With this config...
        3. Run '...'
        4. See error...
    validations:
      required: true
  - type: textarea
    id: logs
    attributes:
      label: Relevant log output
      description: |
        If applicable, log output to help explain your problem.
      render: shell
    validations:
      required: false
  - type: textarea
    attributes:
      label: Environment
      description: |
        examples:
          - **OS**: Ubuntu 20.04
          - **Python**: 3.9.12 (`python3 --version`)
          - **dbt-core (working version)**: 1.1.1 (`dbt --version`)
          - **dbt-core (regression version)**: 1.2.0 (`dbt --version`)
      value: |
        - OS:
        - Python:
        - dbt (working version):
        - dbt (regression version):
      render: markdown
    validations:
      required: true
  - type: dropdown
    id: database
    attributes:
      label: Which database adapter are you using with dbt?
      description: If the regression is specific to the database or adapter, please open the issue in that adapter's repository instead
      multiple: true
      options:
        - postgres
        - redshift
        - snowflake
        - bigquery
        - spark
        - other (mention it in "Additional Context")
    validations:
      required: false
  - type: textarea
    attributes:
      label: Additional Context
      description: |
        Links? References? Anything that will give us more context about the issue you are encountering!

        Tip: You can attach images or log files by clicking this area to highlight it and then dragging files in.
    validations:
      required: false
.github/_README.md
@@ -1,216 +0,0 @@
<!-- GitHub will publish this readme on the main repo page if the name is `README.md`, so we've added the leading underscore to prevent this -->
<!-- Do not rename this file `README.md` -->
<!-- See https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-readmes -->

## What are GitHub Actions?

GitHub Actions are used for many different purposes. We use them to run tests in CI, validate PRs are in an expected state, and automate processes.

- [Overview of GitHub Actions](https://docs.github.com/en/actions/learn-github-actions/understanding-github-actions)
- [What's a workflow?](https://docs.github.com/en/actions/using-workflows/about-workflows)
- [GitHub Actions guides](https://docs.github.com/en/actions/guides)

___

## Where do actions and workflows live

We try to maintain actions that are shared across repositories in a single place, so that necessary changes can be made once.

[dbt-labs/actions](https://github.com/dbt-labs/actions/) is the central repository of actions and workflows we use across repositories.

GitHub Actions also live locally within a repository. The workflows can be found at `.github/workflows` from the root of the repository. These should be specific to that code base.

Note: We are actively moving actions into the central Actions repository, so there is currently some duplication across repositories.

___

## Basics of Using Actions

### Viewing Output

- View the detailed action output for your PR in the **Checks** tab of the PR. This only shows the most recent run. You can also view high-level **Checks** output at the bottom of the PR.

- View _all_ action output for a repository from the [**Actions**](https://github.com/dbt-labs/dbt-core/actions) tab. Workflow results last 1 year. Artifacts last 90 days, unless specified otherwise in individual workflows.

  This view often shows what seem like duplicates of the same workflow. This occurs when files are renamed but the workflow name has not changed. These are in fact _not_ duplicates.

  You can see the branch the workflow runs from in this view. It is listed in the table between the workflow name and the time/duration of the run. When blank, the workflow is running in the context of the `main` branch.

### How to view what workflow file is being referenced from a run

- When viewing the output of a specific workflow run, click the 3 dots at the top right of the display. There will be an option to `View workflow file`.

### How to manually run a workflow

- If a workflow has the `on: workflow_dispatch` trigger, it can be manually triggered.
- From the [**Actions**](https://github.com/dbt-labs/dbt-core/actions) tab, find the workflow you want to run, select it, and fill in any inputs required (see the sketch below). That's it!
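A minimal sketch of such a trigger; the `version_number` input here is illustrative, not an input of any particular workflow in this repo:

```yaml
on:
  workflow_dispatch:
    inputs:
      version_number:
        description: The version number to release
        required: true
```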
### How to re-run jobs

- Some actions cannot be rerun in the GitHub UI, namely the snyk checks and the cla check. Snyk checks are rerun by closing and reopening the PR. You can retrigger the cla check by commenting on the PR with `@cla-bot check`.

___

## General Standards

### Permissions
- By default, when no permissions are explicitly set, workflows have read permission in the repository for the contents scope only.
- It is best practice to always define permissions explicitly. This allows actions to continue to work when the default permissions on the repository are changed. It also allows explicit grants of the least permissions possible.
- There are a lot of permissions available. [Read up on them](https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs) if you're unsure what to use.

```yaml
permissions:
  contents: read
  pull-requests: write
```

### Secrets
- When to use a [Personal Access Token (PAT)](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token) vs the [GITHUB_TOKEN](https://docs.github.com/en/actions/security-guides/automatic-token-authentication) generated for the action?

  The `GITHUB_TOKEN` is used by default. In most cases it is sufficient for what you need.

  If you expect the workflow to result in a commit that should retrigger workflows, you will need to use a Personal Access Token for the bot to commit the file. When using the GITHUB_TOKEN, the resulting commit will not trigger another GitHub Actions workflow run. This is due to limitations set by GitHub. See [the docs](https://docs.github.com/en/actions/security-guides/automatic-token-authentication#using-the-github_token-in-a-workflow) for a more detailed explanation.

  For example, we must use a PAT in our workflow to commit a new changelog yaml file for bot PRs. Once the file has been committed to the branch, it should retrigger the check that validates a changelog exists on the PR. Otherwise, it would stay in a failed state, since the check would never retrigger.
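As a sketch, the only change is the token used at checkout; `BOT_PAT` is a placeholder secret name, not an actual secret in this repo:

```yaml
steps:
  # checking out with a PAT means commits pushed later in the job
  # will re-trigger workflows; the default GITHUB_TOKEN would not
  - uses: actions/checkout@v3
    with:
      token: ${{ secrets.BOT_PAT }}
```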
### Triggers
You can configure your workflows to run when specific activity on GitHub happens, at a scheduled time, or when an event outside of GitHub occurs. Read more details in the [GitHub docs](https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows).

These triggers are under the `on` key of the workflow, and more than one can be listed.

```yaml
on:
  push:
    branches:
      - "main"
      - "*.latest"
      - "releases/*"
  pull_request:
    # catch when the PR is opened with the label or when the label is added
    types: [opened, labeled]
  workflow_dispatch:
```

Some triggers of note that we use:

- `push` - Runs your workflow when you push a commit or tag.
- `pull_request` - Runs your workflow when activity on a pull request in the workflow's repository occurs. Takes in a list of activity types (opened, labeled, etc.) if appropriate.
- `pull_request_target` - Same as `pull_request`, but runs in the context of the PR's target branch.
- `workflow_call` - Used with reusable workflows. Triggered by another workflow calling it.
- `workflow_dispatch` - Gives the ability to manually trigger a workflow from the GitHub API, GitHub CLI, or GitHub browser interface.


### Basic Formatting
- Add a description of what your workflow does at the top in this format:

```
# **what?**
# Describe what the action does.

# **why?**
# Why does this action exist?

# **when?**
# How/when will it be triggered?
```

- Leave blank lines between steps and jobs

```yaml
jobs:
  dependency_changelog:
    runs-on: ubuntu-latest

    steps:
      - name: Get File Name Timestamp
        id: filename_time
        uses: nanzm/get-time-action@v1.1
        with:
          format: 'YYYYMMDD-HHmmss'

      - name: Get File Content Timestamp
        id: file_content_time
        uses: nanzm/get-time-action@v1.1
        with:
          format: 'YYYY-MM-DDTHH:mm:ss.000000-05:00'

      - name: Generate Filepath
        id: fp
        run: |
          FILEPATH=.changes/unreleased/Dependencies-${{ steps.filename_time.outputs.time }}.yaml
          echo "FILEPATH=$FILEPATH" >> $GITHUB_OUTPUT
```

- Print out all variables you will reference as the first step of a job. This allows for easier debugging. The first job should log all inputs. Subsequent jobs should reference outputs of other jobs, if present.

  When possible, generate variables at the top of your workflow in a single place to reference later. This is not always strictly possible, since you may generate a value mid-workflow to be used later.

  Be sure to use quotes around these logs so special characters are not interpreted.

```yaml
job1:
  - name: "[DEBUG] Print Variables"
    run: |
      echo "all variables defined as inputs"
      echo "The last commit sha in the release: ${{ inputs.sha }}"
      echo "The release version number: ${{ inputs.version_number }}"
      echo "The changelog_path: ${{ inputs.changelog_path }}"
      echo "The build_script_path: ${{ inputs.build_script_path }}"
      echo "The s3_bucket_name: ${{ inputs.s3_bucket_name }}"
      echo "The package_test_command: ${{ inputs.package_test_command }}"

  # collect all the variables that need to be used in subsequent jobs
  - name: Set Variables
    id: variables
    run: |
      echo "important_path='performance/runner/Cargo.toml'" >> $GITHUB_OUTPUT
      echo "release_id=${{github.event.inputs.release_id}}" >> $GITHUB_OUTPUT
      echo "open_prs=${{github.event.inputs.open_prs}}" >> $GITHUB_OUTPUT

job2:
  needs: [job1]
  - name: "[DEBUG] Print Variables"
    run: |
      echo "all variables defined in job1 > Set Variables > outputs"
      echo "important_path: ${{ needs.job1.outputs.important_path }}"
      echo "release_id: ${{ needs.job1.outputs.release_id }}"
      echo "open_prs: ${{ needs.job1.outputs.open_prs }}"
```

- When it's not obvious what something does, add a comment!

___

## Tips

### Context
- The [GitHub CLI](https://cli.github.com/) is available in the default runners.
- Actions run in your context, i.e., an action from the marketplace that uses the GITHUB_TOKEN uses the GITHUB_TOKEN generated by your workflow run.

### Actions from the Marketplace
- Don't use external actions for things that can easily be accomplished manually.
- Always read through what an external action does before using it! Often an action in the GitHub Actions Marketplace can be replaced with a few lines of bash. This is much more maintainable (and won't change under us) and makes it clear what's actually happening. It also prevents any surprises from upstream changes.
- Pin actions _we don't control_ to tags (see the sketch below).
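A sketch of the difference, reusing actions that appear elsewhere in this document (the tag shown is illustrative):

```yaml
steps:
  # external action we don't control: pinned to a tag
  - uses: aws-actions/configure-aws-credentials@v1
  # action we do control: referenced from this repository
  - uses: ./.github/actions/latest-wrangler
```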
### Connecting to AWS
- Authenticate with the aws managed workflow

```yaml
- name: Configure AWS credentials from Test account
  uses: aws-actions/configure-aws-credentials@v1
  with:
    aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
    aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
    aws-region: us-east-1
```

- Then access with the aws command that comes installed on the action runner machines

```yaml
- name: Copy Artifacts from S3 via CLI
  run: aws s3 cp ${{ env.s3_bucket }} . --recursive
```

### Testing

- Depending on what your action does, you may be able to use [`act`](https://github.com/nektos/act) to test the action locally. Some features of GitHub Actions do not work with `act`, among them reusable workflows. If you can't use `act`, you'll have to push your changes up before being able to test. This can be slow.
.github/actions/latest-wrangler/Dockerfile
@@ -1,14 +0,0 @@
FROM python:3-slim AS builder
ADD . /app
WORKDIR /app

# We are installing a dependency here directly into our app source dir
RUN pip install --target=/app requests packaging

# A distroless container image with Python and some basics like SSL certificates
# https://github.com/GoogleContainerTools/distroless
FROM gcr.io/distroless/python3-debian10
COPY --from=builder /app /app
WORKDIR /app
ENV PYTHONPATH /app
CMD ["/app/main.py"]
.github/actions/latest-wrangler/README.md
@@ -1,50 +0,0 @@
# Github package 'latest' tag wrangler for containers

## Usage

Plug in the necessary inputs to determine whether the container being built should be tagged 'latest' at the package level, for example `dbt-redshift:latest`.

## Inputs
| Input | Description |
| --- | --- |
| `package` | Name of the GH package to check against |
| `new_version` | Semver of new container |
| `gh_token` | GH token with package read scope |
| `halt_on_missing` | Return non-zero exit code if requested package does not exist (defaults to false) |

## Outputs
| Output | Description |
| --- | --- |
| `latest` | Whether or not the new container should be tagged 'latest' |
| `minor_latest` | Whether or not the new container should be tagged major.minor.latest |

## Example workflow
```yaml
name: Ship it!
on:
  workflow_dispatch:
    inputs:
      package:
        description: The package to publish
        required: true
      version_number:
        description: The version number
        required: true

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v1
      - name: Wrangle latest tag
        id: is_latest
        uses: ./.github/actions/latest-wrangler
        with:
          package: ${{ github.event.inputs.package }}
          new_version: ${{ github.event.inputs.new_version }}
          gh_token: ${{ secrets.GITHUB_TOKEN }}
      - name: Print the results
        run: |
          echo "Is it latest? Survey says: ${{ steps.is_latest.outputs.latest }} !"
          echo "Is it minor.latest? Survey says: ${{ steps.is_latest.outputs.minor_latest }} !"
```
.github/actions/latest-wrangler/action.yml
@@ -1,20 +0,0 @@
name: "Github package 'latest' tag wrangler for containers"
description: "Determines whether or not a given dbt container should be given a bare 'latest' tag (i.e. dbt-core:latest)"
inputs:
  package_name:
    description: "Package to check (i.e. dbt-core, dbt-redshift, etc)"
    required: true
  new_version:
    description: "Semver of the container being built (i.e. 1.0.4)"
    required: true
  gh_token:
    description: "Auth token for github (must have view packages scope)"
    required: true
outputs:
  latest:
    description: "Whether or not the built container should be tagged latest (bool)"
  minor_latest:
    description: "Whether or not the built container should be tagged minor.latest (bool)"
runs:
  using: "docker"
  image: "Dockerfile"
@@ -1,26 +0,0 @@
name: Ship it!
on:
  workflow_dispatch:
    inputs:
      package:
        description: The package to publish
        required: true
      version_number:
        description: The version number
        required: true

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v1
      - name: Wrangle latest tag
        id: is_latest
        uses: ./.github/actions/latest-wrangler
        with:
          package: ${{ github.event.inputs.package }}
          new_version: ${{ github.event.inputs.new_version }}
          gh_token: ${{ secrets.GITHUB_TOKEN }}
      - name: Print the results
        run: |
          echo "Is it latest? Survey says: ${{ steps.is_latest.outputs.latest }} !"
@@ -1,6 +0,0 @@
{
  "inputs": {
    "version_number": "1.0.1",
    "package": "dbt-redshift"
  }
}
.github/actions/latest-wrangler/main.py

@@ -1,98 +0,0 @@
import os
import sys
import requests
from distutils.util import strtobool
from typing import Union
from packaging.version import parse, Version


if __name__ == "__main__":

    # get inputs
    package = os.environ["INPUT_PACKAGE"]
    new_version = parse(os.environ["INPUT_NEW_VERSION"])
    gh_token = os.environ["INPUT_GH_TOKEN"]
    halt_on_missing = strtobool(os.environ.get("INPUT_HALT_ON_MISSING", "False"))

    # get package metadata from github
    package_request = requests.get(
        f"https://api.github.com/orgs/dbt-labs/packages/container/{package}/versions",
        auth=("", gh_token),
    )
    package_meta = package_request.json()

    # Log info if we don't get a 200
    if package_request.status_code != 200:
        print(f"Call to GH API failed: {package_request.status_code} {package_meta['message']}")

    # Make an early exit if there is no matching package in github
    if package_request.status_code == 404:
        if halt_on_missing:
            sys.exit(1)
        # everything is the latest if the package doesn't exist
        # (note: GITHUB_OUTPUT entries must be newline-delimited)
        github_output = os.environ.get("GITHUB_OUTPUT")
        with open(github_output, "at", encoding="utf-8") as gh_output:
            gh_output.write("latest=True\n")
            gh_output.write("minor_latest=True\n")
        sys.exit(0)

    # TODO: verify package meta is "correct"
    # https://github.com/dbt-labs/dbt-core/issues/4640

    # map versions and tags
    version_tag_map = {
        version["id"]: version["metadata"]["container"]["tags"] for version in package_meta
    }

    # is pre-release
    pre_rel = True if any(x in str(new_version) for x in ["a", "b", "rc"]) else False

    # semver of current latest
    for version, tags in version_tag_map.items():
        if "latest" in tags:
            # N.B. This seems counterintuitive, but we expect any version tagged
            # 'latest' to have exactly three associated tags:
            # latest, major.minor.latest, and major.minor.patch.
            # Subtracting everything that contains the string 'latest' gets us
            # the major.minor.patch which is what's needed for comparison.
            current_latest = parse([tag for tag in tags if "latest" not in tag][0])
        else:
            current_latest = False

    # semver of current_minor_latest
    for version, tags in version_tag_map.items():
        if f"{new_version.major}.{new_version.minor}.latest" in tags:
            # Similar to above, only now we expect exactly two tags:
            # major.minor.patch and major.minor.latest
            current_minor_latest = parse([tag for tag in tags if "latest" not in tag][0])
        else:
            current_minor_latest = False

    def is_latest(
        pre_rel: bool, new_version: Version, remote_latest: Union[bool, Version]
    ) -> bool:
        """Determine if a given container should be tagged 'latest' based on:
        - its pre-release status
        - its version
        - the version of a previously identified container tagged 'latest'

        :param pre_rel: Whether or not the version of the new container is a pre-release
        :param new_version: The version of the new container
        :param remote_latest: The version of the previously identified container that's
            already tagged latest, or False
        """
        # is a pre-release = not latest
        if pre_rel:
            return False
        # + no latest tag found = is latest
        if not remote_latest:
            return True
        # + if remote version is lower than current = is latest, else not latest
        return True if remote_latest <= new_version else False

    latest = is_latest(pre_rel, new_version, current_latest)
    minor_latest = is_latest(pre_rel, new_version, current_minor_latest)

    github_output = os.environ.get("GITHUB_OUTPUT")
    with open(github_output, "at", encoding="utf-8") as gh_output:
        gh_output.write(f"latest={latest}\n")
        gh_output.write(f"minor_latest={minor_latest}\n")
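Taken on its own, the tagging policy in is_latest above reduces to three rules. A minimal, self-contained sketch of the same logic (it reuses the `packaging` library the script already imports; the example versions are made up for illustration):

from packaging.version import parse

# Same decision rules as is_latest above: pre-releases are never 'latest';
# otherwise a version is 'latest' when no remote 'latest' exists yet,
# or when it is >= the version currently tagged 'latest'.
def is_latest(pre_rel, new_version, remote_latest):
    if pre_rel:
        return False
    if not remote_latest:
        return True
    return remote_latest <= new_version

assert is_latest(False, parse("1.3.1"), parse("1.3.0"))        # newer stable wins
assert is_latest(False, parse("1.3.1"), False)                 # nothing tagged yet
assert not is_latest(True, parse("1.4.0rc1"), parse("1.3.0"))  # pre-release never wins
assert not is_latest(False, parse("1.2.9"), parse("1.3.0"))    # older than current latest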
.github/actions/setup-postgres-linux/action.yml

@@ -1,10 +0,0 @@
name: "Set up postgres (linux)"
description: "Set up postgres service on linux vm for dbt integration tests"
runs:
  using: "composite"
  steps:
    - shell: bash
      run: |
        sudo systemctl start postgresql.service
        pg_isready
        sudo -u postgres bash ${{ github.action_path }}/setup_db.sh

@@ -1 +0,0 @@
../../../test/setup_db.sh
.github/actions/setup-postgres-macos/action.yml

@@ -1,24 +0,0 @@
name: "Set up postgres (macos)"
description: "Set up postgres service on macos vm for dbt integration tests"
runs:
  using: "composite"
  steps:
    - shell: bash
      run: |
        brew services start postgresql
        echo "Check PostgreSQL service is running"
        i=10
        COMMAND='pg_isready'
        while [ $i -gt -1 ]; do
          if [ $i == 0 ]; then
            echo "PostgreSQL service not ready, all attempts exhausted"
            exit 1
          fi
          echo "Check PostgreSQL service status"
          eval $COMMAND && break
          echo "PostgreSQL service not ready, wait 10 more sec, attempts left: $i"
          sleep 10
          ((i--))
        done
        createuser -s postgres
        bash ${{ github.action_path }}/setup_db.sh

@@ -1 +0,0 @@
../../../test/setup_db.sh
@@ -1,12 +0,0 @@
name: "Set up postgres (windows)"
description: "Set up postgres service on windows vm for dbt integration tests"
runs:
  using: "composite"
  steps:
    - shell: pwsh
      run: |
        $pgService = Get-Service -Name postgresql*
        Set-Service -InputObject $pgService -Status running -StartupType automatic
        Start-Process -FilePath "$env:PGBIN\pg_isready" -Wait -PassThru
        $env:Path += ";$env:PGBIN"
        bash ${{ github.action_path }}/setup_db.sh

@@ -1 +0,0 @@
../../../test/setup_db.sh
.github/dependabot.yml

@@ -11,11 +11,26 @@ updates:
    schedule:
      interval: "daily"
    rebase-strategy: "disabled"
  - package-ecosystem: "pip"
    directory: "/plugins/bigquery"
    schedule:
      interval: "daily"
    rebase-strategy: "disabled"
  - package-ecosystem: "pip"
    directory: "/plugins/postgres"
    schedule:
      interval: "daily"
    rebase-strategy: "disabled"
  - package-ecosystem: "pip"
    directory: "/plugins/redshift"
    schedule:
      interval: "daily"
    rebase-strategy: "disabled"
  - package-ecosystem: "pip"
    directory: "/plugins/snowflake"
    schedule:
      interval: "daily"
    rebase-strategy: "disabled"

  # docker dependencies
  - package-ecosystem: "docker"
.github/pull_request_template.md

@@ -4,20 +4,19 @@ resolves #
Include the number of the issue addressed by this PR above if applicable.
PRs for code changes without an associated issue *will not be merged*.
See CONTRIBUTING.md for more information.

Example:
resolves #1234
-->


### Description

<!---
Describe the Pull Request here. Add any references and info to help reviewers
understand your changes. Include any tradeoffs you considered.
-->
<!--- Describe the Pull Request here -->


### Checklist

- [ ] I have read [the contributing guide](https://github.com/dbt-labs/dbt-core/blob/main/CONTRIBUTING.md) and understand what's expected of me
- [ ] I have signed the [CLA](https://docs.getdbt.com/docs/contributor-license-agreements)
- [ ] I have run this code in development and it appears to resolve the stated issue
- [ ] This PR includes tests, or tests are not required/relevant for this PR
- [ ] I have [opened an issue to add/update docs](https://github.com/dbt-labs/docs.getdbt.com/issues/new/choose), or docs changes are not required/relevant for this PR
- [ ] I have run `changie new` to [create a changelog entry](https://github.com/dbt-labs/dbt-core/blob/main/CONTRIBUTING.md#adding-a-changelog-entry)
- [ ] I have signed the [CLA](https://docs.getdbt.com/docs/contributor-license-agreements)
- [ ] I have run this code in development and it appears to resolve the stated issue
- [ ] This PR includes tests, or tests are not required/relevant for this PR
- [ ] I have updated the `CHANGELOG.md` and added information about my change to the "dbt next" section.
.github/workflows/backport.yml

@@ -1,40 +0,0 @@
# **what?**
# When a PR is merged, if it has the backport label, it will create
# a new PR to backport those changes to the given branch. If it can't
# cleanly do a backport, it will comment on the merged PR of the failure.
#
# Label naming convention: "backport <branch name to backport to>"
# Example: backport 1.0.latest
#
# You MUST "Squash and merge" the original PR or this won't work.

# **why?**
# Changes sometimes need to be backported to release branches.
# This automates the backporting process.

# **when?**
# Triggered when a backport label is added to a PR that has been "Squash and merge"'d.

name: Backport
on:
  pull_request:
    types:
      - labeled

permissions:
  contents: write
  pull-requests: write

jobs:
  backport:
    name: Backport
    runs-on: ubuntu-latest
    # Only react to merged PRs for security reasons.
    # See https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#pull_request_target.
    if: >
      github.event.pull_request.merged
      && contains(github.event.label.name, 'backport')
    steps:
      - uses: tibdex/backport@v2.0.2
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/bot-changelog.yml

@@ -1,61 +0,0 @@
# **what?**
# When bots create a PR, this action will add a corresponding changie yaml file to that
# PR when a specific label is added.
#
# The file is created off a template:
#
# kind: <per action matrix>
# body: <PR title>
# time: <current timestamp>
# custom:
#   Author: <PR User Login (generally the bot)>
#   Issue: 4904
#   PR: <PR number>
#
# **why?**
# Automate changelog generation for more visibility with automated bot PRs.
#
# **when?**
# Once a PR is created; the label can be added before or after creation. You can also
# manually trigger this by adding the appropriate label at any time.
#
# **how to add another bot?**
# Add the label and changie kind to the include matrix. That's it!
#

name: Bot Changelog

on:
  pull_request:
    # catch when the PR is opened with the label or when the label is added
    types: [labeled]

permissions:
  contents: write
  pull-requests: read

jobs:
  generate_changelog:
    strategy:
      matrix:
        include:
          - label: "dependencies"
            changie_kind: "Dependencies"
          - label: "snyk"
            changie_kind: "Security"
    runs-on: ubuntu-latest

    steps:

      - name: Create and commit changelog on bot PR
        if: ${{ contains(github.event.pull_request.labels.*.name, matrix.label) }}
        id: bot_changelog
        uses: emmyoop/changie_bot@v1.0.1
        with:
          GITHUB_TOKEN: ${{ secrets.FISHTOWN_BOT_PAT }}
          commit_author_name: "Github Build Bot"
          commit_author_email: "<buildbot@fishtownanalytics.com>"
          commit_message: "Add automated changelog yaml from template for bot PR"
          changie_kind: ${{ matrix.changie_kind }}
          label: ${{ matrix.label }}
          custom_changelog_string: "custom:\n  Author: ${{ github.event.pull_request.user.login }}\n  PR: ${{ github.event.pull_request.number }}"
.github/workflows/changelog-existence.yml

@@ -1,40 +0,0 @@
# **what?**
# Checks that a file has been committed under the /.changes directory
# as a new CHANGELOG entry. Cannot check for a specific filename as
# it is dynamically generated by change type and timestamp.
# This workflow should not require any secrets since it runs for PRs
# from forked repos.
# By default, secrets are not passed to workflows running from
# a forked repo.

# **why?**
# Ensure code changes get reflected in the CHANGELOG.

# **when?**
# This will run for all PRs going into main and *.latest. It will
# run when they are opened, reopened, when any label is added or removed,
# and when new code is pushed to the branch. The check is skipped
# if the 'Skip Changelog' label is present on the PR.

name: Check Changelog Entry

on:
  pull_request:
    types: [opened, reopened, labeled, unlabeled, synchronize]
  workflow_dispatch:

defaults:
  run:
    shell: bash

permissions:
  contents: read
  pull-requests: write

jobs:
  changelog:
    uses: dbt-labs/actions/.github/workflows/changelog-existence.yml@main
    with:
      changelog_comment: 'Thank you for your pull request! We could not find a changelog entry for this change. For details on how to document a change, see [the contributing guide](https://github.com/dbt-labs/dbt-core/blob/main/CONTRIBUTING.md#adding-changelog-entry).'
      skip_label: 'Skip Changelog'
    secrets: inherit
.github/workflows/generate-cli-api-docs.yml

@@ -1,165 +0,0 @@
# **what?**
# On push, if anything in core/dbt/docs or core/dbt/cli has been
# created or modified, regenerate the CLI API docs using sphinx.

# **why?**
# We watch for changes in core/dbt/cli because the CLI API docs rely on click
# and all supporting flags/params to be generated. We watch for changes in
# core/dbt/docs since any changes to sphinx configuration or any of the
# .rst files there could result in a differently built final index.html file.

# **when?**
# Whenever a change has been pushed to a branch, and only if there is a diff
# between the PR branch and main's core/dbt/cli and/or core/dbt/docs dirs.

# TODO: add bot comment to PR informing contributor that the docs have been committed
# TODO: figure out why github action triggered pushes cause github to fail to report
# the status of jobs

name: Generate CLI API docs

on:
  pull_request:

permissions:
  contents: write
  pull-requests: write

env:
  CLI_DIR: ${{ github.workspace }}/core/dbt/cli
  DOCS_DIR: ${{ github.workspace }}/core/dbt/docs
  DOCS_BUILD_DIR: ${{ github.workspace }}/core/dbt/docs/build

jobs:
  check_gen:
    name: check if generation needed
    runs-on: ubuntu-latest
    if: ${{ github.event.pull_request.head.repo.fork == false }}
    outputs:
      cli_dir_changed: ${{ steps.check_cli.outputs.cli_dir_changed }}
      docs_dir_changed: ${{ steps.check_docs.outputs.docs_dir_changed }}

    steps:
      - name: "[DEBUG] print variables"
        run: |
          echo "env.CLI_DIR: ${{ env.CLI_DIR }}"
          echo "env.DOCS_BUILD_DIR: ${{ env.DOCS_BUILD_DIR }}"
          echo "env.DOCS_DIR: ${{ env.DOCS_DIR }}"

      - name: git checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0
          ref: ${{ github.head_ref }}

      - name: set shas
        id: set_shas
        run: |
          THIS_SHA=$(git rev-parse @)
          LAST_SHA=$(git rev-parse @~1)

          echo "this sha: $THIS_SHA"
          echo "last sha: $LAST_SHA"

          echo "this_sha=$THIS_SHA" >> $GITHUB_OUTPUT
          echo "last_sha=$LAST_SHA" >> $GITHUB_OUTPUT

      - name: check for changes in core/dbt/cli
        id: check_cli
        run: |
          CLI_DIR_CHANGES=$(git diff \
            ${{ steps.set_shas.outputs.last_sha }} \
            ${{ steps.set_shas.outputs.this_sha }} \
            -- ${{ env.CLI_DIR }})

          if [ -n "$CLI_DIR_CHANGES" ]; then
            echo "changes found"
            echo $CLI_DIR_CHANGES
            echo "cli_dir_changed=true" >> $GITHUB_OUTPUT
            exit 0
          fi
          echo "cli_dir_changed=false" >> $GITHUB_OUTPUT
          echo "no changes found"

      - name: check for changes in core/dbt/docs
        id: check_docs
        if: steps.check_cli.outputs.cli_dir_changed == 'false'
        run: |
          DOCS_DIR_CHANGES=$(git diff --name-only \
            ${{ steps.set_shas.outputs.last_sha }} \
            ${{ steps.set_shas.outputs.this_sha }} \
            -- ${{ env.DOCS_DIR }} ':!${{ env.DOCS_BUILD_DIR }}')

          DOCS_BUILD_DIR_CHANGES=$(git diff --name-only \
            ${{ steps.set_shas.outputs.last_sha }} \
            ${{ steps.set_shas.outputs.this_sha }} \
            -- ${{ env.DOCS_BUILD_DIR }})

          if [ -n "$DOCS_DIR_CHANGES" ] && [ -z "$DOCS_BUILD_DIR_CHANGES" ]; then
            echo "changes found"
            echo $DOCS_DIR_CHANGES
            echo "docs_dir_changed=true" >> $GITHUB_OUTPUT
            exit 0
          fi
          echo "docs_dir_changed=false" >> $GITHUB_OUTPUT
          echo "no changes found"

  gen_docs:
    name: generate docs
    runs-on: ubuntu-latest
    needs: [check_gen]
    if: |
      needs.check_gen.outputs.cli_dir_changed == 'true'
      || needs.check_gen.outputs.docs_dir_changed == 'true'

    steps:
      - name: "[DEBUG] print variables"
        run: |
          echo "env.DOCS_DIR: ${{ env.DOCS_DIR }}"
          echo "github head_ref: ${{ github.head_ref }}"

      - name: git checkout
        uses: actions/checkout@v3
        with:
          ref: ${{ github.head_ref }}

      - name: install python
        uses: actions/setup-python@v4.3.0
        with:
          python-version: 3.8

      - name: install dev requirements
        run: |
          python3 -m venv env
          source env/bin/activate
          python -m pip install --upgrade pip
          pip install -r requirements.txt -r dev-requirements.txt

      - name: generate docs
        run: |
          source env/bin/activate
          cd ${{ env.DOCS_DIR }}

          echo "cleaning existing docs"
          make clean

          echo "creating docs"
          make html

      - name: debug
        run: |
          echo ">>>>> status"
          git status
          echo ">>>>> remotes"
          git remote -v
          echo ">>>>> branch"
          git branch -v
          echo ">>>>> log"
          git log --pretty=oneline | head -5

      - name: commit docs
        run: |
          git config user.name 'Github Build Bot'
          git config user.email 'buildbot@fishtownanalytics.com'
          git commit -am "Add generated CLI API docs"
          git push -u origin ${{ github.head_ref }}
.github/workflows/jira-creation.yml

@@ -1,26 +0,0 @@
# **what?**
# Mirrors issues into Jira. Includes the information: title,
# GitHub Issue ID and URL

# **why?**
# Jira is our tool for tracking and we need to see these issues in there

# **when?**
# On issue creation or when an issue is labeled `Jira`

name: Jira Issue Creation

on:
  issues:
    types: [opened, labeled]

permissions:
  issues: write

jobs:
  call-label-action:
    uses: dbt-labs/jira-actions/.github/workflows/jira-creation.yml@main
    secrets:
      JIRA_BASE_URL: ${{ secrets.JIRA_BASE_URL }}
      JIRA_USER_EMAIL: ${{ secrets.JIRA_USER_EMAIL }}
      JIRA_API_TOKEN: ${{ secrets.JIRA_API_TOKEN }}
.github/workflows/jira-label.yml

@@ -1,26 +0,0 @@
# **what?**
# Calls the Jira label mirroring Action. This covers adding a new label
# to an existing issue as well as removing one

# **why?**
# Jira is our tool for tracking and we need to see these labels in there

# **when?**
# On labels being added or removed from issues

name: Jira Label Mirroring

on:
  issues:
    types: [labeled, unlabeled]

permissions:
  issues: read

jobs:
  call-label-action:
    uses: dbt-labs/jira-actions/.github/workflows/jira-label.yml@main
    secrets:
      JIRA_BASE_URL: ${{ secrets.JIRA_BASE_URL }}
      JIRA_USER_EMAIL: ${{ secrets.JIRA_USER_EMAIL }}
      JIRA_API_TOKEN: ${{ secrets.JIRA_API_TOKEN }}
.github/workflows/jira-transition.yml

@@ -1,27 +0,0 @@
# **what?**
# Transition a Jira issue to a new state
# Only supports these GitHub Issue transitions:
# closed, deleted, reopened

# **why?**
# Jira needs to be kept up-to-date

# **when?**
# When an issue is closed, deleted, or reopened

name: Jira Issue Transition

on:
  issues:
    types: [closed, deleted, reopened]

# no special access is needed
permissions: read-all

jobs:
  call-label-action:
    uses: dbt-labs/jira-actions/.github/workflows/jira-transition.yml@main
    secrets:
      JIRA_BASE_URL: ${{ secrets.JIRA_BASE_URL }}
      JIRA_USER_EMAIL: ${{ secrets.JIRA_USER_EMAIL }}
      JIRA_API_TOKEN: ${{ secrets.JIRA_API_TOKEN }}
.github/workflows/main.yml

@@ -1,238 +0,0 @@
# **what?**
# Runs code quality checks, unit tests, integration tests and
# verifies python build on all code committed to the repository. This workflow
# should not require any secrets since it runs for PRs from forked repos. By
# default, secrets are not passed to workflows running from forked repos.

# **why?**
# Ensure code for dbt meets a certain quality standard.

# **when?**
# This will run for all PRs, when code is pushed to a release
# branch, and when manually triggered.

name: Tests and Code Checks

on:
  push:
    branches:
      - "main"
      - "*.latest"
      - "releases/*"
  pull_request:
  workflow_dispatch:

permissions: read-all

# will cancel previous workflows triggered by the same event and for the same ref for PRs or same SHA otherwise
concurrency:
  group: ${{ github.workflow }}-${{ github.event_name }}-${{ contains(github.event_name, 'pull_request') && github.event.pull_request.head.ref || github.sha }}
  cancel-in-progress: true

defaults:
  run:
    shell: bash

jobs:
  code-quality:
    name: code-quality

    runs-on: ubuntu-latest
    timeout-minutes: 10

    steps:
      - name: Check out the repository
        uses: actions/checkout@v2

      - name: Set up Python
        uses: actions/setup-python@v4.3.0
        with:
          python-version: '3.8'

      - name: Install python dependencies
        run: |
          python -m pip install --user --upgrade pip
          python -m pip --version
          python -m pip install pre-commit
          pre-commit --version
          python -m pip install mypy==0.942
          mypy --version
          python -m pip install -r requirements.txt
          python -m pip install -r dev-requirements.txt
          dbt --version

      - name: Run pre-commit hooks
        run: pre-commit run --all-files --show-diff-on-failure

  unit:
    name: unit test / python ${{ matrix.python-version }}

    runs-on: ubuntu-latest
    timeout-minutes: 10

    strategy:
      fail-fast: false
      matrix:
        python-version: ["3.7", "3.8", "3.9", "3.10", "3.11"]

    env:
      TOXENV: "unit"
      PYTEST_ADDOPTS: "-v --color=yes --csv unit_results.csv"

    steps:
      - name: Check out the repository
        uses: actions/checkout@v2

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v4.3.0
        with:
          python-version: ${{ matrix.python-version }}

      - name: Install python dependencies
        run: |
          python -m pip install --user --upgrade pip
          python -m pip --version
          python -m pip install tox
          tox --version

      - name: Run tox
        run: tox

      - name: Get current date
        if: always()
        id: date
        run: |
          CURRENT_DATE=$(date +'%Y-%m-%dT%H_%M_%S') # no colons allowed for artifacts
          echo "date=$CURRENT_DATE" >> $GITHUB_OUTPUT

      - uses: actions/upload-artifact@v2
        if: always()
        with:
          name: unit_results_${{ matrix.python-version }}-${{ steps.date.outputs.date }}.csv
          path: unit_results.csv

  integration:
    name: integration test / python ${{ matrix.python-version }} / ${{ matrix.os }}

    runs-on: ${{ matrix.os }}
    timeout-minutes: 45

    strategy:
      fail-fast: false
      matrix:
        python-version: ["3.7", "3.8", "3.9", "3.10", "3.11"]
        os: [ubuntu-20.04]
        include:
          - python-version: 3.8
            os: windows-latest
          - python-version: 3.8
            os: macos-latest

    env:
      TOXENV: integration
      PYTEST_ADDOPTS: "-v --color=yes -n4 --csv integration_results.csv"
      DBT_INVOCATION_ENV: github-actions
      DBT_TEST_USER_1: dbt_test_user_1
      DBT_TEST_USER_2: dbt_test_user_2
      DBT_TEST_USER_3: dbt_test_user_3

    steps:
      - name: Check out the repository
        uses: actions/checkout@v2

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v4.3.0
        with:
          python-version: ${{ matrix.python-version }}

      - name: Set up postgres (linux)
        if: runner.os == 'Linux'
        uses: ./.github/actions/setup-postgres-linux

      - name: Set up postgres (macos)
        if: runner.os == 'macOS'
        uses: ./.github/actions/setup-postgres-macos

      - name: Set up postgres (windows)
        if: runner.os == 'Windows'
        uses: ./.github/actions/setup-postgres-windows

      - name: Install python tools
        run: |
          python -m pip install --user --upgrade pip
          python -m pip --version
          python -m pip install tox
          tox --version

      - name: Run tests
        run: tox

      - name: Get current date
        if: always()
        id: date
        run: |
          CURRENT_DATE=$(date +'%Y-%m-%dT%H_%M_%S') # no colons allowed for artifacts
          echo "date=$CURRENT_DATE" >> $GITHUB_OUTPUT

      - uses: actions/upload-artifact@v2
        if: always()
        with:
          name: logs_${{ matrix.python-version }}_${{ matrix.os }}_${{ steps.date.outputs.date }}
          path: ./logs

      - uses: actions/upload-artifact@v2
        if: always()
        with:
          name: integration_results_${{ matrix.python-version }}_${{ matrix.os }}_${{ steps.date.outputs.date }}.csv
          path: integration_results.csv

  build:
    name: build packages

    runs-on: ubuntu-latest

    steps:
      - name: Check out the repository
        uses: actions/checkout@v2

      - name: Set up Python
        uses: actions/setup-python@v4.3.0
        with:
          python-version: '3.8'

      - name: Install python dependencies
        run: |
          python -m pip install --user --upgrade pip
          python -m pip install --upgrade setuptools wheel twine check-wheel-contents
          python -m pip --version

      - name: Build distributions
        run: ./scripts/build-dist.sh

      - name: Show distributions
        run: ls -lh dist/

      - name: Check distribution descriptions
        run: |
          twine check dist/*

      - name: Check wheel contents
        run: |
          check-wheel-contents dist/*.whl --ignore W007,W008

      - name: Install wheel distributions
        run: |
          find ./dist/*.whl -maxdepth 1 -type f | xargs python -m pip install --force-reinstall --find-links=dist/

      - name: Check wheel distributions
        run: |
          dbt --version

      - name: Install source distributions
        # ignore dbt-1.0.0, which intentionally raises an error when installed from source
        run: |
          find ./dist/dbt-[a-z]*.gz -maxdepth 1 -type f | xargs python -m pip install --force-reinstall --find-links=dist/

      - name: Check source distributions
        run: |
          dbt --version
.github/workflows/release-branch-tests.yml

@@ -1,62 +0,0 @@
# **what?**
# The purpose of this workflow is to trigger CI to run for each
# release branch and main branch on a regular cadence. If the CI workflow
# fails for a branch, it will post to dev-core-alerts to raise awareness.
# The 'aurelien-baudet/workflow-dispatch' Action triggers the existing
# CI workflow file on the given branch to run so that even if we change the
# CI workflow file in the future, the one that is tailored for the given
# release branch will be used.

# **why?**
# Ensures release branches and main are always shippable and not broken.
# Also, can catch any dependencies shifting beneath us that might
# introduce breaking changes (could also impact Cloud).

# **when?**
# Mainly on a schedule of 9:00, 13:00, 18:00 UTC every day.
# A manual trigger can also test on demand.

name: Release branch scheduled testing

on:
  schedule:
    - cron: '0 9,13,18 * * *' # 9:00, 13:00, 18:00 UTC

  workflow_dispatch: # for manual triggering

# no special access is needed
permissions: read-all

jobs:
  kick-off-ci:
    name: Kick-off CI
    runs-on: ubuntu-latest

    strategy:
      # must run CI 1 branch at a time b/c the workflow-dispatch Action polls for
      # the latest run for results and it gets confused when we kick off multiple runs
      # at once. There is a race condition so we will just run in sequential order.
      max-parallel: 1
      fail-fast: false
      matrix:
        branch: [1.0.latest, 1.1.latest, 1.2.latest, 1.3.latest, main]

    steps:
      - name: Call CI workflow for ${{ matrix.branch }} branch
        id: trigger-step
        uses: aurelien-baudet/workflow-dispatch@v2.1.1
        with:
          workflow: main.yml
          ref: ${{ matrix.branch }}
          token: ${{ secrets.FISHTOWN_BOT_PAT }}

      - name: Post failure to Slack
        uses: ravsamhq/notify-slack-action@v1
        if: ${{ always() && !contains(steps.trigger-step.outputs.workflow-conclusion, 'success') }}
        with:
          status: ${{ job.status }}
          notification_title: 'dbt-core scheduled run of "${{ matrix.branch }}" branch not successful'
          message_format: ':x: CI on branch "${{ matrix.branch }}" ${{ steps.trigger-step.outputs.workflow-conclusion }}'
          footer: 'Linked failed CI run ${{ steps.trigger-step.outputs.workflow-url }}'
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_DEV_CORE_ALERTS }}
.github/workflows/release-docker.yml

@@ -1,118 +0,0 @@
# **what?**
# This workflow will generate a series of docker images for dbt and push them to the github container registry

# **why?**
# Docker images for dbt are used in a number of important places throughout the dbt ecosystem. This is how we keep those images up-to-date.

# **when?**
# This is triggered manually

# **next steps**
# - build this into the release workflow (or conversely, break out the different release methods into their own workflow files)

name: Docker release

permissions:
  packages: write

on:
  workflow_dispatch:
    inputs:
      package:
        description: The package to release. _One_ of [dbt-core, dbt-redshift, dbt-bigquery, dbt-snowflake, dbt-spark, dbt-postgres]
        required: true
      version_number:
        description: The release version number (i.e. 1.0.0b1). Do not include `latest` tags or a leading `v`!
        required: true

jobs:
  get_version_meta:
    name: Get version meta
    runs-on: ubuntu-latest
    outputs:
      major: ${{ steps.version.outputs.major }}
      minor: ${{ steps.version.outputs.minor }}
      patch: ${{ steps.version.outputs.patch }}
      latest: ${{ steps.latest.outputs.latest }}
      minor_latest: ${{ steps.latest.outputs.minor_latest }}
    steps:
      - uses: actions/checkout@v1
      - name: Split version
        id: version
        run: |
          IFS="." read -r MAJOR MINOR PATCH <<< ${{ github.event.inputs.version_number }}
          echo "major=$MAJOR" >> $GITHUB_OUTPUT
          echo "minor=$MINOR" >> $GITHUB_OUTPUT
          echo "patch=$PATCH" >> $GITHUB_OUTPUT

      - name: Is pkg 'latest'
        id: latest
        uses: ./.github/actions/latest-wrangler
        with:
          package: ${{ github.event.inputs.package }}
          new_version: ${{ github.event.inputs.version_number }}
          gh_token: ${{ secrets.GITHUB_TOKEN }}
          halt_on_missing: False

  setup_image_builder:
    name: Set up docker image builder
    runs-on: ubuntu-latest
    needs: [get_version_meta]
    steps:
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1

  build_and_push:
    name: Build images and push to GHCR
    runs-on: ubuntu-latest
    needs: [setup_image_builder, get_version_meta]
    steps:
      - name: Get docker build arg
        id: build_arg
        run: |
          BUILD_ARG_NAME=$(echo ${{ github.event.inputs.package }} | sed 's/\-/_/g')
          BUILD_ARG_VALUE=$(echo ${{ github.event.inputs.package }} | sed 's/postgres/core/g')
          echo "build_arg_name=$BUILD_ARG_NAME" >> $GITHUB_OUTPUT
          echo "build_arg_value=$BUILD_ARG_VALUE" >> $GITHUB_OUTPUT

      - name: Log in to the GHCR
        uses: docker/login-action@v1
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Build and push MAJOR.MINOR.PATCH tag
        uses: docker/build-push-action@v2
        with:
          file: docker/Dockerfile
          push: True
          target: ${{ github.event.inputs.package }}
          build-args: |
            ${{ steps.build_arg.outputs.build_arg_name }}_ref=${{ steps.build_arg.outputs.build_arg_value }}@v${{ github.event.inputs.version_number }}
          tags: |
            ghcr.io/dbt-labs/${{ github.event.inputs.package }}:${{ github.event.inputs.version_number }}

      - name: Build and push MINOR.latest tag
        uses: docker/build-push-action@v2
        if: ${{ needs.get_version_meta.outputs.minor_latest == 'True' }}
        with:
          file: docker/Dockerfile
          push: True
          target: ${{ github.event.inputs.package }}
          build-args: |
            ${{ steps.build_arg.outputs.build_arg_name }}_ref=${{ steps.build_arg.outputs.build_arg_value }}@v${{ github.event.inputs.version_number }}
          tags: |
            ghcr.io/dbt-labs/${{ github.event.inputs.package }}:${{ needs.get_version_meta.outputs.major }}.${{ needs.get_version_meta.outputs.minor }}.latest

      - name: Build and push latest tag
        uses: docker/build-push-action@v2
        if: ${{ needs.get_version_meta.outputs.latest == 'True' }}
        with:
          file: docker/Dockerfile
          push: True
          target: ${{ github.event.inputs.package }}
          build-args: |
            ${{ steps.build_arg.outputs.build_arg_name }}_ref=${{ steps.build_arg.outputs.build_arg_value }}@v${{ github.event.inputs.version_number }}
          tags: |
            ghcr.io/dbt-labs/${{ github.event.inputs.package }}:latest
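The "Get docker build arg" step in the workflow above is a small name mapping: the build-arg name is the package name with dashes replaced by underscores, and the build-arg value swaps `postgres` for `core` (dbt-postgres lives in the dbt-core repository, so its image appears to be built from a dbt-core ref). A sketch of the equivalent mapping in Python, for illustration only:

def docker_build_arg(package: str):
    # mirrors: sed 's/\-/_/g' and sed 's/postgres/core/g'
    name = package.replace("-", "_")              # dbt-postgres -> dbt_postgres
    value = package.replace("postgres", "core")   # dbt-postgres -> dbt-core
    return name, value

assert docker_build_arg("dbt-postgres") == ("dbt_postgres", "dbt-core")
assert docker_build_arg("dbt-redshift") == ("dbt_redshift", "dbt-redshift")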
.github/workflows/release.yml

@@ -1,202 +0,0 @@
# **what?**
# Take the given commit, run unit tests specifically on that sha, build and
# package it, and then release to GitHub and PyPI with that specific build

# **why?**
# Ensure an automated and tested release process

# **when?**
# This will only run manually with a given sha and version

name: Release to GitHub and PyPI

on:
  workflow_dispatch:
    inputs:
      sha:
        description: 'The last commit sha in the release'
        required: true
      version_number:
        description: 'The release version number (i.e. 1.0.0b1)'
        required: true

permissions:
  contents: write # this is the permission that allows creating a new release

defaults:
  run:
    shell: bash

jobs:
  unit:
    name: Unit test

    runs-on: ubuntu-latest

    env:
      TOXENV: "unit"

    steps:
      - name: Check out the repository
        uses: actions/checkout@v2
        with:
          persist-credentials: false
          ref: ${{ github.event.inputs.sha }}

      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: 3.8

      - name: Install python dependencies
        run: |
          pip install --user --upgrade pip
          pip install tox
          pip --version
          tox --version

      - name: Run tox
        run: tox

  build:
    name: build packages

    runs-on: ubuntu-latest

    steps:
      - name: Check out the repository
        uses: actions/checkout@v2
        with:
          persist-credentials: false
          ref: ${{ github.event.inputs.sha }}

      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: 3.8

      - name: Install python dependencies
        run: |
          pip install --user --upgrade pip
          pip install --upgrade setuptools wheel twine check-wheel-contents
          pip --version

      - name: Build distributions
        run: ./scripts/build-dist.sh

      - name: Show distributions
        run: ls -lh dist/

      - name: Check distribution descriptions
        run: |
          twine check dist/*

      - name: Check wheel contents
        run: |
          check-wheel-contents dist/*.whl --ignore W007,W008

      - uses: actions/upload-artifact@v2
        with:
          name: dist
          path: |
            dist/
            !dist/dbt-${{github.event.inputs.version_number}}.tar.gz

  test-build:
    name: verify packages

    needs: [build, unit]

    runs-on: ubuntu-latest

    steps:
      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: 3.8

      - name: Install python dependencies
        run: |
          pip install --user --upgrade pip
          pip install --upgrade wheel
          pip --version

      - uses: actions/download-artifact@v2
        with:
          name: dist
          path: dist/

      - name: Show distributions
        run: ls -lh dist/

      - name: Install wheel distributions
        run: |
          find ./dist/*.whl -maxdepth 1 -type f | xargs pip install --force-reinstall --find-links=dist/

      - name: Check wheel distributions
        run: |
          dbt --version

      - name: Install source distributions
        run: |
          find ./dist/*.gz -maxdepth 1 -type f | xargs pip install --force-reinstall --find-links=dist/

      - name: Check source distributions
        run: |
          dbt --version

  github-release:
    name: GitHub Release

    needs: test-build

    runs-on: ubuntu-latest

    steps:
      - uses: actions/download-artifact@v2
        with:
          name: dist
          path: '.'

      # Need to set an output variable because env variables can't be taken as input
      # This is needed for the next step with releasing to GitHub
      - name: Find release type
        id: release_type
        env:
          IS_PRERELEASE: ${{ contains(github.event.inputs.version_number, 'rc') || contains(github.event.inputs.version_number, 'b') }}
        run: |
          echo "isPrerelease=$IS_PRERELEASE" >> $GITHUB_OUTPUT

      - name: Creating GitHub Release
        uses: softprops/action-gh-release@v1
        with:
          name: dbt-core v${{github.event.inputs.version_number}}
          tag_name: v${{github.event.inputs.version_number}}
          prerelease: ${{ steps.release_type.outputs.isPrerelease }}
          target_commitish: ${{github.event.inputs.sha}}
          body: |
            [Release notes](https://github.com/dbt-labs/dbt-core/blob/main/CHANGELOG.md)
          files: |
            dbt_postgres-${{github.event.inputs.version_number}}-py3-none-any.whl
            dbt_core-${{github.event.inputs.version_number}}-py3-none-any.whl
            dbt-postgres-${{github.event.inputs.version_number}}.tar.gz
            dbt-core-${{github.event.inputs.version_number}}.tar.gz

  pypi-release:
    name: PyPI release

    runs-on: ubuntu-latest

    needs: github-release

    environment: PypiProd
    steps:
      - uses: actions/download-artifact@v2
        with:
          name: dist
          path: 'dist'

      - name: Publish distribution to PyPI
        uses: pypa/gh-action-pypi-publish@v1.4.2
        with:
          password: ${{ secrets.PYPI_API_TOKEN }}
.github/workflows/schema-check.yml

@@ -1,90 +0,0 @@
# **what?**
# Compares the schema of the dbt version of the given ref vs
# the latest official schema releases found in schemas.getdbt.com.
# If there are differences, the workflow will fail and upload the
# diff as an artifact. The metadata team should be alerted to the change.
#
# **why?**
# Reaction work may need to be done if artifact schema changes
# occur so we want to proactively alert to it.
#
# **when?**
# On pushes to `develop` and release branches. Manual runs are also enabled.
name: Artifact Schema Check

on:
  workflow_dispatch:
  pull_request: # TODO: remove before merging
  push:
    branches:
      - "develop"
      - "*.latest"
      - "releases/*"

# no special access is needed
permissions: read-all

env:
  LATEST_SCHEMA_PATH: ${{ github.workspace }}/new_schemas
  SCHEMA_DIFF_ARTIFACT: ${{ github.workspace }}/schema_changes.txt
  DBT_REPO_DIRECTORY: ${{ github.workspace }}/dbt
  SCHEMA_REPO_DIRECTORY: ${{ github.workspace }}/schemas.getdbt.com

jobs:
  checking-schemas:
    name: "Checking schemas"
    runs-on: ubuntu-latest

    steps:
      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: 3.8

      - name: Checkout dbt repo
        uses: actions/checkout@v2.3.4
        with:
          path: ${{ env.DBT_REPO_DIRECTORY }}

      - name: Checkout schemas.getdbt.com repo
        uses: actions/checkout@v2.3.4
        with:
          repository: dbt-labs/schemas.getdbt.com
          ref: 'main'
          ssh-key: ${{ secrets.SCHEMA_SSH_PRIVATE_KEY }}
          path: ${{ env.SCHEMA_REPO_DIRECTORY }}

      - name: Generate current schema
        run: |
          cd ${{ env.DBT_REPO_DIRECTORY }}
          python3 -m venv env
          source env/bin/activate
          pip install --upgrade pip
          pip install -r dev-requirements.txt -r editable-requirements.txt
          python scripts/collect-artifact-schema.py --path ${{ env.LATEST_SCHEMA_PATH }}

      # Copy generated schema files into the schemas.getdbt.com repo
      # Do a git diff to find any changes
      # Ignore any date or version changes though
      - name: Compare schemas
        run: |
          cp -r ${{ env.LATEST_SCHEMA_PATH }}/dbt ${{ env.SCHEMA_REPO_DIRECTORY }}
          cd ${{ env.SCHEMA_REPO_DIRECTORY }}
          diff_results=$(git diff -I='*[0-9]{4}-(0[1-9]|1[0-2])-(0[1-9]|[1-2][0-9]|3[0-1])T' \
            -I='*[0-9]{1}.[0-9]{2}.[0-9]{1}(rc[0-9]|b[0-9]| )' --compact-summary)
          if [[ -n "$diff_results" ]]; then
            echo "$diff_results"
            echo "Schema changes detected!"
            git diff -I='*[0-9]{4}-(0[1-9]|1[0-2])-(0[1-9]|[1-2][0-9]|3[0-1])T' \
              -I='*[0-9]{1}.[0-9]{2}.[0-9]{1}(rc[0-9]|b[0-9]| )' > ${{ env.SCHEMA_DIFF_ARTIFACT }}
            exit 1
          else
            echo "No schema changes detected"
          fi

      - name: Upload schema diff
        uses: actions/upload-artifact@v2.2.4
        if: ${{ failure() }}
        with:
          name: 'schema_changes.txt'
          path: '${{ env.SCHEMA_DIFF_ARTIFACT }}'
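The `-I` flags in the "Compare schemas" step make `git diff` ignore changed lines matching a timestamp or version pattern, so only structural schema changes trip the check. A rough sketch of the same filtering idea in Python (the regexes are adapted to Python `re` syntax; the sample lines are hypothetical):

import re

# In the spirit of the -I patterns above: ISO-8601 timestamps and
# version-like strings are treated as noise, everything else as signal.
IGNORED = [
    re.compile(r"\d{4}-(0[1-9]|1[0-2])-(0[1-9]|[1-2]\d|3[0-1])T"),
    re.compile(r"\d\.\d{2}\.\d(rc\d|b\d| )"),
]

def is_noise(changed_line: str) -> bool:
    return any(p.search(changed_line) for p in IGNORED)

assert is_noise('"generated_at": "2022-11-04T12:00:00.000000Z",')
assert not is_noise('"new_required_field": {"type": "string"},')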
.github/workflows/stale.yml

@@ -1,12 +0,0 @@
name: "Close stale issues and PRs"
on:
  schedule:
    - cron: "30 1 * * *"

permissions:
  issues: write
  pull-requests: write

jobs:
  stale:
    uses: dbt-labs/actions/.github/workflows/stale-bot-matrix.yml@main
@@ -1,65 +0,0 @@
# This Action makes a dbt run to sample json structured logs
# and checks that they conform to the currently documented schema.
#
# If this action fails it either means we have unintentionally deviated
# from our documented structured logging schema, or we need to bump the
# version of our structured logging and add new documentation to
# communicate these changes.

name: Structured Logging Schema Check
on:
  push:
    branches:
      - "main"
      - "*.latest"
      - "releases/*"
  pull_request:
  workflow_dispatch:

permissions: read-all

jobs:
  # run the performance measurements on the current or default branch
  test-schema:
    name: Test Log Schema
    runs-on: ubuntu-20.04
    env:
      # turns warnings into errors
      RUSTFLAGS: "-D warnings"
      # points tests to the log file
      LOG_DIR: "/home/runner/work/dbt-core/dbt-core/logs"
      # tells integration tests to output into json format
      DBT_LOG_FORMAT: "json"
      # Additional test users
      DBT_TEST_USER_1: dbt_test_user_1
      DBT_TEST_USER_2: dbt_test_user_2
      DBT_TEST_USER_3: dbt_test_user_3

    steps:
      - name: checkout dev
        uses: actions/checkout@v2
        with:
          persist-credentials: false

      - name: Setup Python
        uses: actions/setup-python@v2.2.2
        with:
          python-version: "3.8"

      - name: Install python dependencies
        run: |
          pip install --user --upgrade pip
          pip --version
          pip install tox
          tox --version

      - name: Set up postgres
        uses: ./.github/actions/setup-postgres-linux

      - name: ls
        run: ls

      # integration tests generate a ton of logs in different files. the next step will find them all.
      # we actually care if these pass, because the normal test run doesn't usually include many json log outputs
      - name: Run integration tests
        run: tox -e integration -- -nauto
.github/workflows/test/.actrc

@@ -1 +0,0 @@
-P ubuntu-latest=ghcr.io/catthehacker/ubuntu:act-latest

.github/workflows/test/.gitignore

@@ -1 +0,0 @@
.secrets

.github/workflows/test/.secrets.EXAMPLE

@@ -1 +0,0 @@
GITHUB_TOKEN=GH_PERSONAL_ACCESS_TOKEN_GOES_HERE

@@ -1,6 +0,0 @@
{
  "inputs": {
    "version_number": "1.0.1",
    "package": "dbt-postgres"
  }
}
.github/workflows/triage-labels.yml

@@ -1,33 +0,0 @@
# **what?**
# When the core team triages, we sometimes need more information from the issue creator. In
# those cases we remove the `triage` label and add the `awaiting_response` label. Once we
# receive a response in the form of a comment, we want the `awaiting_response` label removed
# in favor of the `triage` label so we are aware that the issue needs action.

# **why?**
# To help with our team's triage issue tracking

# **when?**
# This will run when a comment is added to an issue and that issue has the `awaiting_response` label.

name: Update Triage Label

on: issue_comment

defaults:
  run:
    shell: bash

permissions:
  issues: write

jobs:
  triage_label:
    if: contains(github.event.issue.labels.*.name, 'awaiting_response')
    runs-on: ubuntu-latest
    steps:
      - name: initial labeling
        uses: andymckay/labeler@master
        with:
          add-labels: "triage"
          remove-labels: "awaiting_response"
.github/workflows/version-bump.yml

@@ -1,125 +0,0 @@
# **what?**
# This workflow takes the new version number to bump to. With that
# it will run bumpversion to update the version number everywhere in the
# code base and then run changie to create the corresponding changelog.
# A PR will be created with the changes that can be reviewed before merging.

# **why?**
# This is to aid in releasing dbt and making sure we have updated
# the version in all places and generated the changelog.

# **when?**
# This is triggered manually

name: Version Bump

on:
  workflow_dispatch:
    inputs:
      version_number:
        description: 'The version number to bump to (ex. 1.2.0, 1.3.0b1)'
        required: true

permissions:
  contents: write
  pull-requests: write

jobs:
  bump:
    runs-on: ubuntu-latest
    steps:
      - name: "[DEBUG] Print Variables"
        run: |
          echo "all variables defined as inputs"
          echo The version_number: ${{ github.event.inputs.version_number }}

      - name: Check out the repository
        uses: actions/checkout@v2

      - uses: actions/setup-python@v2
        with:
          python-version: "3.8"

      - name: Install python dependencies
        run: |
          python3 -m venv env
          source env/bin/activate
          pip install --upgrade pip

      - name: Add Homebrew to PATH
        run: |
          echo "/home/linuxbrew/.linuxbrew/bin:/home/linuxbrew/.linuxbrew/sbin" >> $GITHUB_PATH

      - name: Install Homebrew packages
        run: |
          brew install pre-commit
          brew tap miniscruff/changie https://github.com/miniscruff/changie
          brew install changie

      - name: Audit Version and Parse Into Parts
        id: semver
        uses: dbt-labs/actions/parse-semver@v1
        with:
          version: ${{ github.event.inputs.version_number }}

      - name: Set branch value
        id: variables
        run: |
          echo "BRANCH_NAME=prep-release/${{ github.event.inputs.version_number }}_$GITHUB_RUN_ID" >> $GITHUB_OUTPUT

      - name: Create PR branch
        run: |
          git checkout -b ${{ steps.variables.outputs.BRANCH_NAME }}
          git push origin ${{ steps.variables.outputs.BRANCH_NAME }}
          git branch --set-upstream-to=origin/${{ steps.variables.outputs.BRANCH_NAME }} ${{ steps.variables.outputs.BRANCH_NAME }}

      - name: Bump version
        run: |
          source env/bin/activate
          pip install -r dev-requirements.txt
          env/bin/bumpversion --allow-dirty --new-version ${{ github.event.inputs.version_number }} major
          git status

      - name: Run changie
        run: |
          if [[ ${{ steps.semver.outputs.is-pre-release }} -eq 1 ]]
          then
            changie batch ${{ steps.semver.outputs.base-version }} --move-dir '${{ steps.semver.outputs.base-version }}' --prerelease '${{ steps.semver.outputs.pre-release }}'
          else
            changie batch ${{ steps.semver.outputs.base-version }} --include '${{ steps.semver.outputs.base-version }}' --remove-prereleases
          fi
          changie merge
          git status

      # this step will fail on whitespace errors but also correct them
      - name: Remove trailing whitespace
        continue-on-error: true
        run: |
          pre-commit run trailing-whitespace --files .bumpversion.cfg CHANGELOG.md .changes/*
          git status

      # this step will fail on newline errors but also correct them
      - name: Removing extra newlines
        continue-on-error: true
        run: |
          pre-commit run end-of-file-fixer --files .bumpversion.cfg CHANGELOG.md .changes/*
          git status

      - name: Commit version bump to branch
        uses: EndBug/add-and-commit@v7
        with:
          author_name: 'Github Build Bot'
          author_email: 'buildbot@fishtownanalytics.com'
          message: 'Bumping version to ${{ github.event.inputs.version_number }} and generate CHANGELOG'
          branch: '${{ steps.variables.outputs.BRANCH_NAME }}'
          push: 'origin origin/${{ steps.variables.outputs.BRANCH_NAME }}'

      - name: Create Pull Request
        uses: peter-evans/create-pull-request@v3
        with:
          author: 'Github Build Bot <buildbot@fishtownanalytics.com>'
          base: ${{ github.ref }}
          title: 'Bumping version to ${{ github.event.inputs.version_number }} and generate changelog'
          branch: '${{ steps.variables.outputs.BRANCH_NAME }}'
          labels: |
            Skip Changelog
.gitignore

@@ -11,7 +11,6 @@ __pycache__/
env*/
dbt_env/
build/
!core/dbt/docs/build
develop-eggs/
dist/
downloads/
@@ -25,8 +24,7 @@ var/
*.egg-info/
.installed.cfg
*.egg
.mypy_cache/
.dmypy.json
*.mypy_cache/
logs/

# PyInstaller
@@ -51,8 +49,9 @@ coverage.xml
*,cover
.hypothesis/
test.env
*.pytest_cache/

# Mypy
.mypy_cache/

# Translations
*.mo
@@ -67,10 +66,10 @@ docs/_build/
# PyBuilder
target/

# Ipython Notebook
#Ipython Notebook
.ipynb_checkpoints

# Emacs
#Emacs
*~

# Sublime Text
@@ -79,7 +78,6 @@ target/
# Vim
*.sw*

# Pyenv
.python-version

# Vim
@@ -87,17 +85,11 @@ target/

# pycharm
.idea/
venv/

# AWS credentials
.aws/

# MacOS
.DS_Store

# vscode
.vscode/
*.code-workspace

# poetry
poetry.lock
@@ -1,63 +1,20 @@
# Configuration for pre-commit hooks (see https://pre-commit.com/).
# Eventually the hooks described here will be run as tests before merging each PR.

# TODO: remove global exclusion of tests when testing overhaul is complete
exclude: ^(test/|core/dbt/docs/build/)

# Force all unspecified python hooks to run python 3.8
default_language_version:
  python: python3

repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v3.2.0
    hooks:
      - id: check-yaml
        args: [--unsafe]
      - id: check-json
      - id: end-of-file-fixer
      - id: trailing-whitespace
        exclude_types:
          - "markdown"
      - id: check-case-conflict
  - repo: https://github.com/psf/black
    rev: 22.3.0
    hooks:
      - id: black
      - id: black
        alias: black-check
        stages: [manual]
        args:
          - "--check"
          - "--diff"
  - repo: https://github.com/pycqa/flake8
    rev: 4.0.1
    hooks:
      - id: flake8
      - id: flake8
        alias: flake8-check
        stages: [manual]
  - repo: https://github.com/pre-commit/mirrors-mypy
    rev: v0.942
    hooks:
      - id: mypy
        # N.B.: Mypy is... a bit fragile.
        #
        # By using `language: system` we run this hook in the local
        # environment instead of a pre-commit isolated one. This is needed
        # to ensure mypy correctly parses the project.

        # It may cause trouble
        # in that it adds environmental variables out of our control to the
        # mix. Unfortunately, there's nothing we can do about it, per pre-commit's
        # author.
        # See https://github.com/pre-commit/pre-commit/issues/730 for details.
        args: [--show-error-codes]
        files: ^core/dbt/
        language: system
      - id: mypy
        alias: mypy-check
        stages: [manual]
        args: [--show-error-codes, --pretty]
        files: ^core/dbt/
        language: system
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v2.3.0
    hooks:
      - id: check-yaml
      - id: end-of-file-fixer
      - id: trailing-whitespace
  - repo: https://github.com/psf/black
    rev: 20.8b1
    hooks:
      - id: black
  - repo: https://gitlab.com/PyCQA/flake8
    rev: 3.9.0
    hooks:
      - id: flake8
  - repo: https://github.com/pre-commit/mirrors-mypy
    rev: v0.812
    hooks:
      - id: mypy
        files: ^core/dbt/
@@ -2,36 +2,31 @@ The core function of dbt is SQL compilation and execution. Users create projects

## dbt-core

Most of the python code in the repository is within the `core/dbt` directory.
- [`single python files`](core/dbt/README.md): A number of individual files, such as 'compilation.py' and 'exceptions.py'

The main subdirectories of core/dbt:
- [`adapters`](core/dbt/adapters/README.md): Define base classes for behavior that is likely to differ across databases
- [`clients`](core/dbt/clients/README.md): Interface with dependencies (agate, jinja) or across operating systems
- [`config`](core/dbt/config/README.md): Reconcile user-supplied configuration from connection profiles, project files, and Jinja macros
- [`context`](core/dbt/context/README.md): Build and expose dbt-specific Jinja functionality
- [`contracts`](core/dbt/contracts/README.md): Define Python objects (dataclasses) that dbt expects to create and validate
- [`deps`](core/dbt/deps/README.md): Package installation and dependency resolution
- [`events`](core/dbt/events/README.md): Logging events
- [`graph`](core/dbt/graph/README.md): Produce a `networkx` DAG of project resources, and select those resources given user-supplied criteria
- [`include`](core/dbt/include/README.md): The dbt "global project," which defines default implementations of Jinja2 macros
- [`parser`](core/dbt/parser/README.md): Read project files, validate, construct python objects
- [`task`](core/dbt/task/README.md): Set forth the actions that dbt can perform when invoked

Legacy tests are found in the 'test' directory:
- [`unit tests`](core/dbt/test/unit/README.md): Unit tests
- [`integration tests`](core/dbt/test/integration/README.md): Integration tests

Most of the python code in the repository is within the `core/dbt` directory. Currently the main subdirectories are:
- [`adapters`](core/dbt/adapters): Define base classes for behavior that is likely to differ across databases
- [`clients`](core/dbt/clients): Interface with dependencies (agate, jinja) or across operating systems
- [`config`](core/dbt/config): Reconcile user-supplied configuration from connection profiles, project files, and Jinja macros
- [`context`](core/dbt/context): Build and expose dbt-specific Jinja functionality
- [`contracts`](core/dbt/contracts): Define Python objects (dataclasses) that dbt expects to create and validate
- [`deps`](core/dbt/deps): Package installation and dependency resolution
- [`graph`](core/dbt/graph): Produce a `networkx` DAG of project resources, and select those resources given user-supplied criteria
- [`include`](core/dbt/include): The dbt "global project," which defines default implementations of Jinja2 macros
- [`parser`](core/dbt/parser): Read project files, validate, construct python objects
- [`rpc`](core/dbt/rpc): Provide remote procedure call server for invoking dbt, following JSON-RPC 2.0 spec
- [`task`](core/dbt/task): Set forth the actions that dbt can perform when invoked
### Invoking dbt

The "tasks" map to top-level dbt commands. So `dbt run` => task.run.RunTask, etc. Some are more like abstract base classes (GraphRunnableTask, for example), but all the concrete types outside of task should map to tasks. Currently one task executes at a time. The tasks kick off their “Runners”, and those do execute in parallel. The parallelism is managed via a thread pool, in GraphRunnableTask. A minimal sketch of this pattern appears below.

There are two supported ways of invoking dbt: from the command line and using an RPC server.

The "tasks" map to top-level dbt commands. So `dbt run` => task.run.RunTask, etc. Some are more like abstract base classes (GraphRunnableTask, for example), but all the concrete types outside of task/rpc should map to tasks. Currently one task executes at a time. The tasks kick off their “Runners”, and those do execute in parallel. The parallelism is managed via a thread pool, in GraphRunnableTask.
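For intuition, here is a minimal sketch of that task-and-runners pattern. All names in this sketch (`TaskSketch`, `NodeRunner`, the node list) are invented for illustration; in dbt itself, GraphRunnableTask manages the thread pool and the runners compile and execute SQL per node:

```python
# A minimal sketch of the task -> runners pattern described above.
# All names here are hypothetical, not dbt's actual classes.
from concurrent.futures import ThreadPoolExecutor
from typing import List


class NodeRunner:
    def __init__(self, node: str) -> None:
        self.node = node

    def run(self) -> str:
        # Stand-in for compiling and executing one node.
        return f"ran {self.node}"


class TaskSketch:
    """One task executes at a time; its runners execute in parallel."""

    def __init__(self, nodes: List[str], threads: int = 4) -> None:
        self.nodes = nodes
        self.threads = threads

    def execute(self) -> List[str]:
        # The parallelism lives in the thread pool, as in GraphRunnableTask.
        with ThreadPoolExecutor(max_workers=self.threads) as pool:
            return list(pool.map(lambda n: NodeRunner(n).run(), self.nodes))


print(TaskSketch(["model_a", "model_b", "model_c"]).execute())
```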
core/dbt/include/index.html

This is the docs website code. It comes from the dbt-docs repository, and is generated when a release is packaged.

## Adapters

dbt uses an adapter-plugin pattern to extend support to different databases, warehouses, query engines, etc. For testing and development purposes, the dbt-postgres plugin lives alongside the dbt-core codebase, in the [`plugins`](plugins) subdirectory. Like other adapter plugins, it is a self-contained codebase and package that builds on top of dbt-core.

dbt uses an adapter-plugin pattern to extend support to different databases, warehouses, query engines, etc. The four core adapters that are in the main repository, contained within the [`plugins`](plugins) subdirectory, are: Postgres, Redshift, Snowflake, and BigQuery. Other warehouses use adapter plugins defined in separate repositories (e.g. [dbt-spark](https://github.com/fishtown-analytics/dbt-spark), [dbt-presto](https://github.com/fishtown-analytics/dbt-presto)).

Each adapter is a mix of python, Jinja2, and SQL. The adapter code also makes heavy use of Jinja2 to wrap modular chunks of SQL functionality, define default implementations, and allow plugins to override it.

@@ -51,4 +46,4 @@ The [`test/`](test/) subdirectory includes unit and integration tests that run a

- [docker](docker/): All dbt versions are published as Docker images on DockerHub. This subfolder contains the `Dockerfile` (constant) and `requirements.txt` (one for each version).
- [etc](etc/): Images for README
- [scripts](scripts/): Helper scripts for testing, releasing, and producing JSON schemas. These are not included in distributions of dbt, nor are they rigorously tested—they're just handy tools for the dbt maintainers :)
2718 CHANGELOG.md (Executable file → Normal file)
File diff suppressed because it is too large

294 CONTRIBUTING.md
@@ -1,233 +1,227 @@
# Contributing to `dbt-core`

`dbt-core` is open source software. It is what it is today because community members have opened issues, provided feedback, and [contributed to the knowledge loop](https://www.getdbt.com/dbt-labs/values/). Whether you are a seasoned open source contributor or a first-time committer, we welcome and encourage you to contribute code, documentation, ideas, or problem statements to this project.

# Contributing to dbt

1. [About this document](#about-this-document)
2. [Getting the code](#getting-the-code)
3. [Setting up an environment](#setting-up-an-environment)
4. [Running `dbt` in development](#running-dbt-core-in-development)
5. [Testing dbt-core](#testing)
6. [Debugging](#debugging)
7. [Adding a changelog entry](#adding-a-changelog-entry)
8. [Submitting a Pull Request](#submitting-a-pull-request)

2. [Proposing a change](#proposing-a-change)
3. [Getting the code](#getting-the-code)
4. [Setting up an environment](#setting-up-an-environment)
5. [Running dbt in development](#running-dbt-in-development)
6. [Testing](#testing)
7. [Submitting a Pull Request](#submitting-a-pull-request)

## About this document

There are many ways to contribute to the ongoing development of `dbt-core`, such as by participating in discussions and issues. We encourage you to first read our higher-level document: ["Expectations for Open Source Contributors"](https://docs.getdbt.com/docs/contributing/oss-expectations).

This document is a guide intended for folks interested in contributing to dbt. Below, we document the process by which members of the community should create issues and submit pull requests (PRs) in this repository. It is not intended as a guide for using dbt, and it assumes a certain level of familiarity with Python concepts such as virtualenvs, `pip`, python modules, filesystems, and so on. This guide assumes you are using macOS or Linux and are comfortable with the command line.

The rest of this document serves as a more granular guide for contributing code changes to `dbt-core` (this repository). It is not intended as a guide for using `dbt-core`, and some pieces assume a level of familiarity with Python development (virtualenvs, `pip`, etc). Specific code snippets in this guide assume you are using macOS or Linux and are comfortable with the command line.

If you're new to python development or contributing to open-source software, we encourage you to read this document from start to finish. If you get stuck, drop us a line in the #development channel on [Slack](https://community.getdbt.com).

If you get stuck, we're happy to help! Drop us a line in the `#dbt-core-development` channel in the [dbt Community Slack](https://community.getdbt.com).
### Signing the CLA

### Notes

Please note that all contributors to dbt must sign the [Contributor License Agreement](https://docs.getdbt.com/docs/contributor-license-agreements) to have their Pull Request merged into the dbt codebase. If you are unable to sign the CLA, then the dbt maintainers will unfortunately be unable to merge your Pull Request. You are, however, welcome to open issues and comment on existing ones.

- **Adapters:** Is your issue or proposed code change related to a specific [database adapter](https://docs.getdbt.com/docs/available-adapters)? If so, please open issues, PRs, and discussions in that adapter's repository instead. The sole exception is Postgres; the `dbt-postgres` plugin lives in this repository (`dbt-core`).
- **CLA:** Please note that anyone contributing code to `dbt-core` must sign the [Contributor License Agreement](https://docs.getdbt.com/docs/contributor-license-agreements). If you are unable to sign the CLA, the `dbt-core` maintainers will unfortunately be unable to merge any of your Pull Requests. We welcome you to participate in discussions, open issues, and comment on existing ones.
- **Branches:** All pull requests from community contributors should target the `main` branch (default). If the change is needed as a patch for a minor version of dbt that has already been released (or is already a release candidate), a maintainer will backport the changes in your PR to the relevant "latest" release branch (`1.0.latest`, `1.1.latest`, ...). If an issue fix applies to a release branch, that fix should first be committed to the development branch and then to the release branch (rarely, release-branch fixes may not apply to `main`).
- **Releases:** Before releasing a new minor version of Core, we prepare a series of alphas and release candidates to allow users (especially employees of dbt Labs!) to test the new version in live environments. This is an important quality assurance step, as it exposes the new code to a wide variety of complicated deployments and can surface bugs before official release. Releases are accessible via pip, homebrew, and dbt Cloud.
## Proposing a change

dbt is Apache 2.0-licensed open source software. dbt is what it is today because community members like you have opened issues, provided feedback, and contributed to the knowledge loop for the entire community. Whether you are a seasoned open source contributor or a first-time committer, we welcome and encourage you to contribute code, documentation, ideas, or problem statements to this project.

### Defining the problem

If you have an idea for a new feature or if you've discovered a bug in dbt, the first step is to open an issue. Please check the list of [open issues](https://github.com/fishtown-analytics/dbt/issues) before creating a new one. If you find a relevant issue, please add a comment to the open issue instead of creating a new one. There are hundreds of open issues in this repository and it can be hard to know where to look for a relevant open issue. **The dbt maintainers are always happy to point contributors in the right direction**, so please err on the side of documenting your idea in a new issue if you are unsure where a problem statement belongs.

**Note:** All community-contributed Pull Requests _must_ be associated with an open issue. If you submit a Pull Request that does not pertain to an open issue, you will be asked to create an issue describing the problem before the Pull Request can be reviewed.

### Discussing the idea

After you open an issue, a dbt maintainer will follow up by commenting on your issue (usually within 1-3 days) to explore your idea further and advise on how to implement the suggested changes. In many cases, community members will chime in with their own thoughts on the problem statement. If you as the issue creator are interested in submitting a Pull Request to address the issue, you should indicate this in the body of the issue. The dbt maintainers are _always_ happy to help contributors with the implementation of fixes and features, so please also indicate in the issue if there's anything you're unsure about or could use guidance on.

### Submitting a change

If an issue is appropriately well scoped and describes a beneficial change to the dbt codebase, then anyone may submit a Pull Request to implement the functionality described in the issue. See the sections below on how to do this.

The dbt maintainers will add a `good first issue` label if an issue is suitable for a first-time contributor. This label often means that the required code change is small, limited to one database adapter, or a net-new addition that does not impact existing functionality. You can see the list of currently open issues on the [Contribute](https://github.com/fishtown-analytics/dbt/contribute) page.

Here's a good workflow:
- Comment on the open issue, expressing your interest in contributing the required code change
- Outline your planned implementation. If you want help getting started, ask!
- Follow the steps outlined below to develop locally. Once you have opened a PR, one of the dbt maintainers will work with you to review your code.
- Add a test! Tests are crucial for both fixes and new features alike. We want to make sure that code works as intended, and that it avoids any bugs previously encountered. Currently, the best resource for understanding dbt's [unit](test/unit) and [integration](test/integration) tests is the tests themselves. One of the maintainers can help by pointing out relevant examples.

In some cases, the right resolution to an open issue might be tangential to the dbt codebase. The right path forward might be a documentation update or a change that can be made in user-space. In other cases, the issue might describe functionality that the dbt maintainers are unwilling or unable to incorporate into the dbt codebase. When it is determined that an open issue describes functionality that will not translate to a code change in the dbt repository, the issue will be tagged with the `wontfix` label (see below) and closed.
### Using issue labels

The dbt maintainers use labels to categorize open issues. Some labels indicate the databases impacted by the issue, while others describe the domain in the dbt codebase germane to the discussion. While most of these labels are self-explanatory (e.g. `snowflake` or `bigquery`), there are others that are worth describing.

| tag | description |
| --- | ----------- |
| [triage](https://github.com/fishtown-analytics/dbt/labels/triage) | This is a new issue which has not yet been reviewed by a dbt maintainer. This label is removed when a maintainer reviews and responds to the issue. |
| [bug](https://github.com/fishtown-analytics/dbt/labels/bug) | This issue represents a defect or regression in dbt |
| [enhancement](https://github.com/fishtown-analytics/dbt/labels/enhancement) | This issue represents net-new functionality in dbt |
| [good first issue](https://github.com/fishtown-analytics/dbt/labels/good%20first%20issue) | This issue does not require deep knowledge of the dbt codebase to implement. This issue is appropriate for a first-time contributor. |
| [help wanted](https://github.com/fishtown-analytics/dbt/labels/help%20wanted) / [discussion](https://github.com/fishtown-analytics/dbt/labels/discussion) | Conversation around this issue is ongoing, and there isn't yet a clear path forward. Input from community members is most welcome. |
| [duplicate](https://github.com/fishtown-analytics/dbt/issues/duplicate) | This issue is functionally identical to another open issue. The dbt maintainers will close this issue and encourage community members to focus conversation on the other one. |
| [snoozed](https://github.com/fishtown-analytics/dbt/labels/snoozed) | This issue describes a good idea, but one which will probably not be addressed in a six-month time horizon. The dbt maintainers will revisit these issues periodically and re-prioritize them accordingly. |
| [stale](https://github.com/fishtown-analytics/dbt/labels/stale) | This is an old issue which has not recently been updated. Stale issues will periodically be closed by dbt maintainers, but they can be re-opened if the discussion is restarted. |
| [wontfix](https://github.com/fishtown-analytics/dbt/labels/wontfix) | This issue does not require a code change in the dbt repository, or the maintainers are unwilling/unable to merge a Pull Request which implements the behavior described in the issue. |
#### Branching Strategy

dbt has three types of branches:

- **Trunks** are where active development of the next release takes place. There is one trunk, named `develop` at the time of writing, and it is the default branch of the repository.
- **Release Branches** track a specific, not-yet-complete release of dbt. Each minor version release has a corresponding release branch. For example, the `0.11.x` series of releases has a branch called `0.11.latest`. This allows us to release new patch versions under `0.11` without necessarily needing to pull them into the latest version of dbt.
- **Feature Branches** track individual features and fixes. On completion they should be merged into the trunk branch or a specific release branch.
## Getting the code

### Installing git

You will need `git` in order to download and modify the `dbt-core` source code. On macOS, the best way to download git is to just install [Xcode](https://developer.apple.com/support/xcode/).

You will need `git` in order to download and modify the dbt source code. On macOS, the best way to download git is to just install [Xcode](https://developer.apple.com/support/xcode/).

### External contributors

If you are not a member of the `dbt-labs` GitHub organization, you can contribute to `dbt-core` by forking the `dbt-core` repository. For a detailed overview on forking, check out the [GitHub docs on forking](https://help.github.com/en/articles/fork-a-repo). In short, you will need to (see the sketch after this list):

1. Fork the `dbt-core` repository
2. Clone your fork locally
3. Check out a new branch for your proposed changes
4. Push changes to your fork
5. Open a pull request against `dbt-labs/dbt-core` from your forked repository

1. fork the dbt repository
2. clone your fork locally
3. check out a new branch for your proposed changes
4. push changes to your fork
5. open a pull request against `fishtown-analytics/dbt` from your forked repository
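As a quick illustration of those five steps, the command-line workflow might look like this (`<you>` and the branch name are placeholders):

```sh
# 1-2: fork on GitHub, then clone your fork (replace <you> with your username)
git clone https://github.com/<you>/dbt-core.git
cd dbt-core

# 3: check out a new branch for your proposed changes
git checkout -b fix/my-proposed-change

# 4: push changes to your fork
git push origin fix/my-proposed-change

# 5: open a pull request against dbt-labs/dbt-core from your fork
```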
### dbt Labs contributors

### Core contributors

If you are a member of the `dbt-labs` GitHub organization, you will have push access to the `dbt-core` repo. Rather than forking `dbt-core` to make your changes, just clone the repository, check out a new branch, and push directly to that branch. Branch names should be prefixed with `CT-XXX/`, where:
* CT stands for 'core team'
* XXX stands for a JIRA ticket number

If you are a member of the `fishtown-analytics` GitHub organization, you will have push access to the dbt repo. Rather than forking dbt to make your changes, just clone the repository, check out a new branch, and push directly to that branch.
## Setting up an environment

There are some tools that will be helpful to you in developing locally. While this is the list relevant for `dbt-core` development, many of these tools are used commonly across open-source python projects.

There are some tools that will be helpful to you in developing locally. While this is the list relevant for dbt development, many of these tools are used commonly across open-source python projects.

### Tools

These are the tools used in `dbt-core` development and testing:

A short list of tools used in dbt testing that will be helpful to your understanding:

- [`tox`](https://tox.readthedocs.io/en/latest/) to manage virtualenvs across python versions. We currently target the latest patch releases for Python 3.7, 3.8, 3.9, 3.10, and 3.11
- [`pytest`](https://docs.pytest.org/en/latest/) to define, discover, and run tests
- [`flake8`](https://flake8.pycqa.org/en/latest/) for code linting
- [`black`](https://github.com/psf/black) for code formatting
- [`mypy`](https://mypy.readthedocs.io/en/stable/) for static type checking
- [`pre-commit`](https://pre-commit.com) to easily run those checks
- [`changie`](https://changie.dev/) to create changelog entries, without merge conflicts
- [`make`](https://users.cs.duke.edu/~ola/courses/programming/Makefiles/Makefiles.html) to run multiple setup or test steps in combination. Don't worry too much; nobody _really_ understands how `make` works, and our Makefile aims to be super simple.
- [GitHub Actions](https://github.com/features/actions) for automating tests and checks, once a PR is pushed to the `dbt-core` repository

- [virtualenv](https://virtualenv.pypa.io/en/stable/) to manage dependencies
- [tox](https://tox.readthedocs.io/en/latest/) to manage virtualenvs across python versions
- [pytest](https://docs.pytest.org/en/latest/) to discover/run tests
- [make](https://users.cs.duke.edu/~ola/courses/programming/Makefiles/Makefiles.html) - but don't worry too much, nobody _really_ understands how make works and our Makefile is super simple
- [flake8](https://gitlab.com/pycqa/flake8) for code linting
- [CircleCI](https://circleci.com/product/) and [Azure Pipelines](https://azure.microsoft.com/en-us/services/devops/pipelines/) for continuous integration

A deep understanding of these tools is not required to effectively contribute to `dbt-core`, but we recommend checking out the attached documentation if you're interested in learning more about each one.

A deep understanding of these tools is not required to effectively contribute to dbt, but we recommend checking out the attached documentation if you're interested in learning more about them.
#### Virtual environments

#### virtual environments

We strongly recommend using virtual environments when developing code in `dbt-core`. We recommend creating this virtualenv in the root of the `dbt-core` repository. To create a new virtualenv, run:

We strongly recommend using virtual environments when developing code in dbt. We recommend creating this virtualenv in the root of the dbt repository. To create a new virtualenv, run:

```sh
python3 -m venv env
source env/bin/activate
```

This will create and activate a new Python virtual environment.

#### Docker and `docker-compose`

#### docker and docker-compose

Docker and `docker-compose` are both used in testing. Specific instructions for your OS can be found [here](https://docs.docker.com/get-docker/).

Docker and docker-compose are both used in testing. Specific instructions for your OS can be found [here](https://docs.docker.com/get-docker/).
#### Postgres (optional)

#### postgres (optional)

For testing, and later in the examples in this document, you may want to have `psql` available so you can poke around in the database and see what happened. We recommend that you use [homebrew](https://brew.sh/) for that on macOS, and your package manager on Linux. You can install any version of the postgres client that you'd like. On macOS, with homebrew setup, you can run:

```sh
brew install postgresql
```
## Running `dbt-core` in development

## Running dbt in development

### Installation

First make sure that you set up your `virtualenv` as described in [Setting up an environment](#setting-up-an-environment). Also ensure you have the latest version of pip installed with `pip install --upgrade pip`. Next, install `dbt-core` (and its dependencies):

First make sure that you set up your `virtualenv` as described in section _Setting up an environment_. Next, install dbt (and its dependencies) with:

```sh
make dev
```
or, alternatively:
```sh
pip install -r dev-requirements.txt -r editable-requirements.txt
pre-commit install
```
(in older versions: `pip install -r requirements-editable.txt`)

When installed in this way, any changes you make to your local copy of the source code will be reflected immediately in your next `dbt` run.

When dbt is installed from source in this way, any changes you make to the dbt source code will be reflected immediately in your next `dbt` run.
### Running `dbt-core`

### Running dbt

With your virtualenv activated, the `dbt` script should point back to the source code you've cloned on your machine. You can verify this by running `which dbt`. This command should show you a path to an executable in your virtualenv.

Configure your [profile](https://docs.getdbt.com/docs/configure-your-profile) as necessary to connect to your target databases. It may be a good idea to add a new profile pointing to a local Postgres instance, or a specific test sandbox within your data warehouse if appropriate.

Configure your [profile](https://docs.getdbt.com/docs/configure-your-profile) as necessary to connect to your target databases. It may be a good idea to add a new profile pointing to a local postgres instance, or a specific test sandbox within your data warehouse if appropriate.
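As an illustration, a development profile pointing at the dockerized Postgres used later in this guide might look like the sketch below. The profile name and schema are placeholders; the host, user, password, and database values mirror the `setup_db.sh` invocation shown in the testing section:

```yaml
# ~/.dbt/profiles.yml (a minimal sketch; profile and schema names are hypothetical)
dbt_dev:
  target: local
  outputs:
    local:
      type: postgres
      host: localhost
      port: 5432        # assuming the default Postgres port
      user: root
      password: password
      dbname: postgres
      schema: dbt_dev   # scratch schema for local runs
      threads: 4
```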
## Testing

Once you're able to manually test that your code change is working as expected, it's important to run existing automated tests, as well as adding some new ones. These tests will ensure that:
- Your code changes do not unexpectedly break other established functionality
- Your code changes can handle all known edge cases
- The functionality you're adding will _keep_ working in the future

Getting the dbt integration tests set up in your local environment will be very helpful as you start to make changes to your local version of dbt. The section that follows outlines some helpful tips for setting up the test environment.

Although `dbt-core` works with a number of different databases, you won't need to supply credentials for every one of these databases in your test environment. Instead, you can test most `dbt-core` code changes with Python and Postgres.

### Running tests via Docker
### Initial setup

dbt's unit and integration tests run in Docker. Because dbt works with a number of different databases, you will need to supply credentials for one or more of these databases in your test environment. Most organizations don't have access to each of a BigQuery, Redshift, Snowflake, and Postgres database, so it's likely that you will be unable to run every integration test locally. Fortunately, Fishtown Analytics provides a CI environment with access to sandboxed Redshift, Snowflake, BigQuery, and Postgres databases. See the section on [_Submitting a Pull Request_](#submitting-a-pull-request) below for more information on this CI setup.

Postgres offers the easiest way to test most `dbt-core` functionality today. These tests are the fastest to run, and the easiest to set up. To run the Postgres integration tests, you'll have to do one extra step of setting up the test database:

```sh
make setup-db
```
or, alternatively:
```sh
docker-compose up -d database
PGHOST=localhost PGUSER=root PGPASSWORD=password PGDATABASE=postgres bash test/setup_db.sh
```

### Specifying your test credentials

dbt uses test credentials specified in a `test.env` file in the root of the repository. This `test.env` file is git-ignored, but please be _extra_ careful to never check in credentials or other sensitive information when developing against dbt. To create your `test.env` file, copy the provided sample file, then supply your relevant credentials:

```
cp test.env.sample test.env
```

We recommend starting with dbt's Postgres tests. These tests cover most of the functionality in dbt, are the fastest to run, and are the easiest to set up. dbt's test suite runs Postgres in a Docker container, so no setup should be required to run these tests.

If you additionally want to test Snowflake, BigQuery, or Redshift locally, you'll need to get credentials and add them to the `test.env` file (a sketch follows below). In general, it's most important to have successful unit and Postgres tests. Once you open a PR, dbt will automatically run integration tests for the other three core database adapters. Of course, if you are a BigQuery user contributing a BigQuery-only feature, it's important to run BigQuery tests as well.
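For illustration, a `test.env` sketch is shown below. The variable names mirror the ones consumed by the CI pipeline later in this document; every value is a placeholder:

```sh
# test.env -- placeholder values only; never commit real credentials
SNOWFLAKE_TEST_ACCOUNT=my_account
SNOWFLAKE_TEST_USER=my_user
SNOWFLAKE_TEST_PASSWORD=my_password
SNOWFLAKE_TEST_WAREHOUSE=my_warehouse
BIGQUERY_SERVICE_ACCOUNT_JSON=my_service_account_json  # placeholder; format may vary
REDSHIFT_TEST_HOST=my-cluster.example.us-east-1.redshift.amazonaws.com
REDSHIFT_TEST_USER=my_user
REDSHIFT_TEST_PASS=my_password
REDSHIFT_TEST_PORT=5439
REDSHIFT_TEST_DBNAME=dbt
```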
### Test commands

dbt's unit tests and Python linter can be run with:

```
make test-unit
```

To run the Postgres + Python 3.6 integration tests, you'll have to do one extra step of setting up the test database:

```sh
docker-compose up -d database
PGHOST=localhost PGUSER=root PGPASSWORD=password PGDATABASE=postgres bash test/setup_db.sh
```

To run a quick test for Python3 integration tests on Postgres, you can run:

```
make test-quick
```

### Test commands

There are a few methods for running tests locally.
#### Makefile

There are multiple targets in the Makefile to run common test suites and code checks, most notably:

```sh
# Runs unit tests with py38 and code checks in parallel.
make test
# Runs postgres integration tests with py38 in "fail fast" mode.
make integration
```

> These make targets assume you have a local installation of a recent version of [`tox`](https://tox.readthedocs.io/en/latest/) for unit/integration testing and pre-commit for code quality checks,
> unless you choose a Docker container to run tests. Run `make help` for more info.

Check out the other targets in the Makefile to see other commonly used test suites.
#### `pre-commit`

[`pre-commit`](https://pre-commit.com) takes care of running all code-checks for formatting and linting. Run `make dev` to install `pre-commit` in your local environment (we recommend running this command with a python virtual environment active). This command installs several pip executables including black, mypy, and flake8. Once this is done you can use any of the linter-based make targets as well as a git pre-commit hook that will ensure proper formatting and linting.
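Once installed, the hooks can also be invoked by hand. These are standard `pre-commit` commands; the `mypy-check` alias comes from this repo's Makefile targets:

```sh
# run every configured hook against all files in the repo
pre-commit run --all-files

# run a single manual-stage hook, as the Makefile targets do
pre-commit run mypy-check --hook-stage manual
```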
#### `tox`

[`tox`](https://tox.readthedocs.io/en/latest/) takes care of managing virtualenvs and installing dependencies in order to run tests. You can also run tests in parallel: for example, you can run unit tests for Python 3.7, Python 3.8, Python 3.9, Python 3.10, and Python 3.11 checks in parallel with `tox -p`. Also, you can run unit tests for specific python versions with `tox -e py37`. The configuration for these tests is located in `tox.ini`.
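Concretely, the invocations mentioned above look like this:

```sh
# run the configured test environments in parallel
tox -p

# run unit tests under a single python version
tox -e py37
```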
#### `pytest`

Finally, you can also run a specific test or group of tests using [`pytest`](https://docs.pytest.org/en/latest/) directly. With a virtualenv active and dev dependencies installed you can do things like:

```sh
# run all unit tests in a file
python3 -m pytest test/unit/test_graph.py
# run a specific unit test
python3 -m pytest test/unit/test_graph.py::GraphTest::test__dependency_list
# run specific Postgres integration tests (old way)
python3 -m pytest -m profile_postgres test/integration/074_postgres_unlogged_table_tests
# run specific Postgres integration tests (new way)
python3 -m pytest tests/functional/sources
```
> See [pytest usage docs](https://docs.pytest.org/en/6.2.x/usage.html) for an overview of useful command-line options.

To run tests for a specific database, invoke `tox` directly with the required flags:
```
# Run Postgres py36 tests
docker-compose run test tox -e integration-postgres-py36 -- -x

# Run Snowflake py36 tests
docker-compose run test tox -e integration-snowflake-py36 -- -x

# Run BigQuery py36 tests
docker-compose run test tox -e integration-bigquery-py36 -- -x

# Run Redshift py36 tests
docker-compose run test tox -e integration-redshift-py36 -- -x
```

### Unit, Integration, Functional?

Here are some general rules for adding tests (a unit-test sketch follows this list):
* unit tests (`test/unit` & `tests/unit`) don’t need to access a database; "pure Python" tests should be written as unit tests
* functional tests (`test/integration` & `tests/functional`) cover anything that interacts with a database, namely adapters
* *everything in* `test/*` *is being steadily migrated to* `tests/*`
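To make the first rule concrete, a "pure Python" unit test is just ordinary test code with no database connection. Everything in this sketch (module path, function, and values) is hypothetical, not from the dbt codebase:

```python
# tests/unit/test_example.py -- a hypothetical "pure Python" unit test
import unittest


def render_alias(name: str, suffix: str) -> str:
    # Stand-in for a pure function under test.
    return f"{name}__{suffix}"


class TestRenderAlias(unittest.TestCase):
    def test_render_alias(self):
        # No database needed: plain input/output assertions only.
        self.assertEqual(render_alias("orders", "v2"), "orders__v2")


if __name__ == "__main__":
    unittest.main()
```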
## Debugging

1. The logs for a `dbt run` have stack traces and other information for debugging errors (in `logs/dbt.log` in your project directory).
2. Try using a debugger, like `ipdb`. For pytest: `--pdb --pdbcls=IPython.terminal.debugger:pdb` (see the combined example after this list)
3. Sometimes, it’s easier to debug on a single thread: `dbt --single-threaded run`
4. To make print statements from Jinja macros: `{{ log(msg, info=true) }}`
5. You can also add `{{ debug() }}` statements, which will drop you into some auto-generated code that the macro wrote.
6. The dbt “artifacts” are written out to the ‘target’ directory of your dbt project. They are in unformatted json, which can be hard to read. Format them with:
> python -m json.tool target/run_results.json > run_results.json
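Putting tips 1, 2, and 3 together, a typical debugging loop might look like this (the test path reuses an example from the pytest section above):

```sh
# drop into ipdb at the point of failure in a single unit test
python3 -m pytest --pdb --pdbcls=IPython.terminal.debugger:pdb \
    test/unit/test_graph.py::GraphTest::test__dependency_list

# re-run dbt on one thread, then inspect the log
dbt --single-threaded run
tail logs/dbt.log
```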
### Assorted development tips
* Append `# type: ignore` to the end of a line if you need to disable `mypy` on that line (see the example after this list).
* Sometimes flake8 complains about lines that are actually fine, in which case you can put a comment on the line such as `# noqa` or `# noqa: ANNN`, where ANNN is the error code that flake8 issues.
* To collect output for `CProfile`, run dbt with the `-r` option and the name of an output file, i.e. `dbt -r dbt.cprof run`. If you just want to profile parsing, you can do `dbt -r dbt.cprof parse`. Install `snakeviz` with pip to view the output: run `snakeviz dbt.cprof` and the output will be rendered in a browser window.
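The first two tips look like this in practice (the import and variable here are illustrative only):

```python
# flake8 would flag this unused import (F401); the noqa comment silences it
import json  # noqa: F401

# mypy would flag assigning a str to an int-annotated variable;
# the trailing comment silences it on just this line
port: int = "5432"  # type: ignore

print(port)
```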
## Adding or modifying a CHANGELOG Entry

We use [changie](https://changie.dev) to generate `CHANGELOG` entries. **Note:** Do not edit the `CHANGELOG.md` directly. Your modifications will be lost.

Follow the steps to [install `changie`](https://changie.dev/guide/installation/) for your system.

Once changie is installed and your PR is created for a new feature, simply run the following command and changie will walk you through the process of creating a changelog entry:

```shell
changie new
```

Commit the file that's created and your changelog entry is complete!
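The command writes a small yaml file under `.changes/unreleased/`. A sketch of what such an entry might look like follows; the exact field names are defined by the repo's changie configuration, so treat everything here as illustrative:

```yaml
# .changes/unreleased/Features-20230101-120000.yaml (hypothetical filename)
kind: Features
body: Add support for my new feature
time: 2023-01-01T12:00:00.000000-00:00
custom:
  Author: my_github_handle
  Issue: "1234"
```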
To run a specific test by itself:
```
docker-compose run test tox -e explicit-py36 -- -s -x -m profile_{adapter} {path_to_test_file_or_folder}
```
E.g.
```
docker-compose run test tox -e explicit-py36 -- -s -x -m profile_snowflake test/integration/001_simple_copy_test
```

If you are contributing to a feature already in progress, you will modify the changie yaml file in dbt/.changes/unreleased/ related to your change. If you need help finding this file, please ask within the discussion for the pull request!

You don't need to worry about which `dbt-core` version your change will go into. Just create the changelog entry with `changie`, and open your PR against the `main` branch. All merged changes will be included in the next minor version of `dbt-core`. The Core maintainers _may_ choose to "backport" specific changes in order to patch older minor versions. In that case, a maintainer will take care of that backport after merging your PR, before releasing the new version of `dbt-core`.

See the `Makefile` contents for some other examples of ways to run `tox`.
## Submitting a Pull Request

Code can be merged into the current development branch `main` by opening a pull request. A `dbt-core` maintainer will review your PR. They may suggest code revision for style or clarity, or request that you add unit or integration test(s). These are good things! We believe that, with a little bit of help, anyone can contribute high-quality code.

Fishtown Analytics provides a sandboxed Redshift, Snowflake, and BigQuery database for use in a CI environment. When pull requests are submitted to the `fishtown-analytics/dbt` repo, GitHub will trigger automated tests in CircleCI and Azure Pipelines.

Automated tests run via GitHub Actions. If you're a first-time contributor, all tests (including code checks and unit tests) will require a maintainer to approve. Changes in the `dbt-core` repository trigger integration tests against Postgres. dbt Labs also provides CI environments in which to test changes to other adapters, triggered by PRs in those adapters' repositories, as well as periodic maintenance checks of each adapter in concert with the latest `dbt-core` code changes.

A dbt maintainer will review your PR. They may suggest code revision for style or clarity, or request that you add unit or integration test(s). These are good things! We believe that, with a little bit of help, anyone can contribute high-quality code.

Once all tests are passing and your PR has been approved, a `dbt-core` maintainer will merge your changes into the active development branch. And that's it! Happy developing :tada:

Sometimes, the contributor license agreement auto-check bot doesn't find a user's entry in its roster. If you need to force a rerun, add `@cla-bot check` in a comment on the pull request.

Once all tests are passing and your PR has been approved, a dbt maintainer will merge your changes into the active development branch. And that's it! Happy developing :tada:
@@ -1,16 +1,8 @@
##
# This dockerfile is used for local development and adapter testing only.
# See `/docker` for a generic and production-ready docker file
##

FROM ubuntu:22.04
FROM ubuntu:18.04

ENV DEBIAN_FRONTEND noninteractive

RUN apt-get update \
    && apt-get install -y --no-install-recommends \
    software-properties-common \
    && add-apt-repository ppa:git-core/ppa -y \
    && apt-get dist-upgrade -y \
    && apt-get install -y --no-install-recommends \
    netcat \
@@ -32,7 +24,7 @@ RUN apt-get update \
    && apt-get install -y \
    python \
    python-dev \
    python3-pip \
    python-pip \
    python3.6 \
    python3.6-dev \
    python3-pip \
@@ -46,12 +38,6 @@ RUN apt-get update \
    python3.9 \
    python3.9-dev \
    python3.9-venv \
    python3.10 \
    python3.10-dev \
    python3.10-venv \
    python3.11 \
    python3.11-dev \
    python3.11-venv \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

@@ -60,7 +46,9 @@ RUN curl -LO https://github.com/jwilder/dockerize/releases/download/$DOCKERIZE_V
    && tar -C /usr/local/bin -xzvf dockerize-linux-amd64-$DOCKERIZE_VERSION.tar.gz \
    && rm dockerize-linux-amd64-$DOCKERIZE_VERSION.tar.gz

RUN pip3 install -U tox wheel six setuptools
RUN pip3 install -U "tox==3.14.4" wheel "six>=1.14.0,<1.15.0" "virtualenv==20.0.3" setuptools
# tox fails if the 'python' interpreter (python2) doesn't have `tox` installed
RUN pip install -U "tox==3.14.4" "six>=1.14.0,<1.15.0" "virtualenv==20.0.3" setuptools

# These args are passed in via docker-compose, which reads them from the .env file.
# On Linux, run `make .env` to create the .env file for the current user.
@@ -186,7 +186,7 @@
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright 2021 dbt Labs, Inc.
   Copyright {yyyy} {name of copyright owner}

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
142 Makefile
@@ -1,94 +1,24 @@
.DEFAULT_GOAL:=help
.PHONY: install test test-unit test-integration

# Optional flag to run target in a docker container.
# (example `make test USE_DOCKER=true`)
ifeq ($(USE_DOCKER),true)
	DOCKER_CMD := docker-compose run --rm test
endif
test: .env
	@echo "Full test run starting..."
	@time docker-compose run --rm test tox

LOGS_DIR := ./logs
test-unit: .env
	@echo "Unit test run starting..."
	@time docker-compose run --rm test tox -e unit-py36,flake8

# Optional flag to invoke tests using our CI env.
# But we always want these active for structured
# log testing.
CI_FLAGS =\
	DBT_TEST_USER_1=dbt_test_user_1\
	DBT_TEST_USER_2=dbt_test_user_2\
	DBT_TEST_USER_3=dbt_test_user_3\
	RUSTFLAGS="-D warnings"\
	LOG_DIR=./logs\
	DBT_LOG_FORMAT=json
test-integration: .env
	@echo "Integration test run starting..."
	@time docker-compose run --rm test tox -e integration-postgres-py36,integration-redshift-py36,integration-snowflake-py36,integration-bigquery-py36

.PHONY: dev_req
dev_req: ## Installs dbt-* packages in develop mode along with only development dependencies.
	@\
	pip install -r dev-requirements.txt -r editable-requirements.txt

.PHONY: dev
dev: dev_req ## Installs dbt-* packages in develop mode along with development dependencies and pre-commit.
	@\
	pre-commit install

.PHONY: mypy
mypy: .env ## Runs mypy against staged changes for static type checking.
	@\
	$(DOCKER_CMD) pre-commit run --hook-stage manual mypy-check | grep -v "INFO"

.PHONY: flake8
flake8: .env ## Runs flake8 against staged changes to enforce style guide.
	@\
	$(DOCKER_CMD) pre-commit run --hook-stage manual flake8-check | grep -v "INFO"

.PHONY: black
black: .env ## Runs black against staged changes to enforce style guide.
	@\
	$(DOCKER_CMD) pre-commit run --hook-stage manual black-check -v | grep -v "INFO"

.PHONY: lint
lint: .env ## Runs flake8 and mypy code checks against staged changes.
	@\
	$(DOCKER_CMD) pre-commit run flake8-check --hook-stage manual | grep -v "INFO"; \
	$(DOCKER_CMD) pre-commit run mypy-check --hook-stage manual | grep -v "INFO"

.PHONY: unit
unit: .env ## Runs unit tests with py
	@\
	$(DOCKER_CMD) tox -e py

.PHONY: test
test: .env ## Runs unit tests with py and code checks against staged changes.
	@\
	$(DOCKER_CMD) tox -e py; \
	$(DOCKER_CMD) pre-commit run black-check --hook-stage manual | grep -v "INFO"; \
	$(DOCKER_CMD) pre-commit run flake8-check --hook-stage manual | grep -v "INFO"; \
	$(DOCKER_CMD) pre-commit run mypy-check --hook-stage manual | grep -v "INFO"

.PHONY: integration
integration: .env ## Runs postgres integration tests with py-integration
	@\
	$(if $(USE_CI_FLAGS), $(CI_FLAGS)) $(DOCKER_CMD) tox -e py-integration -- -nauto

.PHONY: integration-fail-fast
integration-fail-fast: .env ## Runs postgres integration tests with py-integration in "fail fast" mode.
	@\
	$(DOCKER_CMD) tox -e py-integration -- -x -nauto

.PHONY: interop
interop: clean
	@\
	mkdir $(LOGS_DIR) && \
	$(CI_FLAGS) $(DOCKER_CMD) tox -e py-integration -- -nauto && \
	LOG_DIR=$(LOGS_DIR) cargo run --manifest-path test/interop/log_parsing/Cargo.toml

.PHONY: setup-db
setup-db: ## Setup Postgres database with docker-compose for system testing.
	@\
	docker-compose up -d database && \
	PGHOST=localhost PGUSER=root PGPASSWORD=password PGDATABASE=postgres bash test/setup_db.sh
test-quick: .env
	@echo "Integration test run starting, will exit on first failure..."
	@time docker-compose run --rm test tox -e integration-postgres-py36 -- -x

# This rule creates a file named .env that is used by docker-compose for passing
# the USER_ID and GROUP_ID arguments to the Docker image.
.env: ## Setup step for using docker-compose with make target.
.env:
	@touch .env
ifneq ($(OS),Windows_NT)
ifneq ($(shell uname -s), Darwin)
@@ -96,33 +26,19 @@ ifneq ($(shell uname -s), Darwin)
	@echo GROUP_ID=$(shell id -g) >> .env
endif
endif
	@time docker-compose build

.PHONY: clean
clean: ## Resets development environment.
	@echo 'cleaning repo...'
	@rm -f .coverage
	@rm -f .coverage.*
	@rm -rf .eggs/
	@rm -f .env
	@rm -rf .tox/
	@rm -rf build/
	@rm -rf dbt.egg-info/
	@rm -f dbt_project.yml
	@rm -rf dist/
	@rm -f htmlcov/*.{css,html,js,json,png}
	@rm -rf logs/
	@rm -rf target/
	@find . -type f -name '*.pyc' -delete
	@find . -type d -name '__pycache__' -depth -delete
	@echo 'done.'

.PHONY: help
help: ## Show this help message.
	@echo 'usage: make [target] [USE_DOCKER=true]'
	@echo
	@echo 'targets:'
	@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
	@echo
	@echo 'options:'
	@echo 'use USE_DOCKER=true to run target in a docker container'
clean:
	rm -f .coverage
	rm -rf .eggs/
	rm -f .env
	rm -rf .tox/
	rm -rf build/
	rm -rf dbt.egg-info/
	rm -f dbt_project.yml
	rm -rf dist/
	rm -f htmlcov/*.{css,html,js,json,png}
	rm -rf logs/
	rm -rf target/
	find . -type f -name '*.pyc' -delete
	find . -type d -name '__pycache__' -depth -delete
45 README.md
@@ -1,15 +1,28 @@
<p align="center">
  <img src="https://raw.githubusercontent.com/dbt-labs/dbt-core/fa1ea14ddfb1d5ae319d5141844910dd53ab2834/etc/dbt-core.svg" alt="dbt logo" width="750"/>
  <img src="https://raw.githubusercontent.com/fishtown-analytics/dbt/6c6649f9129d5d108aa3b0526f634cd8f3a9d1ed/etc/dbt-logo-full.svg" alt="dbt logo" width="500"/>
</p>
<p align="center">
  <a href="https://github.com/dbt-labs/dbt-core/actions/workflows/main.yml">
    <img src="https://github.com/dbt-labs/dbt-core/actions/workflows/main.yml/badge.svg?event=push" alt="CI Badge"/>
  </a>
  <a href="https://codeclimate.com/github/fishtown-analytics/dbt">
    <img src="https://codeclimate.com/github/fishtown-analytics/dbt/badges/gpa.svg" alt="Code Climate"/>
  </a>
  <a href="https://circleci.com/gh/fishtown-analytics/dbt/tree/master">
    <img src="https://circleci.com/gh/fishtown-analytics/dbt/tree/master.svg?style=svg" alt="CircleCI" />
  </a>
  <a href="https://ci.appveyor.com/project/DrewBanin/dbt/branch/development">
    <img src="https://ci.appveyor.com/api/projects/status/v01rwd3q91jnwp9m/branch/development?svg=true" alt="AppVeyor" />
  </a>
  <a href="https://community.getdbt.com">
    <img src="https://community.getdbt.com/badge.svg" alt="Slack" />
  </a>
</p>

**[dbt](https://www.getdbt.com/)** enables data analysts and engineers to transform their data using the same practices that software engineers use to build applications.

**[dbt](https://www.getdbt.com/)** (data build tool) enables data analysts and engineers to transform their data using the same practices that software engineers use to build applications.

![architecture]()

dbt is the T in ELT. Organize, cleanse, denormalize, filter, rename, and pre-aggregate the raw data in your warehouse so that it's ready for analysis.

![dbt architecture]()

dbt can be used to [aggregate pageviews into sessions](https://github.com/fishtown-analytics/snowplow), calculate [ad spend ROI](https://github.com/fishtown-analytics/facebook-ads), or report on [email campaign performance](https://github.com/fishtown-analytics/mailchimp).

## Understanding dbt

@@ -17,22 +30,28 @@ Analysts using dbt can transform their data by simply writing select statements,

These select statements, or "models", form a dbt project. Models frequently build on top of one another – dbt makes it easy to [manage relationships](https://docs.getdbt.com/docs/ref) between models, and [visualize these relationships](https://docs.getdbt.com/docs/documentation), as well as assure the quality of your transformations through [testing](https://docs.getdbt.com/docs/testing).

![dbt dag]()
![dbt dag]()

## Getting started

- [Install dbt](https://docs.getdbt.com/docs/installation)
- Read the [introduction](https://docs.getdbt.com/docs/introduction/) and [viewpoint](https://docs.getdbt.com/docs/about/viewpoint/)
- [Install dbt](https://docs.getdbt.com/docs/installation)
- Read the [documentation](https://docs.getdbt.com/).
- Productionize your dbt project with [dbt Cloud](https://www.getdbt.com)

## Join the dbt Community

## Find out more

- Be part of the conversation in the [dbt Community Slack](http://community.getdbt.com/)
- Read more on the [dbt Community Discourse](https://discourse.getdbt.com)
- Check out the [Introduction to dbt](https://docs.getdbt.com/docs/introduction/).
- Read the [dbt Viewpoint](https://docs.getdbt.com/docs/about/viewpoint/).

## Join thousands of analysts in the dbt community

- Join the [chat](http://community.getdbt.com/) on Slack.
- Find community posts on [dbt Discourse](https://discourse.getdbt.com).

## Reporting bugs and contributing code

- Want to report a bug or request a feature? Let us know on [Slack](http://community.getdbt.com/), or open [an issue](https://github.com/dbt-labs/dbt-core/issues/new)
- Want to help us build dbt? Check out the [Contributing Guide](https://github.com/dbt-labs/dbt-core/blob/HEAD/CONTRIBUTING.md)
- Want to report a bug or request a feature? Let us know on [Slack](http://community.getdbt.com/), or open [an issue](https://github.com/fishtown-analytics/dbt/issues/new).
- Want to help us build dbt? Check out the [Contributing Getting Started Guide](https://github.com/fishtown-analytics/dbt/blob/HEAD/CONTRIBUTING.md)

## Code of Conduct
154 azure-pipelines.yml (Normal file)
@@ -0,0 +1,154 @@
# Python package
# Create and test a Python package on multiple Python versions.
# Add steps that analyze code, save the dist with the build record, publish to a PyPI-compatible index, and more:
# https://docs.microsoft.com/azure/devops/pipelines/languages/python

trigger:
  branches:
    include:
      - master
      - dev/*
      - pr/*

jobs:
- job: UnitTest
  pool:
    vmImage: 'vs2017-win2016'
  steps:
    - task: UsePythonVersion@0
      inputs:
        versionSpec: '3.7'
        architecture: 'x64'

    - script: python -m pip install --upgrade pip && pip install tox
      displayName: 'Install dependencies'

    - script: python -m tox -e pywin-unit
      displayName: Run unit tests

- job: PostgresIntegrationTest
  pool:
    vmImage: 'vs2017-win2016'
  dependsOn: UnitTest

  steps:
    - pwsh: |
        $serviceName = Get-Service -Name postgresql*
        Set-Service -InputObject $serviceName -StartupType Automatic
        Start-Service -InputObject $serviceName

        & $env:PGBIN\createdb.exe -U postgres dbt
        & $env:PGBIN\psql.exe -U postgres -c "CREATE ROLE root WITH PASSWORD 'password';"
        & $env:PGBIN\psql.exe -U postgres -c "ALTER ROLE root WITH LOGIN;"
        & $env:PGBIN\psql.exe -U postgres -c "GRANT CREATE, CONNECT ON DATABASE dbt TO root WITH GRANT OPTION;"
        & $env:PGBIN\psql.exe -U postgres -c "CREATE ROLE noaccess WITH PASSWORD 'password' NOSUPERUSER;"
        & $env:PGBIN\psql.exe -U postgres -c "ALTER ROLE noaccess WITH LOGIN;"
        & $env:PGBIN\psql.exe -U postgres -c "GRANT CONNECT ON DATABASE dbt TO noaccess;"
      displayName: Install postgresql and set up database

    - task: UsePythonVersion@0
      inputs:
        versionSpec: '3.7'
        architecture: 'x64'

    - script: python -m pip install --upgrade pip && pip install tox
      displayName: 'Install dependencies'

    - script: python -m tox -e pywin-postgres
      displayName: Run integration tests

# These three are all similar except secure environment variables, which MUST be passed along to their tasks,
# but there's probably a better way to do this!
- job: SnowflakeIntegrationTest
  pool:
    vmImage: 'vs2017-win2016'
  dependsOn: PostgresIntegrationTest
  condition: succeeded()
  steps:
    - task: UsePythonVersion@0
      inputs:
        versionSpec: '3.7'
        architecture: 'x64'

    - script: python -m pip install --upgrade pip && pip install tox
      displayName: 'Install dependencies'

    - script: python -m tox -e pywin-snowflake
      env:
        SNOWFLAKE_TEST_ACCOUNT: $(SNOWFLAKE_TEST_ACCOUNT)
        SNOWFLAKE_TEST_PASSWORD: $(SNOWFLAKE_TEST_PASSWORD)
        SNOWFLAKE_TEST_USER: $(SNOWFLAKE_TEST_USER)
        SNOWFLAKE_TEST_WAREHOUSE: $(SNOWFLAKE_TEST_WAREHOUSE)
        SNOWFLAKE_TEST_OAUTH_REFRESH_TOKEN: $(SNOWFLAKE_TEST_OAUTH_REFRESH_TOKEN)
        SNOWFLAKE_TEST_OAUTH_CLIENT_ID: $(SNOWFLAKE_TEST_OAUTH_CLIENT_ID)
        SNOWFLAKE_TEST_OAUTH_CLIENT_SECRET: $(SNOWFLAKE_TEST_OAUTH_CLIENT_SECRET)
      displayName: Run integration tests

- job: BigQueryIntegrationTest
  pool:
    vmImage: 'vs2017-win2016'
  dependsOn: PostgresIntegrationTest
  condition: succeeded()
  steps:
    - task: UsePythonVersion@0
      inputs:
        versionSpec: '3.7'
        architecture: 'x64'
    - script: python -m pip install --upgrade pip && pip install tox
      displayName: 'Install dependencies'
    - script: python -m tox -e pywin-bigquery
      env:
        BIGQUERY_SERVICE_ACCOUNT_JSON: $(BIGQUERY_SERVICE_ACCOUNT_JSON)
      displayName: Run integration tests

- job: RedshiftIntegrationTest
  pool:
    vmImage: 'vs2017-win2016'
  dependsOn: PostgresIntegrationTest
  condition: succeeded()
  steps:
    - task: UsePythonVersion@0
      inputs:
        versionSpec: '3.7'
        architecture: 'x64'

    - script: python -m pip install --upgrade pip && pip install tox
      displayName: 'Install dependencies'

    - script: python -m tox -e pywin-redshift
      env:
        REDSHIFT_TEST_DBNAME: $(REDSHIFT_TEST_DBNAME)
        REDSHIFT_TEST_PASS: $(REDSHIFT_TEST_PASS)
        REDSHIFT_TEST_USER: $(REDSHIFT_TEST_USER)
        REDSHIFT_TEST_PORT: $(REDSHIFT_TEST_PORT)
        REDSHIFT_TEST_HOST: $(REDSHIFT_TEST_HOST)
      displayName: Run integration tests

- job: BuildWheel
  pool:
    vmImage: 'vs2017-win2016'
  dependsOn:
    - UnitTest
    - PostgresIntegrationTest
    - RedshiftIntegrationTest
    - SnowflakeIntegrationTest
    - BigQueryIntegrationTest
  condition: succeeded()
  steps:
    - task: UsePythonVersion@0
      inputs:
        versionSpec: '3.7'
        architecture: 'x64'
    - script: python -m pip install --upgrade pip setuptools && python -m pip install -r requirements.txt && python -m pip install -r requirements-dev.txt
      displayName: Install dependencies
    - task: ShellScript@2
      inputs:
        scriptPath: scripts/build-wheels.sh
    - task: CopyFiles@2
      inputs:
        contents: 'dist\?(*.whl|*.tar.gz)'
        TargetFolder: '$(Build.ArtifactStagingDirectory)'
    - task: PublishBuildArtifacts@1
      inputs:
        pathtoPublish: '$(Build.ArtifactStagingDirectory)'
        artifactName: dists
converter.py (new executable file, 75 lines)
@@ -0,0 +1,75 @@
#!/usr/bin/env python
import json
import yaml
import argparse
from datetime import datetime, timezone
import dbt.clients.registry as registry


def yaml_type(fname):
    with open(fname) as f:
        # safe_load: a bare yaml.load() without an explicit Loader is
        # unsafe and deprecated in modern PyYAML
        return yaml.safe_load(f)


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--project", type=yaml_type, default="dbt_project.yml")
    parser.add_argument("--namespace", required=True)
    return parser.parse_args()


def get_full_name(args):
    return "{}/{}".format(args.namespace, args.project["name"])


def init_project_in_packages(args, packages):
    full_name = get_full_name(args)
    if full_name not in packages:
        packages[full_name] = {
            "name": args.project["name"],
            "namespace": args.namespace,
            "latest": args.project["version"],
            "assets": {},
            "versions": {},
        }
    return packages[full_name]


def add_version_to_package(args, project_json):
    project_json["versions"][args.project["version"]] = {
        "id": "{}/{}".format(get_full_name(args), args.project["version"]),
        "name": args.project["name"],
        "version": args.project["version"],
        "description": "",
        "published_at": datetime.now(timezone.utc).astimezone().isoformat(),
        "packages": args.project.get("packages") or [],
        "works_with": [],
        "_source": {
            "type": "github",
            "url": "",
            "readme": "",
        },
        "downloads": {
            "tarball": "",
            "format": "tgz",
            "sha1": "",
        },
    }


def main():
    args = parse_args()
    packages = registry.packages()
    project_json = init_project_in_packages(args, packages)
    if args.project["version"] in project_json["versions"]:
        # Exception() takes no file= keyword; that argument belongs to
        # print(), so it is dropped here
        raise Exception(
            "Version {} already in packages JSON".format(args.project["version"])
        )
    add_version_to_package(args, project_json)
    print(json.dumps(packages, indent=2))


if __name__ == "__main__":
    main()
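As a rough usage sketch (the namespace value here is illustrative): running `python converter.py --namespace dbt-labs` from a directory containing a `dbt_project.yml` fetches the current registry index, adds the project's version under `dbt-labs/<project name>`, and prints the updated packages JSON to stdout, raising if that version is already registered.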
@@ -1,2 +1 @@
recursive-include dbt/include *.py *.sql *.yml *.html *.md .gitkeep .gitignore
include dbt/py.typed
recursive-include dbt/include *.py *.sql *.yml *.html *.md
@@ -1,39 +0,0 @@
<p align="center">
  <img src="https://raw.githubusercontent.com/dbt-labs/dbt-core/fa1ea14ddfb1d5ae319d5141844910dd53ab2834/etc/dbt-core.svg" alt="dbt logo" width="750"/>
</p>
<p align="center">
  <a href="https://github.com/dbt-labs/dbt-core/actions/workflows/main.yml">
    <img src="https://github.com/dbt-labs/dbt-core/actions/workflows/main.yml/badge.svg?event=push" alt="CI Badge"/>
  </a>
</p>

**[dbt](https://www.getdbt.com/)** enables data analysts and engineers to transform their data using the same practices that software engineers use to build applications.

![architecture](https://raw.githubusercontent.com/dbt-labs/dbt-core/6c6649f9129d5d108aa3b0526f634cd8f3a9d1ed/etc/dbt-transform.png)

## Understanding dbt

Analysts using dbt can transform their data by simply writing select statements, while dbt handles turning these statements into tables and views in a data warehouse.

These select statements, or "models", form a dbt project. Models frequently build on top of one another – dbt makes it easy to [manage relationships](https://docs.getdbt.com/docs/ref) between models, and [visualize these relationships](https://docs.getdbt.com/docs/documentation), as well as assure the quality of your transformations through [testing](https://docs.getdbt.com/docs/testing).

![dbt dag](https://raw.githubusercontent.com/dbt-labs/dbt-core/6c6649f9129d5d108aa3b0526f634cd8f3a9d1ed/etc/dbt-dag.png)

## Getting started

- [Install dbt](https://docs.getdbt.com/docs/installation)
- Read the [introduction](https://docs.getdbt.com/docs/introduction/) and [viewpoint](https://docs.getdbt.com/docs/about/viewpoint/)

## Join the dbt Community

- Be part of the conversation in the [dbt Community Slack](http://community.getdbt.com/)
- Read more on the [dbt Community Discourse](https://discourse.getdbt.com)

## Reporting bugs and contributing code

- Want to report a bug or request a feature? Let us know on [Slack](http://community.getdbt.com/), or open [an issue](https://github.com/dbt-labs/dbt-core/issues/new)
- Want to help us build dbt? Check out the [Contributing Guide](https://github.com/dbt-labs/dbt-core/blob/HEAD/CONTRIBUTING.md)

## Code of Conduct

Everyone interacting in the dbt project's codebases, issue trackers, chat rooms, and mailing lists is expected to follow the [dbt Code of Conduct](https://community.getdbt.com/code-of-conduct).
@@ -1,60 +0,0 @@
# core/dbt directory README

## Individual files in this directory

### compilation.py

### constants.py

### dataclass_schema.py

### deprecations.py

### exceptions.py

### flags.py

### helper_types.py

### hooks.py

### lib.py

### links.py

### logger.py

### main.py

### node_types.py

### profiler.py

### selected_resources.py

### semver.py

### tracking.py

### ui.py

### utils.py

### version.py

## Subdirectories, each documented by a README within the subdirectory
* adapters
* cli
* clients
* config
* context
* contracts
* deps
* docs
* events
* graph
* include
* parser
* task
* tests
@@ -1,7 +0,0 @@
# N.B.
# This will add to the package’s __path__ all subdirectories of directories on sys.path named after the package, which effectively combines both modules into a single namespace (dbt.adapters)
# The matching statement is in plugins/postgres/dbt/__init__.py

from pkgutil import extend_path

__path__ = extend_path(__path__, __name__)
@@ -1,30 +0,0 @@
# Adapters README

The Adapters module is responsible for defining database connection methods, caching information from databases, defining how relations are represented, and providing the two major connection types we have: base and sql.

# Directories

## `base`

Defines the base implementation that adapters can use to build out full functionality.

## `sql`

Defines a SQL implementation for adapters that inherits the base implementation above and comes with some premade methods and macros that can be overwritten as needed per adapter. (This is the most common type of adapter.)

# Files

## `cache.py`

Caches information from the database.

## `factory.py`

Defines how we generate adapter objects.

## `protocol.py`

Defines interfaces for the various adapter objects. Helps mypy correctly resolve methods.

## `reference_keys.py`

Configures a universal naming scheme for cache elements.
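A hedged sketch of the factory pattern the `factory.py` note above describes; the names below are illustrative stand-ins, not dbt's actual internals. The idea is a registry that maps an adapter type string to an adapter class and hands out instances on demand:

```python
# Illustrative only: a minimal type-string -> adapter-class registry.
ADAPTER_TYPES = {}

def register_adapter_type(name, cls):
    ADAPTER_TYPES[name] = cls

def get_adapter_instance(name, *args, **kwargs):
    # Look up the registered class and construct an instance from it.
    return ADAPTER_TYPES[name](*args, **kwargs)

class PostgresAdapterStub:
    def __init__(self, dsn=None):
        self.dsn = dsn

register_adapter_type("postgres", PostgresAdapterStub)
adapter = get_adapter_instance("postgres", dsn="postgres://localhost/dbt")
```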
@@ -1,7 +0,0 @@
# N.B.
# This will add to the package’s __path__ all subdirectories of directories on sys.path named after the package, which effectively combines both modules into a single namespace (dbt.adapters)
# The matching statement is in plugins/postgres/dbt/adapters/__init__.py

from pkgutil import extend_path

__path__ = extend_path(__path__, __name__)
@@ -1,10 +0,0 @@

## Base adapters

### impl.py

The class `BaseAdapter` in [base/impl.py](https://github.com/dbt-labs/dbt-core/blob/main/core/dbt/adapters/base/impl.py) is a (mostly) abstract object that adapter objects inherit from. The base class scaffolds out the methods that every adapter project usually should implement for smooth communication between dbt and the database.

Some target databases require more or fewer methods; it all depends on the warehouse's feature set.

Look into the class for function-level comments.
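As a self-contained illustration of that inheritance pattern (this mimics the shape of `BaseAdapter` without importing dbt; real adapters subclass `dbt.adapters.base.impl.BaseAdapter` and implement many more hooks, and the SQL string here is a placeholder):

```python
import abc

class BaseAdapterSketch(metaclass=abc.ABCMeta):
    @classmethod
    @abc.abstractmethod
    def date_function(cls) -> str:
        """Get the date function used by this adapter's database."""
        raise NotImplementedError("`date_function` is not implemented!")

class MyWarehouseAdapter(BaseAdapterSketch):
    @classmethod
    def date_function(cls) -> str:
        return "current_timestamp()"  # placeholder SQL for this sketch

print(MyWarehouseAdapter.date_function())
```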
@@ -10,5 +10,5 @@ from dbt.adapters.base.relation import (  # noqa
    SchemaSearchMap,
)
from dbt.adapters.base.column import Column  # noqa
from dbt.adapters.base.impl import AdapterConfig, BaseAdapter, PythonJobHelper  # noqa
from dbt.adapters.base.impl import AdapterConfig, BaseAdapter  # noqa
from dbt.adapters.base.plugin import AdapterPlugin  # noqa
@@ -2,7 +2,7 @@ from dataclasses import dataclass
import re
from typing import Dict, ClassVar, Any, Optional

from dbt.exceptions import DbtRuntimeError
from dbt.exceptions import RuntimeException


@dataclass
@@ -12,7 +12,6 @@ class Column:
        "TIMESTAMP": "TIMESTAMP",
        "FLOAT": "FLOAT",
        "INTEGER": "INT",
        "BOOLEAN": "BOOLEAN",
    }
    column: str
    dtype: str
@@ -40,14 +39,21 @@ class Column:
    @property
    def data_type(self) -> str:
        if self.is_string():
            return self.string_type(self.string_size())
            return Column.string_type(self.string_size())
        elif self.is_numeric():
            return self.numeric_type(self.dtype, self.numeric_precision, self.numeric_scale)
            return Column.numeric_type(
                self.dtype, self.numeric_precision, self.numeric_scale
            )
        else:
            return self.dtype

    def is_string(self) -> bool:
        return self.dtype.lower() in ["text", "character varying", "character", "varchar"]
        return self.dtype.lower() in [
            "text",
            "character varying",
            "character",
            "varchar",
        ]

    def is_number(self):
        return any([self.is_integer(), self.is_numeric(), self.is_float()])
@@ -85,7 +91,7 @@ class Column:

    def string_size(self) -> int:
        if not self.is_string():
            raise DbtRuntimeError("Called string_size() on non-string field!")
            raise RuntimeException("Called string_size() on non-string field!")

        if self.dtype == "text" or self.char_size is None:
            # char_size should never be None. Handle it reasonably just in case
@@ -124,7 +130,7 @@ class Column:
    def from_description(cls, name: str, raw_data_type: str) -> "Column":
        match = re.match(r"([^(]+)(\([^)]+\))?", raw_data_type)
        if match is None:
            raise DbtRuntimeError(f'Could not interpret data type "{raw_data_type}"')
            raise RuntimeException(f'Could not interpret data type "{raw_data_type}"')
        data_type, size_info = match.groups()
        char_size = None
        numeric_precision = None
@@ -137,7 +143,7 @@ class Column:
            try:
                char_size = int(parts[0])
            except ValueError:
                raise DbtRuntimeError(
                raise RuntimeException(
                    f'Could not interpret data_type "{raw_data_type}": '
                    f'could not convert "{parts[0]}" to an integer'
                )
@@ -145,14 +151,14 @@ class Column:
            try:
                numeric_precision = int(parts[0])
            except ValueError:
                raise DbtRuntimeError(
                raise RuntimeException(
                    f'Could not interpret data_type "{raw_data_type}": '
                    f'could not convert "{parts[0]}" to an integer'
                )
            try:
                numeric_scale = int(parts[1])
            except ValueError:
                raise DbtRuntimeError(
                raise RuntimeException(
                    f'Could not interpret data_type "{raw_data_type}": '
                    f'could not convert "{parts[1]}" to an integer'
                )
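A quick, runnable illustration of the `from_description` regex above, splitting a raw database type into its base name and size suffix (the sample type strings are illustrative):

```python
import re

# The same pattern used by from_description: base type, then an optional
# parenthesized size specifier.
for raw in ("character varying(255)", "numeric(38,9)", "integer"):
    match = re.match(r"([^(]+)(\([^)]+\))?", raw)
    data_type, size_info = match.groups()
    print(data_type, size_info)  # e.g. "character varying" "(255)", "integer" None
```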
@@ -1,25 +1,10 @@
import abc
import os
from time import sleep
import sys
import traceback

# multiprocessing.RLock is a function returning this type
from multiprocessing.synchronize import RLock
from threading import get_ident
from typing import (
    Any,
    Dict,
    Tuple,
    Hashable,
    Optional,
    ContextManager,
    List,
    Type,
    Union,
    Iterable,
    Callable,
)
from typing import Dict, Tuple, Hashable, Optional, ContextManager, List, Union

import agate

@@ -36,24 +21,8 @@ from dbt.contracts.graph.manifest import Manifest
from dbt.adapters.base.query_headers import (
    MacroQueryStringSetter,
)
from dbt.events import AdapterLogger
from dbt.events.functions import fire_event
from dbt.events.types import (
    NewConnection,
    ConnectionReused,
    ConnectionLeftOpenInCleanup,
    ConnectionLeftOpen,
    ConnectionClosedInCleanup,
    ConnectionClosed,
    Rollback,
    RollbackFailed,
)
from dbt.events.contextvars import get_node_info
from dbt.logger import GLOBAL_LOGGER as logger
from dbt import flags
from dbt.utils import cast_to_str

SleepTime = Union[int, float]  # As taken by time.sleep.
AdapterHandle = Any  # Adapter connection handle objects can be any class.


class BaseConnectionManager(metaclass=abc.ABCMeta):
@@ -91,13 +60,15 @@ class BaseConnectionManager(metaclass=abc.ABCMeta):
        key = self.get_thread_identifier()
        with self.lock:
            if key not in self.thread_connections:
                raise dbt.exceptions.InvalidConnectionError(key, list(self.thread_connections))
                raise dbt.exceptions.InvalidConnectionException(
                    key, list(self.thread_connections)
                )
            return self.thread_connections[key]

    def set_thread_connection(self, conn: Connection) -> None:
        key = self.get_thread_identifier()
        if key in self.thread_connections:
            raise dbt.exceptions.DbtInternalError(
            raise dbt.exceptions.InternalException(
                "In set_thread_connection, existing connection exists for {}"
            )
        self.thread_connections[key] = conn
@@ -137,148 +108,61 @@ class BaseConnectionManager(metaclass=abc.ABCMeta):
        :return: A context manager that handles exceptions raised by the
            underlying database.
        """
        raise dbt.exceptions.NotImplementedError(
        raise dbt.exceptions.NotImplementedException(
            "`exception_handler` is not implemented for this adapter!"
        )

    def set_connection_name(self, name: Optional[str] = None) -> Connection:
        """Called by 'acquire_connection' in BaseAdapter, which is called by
        'connection_named', called by 'connection_for(node)'.
        Creates a connection for this thread if one doesn't already
        exist, and will rename an existing connection."""
        conn_name: str
        if name is None:
            # if a name isn't specified, we'll re-use a single handle
            # named 'master'
            conn_name = "master"
        else:
            if not isinstance(name, str):
                raise dbt.exceptions.CompilerException(
                    f"For connection name, got {name} - not a string!"
                )
            assert isinstance(name, str)
            conn_name = name

        conn_name: str = "master" if name is None else name

        # Get a connection for this thread
        conn = self.get_if_exists()

        if conn and conn.name == conn_name and conn.state == "open":
            # Found a connection and nothing to do, so just return it
            return conn

        if conn is None:
            # Create a new connection
            conn = Connection(
                type=Identifier(self.TYPE),
                name=conn_name,
                name=None,
                state=ConnectionState.INIT,
                transaction_open=False,
                handle=None,
                credentials=self.profile.credentials,
            )
            conn.handle = LazyHandle(self.open)
            # Add the connection to thread_connections for this thread
            self.set_thread_connection(conn)
            fire_event(
                NewConnection(conn_name=conn_name, conn_type=self.TYPE, node_info=get_node_info())
            )
        else:  # existing connection either wasn't open or didn't have the right name
            if conn.state != "open":
                conn.handle = LazyHandle(self.open)
            if conn.name != conn_name:
                orig_conn_name: str = conn.name or ""
                conn.name = conn_name
                fire_event(ConnectionReused(orig_conn_name=orig_conn_name, conn_name=conn_name))

        return conn
        if conn.name == conn_name and conn.state == "open":
            return conn

    @classmethod
    def retry_connection(
        cls,
        connection: Connection,
        connect: Callable[[], AdapterHandle],
        logger: AdapterLogger,
        retryable_exceptions: Iterable[Type[Exception]],
        retry_limit: int = 1,
        retry_timeout: Union[Callable[[int], SleepTime], SleepTime] = 1,
        _attempts: int = 0,
    ) -> Connection:
        """Given a Connection, set its handle by calling connect.

        The calls to connect will be retried up to retry_limit times to deal with transient
        connection errors. By default, one retry will be attempted if retryable_exceptions is set.

        :param Connection connection: An instance of a Connection that needs a handle to be set,
            usually when attempting to open it.
        :param connect: A callable that returns the appropriate connection handle for a
            given adapter. This callable will be retried retry_limit times if a subclass of any
            Exception in retryable_exceptions is raised by connect.
        :type connect: Callable[[], AdapterHandle]
        :param AdapterLogger logger: A logger to emit messages on retry attempts or errors. When
            handling expected errors, we call debug, and call warning on unexpected errors or when
            all retry attempts have been exhausted.
        :param retryable_exceptions: An iterable of exception classes that if raised by
            connect should trigger a retry.
        :type retryable_exceptions: Iterable[Type[Exception]]
        :param int retry_limit: How many times to retry the call to connect. If this limit
            is exceeded before a successful call, a FailedToConnectError will be raised.
            Must be non-negative.
        :param retry_timeout: Time to wait between attempts to connect. Can also take a
            Callable that takes the number of attempts so far, beginning at 0, and returns an int
            or float to be passed to time.sleep.
        :type retry_timeout: Union[Callable[[int], SleepTime], SleepTime] = 1
        :param int _attempts: Parameter used to keep track of the number of attempts in calling the
            connect function across recursive calls. Passed as an argument to retry_timeout if it
            is a Callable. This parameter should not be set by the initial caller.
        :raises dbt.exceptions.FailedToConnectError: Upon exhausting all retry attempts without
            successfully acquiring a handle.
        :return: The given connection with its appropriate state and handle attributes set
            depending on whether we successfully acquired a handle or not.
        """
        timeout = retry_timeout(_attempts) if callable(retry_timeout) else retry_timeout
        if timeout < 0:
            raise dbt.exceptions.FailedToConnectError(
                "retry_timeout cannot be negative or return a negative time."
            )

        if retry_limit < 0 or retry_limit > sys.getrecursionlimit():
            # This guard is not perfect; others may add to the recursion limit (e.g. built-ins).
            connection.handle = None
            connection.state = ConnectionState.FAIL
            raise dbt.exceptions.FailedToConnectError("retry_limit cannot be negative")

        try:
            connection.handle = connect()
            connection.state = ConnectionState.OPEN
            return connection

        except tuple(retryable_exceptions) as e:
            if retry_limit <= 0:
                connection.handle = None
                connection.state = ConnectionState.FAIL
                raise dbt.exceptions.FailedToConnectError(str(e))
        logger.debug('Acquiring new {} connection "{}".'.format(self.TYPE, conn_name))

        if conn.state == "open":
            logger.debug(
                f"Got a retryable error when attempting to open a {cls.TYPE} connection.\n"
                f"{retry_limit} attempts remaining. Retrying in {timeout} seconds.\n"
                f"Error:\n{e}"
                "Re-using an available connection from the pool (formerly {}).".format(
                    conn.name
                )
            )
        else:
            conn.handle = LazyHandle(self.open)

            sleep(timeout)
            return cls.retry_connection(
                connection=connection,
                connect=connect,
                logger=logger,
                retry_limit=retry_limit - 1,
                retry_timeout=retry_timeout,
                retryable_exceptions=retryable_exceptions,
                _attempts=_attempts + 1,
            )

        except Exception as e:
            connection.handle = None
            connection.state = ConnectionState.FAIL
            raise dbt.exceptions.FailedToConnectError(str(e))
        conn.name = conn_name
        return conn

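The docstring above notes that `retry_timeout` accepts either a number or a callable of the 0-based attempt count. A minimal, self-contained sketch of that retry-with-backoff contract (this is not dbt code: `FlakyHandle` and `connect()` are invented stand-ins so the logic can run on its own):

```python
import time

class FlakyHandle:
    attempts = 0

def connect():
    # Fails twice, then succeeds, to exercise the retry path.
    FlakyHandle.attempts += 1
    if FlakyHandle.attempts < 3:
        raise ConnectionError("transient failure")  # retryable
    return FlakyHandle()

def retry(connect, retryable, retry_limit=5, retry_timeout=lambda n: 2 ** n, _attempts=0):
    # Mirrors retry_connection's contract: retry_timeout may be a number
    # or a callable taking the 0-based attempt count (waits 1s, 2s, 4s, ...).
    timeout = retry_timeout(_attempts) if callable(retry_timeout) else retry_timeout
    try:
        return connect()
    except retryable:
        if retry_limit <= 0:
            raise
        time.sleep(timeout)
        return retry(connect, retryable, retry_limit - 1, retry_timeout, _attempts + 1)

handle = retry(connect, (ConnectionError,))  # succeeds on the third attempt
```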
    @abc.abstractmethod
    def cancel_open(self) -> Optional[List[str]]:
        """Cancel all open connections on the adapter. (passable)"""
        raise dbt.exceptions.NotImplementedError(
        raise dbt.exceptions.NotImplementedException(
            "`cancel_open` is not implemented for this adapter!"
        )

    @classmethod
    @abc.abstractmethod
    @abc.abstractclassmethod
    def open(cls, connection: Connection) -> Connection:
        """Open the given connection on the adapter and return it.

@@ -288,7 +172,9 @@ class BaseConnectionManager(metaclass=abc.ABCMeta):
        This should be thread-safe, or hold the lock if necessary. The given
        connection should not be in either in_use or available.
        """
        raise dbt.exceptions.NotImplementedError("`open` is not implemented for this adapter!")
        raise dbt.exceptions.NotImplementedException(
            "`open` is not implemented for this adapter!"
        )

    def release(self) -> None:
        with self.lock:
@@ -309,9 +195,13 @@ class BaseConnectionManager(metaclass=abc.ABCMeta):
        with self.lock:
            for connection in self.thread_connections.values():
                if connection.state not in {"closed", "init"}:
                    fire_event(ConnectionLeftOpenInCleanup(conn_name=cast_to_str(connection.name)))
                    logger.debug(
                        "Connection '{}' was left open.".format(connection.name)
                    )
                else:
                    fire_event(ConnectionClosedInCleanup(conn_name=cast_to_str(connection.name)))
                    logger.debug(
                        "Connection '{}' was properly closed.".format(connection.name)
                    )
                self.close(connection)

        # garbage collect these connections
@@ -320,12 +210,16 @@ class BaseConnectionManager(metaclass=abc.ABCMeta):
    @abc.abstractmethod
    def begin(self) -> None:
        """Begin a transaction. (passable)"""
        raise dbt.exceptions.NotImplementedError("`begin` is not implemented for this adapter!")
        raise dbt.exceptions.NotImplementedException(
            "`begin` is not implemented for this adapter!"
        )

    @abc.abstractmethod
    def commit(self) -> None:
        """Commit a transaction. (passable)"""
        raise dbt.exceptions.NotImplementedError("`commit` is not implemented for this adapter!")
        raise dbt.exceptions.NotImplementedException(
            "`commit` is not implemented for this adapter!"
        )

    @classmethod
    def _rollback_handle(cls, connection: Connection) -> None:
@@ -333,52 +227,52 @@ class BaseConnectionManager(metaclass=abc.ABCMeta):
        try:
            connection.handle.rollback()
        except Exception:
            fire_event(
                RollbackFailed(
                    conn_name=cast_to_str(connection.name),
                    exc_info=traceback.format_exc(),
                    node_info=get_node_info(),
                )
            )
            logger.debug("Failed to rollback {}".format(connection.name), exc_info=True)

    @classmethod
    def _close_handle(cls, connection: Connection) -> None:
        """Perform the actual close operation."""
        # On windows, sometimes connection handles don't have a close() attr.
        if hasattr(connection.handle, "close"):
            fire_event(
                ConnectionClosed(conn_name=cast_to_str(connection.name), node_info=get_node_info())
            )
            logger.debug(f"On {connection.name}: Close")
            connection.handle.close()
        else:
            fire_event(
                ConnectionLeftOpen(
                    conn_name=cast_to_str(connection.name), node_info=get_node_info()
                )
            )
            logger.debug(f"On {connection.name}: No close available on handle")

    @classmethod
    def _rollback(cls, connection: Connection) -> None:
        """Roll back the given connection."""
        if flags.STRICT_MODE:
            if not isinstance(connection, Connection):
                raise dbt.exceptions.CompilerException(
                    f"In _rollback, got {connection} - not a Connection!"
                )

        if connection.transaction_open is False:
            raise dbt.exceptions.DbtInternalError(
            raise dbt.exceptions.InternalException(
                f"Tried to rollback transaction on connection "
                f'"{connection.name}", but it does not have one open!'
            )

        fire_event(Rollback(conn_name=cast_to_str(connection.name), node_info=get_node_info()))
        logger.debug(f"On {connection.name}: ROLLBACK")
        cls._rollback_handle(connection)

        connection.transaction_open = False

    @classmethod
    def close(cls, connection: Connection) -> Connection:
        if flags.STRICT_MODE:
            if not isinstance(connection, Connection):
                raise dbt.exceptions.CompilerException(
                    f"In close, got {connection} - not a Connection!"
                )

        # if the connection is in closed or init, there's nothing to do
        if connection.state in {ConnectionState.CLOSED, ConnectionState.INIT}:
            return connection

        if connection.transaction_open and connection.handle:
            fire_event(Rollback(conn_name=cast_to_str(connection.name), node_info=get_node_info()))
            logger.debug("On {}: ROLLBACK".format(connection.name))
            cls._rollback_handle(connection)
        connection.transaction_open = False

@@ -401,14 +295,16 @@ class BaseConnectionManager(metaclass=abc.ABCMeta):
    @abc.abstractmethod
    def execute(
        self, sql: str, auto_begin: bool = False, fetch: bool = False
    ) -> Tuple[AdapterResponse, agate.Table]:
    ) -> Tuple[Union[str, AdapterResponse], agate.Table]:
        """Execute the given SQL.

        :param str sql: The sql to execute.
        :param bool auto_begin: If set, and dbt is not currently inside a
            transaction, automatically begin one.
        :param bool fetch: If set, fetch results.
        :return: A tuple of the query status and results (empty if fetch=False).
        :rtype: Tuple[AdapterResponse, agate.Table]
        :return: A tuple of the status and the results (empty if fetch=False).
        :rtype: Tuple[Union[str, AdapterResponse], agate.Table]
        """
        raise dbt.exceptions.NotImplementedError("`execute` is not implemented for this adapter!")
        raise dbt.exceptions.NotImplementedException(
            "`execute` is not implemented for this adapter!"
        )

@@ -2,7 +2,6 @@ import abc
from concurrent.futures import as_completed, Future
from contextlib import contextmanager
from datetime import datetime
import time
from itertools import chain
from typing import (
    Optional,
@@ -15,6 +14,7 @@ from typing import (
    List,
    Mapping,
    Iterator,
    Union,
    Set,
)

@@ -22,39 +22,30 @@ import agate
import pytz

from dbt.exceptions import (
    DbtInternalError,
    MacroArgTypeError,
    MacroResultError,
    QuoteConfigTypeError,
    NotImplementedError,
    NullRelationCacheAttemptedError,
    NullRelationDropAttemptedError,
    RelationReturnedMultipleResultsError,
    RenameToNoneAttemptedError,
    DbtRuntimeError,
    SnapshotTargetIncompleteError,
    SnapshotTargetNotSnapshotTableError,
    UnexpectedNullError,
    UnexpectedNonTimestampError,
    raise_database_error,
    raise_compiler_error,
    invalid_type_error,
    get_relation_returned_multiple_results,
    InternalException,
    NotImplementedException,
    RuntimeException,
)
from dbt import flags

from dbt import deprecations
from dbt.adapters.protocol import (
    AdapterConfig,
    ConnectionManagerProtocol,
)
from dbt.clients.agate_helper import empty_table, merge_tables, table_from_rows
from dbt.clients.jinja import MacroGenerator
from dbt.contracts.graph.compiled import CompileResultNode, CompiledSeedNode
from dbt.contracts.graph.manifest import Manifest, MacroManifest
from dbt.contracts.graph.nodes import ResultNode
from dbt.events.functions import fire_event, warn_or_error
from dbt.events.types import (
    CacheMiss,
    ListRelations,
    CodeExecution,
    CodeExecutionStatus,
    CatalogGenerationError,
)
from dbt.utils import filter_null_values, executor, cast_to_str
from dbt.contracts.graph.parsed import ParsedSeedNode
from dbt.exceptions import warn_or_error
from dbt.node_types import NodeType
from dbt.logger import GLOBAL_LOGGER as logger
from dbt.utils import filter_null_values, executor

from dbt.adapters.base.connections import Connection, AdapterResponse
from dbt.adapters.base.meta import AdapterMeta, available
@@ -65,8 +56,10 @@ from dbt.adapters.base.relation import (
    SchemaSearchMap,
)
from dbt.adapters.base import Column as BaseColumn
from dbt.adapters.base import Credentials
from dbt.adapters.cache import RelationsCache, _make_ref_key_msg
from dbt.adapters.cache import RelationsCache


SeedModel = Union[ParsedSeedNode, CompiledSeedNode]


GET_CATALOG_MACRO_NAME = "get_catalog"
@@ -75,7 +68,7 @@ FRESHNESS_MACRO_NAME = "collect_freshness"

def _expect_row_value(key: str, row: agate.Row):
    if key not in row.keys():
        raise DbtInternalError(
        raise InternalException(
            'Got a row without "{}" column, columns: {}'.format(key, row.keys())
        )
    return row[key]
@@ -104,10 +97,18 @@ def _utc(dt: Optional[datetime], source: BaseRelation, field_name: str) -> datet
    assume the datetime is already for UTC and add the timezone.
    """
    if dt is None:
        raise UnexpectedNullError(field_name, source)
        raise raise_database_error(
            "Expected a non-null value when querying field '{}' of table "
            " {} but received value 'null' instead".format(field_name, source)
        )

    elif not hasattr(dt, "tzinfo"):
        raise UnexpectedNonTimestampError(field_name, source, dt)
        raise raise_database_error(
            "Expected a timestamp value when querying field '{}' of table "
            "{} but received value of type '{}' instead".format(
                field_name, source, type(dt).__name__
            )
        )

    elif dt.tzinfo:
        return dt.astimezone(pytz.UTC)
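A small runnable sketch of the `_utc` normalization described above (the two branches mirror this hunk and its docstring: timezone-aware values are shifted into UTC, naive values are assumed to already be UTC and have the timezone attached):

```python
from datetime import datetime
import pytz

aware = datetime(2021, 1, 1, 12, tzinfo=pytz.timezone("US/Eastern"))
naive = datetime(2021, 1, 1, 12)

print(aware.astimezone(pytz.UTC))      # shifted into UTC, per the tzinfo branch
print(naive.replace(tzinfo=pytz.UTC))  # assumed already-UTC, timezone attached
```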
@@ -122,35 +123,6 @@ def _relation_name(rel: Optional[BaseRelation]) -> str:
    return str(rel)


def log_code_execution(code_execution_function):
    # decorator to log code and execution time
    if code_execution_function.__name__ != "submit_python_job":
        raise ValueError("this should be only used to log submit_python_job now")

    def execution_with_log(*args):
        self = args[0]
        connection_name = self.connections.get_thread_connection().name
        fire_event(CodeExecution(conn_name=connection_name, code_content=args[2]))
        start_time = time.time()
        response = code_execution_function(*args)
        fire_event(
            CodeExecutionStatus(
                status=response._message, elapsed=round((time.time() - start_time), 2)
            )
        )
        return response

    return execution_with_log


class PythonJobHelper:
    def __init__(self, parsed_model: Dict, credential: Credentials) -> None:
        raise NotImplementedError("PythonJobHelper is not implemented yet")

    def submit(self, compiled_code: str) -> Any:
        raise NotImplementedError("PythonJobHelper submit function is not implemented yet")


class BaseAdapter(metaclass=AdapterMeta):
    """The BaseAdapter provides an abstract base class for adapters.

@@ -160,15 +132,9 @@ class BaseAdapter(metaclass=AdapterMeta):
    methods are marked with a (passable) in their docstrings. Check docstrings
    for type information, etc.

    To implement a macro, implement "${adapter_type}__${macro_name}" in the
    To implement a macro, implement "${adapter_type}__${macro_name}". in the
    adapter's internal project.

    To invoke a method in an adapter macro, call it on the 'adapter' Jinja
    object using dot syntax.

    To invoke a method in model code, add the @available decorator atop a method
    declaration. Methods are invoked as macros.

    Methods:
        - exception_handler
        - date_function
@@ -189,7 +155,6 @@ class BaseAdapter(metaclass=AdapterMeta):
        - convert_datetime_type
        - convert_date_type
        - convert_time_type
        - standardize_grants_dict

    Macros:
        - get_catalog
@@ -237,7 +202,9 @@ class BaseAdapter(metaclass=AdapterMeta):
        return conn.name

    @contextmanager
    def connection_named(self, name: str, node: Optional[ResultNode] = None) -> Iterator[None]:
    def connection_named(
        self, name: str, node: Optional[CompileResultNode] = None
    ) -> Iterator[None]:
        try:
            if self.connections.query_header is not None:
                self.connections.query_header.set(name, node)
@@ -249,14 +216,14 @@ class BaseAdapter(metaclass=AdapterMeta):
            self.connections.query_header.reset()

    @contextmanager
    def connection_for(self, node: ResultNode) -> Iterator[None]:
    def connection_for(self, node: CompileResultNode) -> Iterator[None]:
        with self.connection_named(node.unique_id, node):
            yield

    @available.parse(lambda *a, **k: ("", empty_table()))
    def execute(
        self, sql: str, auto_begin: bool = False, fetch: bool = False
    ) -> Tuple[AdapterResponse, agate.Table]:
    ) -> Tuple[Union[str, AdapterResponse], agate.Table]:
        """Execute the given SQL. This is a thin wrapper around
        ConnectionManager.execute.

@@ -264,8 +231,8 @@ class BaseAdapter(metaclass=AdapterMeta):
        :param bool auto_begin: If set, and dbt is not currently inside a
            transaction, automatically begin one.
        :param bool fetch: If set, fetch results.
        :return: A tuple of the query status and results (empty if fetch=False).
        :rtype: Tuple[AdapterResponse, agate.Table]
        :return: A tuple of the status and the results (empty if fetch=False).
        :rtype: Tuple[Union[str, AdapterResponse], agate.Table]
        """
        return self.connections.execute(sql=sql, auto_begin=auto_begin, fetch=fetch)

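A hedged usage sketch of the `execute` contract documented above; `adapter` stands in for an already-configured BaseAdapter instance, which is an assumption of this sketch (obtaining one requires a full dbt runtime config and warehouse connection):

```python
# Returns (status, agate.Table); the status is a string or AdapterResponse,
# per the signatures in the hunk above.
response, table = adapter.execute("select 1 as id", fetch=True)
print(response)           # query status
for row in table.rows:    # agate.Table holding the fetched rows
    print(row["id"])
```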
@@ -305,21 +272,16 @@ class BaseAdapter(metaclass=AdapterMeta):
        """
        return self._macro_manifest_lazy

    def load_macro_manifest(self, base_macros_only=False) -> MacroManifest:
        # base_macros_only is for the test framework
    def load_macro_manifest(self) -> MacroManifest:
        if self._macro_manifest_lazy is None:
            # avoid a circular import
            from dbt.parser.manifest import ManifestLoader
            from dbt.parser.manifest import load_macro_manifest

            manifest = ManifestLoader.load_macros(
                self.config,
                self.connections.set_query_header,
                base_macros_only=base_macros_only,
            manifest = load_macro_manifest(
                self.config, self.connections.set_query_header
            )
            # TODO CT-211
            self._macro_manifest_lazy = manifest  # type: ignore[assignment]
        # TODO CT-211
        return self._macro_manifest_lazy  # type: ignore[return-value]
            self._macro_manifest_lazy = manifest
        return self._macro_manifest_lazy

    def clear_macro_manifest(self):
        if self._macro_manifest_lazy is not None:
@@ -331,12 +293,12 @@ class BaseAdapter(metaclass=AdapterMeta):
    def _schema_is_cached(self, database: Optional[str], schema: str) -> bool:
        """Check if the schema is cached, and by default logs if it is not."""

        if (database, schema) not in self.cache:
            fire_event(
                CacheMiss(
                    conn_name=self.nice_connection_name(),
                    database=cast_to_str(database),
                    schema=schema,
        if flags.USE_CACHE is False:
            return False
        elif (database, schema) not in self.cache:
            logger.debug(
                'On "{}": cache miss for schema "{}.{}", this is inefficient'.format(
                    self.nice_connection_name(), database, schema
                )
            )
            return False
@@ -351,7 +313,10 @@ class BaseAdapter(metaclass=AdapterMeta):
        return {
            self.Relation.create_from(self.config, node).without_identifier()
            for node in manifest.nodes.values()
            if (node.is_relational and not node.is_ephemeral_model)
            if (
                node.resource_type in NodeType.executable()
                and not node.is_ephemeral_model
            )
        }

    def _get_catalog_schemas(self, manifest: Manifest) -> SchemaSearchMap:
@@ -364,12 +329,8 @@ class BaseAdapter(metaclass=AdapterMeta):
        lowercase strings.
        """
        info_schema_name_map = SchemaSearchMap()
        nodes: Iterator[ResultNode] = chain(
            [
                node
                for node in manifest.nodes.values()
                if (node.is_relational and not node.is_ephemeral_model)
            ],
        nodes: Iterator[CompileResultNode] = chain(
            manifest.nodes.values(),
            manifest.sources.values(),
        )
        for node in nodes:
@@ -381,14 +342,14 @@ class BaseAdapter(metaclass=AdapterMeta):
        # databases
        return info_schema_name_map

    def _relations_cache_for_schemas(
        self, manifest: Manifest, cache_schemas: Set[BaseRelation] = None
    ) -> None:
    def _relations_cache_for_schemas(self, manifest: Manifest) -> None:
        """Populate the relations cache for the given schemas. Returns an
        iterable of the schemas populated, as strings.
        """
        if not cache_schemas:
            cache_schemas = self._get_cache_schemas(manifest)
        if not flags.USE_CACHE:
            return

        cache_schemas = self._get_cache_schemas(manifest)
        with executor(self.config) as tpe:
            futures: List[Future[List[BaseRelation]]] = []
            for cache_schema in cache_schemas:
@@ -414,27 +375,28 @@ class BaseAdapter(metaclass=AdapterMeta):
                cache_update.add((relation.database, relation.schema))
        self.cache.update_schemas(cache_update)

    def set_relations_cache(
        self,
        manifest: Manifest,
        clear: bool = False,
        required_schemas: Set[BaseRelation] = None,
    ) -> None:
    def set_relations_cache(self, manifest: Manifest, clear: bool = False) -> None:
        """Run a query that gets a populated cache of the relations in the
        database and set the cache on this adapter.
        """
        if not flags.USE_CACHE:
            return

        with self.cache.lock:
            if clear:
                self.cache.clear()
            self._relations_cache_for_schemas(manifest, required_schemas)
            self._relations_cache_for_schemas(manifest)

    @available
    def cache_added(self, relation: Optional[BaseRelation]) -> str:
        """Cache a new relation in dbt. It will show up in `list relations`."""
        if relation is None:
            name = self.nice_connection_name()
            raise NullRelationCacheAttemptedError(name)
        self.cache.add(relation)
            raise_compiler_error(
                "Attempted to cache a null relation for {}".format(name)
            )
        if flags.USE_CACHE:
            self.cache.add(relation)
        # so jinja doesn't render things
        return ""

@@ -445,8 +407,11 @@ class BaseAdapter(metaclass=AdapterMeta):
        """
        if relation is None:
            name = self.nice_connection_name()
            raise NullRelationDropAttemptedError(name)
        self.cache.drop(relation)
            raise_compiler_error(
                "Attempted to drop a null relation for {}".format(name)
            )
        if flags.USE_CACHE:
            self.cache.drop(relation)
        return ""

    @available
@@ -462,24 +427,29 @@ class BaseAdapter(metaclass=AdapterMeta):
            name = self.nice_connection_name()
            src_name = _relation_name(from_relation)
            dst_name = _relation_name(to_relation)
            raise RenameToNoneAttemptedError(src_name, dst_name, name)
            raise_compiler_error(
                "Attempted to rename {} to {} for {}".format(src_name, dst_name, name)
            )

        self.cache.rename(from_relation, to_relation)
        if flags.USE_CACHE:
            self.cache.rename(from_relation, to_relation)
        return ""

    ###
    # Abstract methods for database-specific values, attributes, and types
    ###
    @classmethod
    @abc.abstractmethod
    @abc.abstractclassmethod
    def date_function(cls) -> str:
        """Get the date function used by this adapter's database."""
        raise NotImplementedError("`date_function` is not implemented for this adapter!")
        raise NotImplementedException(
            "`date_function` is not implemented for this adapter!"
        )

    @classmethod
    @abc.abstractmethod
    @abc.abstractclassmethod
    def is_cancelable(cls) -> bool:
        raise NotImplementedError("`is_cancelable` is not implemented for this adapter!")
        raise NotImplementedException(
            "`is_cancelable` is not implemented for this adapter!"
        )

    ###
    # Abstract methods about schemas
@@ -487,7 +457,9 @@ class BaseAdapter(metaclass=AdapterMeta):
    @abc.abstractmethod
    def list_schemas(self, database: str) -> List[str]:
        """Get a list of existing schemas in database"""
        raise NotImplementedError("`list_schemas` is not implemented for this adapter!")
        raise NotImplementedException(
            "`list_schemas` is not implemented for this adapter!"
        )

    @available.parse(lambda *a, **k: False)
    def check_schema_exists(self, database: str, schema: str) -> bool:
@@ -510,28 +482,38 @@ class BaseAdapter(metaclass=AdapterMeta):

        *Implementors must call self.cache.drop() to preserve cache state!*
        """
        raise NotImplementedError("`drop_relation` is not implemented for this adapter!")
        raise NotImplementedException(
            "`drop_relation` is not implemented for this adapter!"
        )

    @abc.abstractmethod
    @available.parse_none
    def truncate_relation(self, relation: BaseRelation) -> None:
        """Truncate the given relation."""
        raise NotImplementedError("`truncate_relation` is not implemented for this adapter!")
        raise NotImplementedException(
            "`truncate_relation` is not implemented for this adapter!"
        )

    @abc.abstractmethod
    @available.parse_none
    def rename_relation(self, from_relation: BaseRelation, to_relation: BaseRelation) -> None:
    def rename_relation(
        self, from_relation: BaseRelation, to_relation: BaseRelation
    ) -> None:
        """Rename the relation from from_relation to to_relation.

        Implementors must call self.cache.rename() to preserve cache state.
        """
        raise NotImplementedError("`rename_relation` is not implemented for this adapter!")
        raise NotImplementedException(
            "`rename_relation` is not implemented for this adapter!"
        )

    @abc.abstractmethod
    @available.parse_list
    def get_columns_in_relation(self, relation: BaseRelation) -> List[BaseColumn]:
        """Get a list of the columns in the given Relation."""
        raise NotImplementedError("`get_columns_in_relation` is not implemented for this adapter!")
        raise NotImplementedException(
            "`get_columns_in_relation` is not implemented for this adapter!"
        )

    @available.deprecated("get_columns_in_relation", lambda *a, **k: [])
    def get_columns_in_table(self, schema: str, identifier: str) -> List[BaseColumn]:
@@ -553,12 +535,14 @@ class BaseAdapter(metaclass=AdapterMeta):
        :param self.Relation current: A relation that currently exists in the
            database with columns of unspecified types.
        """
        raise NotImplementedError(
        raise NotImplementedException(
            "`expand_target_column_types` is not implemented for this adapter!"
        )

    @abc.abstractmethod
    def list_relations_without_caching(self, schema_relation: BaseRelation) -> List[BaseRelation]:
    def list_relations_without_caching(
        self, schema_relation: BaseRelation
    ) -> List[BaseRelation]:
        """List relations in the given schema, bypassing the cache.

        This is used as the underlying behavior to fill the cache.
@@ -568,37 +552,10 @@ class BaseAdapter(metaclass=AdapterMeta):
        :return: The relations in schema
        :rtype: List[self.Relation]
        """
        raise NotImplementedError(
            "`list_relations_without_caching` is not implemented for this adapter!"
        raise NotImplementedException(
            "`list_relations_without_caching` is not implemented for this " "adapter!"
        )

    ###
    # Methods about grants
    ###
    @available
    def standardize_grants_dict(self, grants_table: agate.Table) -> dict:
        """Translate the result of `show grants` (or equivalent) to match the
        grants which a user would configure in their project.

        Ideally, the SQL to show grants should also be filtering:
        filter OUT any grants TO the current user/role (e.g. OWNERSHIP).
        If that's not possible in SQL, it can be done in this method instead.

        :param grants_table: An agate table containing the query result of
            the SQL returned by get_show_grant_sql
        :return: A standardized dictionary matching the `grants` config
        :rtype: dict
        """
        grants_dict: Dict[str, List[str]] = {}
        for row in grants_table:
            grantee = row["grantee"]
            privilege = row["privilege_type"]
            if privilege in grants_dict.keys():
                grants_dict[privilege].append(grantee)
            else:
                grants_dict.update({privilege: [grantee]})
        return grants_dict

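A small runnable illustration of the grants standardization above, assuming agate is installed; it builds an agate table inline with the column names the method expects and condenses the if/else into `setdefault`:

```python
import agate

# Sample "show grants" output; the role names are illustrative.
grants_table = agate.Table(
    rows=[("analyst_role", "SELECT"), ("etl_role", "SELECT"), ("etl_role", "INSERT")],
    column_names=["grantee", "privilege_type"],
)

grants_dict = {}
for row in grants_table:
    # Accumulate grantees under each privilege, as in standardize_grants_dict.
    grants_dict.setdefault(row["privilege_type"], []).append(row["grantee"])

print(grants_dict)  # {'SELECT': ['analyst_role', 'etl_role'], 'INSERT': ['etl_role']}
```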
###
|
||||
# Provided methods about relations
|
||||
###
|
||||
@@ -610,7 +567,7 @@ class BaseAdapter(metaclass=AdapterMeta):
|
||||
to_relation.
|
||||
"""
|
||||
if not isinstance(from_relation, self.Relation):
|
||||
raise MacroArgTypeError(
|
||||
invalid_type_error(
|
||||
method_name="get_missing_columns",
|
||||
arg_name="from_relation",
|
||||
got_value=from_relation,
|
||||
@@ -618,20 +575,28 @@ class BaseAdapter(metaclass=AdapterMeta):
|
||||
)
|
||||
|
||||
if not isinstance(to_relation, self.Relation):
|
||||
raise MacroArgTypeError(
|
||||
invalid_type_error(
|
||||
method_name="get_missing_columns",
|
||||
arg_name="to_relation",
|
||||
got_value=to_relation,
|
||||
expected_type=self.Relation,
|
||||
)
|
||||
|
||||
from_columns = {col.name: col for col in self.get_columns_in_relation(from_relation)}
|
||||
from_columns = {
|
||||
col.name: col for col in self.get_columns_in_relation(from_relation)
|
||||
}
|
||||
|
||||
to_columns = {col.name: col for col in self.get_columns_in_relation(to_relation)}
|
||||
to_columns = {
|
||||
col.name: col for col in self.get_columns_in_relation(to_relation)
|
||||
}
|
||||
|
||||
missing_columns = set(from_columns.keys()) - set(to_columns.keys())
|
||||
|
||||
return [col for (col_name, col) in from_columns.items() if col_name in missing_columns]
|
||||
return [
|
||||
col
|
||||
for (col_name, col) in from_columns.items()
|
||||
if col_name in missing_columns
|
||||
]
|
||||
|
||||
@available.parse_none
|
||||
def valid_snapshot_target(self, relation: BaseRelation) -> None:
|
||||
@@ -639,11 +604,11 @@ class BaseAdapter(metaclass=AdapterMeta):
|
||||
expected columns.
|
||||
|
||||
:param Relation relation: The relation to check
|
||||
:raises InvalidMacroArgType: If the columns are
|
||||
:raises CompilationException: If the columns are
|
||||
incorrect.
|
||||
"""
|
||||
if not isinstance(relation, self.Relation):
|
||||
raise MacroArgTypeError(
|
||||
invalid_type_error(
|
||||
method_name="valid_snapshot_target",
|
||||
arg_name="relation",
|
||||
got_value=relation,
|
||||
@@ -664,16 +629,24 @@ class BaseAdapter(metaclass=AdapterMeta):
|
||||
|
||||
if missing:
|
||||
if extra:
|
||||
raise SnapshotTargetIncompleteError(extra, missing)
|
||||
msg = (
|
||||
'Snapshot target has ("{}") but not ("{}") - is it an '
|
||||
"unmigrated previous version archive?".format(
|
||||
'", "'.join(extra), '", "'.join(missing)
|
||||
)
|
||||
)
|
||||
else:
|
||||
raise SnapshotTargetNotSnapshotTableError(missing)
|
||||
msg = 'Snapshot target is not a snapshot table (missing "{}")'.format(
|
||||
'", "'.join(missing)
|
||||
)
|
||||
raise_compiler_error(msg)
|
||||
|
||||
@available.parse_none
|
||||
def expand_target_column_types(
|
||||
self, from_relation: BaseRelation, to_relation: BaseRelation
|
||||
) -> None:
|
||||
if not isinstance(from_relation, self.Relation):
|
||||
raise MacroArgTypeError(
|
||||
invalid_type_error(
|
||||
method_name="expand_target_column_types",
|
||||
arg_name="from_relation",
|
||||
got_value=from_relation,
|
||||
@@ -681,7 +654,7 @@ class BaseAdapter(metaclass=AdapterMeta):
|
||||
)
|
||||
|
||||
if not isinstance(to_relation, self.Relation):
|
||||
raise MacroArgTypeError(
|
||||
invalid_type_error(
|
||||
method_name="expand_target_column_types",
|
||||
arg_name="to_relation",
|
||||
got_value=to_relation,
|
||||
@@ -690,7 +663,9 @@ class BaseAdapter(metaclass=AdapterMeta):
|
||||
|
||||
self.expand_column_types(from_relation, to_relation)
|
||||
|
||||
def list_relations(self, database: Optional[str], schema: str) -> List[BaseRelation]:
|
||||
def list_relations(
|
||||
self, database: Optional[str], schema: str
|
||||
) -> List[BaseRelation]:
|
||||
if self._schema_is_cached(database, schema):
|
||||
return self.cache.get_relations(database, schema)
|
||||
|
||||
@@ -704,17 +679,17 @@ class BaseAdapter(metaclass=AdapterMeta):
|
||||
# we can't build the relations cache because we don't have a
|
||||
# manifest so we can't run any operations.
|
||||
relations = self.list_relations_without_caching(schema_relation)
|
||||
fire_event(
|
||||
ListRelations(
|
||||
database=cast_to_str(database),
|
||||
schema=schema,
|
||||
relations=[_make_ref_key_msg(x) for x in relations],
|
||||
|
||||
logger.debug(
|
||||
"with database={}, schema={}, relations={}".format(
|
||||
database, schema, relations
|
||||
)
|
||||
)
|
||||
|
||||
return relations
|
||||
|
||||
def _make_match_kwargs(self, database: str, schema: str, identifier: str) -> Dict[str, str]:
|
||||
def _make_match_kwargs(
|
||||
self, database: str, schema: str, identifier: str
|
||||
) -> Dict[str, str]:
|
||||
quoting = self.config.quoting
|
||||
if identifier is not None and quoting["identifier"] is False:
|
||||
identifier = identifier.lower()
|
||||
@@ -752,7 +727,9 @@ class BaseAdapter(metaclass=AdapterMeta):
|
||||
return matches
|
||||
|
||||
@available.parse_none
|
||||
def get_relation(self, database: str, schema: str, identifier: str) -> Optional[BaseRelation]:
|
||||
def get_relation(
|
||||
self, database: str, schema: str, identifier: str
|
||||
) -> Optional[BaseRelation]:
|
||||
relations_list = self.list_relations(database, schema)
|
||||
|
||||
matches = self._make_match(relations_list, database, schema, identifier)
|
||||
@@ -763,7 +740,7 @@ class BaseAdapter(metaclass=AdapterMeta):
|
||||
"schema": schema,
|
||||
"database": database,
|
||||
}
|
||||
raise RelationReturnedMultipleResultsError(kwargs, matches)
|
||||
get_relation_returned_multiple_results(kwargs, matches)
|
||||
|
||||
elif matches:
|
||||
return matches[0]
|
||||
@@ -785,20 +762,23 @@ class BaseAdapter(metaclass=AdapterMeta):
|
||||
@available.parse_none
|
||||
def create_schema(self, relation: BaseRelation):
|
||||
"""Create the given schema if it does not exist."""
|
||||
raise NotImplementedError("`create_schema` is not implemented for this adapter!")
|
||||
raise NotImplementedException(
|
||||
"`create_schema` is not implemented for this adapter!"
|
||||
)
|
||||
|
||||
@abc.abstractmethod
|
||||
@available.parse_none
|
||||
def drop_schema(self, relation: BaseRelation):
|
||||
"""Drop the given schema (and everything in it) if it exists."""
|
||||
raise NotImplementedError("`drop_schema` is not implemented for this adapter!")
|
||||
raise NotImplementedException(
|
||||
"`drop_schema` is not implemented for this adapter!"
|
||||
)
|
||||
|
||||
@available
|
||||
@classmethod
|
||||
@abc.abstractmethod
|
||||
@abc.abstractclassmethod
|
||||
def quote(cls, identifier: str) -> str:
|
||||
"""Quote the given identifier, as appropriate for the database."""
|
||||
raise NotImplementedError("`quote` is not implemented for this adapter!")
|
||||
raise NotImplementedException("`quote` is not implemented for this adapter!")
|
||||
|
||||
@available
|
||||
def quote_as_configured(self, identifier: str, quote_key: str) -> str:
|
||||
@@ -821,13 +801,17 @@ class BaseAdapter(metaclass=AdapterMeta):
|
||||
|
||||
@available
|
||||
def quote_seed_column(self, column: str, quote_config: Optional[bool]) -> str:
|
||||
quote_columns: bool = True
|
||||
# this is the default for now
|
||||
quote_columns: bool = False
|
||||
if isinstance(quote_config, bool):
|
||||
quote_columns = quote_config
|
||||
elif quote_config is None:
|
||||
pass
|
||||
deprecations.warn("column-quoting-unset")
|
||||
else:
|
||||
raise QuoteConfigTypeError(quote_config)
|
||||
raise_compiler_error(
|
||||
f'The seed configuration value of "quote_columns" has an '
|
||||
f"invalid type {type(quote_config)}"
|
||||
)
|
||||
|
||||
if quote_columns:
|
||||
return self.quote(column)
|
||||
@@ -838,8 +822,7 @@ class BaseAdapter(metaclass=AdapterMeta):
    # Conversions: These must be implemented by concrete implementations, for
    # converting agate types into their sql equivalents.
    ###
    @classmethod
    @abc.abstractmethod
    @abc.abstractclassmethod
    def convert_text_type(cls, agate_table: agate.Table, col_idx: int) -> str:
        """Return the type in the database that best maps to the agate.Text
        type for the given agate table and column index.
@@ -848,10 +831,11 @@ class BaseAdapter(metaclass=AdapterMeta):
        :param col_idx: The index into the agate table for the column.
        :return: The name of the type in the database
        """
        raise NotImplementedError("`convert_text_type` is not implemented for this adapter!")
        raise NotImplementedException(
            "`convert_text_type` is not implemented for this adapter!"
        )

    @classmethod
    @abc.abstractmethod
    @abc.abstractclassmethod
    def convert_number_type(cls, agate_table: agate.Table, col_idx: int) -> str:
        """Return the type in the database that best maps to the agate.Number
        type for the given agate table and column index.
@@ -860,10 +844,11 @@ class BaseAdapter(metaclass=AdapterMeta):
        :param col_idx: The index into the agate table for the column.
        :return: The name of the type in the database
        """
        raise NotImplementedError("`convert_number_type` is not implemented for this adapter!")
        raise NotImplementedException(
            "`convert_number_type` is not implemented for this adapter!"
        )

    @classmethod
    @abc.abstractmethod
    @abc.abstractclassmethod
    def convert_boolean_type(cls, agate_table: agate.Table, col_idx: int) -> str:
        """Return the type in the database that best maps to the agate.Boolean
        type for the given agate table and column index.
@@ -872,10 +857,11 @@ class BaseAdapter(metaclass=AdapterMeta):
        :param col_idx: The index into the agate table for the column.
        :return: The name of the type in the database
        """
        raise NotImplementedError("`convert_boolean_type` is not implemented for this adapter!")
        raise NotImplementedException(
            "`convert_boolean_type` is not implemented for this adapter!"
        )

    @classmethod
    @abc.abstractmethod
    @abc.abstractclassmethod
    def convert_datetime_type(cls, agate_table: agate.Table, col_idx: int) -> str:
        """Return the type in the database that best maps to the agate.DateTime
        type for the given agate table and column index.
@@ -884,10 +870,11 @@ class BaseAdapter(metaclass=AdapterMeta):
        :param col_idx: The index into the agate table for the column.
        :return: The name of the type in the database
        """
        raise NotImplementedError("`convert_datetime_type` is not implemented for this adapter!")
        raise NotImplementedException(
            "`convert_datetime_type` is not implemented for this adapter!"
        )

    @classmethod
    @abc.abstractmethod
    @abc.abstractclassmethod
    def convert_date_type(cls, agate_table: agate.Table, col_idx: int) -> str:
        """Return the type in the database that best maps to the agate.Date
        type for the given agate table and column index.
@@ -896,10 +883,11 @@ class BaseAdapter(metaclass=AdapterMeta):
        :param col_idx: The index into the agate table for the column.
        :return: The name of the type in the database
        """
        raise NotImplementedError("`convert_date_type` is not implemented for this adapter!")
        raise NotImplementedException(
            "`convert_date_type` is not implemented for this adapter!"
        )

    @classmethod
    @abc.abstractmethod
    @abc.abstractclassmethod
    def convert_time_type(cls, agate_table: agate.Table, col_idx: int) -> str:
        """Return the type in the database that best maps to the
        agate.TimeDelta type for the given agate table and column index.
@@ -908,7 +896,9 @@ class BaseAdapter(metaclass=AdapterMeta):
        :param col_idx: The index into the agate table for the column.
        :return: The name of the type in the database
        """
        raise NotImplementedError("`convert_time_type` is not implemented for this adapter!")
        raise NotImplementedException(
            "`convert_time_type` is not implemented for this adapter!"
        )

    @available
    @classmethod
@@ -916,7 +906,9 @@ class BaseAdapter(metaclass=AdapterMeta):
        return cls.convert_agate_type(agate_table, col_idx)

    @classmethod
    def convert_agate_type(cls, agate_table: agate.Table, col_idx: int) -> Optional[str]:
    def convert_agate_type(
        cls, agate_table: agate.Table, col_idx: int
    ) -> Optional[str]:
        agate_type: Type = agate_table.column_types[col_idx]
        conversions: List[Tuple[Type, Callable[..., str]]] = [
            (agate.Text, cls.convert_text_type),
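The `conversions` list above drives a simple type-based dispatch from agate column types to database type names: the first matching pair wins. A self-contained sketch of that pattern, with stand-in classes instead of real agate types:

```python
from typing import Callable, List, Optional, Tuple, Type


# Hypothetical stand-ins for agate column types.
class Text: ...
class Number: ...


def convert(col_type: Type) -> Optional[str]:
    # Mirrors convert_agate_type: first matching (type, converter) pair wins.
    conversions: List[Tuple[Type, Callable[[], str]]] = [
        (Text, lambda: "text"),      # e.g. convert_text_type
        (Number, lambda: "float8"),  # e.g. convert_number_type
    ]
    for agate_cls, func in conversions:
        if issubclass(col_type, agate_cls):
            return func()
    return None  # unknown types fall through


assert convert(Number) == "float8"
```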
@@ -942,6 +934,7 @@ class BaseAdapter(metaclass=AdapterMeta):
        project: Optional[str] = None,
        context_override: Optional[Dict[str, Any]] = None,
        kwargs: Dict[str, Any] = None,
        release: bool = False,
        text_only_columns: Optional[Iterable[str]] = None,
    ) -> agate.Table:
        """Look macro_name up in the manifest and execute its results.
@@ -955,18 +948,19 @@ class BaseAdapter(metaclass=AdapterMeta):
            execution context.
        :param kwargs: An optional dict of keyword args used to pass to the
            macro.
        :param release: Ignored.
        """

        if release is not False:
            deprecations.warn("execute-macro-release")
        if kwargs is None:
            kwargs = {}
        if context_override is None:
            context_override = {}

        if manifest is None:
            # TODO CT-211
            manifest = self._macro_manifest  # type: ignore[assignment]
        # TODO CT-211
        macro = manifest.find_macro_by_name(  # type: ignore[union-attr]
            manifest = self._macro_manifest

        macro = manifest.find_macro_by_name(
            macro_name, self.config.project_name, project
        )
        if macro is None:
@@ -975,21 +969,17 @@ class BaseAdapter(metaclass=AdapterMeta):
            else:
                package_name = 'the "{}" package'.format(project)

            raise DbtRuntimeError(
            raise RuntimeException(
                'dbt could not find a macro with the name "{}" in {}'.format(
                    macro_name, package_name
                )
            )
        # This causes a reference cycle, as generate_runtime_macro_context()
        # This causes a reference cycle, as generate_runtime_macro()
        # ends up calling get_adapter, so the import has to be here.
        from dbt.context.providers import generate_runtime_macro_context
        from dbt.context.providers import generate_runtime_macro

        macro_context = generate_runtime_macro_context(
            # TODO CT-211
            macro=macro,
            config=self.config,
            manifest=manifest,  # type: ignore[arg-type]
            package_name=project,
        macro_context = generate_runtime_macro(
            macro=macro, config=self.config, manifest=manifest, package_name=project
        )
        macro_context.update(context_override)

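`execute_macro` is how the adapter invokes a Jinja macro by name at runtime and collects its result as a table. A hedged usage sketch; the wrapper function is hypothetical, while `"list_schemas"` and the kwargs shape are the same ones this patch uses in `SQLAdapter.list_schemas` further down:

```python
from typing import Any, List


def run_list_schemas(adapter: Any, database: str) -> List[str]:
    """Usage sketch: call a macro through the adapter's execute_macro.

    `adapter` is any concrete BaseAdapter obtained from dbt's factory.
    """
    results = adapter.execute_macro(
        "list_schemas",
        kwargs={"database": database},
    )
    # execute_macro returns an agate.Table; each row's first column is a name
    return [row[0] for row in results]
```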
@@ -1000,7 +990,9 @@ class BaseAdapter(metaclass=AdapterMeta):
        return result

    @classmethod
    def _catalog_filter_table(cls, table: agate.Table, manifest: Manifest) -> agate.Table:
    def _catalog_filter_table(
        cls, table: agate.Table, manifest: Manifest
    ) -> agate.Table:
        """Filter the table as appropriate for catalog entries. Subclasses can
        override this to change filtering rules on a per-adapter basis.
        """
@@ -1069,11 +1061,17 @@ class BaseAdapter(metaclass=AdapterMeta):
        }

        # run the macro
        table = self.execute_macro(FRESHNESS_MACRO_NAME, kwargs=kwargs, manifest=manifest)
        table = self.execute_macro(
            FRESHNESS_MACRO_NAME, kwargs=kwargs, manifest=manifest
        )
        # now we have a 1-row table of the maximum `loaded_at_field` value and
        # the current time according to the db.
        if len(table) != 1 or len(table[0]) != 2:
            raise MacroResultError(FRESHNESS_MACRO_NAME, table)
            raise_compiler_error(
                'Got an invalid result from "{}" macro: {}'.format(
                    FRESHNESS_MACRO_NAME, [tuple(r) for r in table]
                )
            )
        if table[0][0] is None:
            # no records in the table, so really the max_loaded_at was
            # infinitely long ago. Just call it 0:00 January 1 year UTC
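The comments above pin down the shape of the freshness result: one row, two columns (the max `loaded_at_field` value and the database's current time), with `None` for the max when the source table is empty. A small sketch of turning that row into an age in seconds; the column semantics come from the comments, the helper is the editor's:

```python
import datetime

# Stand-in for the "infinitely long ago" fallback described in the comment:
# midnight, January 1 of year 1, UTC.
_OLDEST = datetime.datetime(1, 1, 1, tzinfo=datetime.timezone.utc)


def freshness_age_seconds(row: tuple) -> float:
    """Editor's sketch: age from a (max_loaded_at, snapshotted_at) row."""
    max_loaded_at, snapshotted_at = row
    if max_loaded_at is None:
        max_loaded_at = _OLDEST  # empty table: treat as infinitely stale
    return (snapshotted_at - max_loaded_at).total_seconds()


now = datetime.datetime.now(datetime.timezone.utc)
print(freshness_age_seconds((now - datetime.timedelta(hours=2), now)))  # 7200.0
```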
@@ -1132,7 +1130,9 @@ class BaseAdapter(metaclass=AdapterMeta):
            clause += f" where {where_clause}"
        return clause

    def timestamp_add_sql(self, add_to: str, number: int = 1, interval: str = "hour") -> str:
    def timestamp_add_sql(
        self, add_to: str, number: int = 1, interval: str = "hour"
    ) -> str:
        # for backwards compatibility, we're compelled to set some sort of
        # default. A lot of searching has lead me to believe that the
        # '+ interval' syntax used in postgres/redshift is relatively common
@@ -1150,7 +1150,7 @@ class BaseAdapter(metaclass=AdapterMeta):
        elif location == "prepend":
            return f"'{value}' || {add_to}"
        else:
            raise DbtRuntimeError(f'Got an unexpected location value of "{location}"')
            raise RuntimeException(f'Got an unexpected location value of "{location}"')

    def get_rows_different_sql(
        self,
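Per the comment, the base `timestamp_add_sql` defaults to the Postgres/Redshift `+ interval` form. A sketch of what that default likely renders; the body here is the editor's reconstruction from the comment, not quoted from the patch:

```python
def timestamp_add_sql_sketch(add_to: str, number: int = 1, interval: str = "hour") -> str:
    # the '+ interval' form common to postgres/redshift, per the comment above
    return f"{add_to} + interval '{number} {interval}'"


assert (
    timestamp_add_sql_sketch("collect_timestamp", 4, "minute")
    == "collect_timestamp + interval '4 minute'"
)
```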
@@ -1181,74 +1181,6 @@ class BaseAdapter(metaclass=AdapterMeta):

        return sql

    @property
    def python_submission_helpers(self) -> Dict[str, Type[PythonJobHelper]]:
        raise NotImplementedError("python_submission_helpers is not specified")

    @property
    def default_python_submission_method(self) -> str:
        raise NotImplementedError("default_python_submission_method is not specified")

    @log_code_execution
    def submit_python_job(self, parsed_model: dict, compiled_code: str) -> AdapterResponse:
        submission_method = parsed_model["config"].get(
            "submission_method", self.default_python_submission_method
        )
        if submission_method not in self.python_submission_helpers:
            raise NotImplementedError(
                "Submission method {} is not supported for current adapter".format(
                    submission_method
                )
            )
        job_helper = self.python_submission_helpers[submission_method](
            parsed_model, self.connections.profile.credentials
        )
        submission_result = job_helper.submit(compiled_code)
        # process submission result to generate adapter response
        return self.generate_python_submission_response(submission_result)

    def generate_python_submission_response(self, submission_result: Any) -> AdapterResponse:
        raise NotImplementedError(
            "Your adapter need to implement generate_python_submission_response"
        )

    def valid_incremental_strategies(self):
        """The set of standard builtin strategies which this adapter supports out-of-the-box.
        Not used to validate custom strategies defined by end users.
        """
        return ["append"]

    def builtin_incremental_strategies(self):
        return ["append", "delete+insert", "merge", "insert_overwrite"]

    @available.parse_none
    def get_incremental_strategy_macro(self, model_context, strategy: str):
        # Construct macro_name from strategy name
        if strategy is None:
            strategy = "default"

        # validate strategies for this adapter
        valid_strategies = self.valid_incremental_strategies()
        valid_strategies.append("default")
        builtin_strategies = self.builtin_incremental_strategies()
        if strategy in builtin_strategies and strategy not in valid_strategies:
            raise DbtRuntimeError(
                f"The incremental strategy '{strategy}' is not valid for this adapter"
            )

        strategy = strategy.replace("+", "_")
        macro_name = f"get_incremental_{strategy}_sql"
        # The model_context should have MacroGenerator callable objects for all macros
        if macro_name not in model_context:
            raise DbtRuntimeError(
                'dbt could not find an incremental strategy macro with the name "{}" in {}'.format(
                    macro_name, self.config.project_name
                )
            )

        # This returns a callable macro
        return model_context[macro_name]


COLUMNS_EQUAL_SQL = """
with diff_count as (
@@ -1296,7 +1228,7 @@ def catch_as_completed(
    elif isinstance(exc, KeyboardInterrupt) or not isinstance(exc, Exception):
        raise exc
    else:
        warn_or_error(CatalogGenerationError(exc=str(exc)))
        warn_or_error(f"Encountered an error while generating catalog: {str(exc)}")
        # exc is not None, derives from Exception, and isn't ctrl+c
        exceptions.append(exc)
    return merge_tables(tables), exceptions

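The strategy-to-macro mapping in `get_incremental_strategy_macro` above is purely mechanical: `+` becomes `_` and the name is interpolated into `get_incremental_{strategy}_sql`. A tiny, self-contained sketch of that mapping:

```python
def incremental_macro_name(strategy: str = "default") -> str:
    # mirrors get_incremental_strategy_macro's name construction
    return f"get_incremental_{strategy.replace('+', '_')}_sql"


assert incremental_macro_name("delete+insert") == "get_incremental_delete_insert_sql"
assert incremental_macro_name("merge") == "get_incremental_merge_sql"
assert incremental_macro_name() == "get_incremental_default_sql"
```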
@@ -99,7 +99,9 @@ class AdapterMeta(abc.ABCMeta):
        # I'm not sure there is any benefit to it after poking around a bit,
        # but having it doesn't hurt on the python side (and omitting it could
        # hurt for obscure metaclass reasons, for all I know)
        cls = abc.ABCMeta.__new__(mcls, name, bases, namespace, **kwargs)  # type: ignore
        cls = abc.ABCMeta.__new__(  # type: ignore
            mcls, name, bases, namespace, **kwargs
        )

        # this is very much inspired by ABCMeta's own implementation


@@ -1,7 +1,7 @@
from typing import List, Optional, Type

from dbt.adapters.base import Credentials
from dbt.exceptions import CompilationError
from dbt.exceptions import CompilationException
from dbt.adapters.protocol import AdapterProtocol


@@ -11,7 +11,7 @@ def project_name_from_path(include_path: str) -> str:

    partial = Project.partial_load(include_path)
    if partial.project_name is None:
        raise CompilationError(f"Invalid project at {include_path}: name not set!")
        raise CompilationException(f"Invalid project at {include_path}: name not set!")
    return partial.project_name



@@ -5,9 +5,9 @@ from dbt.clients.jinja import QueryStringGenerator

from dbt.context.manifest import generate_query_header_context
from dbt.contracts.connection import AdapterRequiredConfig, QueryComment
from dbt.contracts.graph.nodes import ResultNode
from dbt.contracts.graph.compiled import CompileResultNode
from dbt.contracts.graph.manifest import Manifest
from dbt.exceptions import DbtRuntimeError
from dbt.exceptions import RuntimeException


class NodeWrapper:
@@ -48,7 +48,9 @@ class _QueryComment(local):
        if isinstance(comment, str) and "*/" in comment:
            # tell the user "no" so they don't hurt themselves by writing
            # garbage
            raise DbtRuntimeError(f'query comment contains illegal value "*/": {comment}')
            raise RuntimeException(
                f'query comment contains illegal value "*/": {comment}'
            )
        self.query_comment = comment
        self.append = append

@@ -90,7 +92,7 @@ class MacroQueryStringSetter:
    def reset(self):
        self.set("master", None)

    def set(self, name: str, node: Optional[ResultNode]):
    def set(self, name: str, node: Optional[CompileResultNode]):
        wrapped: Optional[NodeWrapper] = None
        if node is not None:
            wrapped = NodeWrapper(node)

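Why `*/` is rejected in the `_QueryComment` hunk above: query comments get wrapped in SQL block-comment delimiters, so a literal `*/` would close the comment early and leak whatever follows into the statement. A self-contained sketch of that guard; the wrapper function is the editor's, not dbt's:

```python
def validate_query_comment(comment: str) -> str:
    # A '*/' inside the comment would terminate the SQL block comment early
    # and inject the remainder into the query, so reject it outright.
    if "*/" in comment:
        raise ValueError(f'query comment contains illegal value "*/": {comment}')
    return f"/* {comment} */"


assert validate_query_comment("run by dbt") == "/* run by dbt */"
```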
@@ -1,8 +1,9 @@
from collections.abc import Hashable
from dataclasses import dataclass, field
from typing import Optional, TypeVar, Any, Type, Dict, Iterator, Tuple, Set
from dataclasses import dataclass
from typing import Optional, TypeVar, Any, Type, Dict, Union, Iterator, Tuple, Set

from dbt.contracts.graph.nodes import SourceDefinition, ManifestNode, ResultNode, ParsedNode
from dbt.contracts.graph.compiled import CompiledNode
from dbt.contracts.graph.parsed import ParsedSourceDefinition, ParsedNode
from dbt.contracts.relation import (
    RelationType,
    ComponentName,
@@ -11,11 +12,7 @@ from dbt.contracts.relation import (
    Policy,
    Path,
)
from dbt.exceptions import (
    ApproximateMatchError,
    DbtInternalError,
    MultipleDatabasesNotAllowedError,
)
from dbt.exceptions import InternalException
from dbt.node_types import NodeType
from dbt.utils import filter_null_values, deep_merge, classproperty

@@ -30,10 +27,8 @@ class BaseRelation(FakeAPIObject, Hashable):
    path: Path
    type: Optional[RelationType] = None
    quote_character: str = '"'
    # Python 3.11 requires that these use default_factory instead of simple default
    # ValueError: mutable default <class 'dbt.contracts.relation.Policy'> for field include_policy is not allowed: use default_factory
    include_policy: Policy = field(default_factory=lambda: Policy())
    quote_policy: Policy = field(default_factory=lambda: Policy())
    include_policy: Policy = Policy()
    quote_policy: Policy = Policy()
    dbt_created: bool = False

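The comment above is worth unpacking: Python 3.11 rejects any dataclass field default that is unhashable (a plain `eq=True`, non-frozen dataclass instance like `Policy()` qualifies), treating it as a mutable default. A minimal reproduction, independent of dbt:

```python
from dataclasses import dataclass, field


@dataclass
class Policy:
    database: bool = True
    schema: bool = True
    identifier: bool = True


# On Python 3.11+ this raises:
#   ValueError: mutable default <class '...Policy'> for field include_policy
#   is not allowed: use default_factory
#
# @dataclass
# class BadRelation:
#     include_policy: Policy = Policy()


@dataclass
class GoodRelation:
    # default_factory builds a fresh Policy per instance, which also avoids
    # accidentally sharing one mutable Policy across all relations
    include_policy: Policy = field(default_factory=Policy)


print(GoodRelation().include_policy)  # Policy(database=True, schema=True, identifier=True)
```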
    def _is_exactish_match(self, field: ComponentName, value: str) -> bool:
@@ -44,9 +39,9 @@ class BaseRelation(FakeAPIObject, Hashable):

    @classmethod
    def _get_field_named(cls, field_name):
        for f, _ in cls._get_fields():
            if f.name == field_name:
                return f
        for field, _ in cls._get_fields():
            if field.name == field_name:
                return field
        # this should be unreachable
        raise ValueError(f"BaseRelation has no {field_name} field!")

@@ -57,11 +52,11 @@ class BaseRelation(FakeAPIObject, Hashable):

    @classmethod
    def get_default_quote_policy(cls) -> Policy:
        return cls._get_field_named("quote_policy").default_factory()
        return cls._get_field_named("quote_policy").default

    @classmethod
    def get_default_include_policy(cls) -> Policy:
        return cls._get_field_named("include_policy").default_factory()
        return cls._get_field_named("include_policy").default

    def get(self, key, default=None):
        """Override `.get` to return a metadata object so we don't break
@@ -87,7 +82,7 @@ class BaseRelation(FakeAPIObject, Hashable):

        if not search:
            # nothing was passed in
            raise dbt.exceptions.DbtRuntimeError(
            raise dbt.exceptions.RuntimeException(
                "Tried to match relation, but no search path was passed!"
            )

@@ -97,14 +92,15 @@ class BaseRelation(FakeAPIObject, Hashable):
        for k, v in search.items():
            if not self._is_exactish_match(k, v):
                exact_match = False
                if str(self.path.get_lowered_part(k)).strip(self.quote_character) != v.lower().strip(
                    self.quote_character
                ):
                    approximate_match = False  # type: ignore[union-attr]

                if self.path.get_lowered_part(k) != v.lower():
                    approximate_match = False

        if approximate_match and not exact_match:
            target = self.create(database=database, schema=schema, identifier=identifier)
            raise ApproximateMatchError(target, self)
            target = self.create(
                database=database, schema=schema, identifier=identifier
            )
            dbt.exceptions.approximate_relation_match(target, self)

        return exact_match

@@ -168,7 +164,9 @@ class BaseRelation(FakeAPIObject, Hashable):
        """
        return self.include(identifier=False).replace_path(identifier=None)

    def _render_iterator(self) -> Iterator[Tuple[Optional[ComponentName], Optional[str]]]:
    def _render_iterator(
        self,
    ) -> Iterator[Tuple[Optional[ComponentName], Optional[str]]]:

        for key in ComponentName:
            path_part: Optional[str] = None
@@ -189,7 +187,9 @@ class BaseRelation(FakeAPIObject, Hashable):
        )

    @classmethod
    def create_from_source(cls: Type[Self], source: SourceDefinition, **kwargs: Any) -> Self:
    def create_from_source(
        cls: Type[Self], source: ParsedSourceDefinition, **kwargs: Any
    ) -> Self:
        source_quoting = source.quoting.to_dict(omit_none=True)
        source_quoting.pop("column", None)
        quote_policy = deep_merge(
@@ -214,7 +214,7 @@ class BaseRelation(FakeAPIObject, Hashable):
    def create_ephemeral_from_node(
        cls: Type[Self],
        config: HasQuoting,
        node: ManifestNode,
        node: Union[ParsedNode, CompiledNode],
    ) -> Self:
        # Note that ephemeral models are based on the name.
        identifier = cls.add_ephemeral_prefix(node.name)
@@ -227,7 +227,7 @@ class BaseRelation(FakeAPIObject, Hashable):
    def create_from_node(
        cls: Type[Self],
        config: HasQuoting,
        node: ManifestNode,
        node: Union[ParsedNode, CompiledNode],
        quote_policy: Optional[Dict[str, bool]] = None,
        **kwargs: Any,
    ) -> Self:
@@ -248,20 +248,22 @@ class BaseRelation(FakeAPIObject, Hashable):
    def create_from(
        cls: Type[Self],
        config: HasQuoting,
        node: ResultNode,
        node: Union[CompiledNode, ParsedNode, ParsedSourceDefinition],
        **kwargs: Any,
    ) -> Self:
        if node.resource_type == NodeType.Source:
            if not isinstance(node, SourceDefinition):
                raise DbtInternalError(
                    "type mismatch, expected SourceDefinition but got {}".format(type(node))
            if not isinstance(node, ParsedSourceDefinition):
                raise InternalException(
                    "type mismatch, expected ParsedSourceDefinition but got {}".format(
                        type(node)
                    )
                )
            return cls.create_from_source(node, **kwargs)
        else:
            # Can't use ManifestNode here because of parameterized generics
            if not isinstance(node, (ParsedNode)):
                raise DbtInternalError(
                    f"type mismatch, expected ManifestNode but got {type(node)}"
            if not isinstance(node, (ParsedNode, CompiledNode)):
                raise InternalException(
                    "type mismatch, expected ParsedNode or CompiledNode but "
                    "got {}".format(type(node))
                )
            return cls.create_from_node(config, node, **kwargs)

@@ -358,12 +360,14 @@ class InformationSchema(BaseRelation):

    def __post_init__(self):
        if not isinstance(self.information_schema_view, (type(None), str)):
            raise dbt.exceptions.CompilationError(
            raise dbt.exceptions.CompilationException(
                "Got an invalid name: {}".format(self.information_schema_view)
            )

    @classmethod
    def get_path(cls, relation: BaseRelation, information_schema_view: Optional[str]) -> Path:
    def get_path(
        cls, relation: BaseRelation, information_schema_view: Optional[str]
    ) -> Path:
        return Path(
            database=relation.database,
            schema=relation.schema,
@@ -435,14 +439,13 @@ class SchemaSearchMap(Dict[InformationSchema, Set[Optional[str]]]):
        for schema in schemas:
            yield information_schema_name, schema

    def flatten(self, allow_multiple_databases: bool = False):
    def flatten(self):
        new = self.__class__()

        # make sure we don't have multiple databases if allow_multiple_databases is set to False
        if not allow_multiple_databases:
            seen = {r.database.lower() for r in self if r.database}
            if len(seen) > 1:
                raise MultipleDatabasesNotAllowedError(seen)
        # make sure we don't have duplicates
        seen = {r.database.lower() for r in self if r.database}
        if len(seen) > 1:
            dbt.exceptions.raise_compiler_error(str(seen))

        for information_schema_name, schema in self.search():
            path = {"database": information_schema_name.database, "schema": schema}

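The matching hunk above distinguishes an exact match from an "approximate" one (the same name once case and quote characters are ignored), which dbt reports as an error to catch quoting mistakes. A self-contained sketch of that comparison:

```python
def match_kind(cached: str, requested: str, quote_char: str = '"') -> str:
    """Editor's sketch of exact vs. approximate relation matching."""
    if cached == requested:
        return "exact"
    # ignore case and quote characters, as the hunk above does
    if cached.strip(quote_char).lower() == requested.strip(quote_char).lower():
        return "approximate"  # dbt reports an error for this case
    return "none"


assert match_kind('"Orders"', '"Orders"') == "exact"
assert match_kind('"Orders"', "orders") == "approximate"
assert match_kind("orders", "customers") == "none"
```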
@@ -1,24 +1,25 @@
import threading
from collections import namedtuple
from copy import deepcopy
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple
from typing import List, Iterable, Optional, Dict, Set, Tuple, Any
import threading

from dbt.adapters.reference_keys import (
    _make_ref_key,
    _make_ref_key_msg,
    _make_msg_from_ref_key,
    _ReferenceKey,
)
from dbt.exceptions import (
    DependentLinkNotCachedError,
    NewNameAlreadyInCacheError,
    NoneRelationFoundError,
    ReferencedLinkNotCachedError,
    TruncatedModelNameCausedCollisionError,
)
from dbt.events.functions import fire_event, fire_event_if
from dbt.events.types import CacheAction, CacheDumpGraph
import dbt.flags as flags
from dbt.logger import CACHE_LOGGER as logger
from dbt.utils import lowercase
import dbt.exceptions

_ReferenceKey = namedtuple("_ReferenceKey", "database schema identifier")


def _make_key(relation) -> _ReferenceKey:
    """Make _ReferenceKeys with lowercase values for the cache so we don't have
    to keep track of quoting
    """
    # databases and schemas can both be None
    return _ReferenceKey(
        lowercase(relation.database),
        lowercase(relation.schema),
        lowercase(relation.identifier),
    )


def dot_separated(key: _ReferenceKey) -> str:
@@ -44,9 +45,9 @@ class _CachedRelation:
        self.inner = inner

    def __str__(self) -> str:
        return ("_CachedRelation(database={}, schema={}, identifier={}, inner={})").format(
            self.database, self.schema, self.identifier, self.inner
        )
        return (
            "_CachedRelation(database={}, schema={}, identifier={}, inner={})"
        ).format(self.database, self.schema, self.identifier, self.inner)

    @property
    def database(self) -> Optional[str]:
@@ -78,7 +79,7 @@ class _CachedRelation:

        :return _ReferenceKey: A key for this relation.
        """
        return _make_ref_key(self)
        return _make_key(self)

    def add_reference(self, referrer: "_CachedRelation"):
        """Add a reference from referrer to self, indicating that if this node
@@ -141,7 +142,11 @@ class _CachedRelation:
        :raises InternalError: If the new key already exists.
        """
        if new_key in self.referenced_by:
            raise NewNameAlreadyInCacheError(old_key, new_key)
            dbt.exceptions.raise_cache_inconsistent(
                'in rename of "{}" -> "{}", new name is in the cache already'.format(
                    old_key, new_key
                )
            )

        if old_key not in self.referenced_by:
            return
@@ -156,6 +161,12 @@ class _CachedRelation:
        return [dot_separated(r) for r in self.referenced_by]


def lazy_log(msg, func):
    if logger.disabled:
        return
    logger.debug(msg.format(func()))


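`lazy_log` above exists so an expensive payload (here, a full cache graph dump) is only computed when the logger is actually enabled; the `fire_event_if(flags.LOG_CACHE_EVENTS, lambda: ...)` calls on the other side of the diff are the same idea with an explicit guard flag. A minimal sketch of the pattern, using the standard `logging` module rather than dbt's logger:

```python
import logging

logger = logging.getLogger("cache")


def lazy_log(msg: str, func) -> None:
    # func() may be expensive (e.g. dumping the whole cache graph),
    # so only call it when the message will actually be emitted
    if not logger.isEnabledFor(logging.DEBUG):
        return
    logger.debug(msg.format(func()))


def expensive_dump() -> dict:
    return {"relations": 10_000}  # imagine serializing the full graph here


lazy_log("before adding: {!s}", expensive_dump)  # no-op unless DEBUG is on
```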
class RelationsCache:
    """A cache of the relations known to dbt. Keeps track of relationships
    declared between tables and handles renames/drops as a real database would.
@@ -230,7 +241,10 @@ class RelationsCache:
        # self.relations or any cache entry's referenced_by during iteration
        # it's a runtime error!
        with self.lock:
            return {dot_separated(k): v.dump_graph_entry() for k, v in self.relations.items()}
            return {
                dot_separated(k): v.dump_graph_entry()
                for k, v in self.relations.items()
            }

    def _setdefault(self, relation: _CachedRelation):
        """Add a relation to the cache, or return it if it already exists.
@@ -257,17 +271,22 @@ class RelationsCache:
        if referenced is None:
            return
        if referenced is None:
            raise ReferencedLinkNotCachedError(referenced_key)
            dbt.exceptions.raise_cache_inconsistent(
                "in add_link, referenced link key {} not in cache!".format(
                    referenced_key
                )
            )

        dependent = self.relations.get(dependent_key)
        if dependent is None:
            raise DependentLinkNotCachedError(dependent_key)
            dbt.exceptions.raise_cache_inconsistent(
                "in add_link, dependent link key {} not in cache!".format(dependent_key)
            )

        assert dependent is not None  # we just raised!

        referenced.add_reference(dependent)

    # This is called in plugins/postgres/dbt/adapters/postgres/impl.py
    def add_link(self, referenced, dependent):
        """Add a link between two relations to the database. If either relation
        does not exist, it will be added as an "external" relation.
@@ -282,16 +301,15 @@ class RelationsCache:
        :param BaseRelation dependent: The dependent model.
        :raises InternalError: If either entry does not exist.
        """
        ref_key = _make_ref_key(referenced)
        dep_key = _make_ref_key(dependent)
        ref_key = _make_key(referenced)
        if (ref_key.database, ref_key.schema) not in self:
            # if we have not cached the referenced schema at all, we must be
            # referring to a table outside our control. There's no need to make
            # a link - we will never drop the referenced relation during a run.
            fire_event(
                CacheAction(
                    ref_key=_make_msg_from_ref_key(ref_key),
                    ref_key_2=_make_msg_from_ref_key(dep_key),
            logger.debug(
                "{dep!s} references {ref!s} but {ref.database}.{ref.schema} "
                "is not in the cache, skipping assumed external relation".format(
                    dep=dependent, ref=ref_key
                )
            )
            return
@@ -299,17 +317,13 @@ class RelationsCache:
            # Insert a dummy "external" relation.
            referenced = referenced.replace(type=referenced.External)
            self.add(referenced)

        dep_key = _make_key(dependent)
        if dep_key not in self.relations:
            # Insert a dummy "external" relation.
            dependent = dependent.replace(type=referenced.External)
            self.add(dependent)
        fire_event(
            CacheAction(
                action="add_link",
                ref_key=_make_msg_from_ref_key(dep_key),
                ref_key_2=_make_msg_from_ref_key(ref_key),
            )
        )
        logger.debug("adding link, {!s} references {!s}".format(dep_key, ref_key))
        with self.lock:
            self._add_link(ref_key, dep_key)

@@ -320,18 +334,14 @@ class RelationsCache:
        :param BaseRelation relation: The underlying relation.
        """
        cached = _CachedRelation(relation)
        fire_event_if(
            flags.LOG_CACHE_EVENTS,
            lambda: CacheDumpGraph(before_after="before", action="adding", dump=self.dump_graph()),
        )
        fire_event(CacheAction(action="add_relation", ref_key=_make_ref_key_msg(cached)))
        logger.debug("Adding relation: {!s}".format(cached))

        lazy_log("before adding: {!s}", self.dump_graph)

        with self.lock:
            self._setdefault(cached)
        fire_event_if(
            flags.LOG_CACHE_EVENTS,
            lambda: CacheDumpGraph(before_after="after", action="adding", dump=self.dump_graph()),
        )

        lazy_log("after adding: {!s}", self.dump_graph)

    def _remove_refs(self, keys):
        """Removes all references to all entries in keys. This does not
@@ -346,6 +356,19 @@ class RelationsCache:
        for cached in self.relations.values():
            cached.release_references(keys)

    def _drop_cascade_relation(self, dropped):
        """Drop the given relation and cascade it appropriately to all
        dependent relations.

        :param _CachedRelation dropped: An existing _CachedRelation to drop.
        """
        if dropped not in self.relations:
            logger.debug("dropped a nonexistent relationship: {!s}".format(dropped))
            return
        consequences = self.relations[dropped].collect_consequences()
        logger.debug("drop {} is cascading to {}".format(dropped, consequences))
        self._remove_refs(consequences)

    def drop(self, relation):
        """Drop the named relation and cascade it appropriately to all
        dependent relations.
@@ -357,22 +380,10 @@ class RelationsCache:
        :param str schema: The schema of the relation to drop.
        :param str identifier: The identifier of the relation to drop.
        """
        dropped_key = _make_ref_key(relation)
        dropped_key_msg = _make_ref_key_msg(relation)
        fire_event(CacheAction(action="drop_relation", ref_key=dropped_key_msg))
        dropped = _make_key(relation)
        logger.debug("Dropping relation: {!s}".format(dropped))
        with self.lock:
            if dropped_key not in self.relations:
                fire_event(CacheAction(action="drop_missing_relation", ref_key=dropped_key_msg))
                return
            consequences = self.relations[dropped_key].collect_consequences()
            # convert from a list of _ReferenceKeys to a list of ReferenceKeyMsgs
            consequence_msgs = [_make_msg_from_ref_key(key) for key in consequences]
            fire_event(
                CacheAction(
                    action="drop_cascade", ref_key=dropped_key_msg, ref_list=consequence_msgs
                )
            )
            self._remove_refs(consequences)
            self._drop_cascade_relation(dropped)

    def _rename_relation(self, old_key, new_relation):
        """Rename a relation named old_key to new_key, updating references.
@@ -388,20 +399,16 @@ class RelationsCache:
        relation = self.relations.pop(old_key)
        new_key = new_relation.key()

        # relation has to rename its innards, so it needs the _CachedRelation.
        # relaton has to rename its innards, so it needs the _CachedRelation.
        relation.rename(new_relation)
        # update all the relations that refer to it
        for cached in self.relations.values():
            if cached.is_referenced_by(old_key):
                fire_event(
                    CacheAction(
                        action="update_reference",
                        ref_key=_make_ref_key_msg(old_key),
                        ref_key_2=_make_ref_key_msg(new_key),
                        ref_key_3=_make_ref_key_msg(cached.key()),
                logger.debug(
                    "updated reference from {0} -> {2} to {1} -> {2}".format(
                        old_key, new_key, cached.key()
                    )
                )

                cached.rename_key(old_key, new_key)

        self.relations[new_key] = relation
@@ -424,13 +431,17 @@ class RelationsCache:
        :raises InternalError: If the new key is already present.
        """
        if new_key in self.relations:
            # Tell user when collision caused by model names truncated during
            # materialization.
            raise TruncatedModelNameCausedCollisionError(new_key, self.relations)
            dbt.exceptions.raise_cache_inconsistent(
                "in rename, new key {} already in cache: {}".format(
                    new_key, list(self.relations.keys())
                )
            )

        if old_key not in self.relations:
            fire_event(
                CacheAction(action="temporary_relation", ref_key=_make_msg_from_ref_key(old_key))
            logger.debug(
                "old key {} not found in self.relations, assuming temporary".format(
                    old_key
                )
            )
            return False
        return True
@@ -447,20 +458,11 @@ class RelationsCache:
        :param BaseRelation new: The new relation name information.
        :raises InternalError: If the new key is already present.
        """
        old_key = _make_ref_key(old)
        new_key = _make_ref_key(new)
        fire_event(
            CacheAction(
                action="rename_relation",
                ref_key=_make_msg_from_ref_key(old_key),
                ref_key_2=_make_msg_from_ref_key(new),
            )
        )
        old_key = _make_key(old)
        new_key = _make_key(new)
        logger.debug("Renaming relation {!s} to {!s}".format(old_key, new_key))

        fire_event_if(
            flags.LOG_CACHE_EVENTS,
            lambda: CacheDumpGraph(before_after="before", action="rename", dump=self.dump_graph()),
        )
        lazy_log("before rename: {!s}", self.dump_graph)

        with self.lock:
            if self._check_rename_constraints(old_key, new_key):
@@ -468,12 +470,11 @@ class RelationsCache:
            else:
                self._setdefault(_CachedRelation(new))

        fire_event_if(
            flags.LOG_CACHE_EVENTS,
            lambda: CacheDumpGraph(before_after="after", action="rename", dump=self.dump_graph()),
        )
        lazy_log("after rename: {!s}", self.dump_graph)

    def get_relations(self, database: Optional[str], schema: Optional[str]) -> List[Any]:
    def get_relations(
        self, database: Optional[str], schema: Optional[str]
    ) -> List[Any]:
        """Case-insensitively yield all relations matching the given schema.

        :param str schema: The case-insensitive schema name to list from.
@@ -490,7 +491,9 @@ class RelationsCache:
        ]

        if None in results:
            raise NoneRelationFoundError()
            dbt.exceptions.raise_cache_inconsistent(
                "in get_relations, a None relation was found in the cache!"
            )
        return results

    def clear(self):
@@ -517,6 +520,6 @@ class RelationsCache:
        """
        for relation in to_remove:
            # it may have been cascaded out already
            drop_key = _make_ref_key(relation)
            drop_key = _make_key(relation)
            if drop_key in self.relations:
                self.drop(drop_key)

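The cache's drop-cascade behavior described above (dropping a relation removes everything that references it, transitively) is easy to picture with a toy dependency map. An editor's sketch, not dbt's actual data structures:

```python
from typing import Dict, Set


def collect_consequences(deps: Dict[str, Set[str]], dropped: str) -> Set[str]:
    """All relations removed when `dropped` goes away, itself included.

    `deps` maps a relation to the set of relations that reference it,
    mirroring _CachedRelation.referenced_by.
    """
    out = {dropped}
    for referrer in deps.get(dropped, set()):
        out |= collect_consequences(deps, referrer)
    return out


# a view chain: stg_orders <- orders <- orders_rollup
deps = {"stg_orders": {"orders"}, "orders": {"orders_rollup"}}
assert collect_consequences(deps, "stg_orders") == {
    "stg_orders", "orders", "orders_rollup"
}
```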
@@ -1,18 +1,24 @@
import threading
import traceback
from contextlib import contextmanager
from importlib import import_module
from pathlib import Path
from typing import Any, Dict, List, Optional, Set, Type
from importlib import import_module
from typing import Type, Dict, Any, List, Optional, Set

from dbt.exceptions import RuntimeException, InternalException
from dbt.include.global_project import (
    PACKAGE_PATH as GLOBAL_PROJECT_PATH,
    PROJECT_NAME as GLOBAL_PROJECT_NAME,
)
from dbt.logger import GLOBAL_LOGGER as logger
from dbt.contracts.connection import Credentials, AdapterRequiredConfig


from dbt.adapters.protocol import (
    AdapterProtocol,
    AdapterConfig,
    RelationProtocol,
)
from dbt.adapters.base.plugin import AdapterPlugin
from dbt.adapters.protocol import AdapterConfig, AdapterProtocol, RelationProtocol
from dbt.contracts.connection import AdapterRequiredConfig, Credentials
from dbt.events.functions import fire_event
from dbt.events.types import AdapterImportError, PluginLoadError
from dbt.exceptions import DbtInternalError, DbtRuntimeError
from dbt.include.global_project import PACKAGE_PATH as GLOBAL_PROJECT_PATH
from dbt.include.global_project import PROJECT_NAME as GLOBAL_PROJECT_NAME


Adapter = AdapterProtocol

@@ -34,7 +40,7 @@ class AdapterContainer:
            names = ", ".join(self.plugins.keys())

            message = f"Invalid adapter type {name}! Must be one of {names}"
            raise DbtRuntimeError(message)
            raise RuntimeException(message)

    def get_adapter_class_by_name(self, name: str) -> Type[Adapter]:
        plugin = self.get_plugin_by_name(name)
@@ -59,18 +65,17 @@ class AdapterContainer:
            # if we failed to import the target module in particular, inform
            # the user about it via a runtime error
            if exc.name == "dbt.adapters." + name:
                fire_event(AdapterImportError(exc=str(exc)))
                raise DbtRuntimeError(f"Could not find adapter type {name}!")
                raise RuntimeException(f"Could not find adapter type {name}!")
            logger.info(f"Error importing adapter: {exc}")
            # otherwise, the error had to have come from some underlying
            # library. Log the stack trace.

            fire_event(PluginLoadError(exc_info=traceback.format_exc()))
            logger.debug("", exc_info=True)
            raise
        plugin: AdapterPlugin = mod.Plugin
        plugin_type = plugin.adapter.type()

        if plugin_type != name:
            raise DbtRuntimeError(
            raise RuntimeException(
                f"Expected to find adapter with type named {name}, got "
                f"adapter with type {plugin_type}"
            )
@@ -132,16 +137,20 @@ class AdapterContainer:
            try:
                plugin = self.plugins[plugin_name]
            except KeyError:
                raise DbtInternalError(f"No plugin found for {plugin_name}") from None
                raise InternalException(f"No plugin found for {plugin_name}") from None
            plugins.append(plugin)
            seen.add(plugin_name)
            if plugin.dependencies is None:
                continue
            for dep in plugin.dependencies:
                if dep not in seen:
                    plugin_names.append(dep)
        return plugins

    def get_adapter_package_names(self, name: Optional[str]) -> List[str]:
        package_names: List[str] = [p.project_name for p in self.get_adapter_plugins(name)]
        package_names: List[str] = [
            p.project_name for p in self.get_adapter_plugins(name)
        ]
        package_names.append(GLOBAL_PROJECT_NAME)
        return package_names

@@ -151,7 +160,9 @@ class AdapterContainer:
            try:
                path = self.packages[package_name]
            except KeyError:
                raise DbtInternalError(f"No internal package listing found for {package_name}")
                raise InternalException(
                    f"No internal package listing found for {package_name}"
                )
            paths.append(path)
        return paths

@@ -170,10 +181,6 @@ def get_adapter(config: AdapterRequiredConfig):
    return FACTORY.lookup_adapter(config.credentials.type)


def get_adapter_by_type(adapter_type):
    return FACTORY.lookup_adapter(adapter_type)


def reset_adapters():
    """Clear the adapters. This is useful for tests, which change configs."""
    FACTORY.reset_adapters()
@@ -212,12 +219,3 @@ def get_adapter_package_names(name: Optional[str]) -> List[str]:

def get_adapter_type_names(name: Optional[str]) -> List[str]:
    return FACTORY.get_adapter_type_names(name)


@contextmanager
def adapter_management():
    reset_adapters()
    try:
        yield
    finally:
        cleanup_connections()

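`adapter_management` (removed on one side of this hunk) is a plain setup/teardown context manager: reset the adapter registry on entry, close connections on exit even if the body raises. A generic, runnable sketch of the pattern with stub callables in place of dbt's:

```python
from contextlib import contextmanager


def reset_adapters() -> None:
    print("adapters reset")


def cleanup_connections() -> None:
    print("connections cleaned up")


@contextmanager
def adapter_management():
    # same shape as the factory helper in the hunk above
    reset_adapters()
    try:
        yield
    finally:
        cleanup_connections()  # runs even if the body raises


with adapter_management():
    print("running dbt task")
```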
@@ -7,7 +7,9 @@ from typing (
    List,
    Generic,
    TypeVar,
    ClassVar,
    Tuple,
    Union,
    Dict,
    Any,
)
@@ -16,7 +18,12 @@ from typing_extensions import Protocol
import agate

from dbt.contracts.connection import Connection, AdapterRequiredConfig, AdapterResponse
from dbt.contracts.graph.nodes import ResultNode, ManifestNode
from dbt.contracts.graph.compiled import (
    CompiledNode,
    ManifestNode,
    NonSourceCompiledNode,
)
from dbt.contracts.graph.parsed import ParsedNode, ParsedSourceDefinition
from dbt.contracts.graph.model_config import BaseConfig
from dbt.contracts.graph.manifest import Manifest
from dbt.contracts.relation import Policy, HasQuoting
@@ -46,7 +53,11 @@ class RelationProtocol(Protocol):
        ...

    @classmethod
    def create_from(cls: Type[Self], config: HasQuoting, node: ResultNode) -> Self:
    def create_from(
        cls: Type[Self],
        config: HasQuoting,
        node: Union[CompiledNode, ParsedNode, ParsedSourceDefinition],
    ) -> Self:
        ...


@@ -59,7 +70,7 @@ class CompilerProtocol(Protocol):
        node: ManifestNode,
        manifest: Manifest,
        extra_context: Optional[Dict[str, Any]] = None,
    ) -> ManifestNode:
    ) -> NonSourceCompiledNode:
        ...


@@ -70,8 +81,7 @@ Column_T = TypeVar("Column_T", bound=ColumnProtocol)
Compiler_T = TypeVar("Compiler_T", bound=CompilerProtocol)


# TODO CT-211
class AdapterProtocol(  # type: ignore[misc]
class AdapterProtocol(
    Protocol,
    Generic[
        AdapterConfig_T,
@@ -81,13 +91,10 @@ class AdapterProtocol(  # type: ignore[misc]
        Compiler_T,
    ],
):
    # N.B. Technically these are ClassVars, but mypy doesn't support putting type vars in a
    # ClassVar due to the restrictiveness of PEP-526
    # See: https://github.com/python/mypy/issues/5144
    AdapterSpecificConfigs: Type[AdapterConfig_T]
    Column: Type[Column_T]
    Relation: Type[Relation_T]
    ConnectionManager: Type[ConnectionManager_T]
    AdapterSpecificConfigs: ClassVar[Type[AdapterConfig_T]]
    Column: ClassVar[Type[Column_T]]
    Relation: ClassVar[Type[Relation_T]]
    ConnectionManager: ClassVar[Type[ConnectionManager_T]]
    connections: ConnectionManager_T

    def __init__(self, config: AdapterRequiredConfig):
@@ -151,7 +158,7 @@ class AdapterProtocol(  # type: ignore[misc]

    def execute(
        self, sql: str, auto_begin: bool = False, fetch: bool = False
    ) -> Tuple[AdapterResponse, agate.Table]:
    ) -> Tuple[Union[str, AdapterResponse], agate.Table]:
        ...

    def get_compiler(self) -> Compiler_T:

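The N.B. comment above points at a real mypy limitation: PEP 526 does not allow a type variable inside `ClassVar[...]` (see the linked mypy issue 5144), so one side of the diff drops the `ClassVar` wrapper and annotates the attributes as plain `Type[...]`. A minimal illustration of the two spellings:

```python
from typing import ClassVar, Generic, Type, TypeVar

T = TypeVar("T")


class Concrete:
    pass


class Registry(Generic[T]):
    # mypy rejects `ClassVar[Type[T]]` (type vars are not allowed inside
    # ClassVar under PEP 526), so generic protocols fall back to this:
    entry: Type[T]


class NonGeneric:
    # with no type variable involved, ClassVar works fine
    entry: ClassVar[Type[Concrete]] = Concrete
```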
@@ -1,40 +0,0 @@
# this module exists to resolve circular imports with the events module

from collections import namedtuple
from typing import Any, Optional
from dbt.events.proto_types import ReferenceKeyMsg


_ReferenceKey = namedtuple("_ReferenceKey", "database schema identifier")


def lowercase(value: Optional[str]) -> Optional[str]:
    if value is None:
        return None
    else:
        return value.lower()


# For backwards compatibility. New code should use _make_ref_key
def _make_key(relation: Any) -> _ReferenceKey:
    return _make_ref_key(relation)


def _make_ref_key(relation: Any) -> _ReferenceKey:
    """Make _ReferenceKeys with lowercase values for the cache so we don't have
    to keep track of quoting
    """
    # databases and schemas can both be None
    return _ReferenceKey(
        lowercase(relation.database), lowercase(relation.schema), lowercase(relation.identifier)
    )


def _make_ref_key_msg(relation: Any):
    return _make_msg_from_ref_key(_make_ref_key(relation))


def _make_msg_from_ref_key(ref_key: _ReferenceKey) -> ReferenceKeyMsg:
    return ReferenceKeyMsg(
        database=ref_key.database, schema=ref_key.schema, identifier=ref_key.identifier
    )
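The lowercasing in `_make_ref_key` is what lets the cache treat `ANALYTICS.Orders` and `analytics.orders` as the same entry without tracking quoting. A runnable sketch using a stand-in relation type:

```python
from collections import namedtuple
from typing import Any, Optional

_ReferenceKey = namedtuple("_ReferenceKey", "database schema identifier")


def lowercase(value: Optional[str]) -> Optional[str]:
    return None if value is None else value.lower()


def _make_ref_key(relation: Any) -> _ReferenceKey:
    # databases and schemas can both be None
    return _ReferenceKey(
        lowercase(relation.database),
        lowercase(relation.schema),
        lowercase(relation.identifier),
    )


Rel = namedtuple("Rel", "database schema identifier")  # stand-in relation
assert _make_ref_key(Rel("ANALYTICS", "Public", "Orders")) == _make_ref_key(
    Rel("analytics", "public", "orders")
)
```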
@@ -1,6 +1,6 @@
import abc
import time
from typing import List, Optional, Tuple, Any, Iterable, Dict
from typing import List, Optional, Tuple, Any, Iterable, Dict, Union

import agate

@@ -8,10 +8,8 @@ import dbt.clients.agate_helper
import dbt.exceptions
from dbt.adapters.base import BaseConnectionManager
from dbt.contracts.connection import Connection, ConnectionState, AdapterResponse
from dbt.events.functions import fire_event
from dbt.events.types import ConnectionUsed, SQLQuery, SQLCommit, SQLQueryStatus
from dbt.events.contextvars import get_node_info
from dbt.utils import cast_to_str
from dbt.logger import GLOBAL_LOGGER as logger
from dbt import flags


class SQLConnectionManager(BaseConnectionManager):
@@ -27,7 +25,9 @@ class SQLConnectionManager(BaseConnectionManager):
    @abc.abstractmethod
    def cancel(self, connection: Connection):
        """Cancel the given connection."""
        raise dbt.exceptions.NotImplementedError("`cancel` is not implemented for this adapter!")
        raise dbt.exceptions.NotImplementedException(
            "`cancel` is not implemented for this adapter!"
        )

    def cancel_open(self) -> List[str]:
        names = []
@@ -39,7 +39,10 @@ class SQLConnectionManager(BaseConnectionManager):

            # if the connection failed, the handle will be None so we have
            # nothing to cancel.
            if connection.handle is not None and connection.state == ConnectionState.OPEN:
            if (
                connection.handle is not None
                and connection.state == ConnectionState.OPEN
            ):
                self.cancel(connection)
                if connection.name is not None:
                    names.append(connection.name)
@@ -55,13 +58,8 @@ class SQLConnectionManager(BaseConnectionManager):
        connection = self.get_thread_connection()
        if auto_begin and connection.transaction_open is False:
            self.begin()
        fire_event(
            ConnectionUsed(
                conn_type=self.TYPE,
                conn_name=cast_to_str(connection.name),
                node_info=get_node_info(),
            )
        )

        logger.debug('Using {} connection "{}".'.format(self.TYPE, connection.name))

        with self.exception_handler(sql):
            if abridge_sql_log:
@@ -69,31 +67,27 @@ class SQLConnectionManager(BaseConnectionManager):
            else:
                log_sql = sql

            fire_event(
                SQLQuery(
                    conn_name=cast_to_str(connection.name), sql=log_sql, node_info=get_node_info()
                )
            logger.debug(
                "On {connection_name}: {sql}",
                connection_name=connection.name,
                sql=log_sql,
            )
            pre = time.time()

            cursor = connection.handle.cursor()
            cursor.execute(sql, bindings)

            fire_event(
                SQLQueryStatus(
                    status=str(self.get_response(cursor)),
                    elapsed=round((time.time() - pre)),
                    node_info=get_node_info(),
                )
            logger.debug(
                "SQL status: {status} in {elapsed:0.2f} seconds",
                status=self.get_response(cursor),
                elapsed=(time.time() - pre),
            )

            return connection, cursor

    @classmethod
    @abc.abstractmethod
    def get_response(cls, cursor: Any) -> AdapterResponse:
    @abc.abstractclassmethod
    def get_response(cls, cursor: Any) -> Union[AdapterResponse, str]:
        """Get the status of the cursor."""
        raise dbt.exceptions.NotImplementedError(
        raise dbt.exceptions.NotImplementedException(
            "`get_response` is not implemented for this adapter!"
        )

@@ -101,19 +95,7 @@ class SQLConnectionManager(BaseConnectionManager):
    def process_results(
        cls, column_names: Iterable[str], rows: Iterable[Any]
    ) -> List[Dict[str, Any]]:
        # TODO CT-211
        unique_col_names = dict()  # type: ignore[var-annotated]
        # TODO CT-211
        for idx in range(len(column_names)):  # type: ignore[arg-type]
            # TODO CT-211
            col_name = column_names[idx]  # type: ignore[index]
            if col_name in unique_col_names:
                unique_col_names[col_name] += 1
                # TODO CT-211
                column_names[idx] = f"{col_name}_{unique_col_names[col_name]}"  # type: ignore[index] # noqa
            else:
                # TODO CT-211
                unique_col_names[column_names[idx]] = 1  # type: ignore[index]

        return [dict(zip(column_names, row)) for row in rows]

    @classmethod
@@ -130,7 +112,7 @@ class SQLConnectionManager(BaseConnectionManager):

    def execute(
        self, sql: str, auto_begin: bool = False, fetch: bool = False
    ) -> Tuple[AdapterResponse, agate.Table]:
    ) -> Tuple[Union[AdapterResponse, str], agate.Table]:
        sql = self._add_query_comment(sql)
        _, cursor = self.add_query(sql, auto_begin)
        response = self.get_response(cursor)
@@ -148,8 +130,15 @@ class SQLConnectionManager(BaseConnectionManager):

    def begin(self):
        connection = self.get_thread_connection()

        if flags.STRICT_MODE:
            if not isinstance(connection, Connection):
                raise dbt.exceptions.CompilerException(
                    f"In begin, got {connection} - not a Connection!"
                )

        if connection.transaction_open is True:
            raise dbt.exceptions.DbtInternalError(
            raise dbt.exceptions.InternalException(
                'Tried to begin a new transaction on connection "{}", but '
                "it already had one open!".format(connection.name)
            )
@@ -161,13 +150,19 @@ class SQLConnectionManager(BaseConnectionManager):

    def commit(self):
        connection = self.get_thread_connection()
        if flags.STRICT_MODE:
            if not isinstance(connection, Connection):
                raise dbt.exceptions.CompilerException(
                    f"In commit, got {connection} - not a Connection!"
                )

        if connection.transaction_open is False:
            raise dbt.exceptions.DbtInternalError(
            raise dbt.exceptions.InternalException(
                'Tried to commit transaction on connection "{}", but '
                "it does not have one open!".format(connection.name)
            )

        fire_event(SQLCommit(conn_name=connection.name, node_info=get_node_info()))
        logger.debug("On {}: COMMIT".format(connection.name))
        self.add_commit_query()

        connection.transaction_open = False

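`process_results` above dedupes repeated column names (for example from `SELECT 1 AS x, 2 AS x`) by suffixing a counter before zipping rows into dicts; without it, later duplicates would silently clobber earlier keys. A cleaned-up, runnable sketch of the same idea:

```python
from typing import Any, Dict, Iterable, List


def process_results(column_names: List[str], rows: Iterable[Any]) -> List[Dict[str, Any]]:
    # rename duplicate columns x, x -> x, x_2 so dict(zip(...)) keeps both
    seen: Dict[str, int] = {}
    for idx, name in enumerate(column_names):
        if name in seen:
            seen[name] += 1
            column_names[idx] = f"{name}_{seen[name]}"
        else:
            seen[name] = 1
    return [dict(zip(column_names, row)) for row in rows]


assert process_results(["x", "x"], [(1, 2)]) == [{"x": 1, "x_2": 2}]
```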
@@ -1,14 +1,12 @@
import agate
from typing import Any, Optional, Tuple, Type, List

import dbt.clients.agate_helper
from dbt.contracts.connection import Connection
from dbt.exceptions import RelationTypeNullError
import dbt.exceptions
from dbt.adapters.base import BaseAdapter, available
from dbt.adapters.cache import _make_ref_key_msg
from dbt.adapters.sql import SQLConnectionManager
from dbt.events.functions import fire_event
from dbt.events.types import ColTypeChange, SchemaCreation, SchemaDrop

from dbt.logger import GLOBAL_LOGGER as logger

from dbt.adapters.base.relation import BaseRelation

@@ -26,7 +24,7 @@ ALTER_COLUMN_TYPE_MACRO_NAME = "alter_column_type"

class SQLAdapter(BaseAdapter):
    """The default adapter with the common agate conversions and some SQL
    methods was implemented. This adapter has a different much shorter list of
    methods implemented. This adapter has a different much shorter list of
    methods to implement, but some more macros that must be implemented.

    To implement a macro, implement "${adapter_type}__${macro_name}". in the
@@ -70,8 +68,7 @@ class SQLAdapter(BaseAdapter):

    @classmethod
    def convert_number_type(cls, agate_table: agate.Table, col_idx: int) -> str:
        # TODO CT-211
        decimals = agate_table.aggregate(agate.MaxPrecision(col_idx))  # type: ignore[attr-defined]
        decimals = agate_table.aggregate(agate.MaxPrecision(col_idx))  # type: ignore
        return "float8" if decimals else "integer"

    @classmethod
@@ -102,15 +99,16 @@ class SQLAdapter(BaseAdapter):
        for column_name, reference_column in reference_columns.items():
            target_column = target_columns.get(column_name)

            if target_column is not None and target_column.can_expand_to(reference_column):
            if target_column is not None and target_column.can_expand_to(
                reference_column
            ):
                col_string_size = reference_column.string_size()
                new_type = self.Column.string_type(col_string_size)
                fire_event(
                    ColTypeChange(
                        orig_type=target_column.data_type,
                        new_type=new_type,
                        table=_make_ref_key_msg(current),
                    )
                logger.debug(
                    "Changing col type from {} to {} in table {}",
                    target_column.data_type,
                    new_type,
                    current,
                )

                self.alter_column_type(current, column_name, new_type)
@@ -131,7 +129,9 @@ class SQLAdapter(BaseAdapter):

    def drop_relation(self, relation):
        if relation.type is None:
            raise RelationTypeNullError(relation)
            dbt.exceptions.raise_compiler_error(
                "Tried to drop relation {}, but its type is null.".format(relation)
            )

        self.cache_dropped(relation)
        self.execute_macro(DROP_RELATION_MACRO_NAME, kwargs={"relation": relation})
@@ -152,7 +152,7 @@ class SQLAdapter(BaseAdapter):

    def create_schema(self, relation: BaseRelation) -> None:
        relation = relation.without_identifier()
        fire_event(SchemaCreation(relation=_make_ref_key_msg(relation)))
        logger.debug('Creating schema "{}"', relation)
        kwargs = {
            "relation": relation,
        }
@@ -163,12 +163,11 @@ class SQLAdapter(BaseAdapter):

    def drop_schema(self, relation: BaseRelation) -> None:
        relation = relation.without_identifier()
        fire_event(SchemaDrop(relation=_make_ref_key_msg(relation)))
        logger.debug('Dropping schema "{}".', relation)
        kwargs = {
            "relation": relation,
        }
        self.execute_macro(DROP_SCHEMA_MACRO_NAME, kwargs=kwargs)
        self.commit_if_has_connection()
        # we can update the cache here
        self.cache.drop_schema(relation.database, relation.schema)

@@ -201,7 +200,9 @@ class SQLAdapter(BaseAdapter):
        return '"{}"'.format(identifier)

    def list_schemas(self, database: str) -> List[str]:
        results = self.execute_macro(LIST_SCHEMAS_MACRO_NAME, kwargs={"database": database})
        results = self.execute_macro(
            LIST_SCHEMAS_MACRO_NAME, kwargs={"database": database}
        )

        return [row[0] for row in results]

@@ -216,25 +217,3 @@ class SQLAdapter(BaseAdapter):
        kwargs = {"information_schema": information_schema, "schema": schema}
        results = self.execute_macro(CHECK_SCHEMA_EXISTS_MACRO_NAME, kwargs=kwargs)
        return results[0][0] > 0

    # This is for use in the test suite
    def run_sql_for_tests(self, sql, fetch, conn):
        cursor = conn.handle.cursor()
        try:
            cursor.execute(sql)
            if hasattr(conn.handle, "commit"):
                conn.handle.commit()
            if fetch == "one":
                return cursor.fetchone()
            elif fetch == "all":
                return cursor.fetchall()
            else:
                return
        except BaseException as e:
            if conn.handle and not getattr(conn.handle, "closed", True):
                conn.handle.rollback()
            print(sql)
            print(e)
            raise
        finally:
            conn.transaction_open = False

@@ -1 +0,0 @@
TODO
@@ -1,44 +0,0 @@
# TODO Move this to /core/dbt/flags.py when we're ready to break things
import os
from dataclasses import dataclass
from multiprocessing import get_context
from pprint import pformat as pf

from click import get_current_context

if os.name != "nt":
    # https://bugs.python.org/issue41567
    import multiprocessing.popen_spawn_posix  # type: ignore  # noqa: F401


@dataclass(frozen=True)
class Flags:
    def __init__(self, ctx=None) -> None:

        if ctx is None:
            ctx = get_current_context()

        def assign_params(ctx):
            """Recursively adds all click params to flag object"""
            for param_name, param_value in ctx.params.items():
                # N.B. You have to use the base MRO method (object.__setattr__) to set attributes
                # when using frozen dataclasses.
                # https://docs.python.org/3/library/dataclasses.html#frozen-instances
                if hasattr(self, param_name):
                    raise Exception(f"Duplicate flag names found in click command: {param_name}")
                object.__setattr__(self, param_name.upper(), param_value)
            if ctx.parent:
                assign_params(ctx.parent)

        assign_params(ctx)

        # Hard coded flags
        object.__setattr__(self, "WHICH", ctx.info_name)
        object.__setattr__(self, "MP_CONTEXT", get_context("spawn"))

        # Support console DO NOT TRACK initiative
        if os.getenv("DO_NOT_TRACK", "").lower() in ("1", "t", "true", "y", "yes"):
            object.__setattr__(self, "ANONYMOUS_USAGE_STATS", False)

    def __str__(self) -> str:
        return str(pf(self.__dict__))
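Since `Flags` is a frozen dataclass, plain attribute assignment inside `__init__` would raise `FrozenInstanceError`; the class routes every write through `object.__setattr__` instead. A minimal standalone sketch of that pattern (the `MyFlags` name and params are illustrative, not dbt's):

```
from dataclasses import dataclass


@dataclass(frozen=True)
class MyFlags:
    def __init__(self, params=None) -> None:
        # self.NAME = value would raise FrozenInstanceError on a frozen
        # dataclass, so go through the base object implementation instead.
        for name, value in (params or {}).items():
            object.__setattr__(self, name.upper(), value)


flags = MyFlags({"debug": True, "threads": 4})
print(flags.DEBUG, flags.THREADS)  # True 4
```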
@@ -1,412 +0,0 @@
import inspect  # This is temporary for RAT-ing
from copy import copy
from pprint import pformat as pf  # This is temporary for RAT-ing

import click
from dbt.adapters.factory import adapter_management
from dbt.cli import params as p
from dbt.cli.flags import Flags
from dbt.profiler import profiler


def cli_runner():
    # Alias "list" to "ls"
    ls = copy(cli.commands["list"])
    ls.hidden = True
    cli.add_command(ls, "ls")

    # Run the cli
    cli()


# dbt
@click.group(
    context_settings={"help_option_names": ["-h", "--help"]},
    invoke_without_command=True,
    no_args_is_help=True,
    epilog="Specify one of these sub-commands and you can find more help from there.",
)
@click.pass_context
@p.anonymous_usage_stats
@p.cache_selected_only
@p.debug
@p.enable_legacy_logger
@p.fail_fast
@p.log_cache_events
@p.log_format
@p.macro_debugging
@p.partial_parse
@p.print
@p.printer_width
@p.quiet
@p.record_timing_info
@p.static_parser
@p.use_colors
@p.use_experimental_parser
@p.version
@p.version_check
@p.warn_error
@p.warn_error_options
@p.write_json
def cli(ctx, **kwargs):
    """An ELT tool for managing your SQL transformations and data models.
    For more documentation on these commands, visit: docs.getdbt.com
    """
    incomplete_flags = Flags()

    # Profiling
    if incomplete_flags.RECORD_TIMING_INFO:
        ctx.with_resource(profiler(enable=True, outfile=incomplete_flags.RECORD_TIMING_INFO))

    # Adapter management
    ctx.with_resource(adapter_management())

    # Version info
    if incomplete_flags.VERSION:
        click.echo(f"`version` called\n ctx.params: {pf(ctx.params)}")
        return
    else:
        del ctx.params["version"]


# dbt build
@cli.command("build")
@click.pass_context
@p.defer
@p.exclude
@p.fail_fast
@p.full_refresh
@p.indirect_selection
@p.log_path
@p.models
@p.profile
@p.profiles_dir
@p.project_dir
@p.selector
@p.show
@p.state
@p.store_failures
@p.target
@p.target_path
@p.threads
@p.vars
@p.version_check
def build(ctx, **kwargs):
    """Run all Seeds, Models, Snapshots, and tests in DAG order"""
    flags = Flags()
    click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {flags}")


# dbt clean
@cli.command("clean")
@click.pass_context
@p.profile
@p.profiles_dir
@p.project_dir
@p.target
@p.vars
def clean(ctx, **kwargs):
"""Delete all folders in the clean-targets list (usually the dbt_packages and target directories.)"""
|
||||
    flags = Flags()
    click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {flags}")


# dbt docs
@cli.group()
@click.pass_context
def docs(ctx, **kwargs):
    """Generate or serve the documentation website for your project"""


# dbt docs generate
@docs.command("generate")
@click.pass_context
@p.compile_docs
@p.defer
@p.exclude
@p.log_path
@p.models
@p.profile
@p.profiles_dir
@p.project_dir
@p.selector
@p.state
@p.target
@p.target_path
@p.threads
@p.vars
@p.version_check
def docs_generate(ctx, **kwargs):
    """Generate the documentation website for your project"""
    flags = Flags()
    click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {flags}")


# dbt docs serve
@docs.command("serve")
@click.pass_context
@p.browser
@p.port
@p.profile
@p.profiles_dir
@p.project_dir
@p.target
@p.vars
def docs_serve(ctx, **kwargs):
    """Serve the documentation website for your project"""
    flags = Flags()
    click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {flags}")


# dbt compile
@cli.command("compile")
@click.pass_context
@p.defer
@p.exclude
@p.full_refresh
@p.log_path
@p.models
@p.parse_only
@p.profile
@p.profiles_dir
@p.project_dir
@p.selector
@p.state
@p.target
@p.target_path
@p.threads
@p.vars
@p.version_check
def compile(ctx, **kwargs):
    """Generates executable SQL from source, model, test, and analysis files. Compiled SQL files are written to the target/ directory."""
    flags = Flags()
    click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {flags}")


# dbt debug
@cli.command("debug")
@click.pass_context
@p.config_dir
@p.profile
@p.profiles_dir
@p.project_dir
@p.target
@p.vars
@p.version_check
def debug(ctx, **kwargs):
    """Show some helpful information about dbt for debugging. Not to be confused with the --debug option which increases verbosity."""
    flags = Flags()
    click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {flags}")


# dbt deps
@cli.command("deps")
@click.pass_context
@p.profile
@p.profiles_dir
@p.project_dir
@p.target
@p.vars
def deps(ctx, **kwargs):
    """Pull the most recent version of the dependencies listed in packages.yml"""
    flags = Flags()
    click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {flags}")


# dbt init
@cli.command("init")
@click.pass_context
@p.profile
@p.profiles_dir
@p.project_dir
@p.skip_profile_setup
@p.target
@p.vars
def init(ctx, **kwargs):
    """Initialize a new DBT project."""
    flags = Flags()
    click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {flags}")


# dbt list
@cli.command("list")
@click.pass_context
@p.exclude
@p.indirect_selection
@p.models
@p.output
@p.output_keys
@p.profile
@p.profiles_dir
@p.project_dir
@p.resource_type
@p.selector
@p.state
@p.target
@p.vars
def list(ctx, **kwargs):
    """List the resources in your project"""
    flags = Flags()
    click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {flags}")


# dbt parse
@cli.command("parse")
@click.pass_context
@p.compile_parse
@p.log_path
@p.profile
@p.profiles_dir
@p.project_dir
@p.target
@p.target_path
@p.threads
@p.vars
@p.version_check
@p.write_manifest
def parse(ctx, **kwargs):
    """Parses the project and provides information on performance"""
    flags = Flags()
    click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {flags}")


# dbt run
@cli.command("run")
@click.pass_context
@p.defer
@p.exclude
@p.fail_fast
@p.full_refresh
@p.log_path
@p.models
@p.profile
@p.profiles_dir
@p.project_dir
@p.selector
@p.state
@p.target
@p.target_path
@p.threads
@p.vars
@p.version_check
def run(ctx, **kwargs):
    """Compile SQL and execute against the current target database."""
    flags = Flags()
    click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {flags}")


# dbt run operation
@cli.command("run-operation")
@click.pass_context
@p.args
@p.profile
@p.profiles_dir
@p.project_dir
@p.target
@p.vars
def run_operation(ctx, **kwargs):
    """Run the named macro with any supplied arguments."""
    flags = Flags()
    click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {flags}")


# dbt seed
@cli.command("seed")
@click.pass_context
@p.exclude
@p.full_refresh
@p.log_path
@p.models
@p.profile
@p.profiles_dir
@p.project_dir
@p.selector
@p.show
@p.state
@p.target
@p.target_path
@p.threads
@p.vars
@p.version_check
def seed(ctx, **kwargs):
    """Load data from csv files into your data warehouse."""
    flags = Flags()
    click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {flags}")


# dbt snapshot
@cli.command("snapshot")
@click.pass_context
@p.defer
@p.exclude
@p.models
@p.profile
@p.profiles_dir
@p.project_dir
@p.selector
@p.state
@p.target
@p.threads
@p.vars
def snapshot(ctx, **kwargs):
    """Execute snapshots defined in your project"""
    flags = Flags()
    click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {flags}")


# dbt source
@cli.group()
@click.pass_context
def source(ctx, **kwargs):
    """Manage your project's sources"""


# dbt source freshness
@source.command("freshness")
@click.pass_context
@p.exclude
@p.models
@p.output_path  # TODO: Is this ok to re-use? We have three different output params, how much can we consolidate?
@p.profile
@p.profiles_dir
@p.project_dir
@p.selector
@p.state
@p.target
@p.threads
@p.vars
def freshness(ctx, **kwargs):
    """Snapshots the current freshness of the project's sources"""
    flags = Flags()
    click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {flags}")


# dbt test
@cli.command("test")
@click.pass_context
@p.defer
@p.exclude
@p.fail_fast
@p.indirect_selection
@p.log_path
@p.models
@p.profile
@p.profiles_dir
@p.project_dir
@p.selector
@p.state
@p.store_failures
@p.target
@p.target_path
@p.threads
@p.vars
@p.version_check
def test(ctx, **kwargs):
    """Runs tests on data in deployed models. Run this after `dbt run`"""
    flags = Flags()
    click.echo(f"`{inspect.stack()[0][3]}` called\n flags: {flags}")


# Support running as a module
if __name__ == "__main__":
    cli_runner()
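Note the `ctx.with_resource(...)` calls in the `cli` group above: `click.Context.with_resource` enters a context manager and registers its exit with the context's teardown, so the profiler and adapter cleanup run after the command returns. A minimal sketch of the same idea (names here are illustrative, assuming click 8.x):

```
from contextlib import contextmanager

import click


@contextmanager
def managed_resource():
    click.echo("resource acquired")
    try:
        yield
    finally:
        # Runs when the click context tears down, after the command body.
        click.echo("resource released")


@click.command()
@click.pass_context
def hello(ctx):
    # Enter the context manager; click exits it automatically on teardown.
    ctx.with_resource(managed_resource())
    click.echo("command body")


if __name__ == "__main__":
    hello()
```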
@@ -1,48 +0,0 @@
from click import ParamType
import yaml

from dbt.helper_types import WarnErrorOptions


class YAML(ParamType):
    """The Click YAML type. Converts YAML strings into objects."""

    name = "YAML"

    def convert(self, value, param, ctx):
        # assume non-string values are a problem
        if not isinstance(value, str):
            self.fail(f"Cannot load YAML from type {type(value)}", param, ctx)
        try:
            return yaml.load(value, Loader=yaml.Loader)
        except yaml.parser.ParserError:
            self.fail(f"String '{value}' is not valid YAML", param, ctx)


class WarnErrorOptionsType(YAML):
    """The Click WarnErrorOptions type. Converts YAML strings into objects."""

    name = "WarnErrorOptionsType"

    def convert(self, value, param, ctx):
        include_exclude = super().convert(value, param, ctx)

        return WarnErrorOptions(
            include=include_exclude.get("include", []), exclude=include_exclude.get("exclude", [])
        )


class Truthy(ParamType):
    """The Click Truthy type. Converts strings into a "truthy" type"""

    name = "TRUTHY"

    def convert(self, value, param, ctx):
        # assume non-string / non-None values are a problem
        if not isinstance(value, (str, type(None))):
            self.fail(f"Cannot load TRUTHY from type {type(value)}", param, ctx)

        if value is None or value.lower() in ("0", "false", "f"):
            return None
        else:
            return value
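These `ParamType` subclasses plug straight into `click.option` declarations, so a flag like `--vars` arrives pre-parsed. A pared-down, self-contained sketch (it uses `yaml.safe_load` rather than the `yaml.Loader` call above, and the option name is illustrative):

```
import click
import yaml


class SimpleYAML(click.ParamType):
    """A minimal stand-in for the YAML type above."""

    name = "YAML"

    def convert(self, value, param, ctx):
        try:
            return yaml.safe_load(value)
        except yaml.YAMLError:
            self.fail(f"String '{value}' is not valid YAML", param, ctx)


@click.command()
@click.option("--vars", "vars_", type=SimpleYAML(), default="{}")
def show(vars_):
    # --vars "{my_var: 1}" arrives as a parsed dict, not a raw string.
    click.echo(type(vars_).__name__)


if __name__ == "__main__":
    show()
```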
@@ -1,389 +0,0 @@
from pathlib import Path, PurePath

import click
from dbt.cli.option_types import YAML, WarnErrorOptionsType
from dbt.cli.resolvers import default_project_dir, default_profiles_dir


# TODO: The name (reflected in flags) is a correction!
# The original name was `SEND_ANONYMOUS_USAGE_STATS` and used an env var called "DBT_SEND_ANONYMOUS_USAGE_STATS"
# Both of which break existing naming conventions (doesn't match param flag).
# This will need to be fixed before use in the main codebase and communicated as a change to the community!
anonymous_usage_stats = click.option(
    "--anonymous-usage-stats/--no-anonymous-usage-stats",
    envvar="DBT_ANONYMOUS_USAGE_STATS",
    help="Send anonymous usage stats to dbt Labs.",
    default=True,
)

args = click.option(
    "--args",
    envvar=None,
    help="Supply arguments to the macro. This dictionary will be mapped to the keyword arguments defined in the selected macro. This argument should be a YAML string, eg. '{my_variable: my_value}'",
    type=YAML(),
)

browser = click.option(
    "--browser/--no-browser",
    envvar=None,
    help="Whether or not to open a local web browser after starting the server",
    default=True,
)

cache_selected_only = click.option(
    "--cache-selected-only/--no-cache-selected-only",
    envvar="DBT_CACHE_SELECTED_ONLY",
    help="Pre cache database objects relevant to selected resource only.",
)

compile_docs = click.option(
    "--compile/--no-compile",
    envvar=None,
    help="Whether or not to run 'dbt compile' as part of docs generation",
    default=True,
)

compile_parse = click.option(
    "--compile/--no-compile",
    envvar=None,
    help="TODO: No help text currently available",
    default=True,
)

config_dir = click.option(
    "--config-dir",
    envvar=None,
    help="If specified, DBT will show path information for this project",
    type=click.STRING,
)

debug = click.option(
    "--debug/--no-debug",
    "-d/ ",
    envvar="DBT_DEBUG",
    help="Display debug logging during dbt execution. Useful for debugging and making bug reports.",
)

# TODO: The env var and name (reflected in flags) are corrections!
# The original name was `DEFER_MODE` and used an env var called "DBT_DEFER_TO_STATE"
# Both of which break existing naming conventions.
# This will need to be fixed before use in the main codebase and communicated as a change to the community!
defer = click.option(
    "--defer/--no-defer",
    envvar="DBT_DEFER",
    help="If set, defer to the state variable for resolving unselected nodes.",
)

enable_legacy_logger = click.option(
    "--enable-legacy-logger/--no-enable-legacy-logger",
    envvar="DBT_ENABLE_LEGACY_LOGGER",
    hidden=True,
)

exclude = click.option("--exclude", envvar=None, help="Specify the nodes to exclude.")

fail_fast = click.option(
    "--fail-fast/--no-fail-fast",
    "-x/ ",
    envvar="DBT_FAIL_FAST",
    help="Stop execution on first failure.",
)

full_refresh = click.option(
    "--full-refresh",
    "-f",
    envvar="DBT_FULL_REFRESH",
    help="If specified, dbt will drop incremental models and fully-recalculate the incremental table from the model definition.",
    is_flag=True,
)

indirect_selection = click.option(
    "--indirect-selection",
    envvar="DBT_INDIRECT_SELECTION",
    help="Select all tests that are adjacent to selected resources, even if they themselves have not been explicitly selected.",
    type=click.Choice(["eager", "cautious"], case_sensitive=False),
    default="eager",
)

log_cache_events = click.option(
    "--log-cache-events/--no-log-cache-events",
    help="Enable verbose adapter cache logging.",
    envvar="DBT_LOG_CACHE_EVENTS",
)

log_format = click.option(
    "--log-format",
    envvar="DBT_LOG_FORMAT",
    help="Specify the log format, overriding the command's default.",
    type=click.Choice(["text", "json", "default"], case_sensitive=False),
    default="default",
)

log_path = click.option(
    "--log-path",
    envvar="DBT_LOG_PATH",
    help="Configure the 'log-path'. Only applies this setting for the current run. Overrides the 'DBT_LOG_PATH' if it is set.",
    type=click.Path(),
)

macro_debugging = click.option(
    "--macro-debugging/--no-macro-debugging",
    envvar="DBT_MACRO_DEBUGGING",
    hidden=True,
)

models = click.option(
    "-m",
    "-s",
    "models",
    envvar=None,
    help="Specify the nodes to include.",
    multiple=True,
)

output = click.option(
    "--output",
    envvar=None,
    help="TODO: No current help text",
    type=click.Choice(["json", "name", "path", "selector"], case_sensitive=False),
    default="name",
)

output_keys = click.option(
    "--output-keys", envvar=None, help="TODO: No current help text", type=click.STRING
)

output_path = click.option(
    "--output",
    "-o",
    envvar=None,
    help="Specify the output path for the json report. By default, outputs to 'target/sources.json'",
    type=click.Path(file_okay=True, dir_okay=False, writable=True),
    default=PurePath.joinpath(Path.cwd(), "target/sources.json"),
)

parse_only = click.option(
    "--parse-only",
    envvar=None,
    help="TODO: No help text currently available",
    is_flag=True,
)

partial_parse = click.option(
    "--partial-parse/--no-partial-parse",
    envvar="DBT_PARTIAL_PARSE",
    help="Allow for partial parsing by looking for and writing to a pickle file in the target directory. This overrides the user configuration file.",
    default=True,
)

port = click.option(
    "--port",
    envvar=None,
    help="Specify the port number for the docs server",
    default=8080,
    type=click.INT,
)

# TODO: The env var and name (reflected in flags) are corrections!
# The original name was `NO_PRINT` and used the env var `DBT_NO_PRINT`.
# Both of which break existing naming conventions.
# This will need to be fixed before use in the main codebase and communicated as a change to the community!
print = click.option(
    "--print/--no-print",
    envvar="DBT_PRINT",
    help="Output all {{ print() }} macro calls.",
    default=True,
)

printer_width = click.option(
    "--printer-width",
    envvar="DBT_PRINTER_WIDTH",
    help="Sets the width of terminal output",
    type=click.INT,
    default=80,
)

profile = click.option(
    "--profile",
    envvar=None,
    help="Which profile to load. Overrides setting in dbt_project.yml.",
)

profiles_dir = click.option(
    "--profiles-dir",
    envvar="DBT_PROFILES_DIR",
    help="Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/",
    default=default_profiles_dir(),
    type=click.Path(exists=True),
)

project_dir = click.option(
    "--project-dir",
    envvar=None,
    help="Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.",
    default=default_project_dir(),
    type=click.Path(exists=True),
)

quiet = click.option(
    "--quiet/--no-quiet",
    envvar="DBT_QUIET",
    help="Suppress all non-error logging to stdout. Does not affect {{ print() }} macro calls.",
)

record_timing_info = click.option(
    "--record-timing-info",
    "-r",
    envvar=None,
    help="When this option is passed, dbt will output low-level timing stats to the specified file. Example: `--record-timing-info output.profile`",
    type=click.Path(exists=False),
)

resource_type = click.option(
    "--resource-type",
    envvar=None,
    help="TODO: No current help text",
    type=click.Choice(
        [
            "metric",
            "source",
            "analysis",
            "model",
            "test",
            "exposure",
            "snapshot",
            "seed",
            "default",
            "all",
        ],
        case_sensitive=False,
    ),
    default="default",
)

selector = click.option(
    "--selector", envvar=None, help="The selector name to use, as defined in selectors.yml"
)

show = click.option(
    "--show", envvar=None, help="Show a sample of the loaded data in the terminal", is_flag=True
)

skip_profile_setup = click.option(
    "--skip-profile-setup", "-s", envvar=None, help="Skip interactive profile setup.", is_flag=True
)

# TODO: The env var and name (reflected in flags) are corrections!
# The original name was `ARTIFACT_STATE_PATH` and used the env var `DBT_ARTIFACT_STATE_PATH`.
# Both of which break existing naming conventions.
# This will need to be fixed before use in the main codebase and communicated as a change to the community!
state = click.option(
    "--state",
    envvar="DBT_STATE",
    help="If set, use the given directory as the source for json files to compare with this project.",
    type=click.Path(
        dir_okay=True,
        exists=True,
        file_okay=False,
        readable=True,
        resolve_path=True,
    ),
)

static_parser = click.option(
    "--static-parser/--no-static-parser",
    envvar="DBT_STATIC_PARSER",
    help="Use the static parser.",
    default=True,
)

store_failures = click.option(
    "--store-failures",
    envvar="DBT_STORE_FAILURES",
    help="Store test results (failing rows) in the database",
    is_flag=True,
)

target = click.option(
    "--target", "-t", envvar=None, help="Which target to load for the given profile"
)

target_path = click.option(
    "--target-path",
    envvar="DBT_TARGET_PATH",
    help="Configure the 'target-path'. Only applies this setting for the current run. Overrides the 'DBT_TARGET_PATH' if it is set.",
    type=click.Path(),
)

threads = click.option(
    "--threads",
    envvar=None,
    help="Specify number of threads to use while executing models. Overrides settings in profiles.yml.",
    default=1,
    type=click.INT,
)

use_colors = click.option(
    "--use-colors/--no-use-colors",
    envvar="DBT_USE_COLORS",
    help="Output is colorized by default and may also be set in a profile or at the command line.",
    default=True,
)

use_experimental_parser = click.option(
    "--use-experimental-parser/--no-use-experimental-parser",
    envvar="DBT_USE_EXPERIMENTAL_PARSER",
    help="Enable experimental parsing features.",
)

vars = click.option(
    "--vars",
    envvar=None,
    help="Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, eg. '{my_variable: my_value}'",
    type=YAML(),
)

version = click.option(
    "--version",
    envvar=None,
    help="Show version information",
    is_flag=True,
)

version_check = click.option(
    "--version-check/--no-version-check",
    envvar="DBT_VERSION_CHECK",
    help="Ensure dbt's version matches the one specified in the dbt_project.yml file ('require-dbt-version')",
    default=True,
)

warn_error = click.option(
    "--warn-error",
    envvar="DBT_WARN_ERROR",
    help="If dbt would normally warn, instead raise an exception. Examples include --select that selects nothing, deprecations, configurations with no associated models, invalid test configurations, and missing sources/refs in tests.",
    default=None,
    flag_value=True,
)

warn_error_options = click.option(
    "--warn-error-options",
    envvar="DBT_WARN_ERROR_OPTIONS",
    default=None,
    help="""If dbt would normally warn, instead raise an exception based on include/exclude configuration. Examples include --select that selects nothing, deprecations, configurations with no associated models, invalid test configurations,
    and missing sources/refs in tests. This argument should be a YAML string, with keys 'include' or 'exclude'. eg. '{"include": "all", "exclude": ["NoNodesForSelectionCriteria"]}'""",
    type=WarnErrorOptionsType(),
)

write_json = click.option(
    "--write-json/--no-write-json",
    envvar="DBT_WRITE_JSON",
    help="Writing the manifest and run_results.json files to disk",
    default=True,
)

write_manifest = click.option(
    "--write-manifest/--no-write-manifest",
    envvar=None,
    help="TODO: No help text currently available",
    default=True,
)
@@ -1,11 +0,0 @@
from pathlib import Path


def default_project_dir():
    paths = list(Path.cwd().parents)
    paths.insert(0, Path.cwd())
    return next((x for x in paths if (x / "dbt_project.yml").exists()), Path.cwd())


def default_profiles_dir():
    return Path.cwd() if (Path.cwd() / "profiles.yml").exists() else Path.home() / ".dbt"
@@ -1,19 +0,0 @@
# Clients README

### Jinja

#### How are materializations defined

Model materializations are kept in `core/dbt/include/global_project/macros/materializations/models/`. Materializations are defined using syntax that isn't part of the Jinja standard library. These tags are referenced internally, and materializations can be overridden in user projects when users have specific needs.

```
-- Pseudocode for arguments
{% materialization <name>, <target name := one_of{default, adapter}> %}
…
{% endmaterialization %}
```

These blocks are referred to as Jinja extensions. Extensions are defined as part of the accepted Jinja code encapsulated within a dbt project. This includes system code used internally by dbt and user-space (i.e. user-defined) macros. Extensions exist to help Jinja users create reusable code blocks or abstract objects; for us, materializations are a great use case, since we pass them around as arguments within dbt system code.

The code that defines this extension consists of a class, `MaterializationExtension`, and a `parse` routine, both of which live in [clients/jinja.py](https://github.com/dbt-labs/dbt-core/blob/main/core/dbt/clients/jinja.py). The routine enables Jinja to parse (i.e. recognize) the unique comma-separated arg structure our `materialization` tags exhibit (e.g. `table, default`).
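For a feel of how such a tag extension hangs together, here is a minimal custom Jinja extension in the same spirit (a sketch, not dbt's actual `MaterializationExtension`; the `greeting` tag is invented for illustration). It declares a `tags` set and a `parse` method that consumes the tag token and returns a macro node:

```
import jinja2
import jinja2.ext
import jinja2.nodes


class GreetingExtension(jinja2.ext.Extension):
    # Registers a {% greeting <name> %} ... {% endgreeting %} block tag.
    tags = {"greeting"}

    def parse(self, parser):
        # Consume the "greeting" token and remember its line number.
        node = jinja2.nodes.Macro(lineno=next(parser.stream).lineno)
        name = parser.parse_assign_target(name_only=True).name
        node.name = "greeting_{}".format(name)
        node.args = []
        node.defaults = []
        node.body = parser.parse_statements(("name:endgreeting",), drop_needle=True)
        return node


env = jinja2.Environment(extensions=[GreetingExtension])
template = env.from_string("{% greeting casual %}hi there{% endgreeting %}")
print(template.module.greeting_casual())  # hi there
```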
@@ -1,15 +1,7 @@
import re
from collections import namedtuple

from dbt.exceptions import (
    BlockDefinitionNotAtTopError,
    DbtInternalError,
    MissingCloseTagError,
    MissingControlFlowStartTagError,
    NestedTagsError,
    UnexpectedControlFlowEndTagError,
    UnexpectedMacroEOFError,
)
import dbt.exceptions


def regex(pat):
@@ -26,7 +18,9 @@ class BlockData:


class BlockTag:
    def __init__(self, block_type_name, block_name, contents=None, full_block=None, **kw):
    def __init__(
        self, block_type_name, block_name, contents=None, full_block=None, **kw
    ):
        self.block_type_name = block_type_name
        self.block_name = block_name
        self.contents = contents
@@ -62,7 +56,9 @@ _NAME_PATTERN = r"[A-Za-z_][A-Za-z_0-9]*"

COMMENT_START_PATTERN = regex(r"(?:(?P<comment_start>(\s*\{\#)))")
COMMENT_END_PATTERN = regex(r"(.*?)(\s*\#\})")
RAW_START_PATTERN = regex(r"(?:\s*\{\%\-|\{\%)\s*(?P<raw_start>(raw))\s*(?:\-\%\}\s*|\%\})")
RAW_START_PATTERN = regex(
    r"(?:\s*\{\%\-|\{\%)\s*(?P<raw_start>(raw))\s*(?:\-\%\}\s*|\%\})"
)
EXPR_START_PATTERN = regex(r"(?P<expr_start>(\{\{\s*))")
EXPR_END_PATTERN = regex(r"(?P<expr_end>(\s*\}\}))")

@@ -92,7 +88,9 @@ TAG_CLOSE_PATTERN = regex(r"(?:(?P<tag_close>(\-\%\}\s*|\%\})))")

# stolen from jinja's lexer. Note that we've consumed all prefix whitespace by
# the time we want to use this.
STRING_PATTERN = regex(r"(?P<string>('([^'\\]*(?:\\.[^'\\]*)*)'|" r'"([^"\\]*(?:\\.[^"\\]*)*)"))')
STRING_PATTERN = regex(
    r"(?P<string>('([^'\\]*(?:\\.[^'\\]*)*)'|" r'"([^"\\]*(?:\\.[^"\\]*)*)"))'
)

QUOTE_START_PATTERN = regex(r"""(?P<quote>(['"]))""")

@@ -147,7 +145,10 @@ class TagIterator:
    def _expect_match(self, expected_name, *patterns, **kwargs):
        match = self._first_match(*patterns, **kwargs)
        if match is None:
            raise UnexpectedMacroEOFError(expected_name, self.data[self.pos :])
            msg = 'unexpected EOF, expected {}, got "{}"'.format(
                expected_name, self.data[self.pos :]
            )
            dbt.exceptions.raise_compiler_error(msg)
        return match

    def handle_expr(self, match):
@@ -235,7 +236,10 @@ class TagIterator:
        self.advance(match.end())
        self._expect_block_close()
        return Tag(
            block_type_name=block_type_name, block_name=block_name, start=start_pos, end=self.pos
            block_type_name=block_type_name,
            block_name=block_name,
            start=start_pos,
            end=self.pos,
        )

    def find_tags(self):
@@ -261,7 +265,7 @@ class TagIterator:
            elif block_type_name is not None:
                yield self.handle_tag(match)
            else:
                raise DbtInternalError(
                raise dbt.exceptions.InternalException(
                    "Invalid regex match in next_block, expected block start, "
                    "expr start, or comment start"
                )
@@ -270,6 +274,13 @@ class TagIterator:
        return self.find_tags()


duplicate_tags = (
    "Got nested tags: {outer.block_type_name} (started at {outer.start}) did "
    "not have a matching {{% end{outer.block_type_name} %}} before a "
    "subsequent {inner.block_type_name} was found (started at {inner.start})"
)


_CONTROL_FLOW_TAGS = {
    "if": "endif",
    "for": "endfor",
@@ -317,16 +328,41 @@ class BlockIterator:
                    found = self.stack.pop()
                else:
                    expected = _CONTROL_FLOW_END_TAGS[tag.block_type_name]
                    raise UnexpectedControlFlowEndTagError(tag, expected, self.tag_parser)
                    dbt.exceptions.raise_compiler_error(
                        (
                            "Got an unexpected control flow end tag, got {} but "
"never saw a preceeding {} (@ {})"
|
||||
                        ).format(
                            tag.block_type_name,
                            expected,
                            self.tag_parser.linepos(tag.start),
                        )
                    )
                expected = _CONTROL_FLOW_TAGS[found]
                if expected != tag.block_type_name:
                    raise MissingControlFlowStartTagError(tag, expected, self.tag_parser)
                    dbt.exceptions.raise_compiler_error(
                        (
                            "Got an unexpected control flow end tag, got {} but "
                            "expected {} next (@ {})"
                        ).format(
                            tag.block_type_name,
                            expected,
                            self.tag_parser.linepos(tag.start),
                        )
                    )

            if tag.block_type_name in allowed_blocks:
                if self.stack:
                    raise BlockDefinitionNotAtTopError(self.tag_parser, tag.start)
                    dbt.exceptions.raise_compiler_error(
                        (
                            "Got a block definition inside control flow at {}. "
                            "All dbt block definitions must be at the top level"
                        ).format(self.tag_parser.linepos(tag.start))
                    )
                if self.current is not None:
                    raise NestedTagsError(outer=self.current, inner=tag)
                    dbt.exceptions.raise_compiler_error(
                        duplicate_tags.format(outer=self.current, inner=tag)
                    )
            if collect_raw_data:
                raw_data = self.data[self.last_position : tag.start]
                self.last_position = tag.start
@@ -347,7 +383,12 @@ class BlockIterator:

        if self.current:
            linecount = self.data[: self.current.end].count("\n") + 1
            raise MissingCloseTagError(self.current.block_type_name, linecount)
            dbt.exceptions.raise_compiler_error(
                (
                    "Reached EOF without finding a close tag for "
                    "{} (searched from line {})"
                ).format(self.current.block_type_name, linecount)
            )

        if collect_raw_data:
            raw_data = self.data[self.last_position :]
@@ -356,5 +397,7 @@ class BlockIterator:

    def lex_for_blocks(self, allowed_blocks=None, collect_raw_data=True):
        return list(
            self.find_blocks(allowed_blocks=allowed_blocks, collect_raw_data=collect_raw_data)
            self.find_blocks(
                allowed_blocks=allowed_blocks, collect_raw_data=collect_raw_data
            )
        )

@@ -7,22 +7,12 @@ import json
import dbt.utils
from typing import Iterable, List, Dict, Union, Optional, Any

from dbt.exceptions import DbtRuntimeError
from dbt.exceptions import RuntimeException


BOM = BOM_UTF8.decode("utf-8")  # '\ufeff'


class Number(agate.data_types.Number):
    # undo the change in https://github.com/wireservice/agate/pull/733
    # i.e. do not cast True and False to numeric 1 and 0
    def cast(self, d):
        if type(d) == bool:
            raise agate.exceptions.CastError("Do not cast True to 1 or False to 0.")
        else:
            return super().cast(d)


class ISODateTime(agate.data_types.DateTime):
    def cast(self, d):
        # this is agate.data_types.DateTime.cast with the "clever" bits removed
@@ -43,21 +33,20 @@ class ISODateTime(agate.data_types.DateTime):
        raise agate.exceptions.CastError('Can not parse value "%s" as datetime.' % d)


def build_type_tester(
    text_columns: Iterable[str], string_null_values: Optional[Iterable[str]] = ("null", "")
) -> agate.TypeTester:

def build_type_tester(text_columns: Iterable[str]) -> agate.TypeTester:
    types = [
        Number(null_values=("null", "")),
        agate.data_types.Number(null_values=("null", "")),
        agate.data_types.Date(null_values=("null", ""), date_format="%Y-%m-%d"),
        agate.data_types.DateTime(null_values=("null", ""), datetime_format="%Y-%m-%d %H:%M:%S"),
        agate.data_types.DateTime(
            null_values=("null", ""), datetime_format="%Y-%m-%d %H:%M:%S"
        ),
        ISODateTime(null_values=("null", "")),
        agate.data_types.Boolean(
            true_values=("true",), false_values=("false",), null_values=("null", "")
        ),
        agate.data_types.Text(null_values=string_null_values),
        agate.data_types.Text(null_values=("null", "")),
    ]
    force = {k: agate.data_types.Text(null_values=string_null_values) for k in text_columns}
    force = {k: agate.data_types.Text(null_values=("null", "")) for k in text_columns}
    return agate.TypeTester(force=force, types=types)


@@ -72,15 +61,12 @@ def table_from_rows(
    if text_only_columns is None:
        column_types = DEFAULT_TYPE_TESTER
    else:
        # If text_only_columns are present, prevent coercing empty string or
        # literal 'null' strings to a None representation.
        column_types = build_type_tester(text_only_columns, string_null_values=())

        column_types = build_type_tester(text_only_columns)
    return agate.Table(rows, column_names, column_types=column_types)


def table_from_data(data, column_names: Iterable[str]) -> agate.Table:
    "Convert a list of dictionaries into an Agate table"
    "Convert list of dictionaries into an Agate table"

    # The agate table is generated from a list of dicts, so the column order
    # from `data` is not preserved. We can use `select` to reorder the columns
@@ -95,32 +81,19 @@ def table_from_data(data, column_names: Iterable[str]) -> agate.Table:


def table_from_data_flat(data, column_names: Iterable[str]) -> agate.Table:
    """
    Convert a list of dictionaries into an Agate table. This method does not
    coerce string values into more specific types (eg. '005' will not be
    coerced to '5'). Additionally, this method does not coerce values to
    None (eg. '' or 'null' will retain their string literal representations).
    """
    "Convert list of dictionaries into an Agate table"

    rows = []
    text_only_columns = set()
    for _row in data:
        row = []
        for col_name in column_names:
            value = _row[col_name]
        for value in list(_row.values()):
            if isinstance(value, (dict, list, tuple)):
                # Represent container types as json strings
                value = json.dumps(value, cls=dbt.utils.JSONEncoder)
                text_only_columns.add(col_name)
            elif isinstance(value, str):
                text_only_columns.add(col_name)
            row.append(value)

                row.append(json.dumps(value, cls=dbt.utils.JSONEncoder))
            else:
                row.append(value)
        rows.append(row)

    return table_from_rows(
        rows=rows, column_names=column_names, text_only_columns=text_only_columns
    )
    return table_from_rows(rows=rows, column_names=column_names)


def empty_table():
@@ -168,7 +141,7 @@ class ColumnTypeBuilder(Dict[str, NullableAgateType]):
            return
        elif not isinstance(value, type(existing_type)):
            # actual type mismatch!
            raise DbtRuntimeError(
            raise RuntimeException(
                f"Tables contain columns with the same names ({key}), "
                f"but different types ({value} vs {existing_type})"
            )
@@ -184,7 +157,9 @@ class ColumnTypeBuilder(Dict[str, NullableAgateType]):
    return result


def _merged_column_types(tables: List[agate.Table]) -> Dict[str, agate.data_types.DataType]:
def _merged_column_types(
    tables: List[agate.Table],
) -> Dict[str, agate.data_types.DataType]:
    # this is a lot like agate.Table.merge, but with handling for all-null
    # rows being "any type".
    new_columns: ColumnTypeBuilder = ColumnTypeBuilder()
core/dbt/clients/gcloud.py (new file, 26 lines)
@@ -0,0 +1,26 @@
from dbt.logger import GLOBAL_LOGGER as logger
import dbt.exceptions
from dbt.clients.system import run_cmd

NOT_INSTALLED_MSG = """
dbt requires the gcloud SDK to be installed to authenticate with BigQuery.
Please download and install the SDK, or use a Service Account instead.

https://cloud.google.com/sdk/
"""


def gcloud_installed():
    try:
        run_cmd(".", ["gcloud", "--version"])
        return True
    except OSError as e:
        logger.debug(e)
        return False


def setup_default_credentials():
    if gcloud_installed():
        run_cmd(".", ["gcloud", "auth", "application-default", "login"])
    else:
        raise dbt.exceptions.RuntimeException(NOT_INSTALLED_MSG)
@@ -2,67 +2,22 @@ import re
import os.path

from dbt.clients.system import run_cmd, rmdir
from dbt.events.functions import fire_event
from dbt.events.types import (
    GitSparseCheckoutSubdirectory,
    GitProgressCheckoutRevision,
    GitProgressUpdatingExistingDependency,
    GitProgressPullingNewDependency,
    GitNothingToDo,
    GitProgressUpdatedCheckoutRange,
    GitProgressCheckedOutAt,
)
from dbt.exceptions import (
    CommandResultError,
    GitCheckoutError,
    GitCloningError,
    UnknownGitCloningProblemError,
    DbtRuntimeError,
)
from packaging import version
from dbt.logger import GLOBAL_LOGGER as logger
import dbt.exceptions


def _is_commit(revision: str) -> bool:
    # match SHA-1 git commit
    return bool(re.match(r"\b[0-9a-f]{40}\b", revision))


def clone(repo, cwd, dirname=None, remove_git_dir=False, revision=None, subdirectory=None):
    has_revision = revision is not None
    is_commit = _is_commit(revision or "")

def clone(repo, cwd, dirname=None, remove_git_dir=False, branch=None):
    clone_cmd = ["git", "clone", "--depth", "1"]
    if subdirectory:
        fire_event(GitSparseCheckoutSubdirectory(subdir=subdirectory))
        out, _ = run_cmd(cwd, ["git", "--version"], env={"LC_ALL": "C"})
        git_version = version.parse(re.search(r"\d+\.\d+\.\d+", out.decode("utf-8")).group(0))
        if not git_version >= version.parse("2.25.0"):
            # 2.25.0 introduces --sparse
            raise RuntimeError(
                "Please update your git version to pull a dbt package "
                "from a subdirectory: your version is {}, >= 2.25.0 needed".format(git_version)
            )
        clone_cmd.extend(["--filter=blob:none", "--sparse"])

    if has_revision and not is_commit:
        clone_cmd.extend(["--branch", revision])
    if branch is not None:
        clone_cmd.extend(["--branch", branch])

    clone_cmd.append(repo)

    if dirname is not None:
        clone_cmd.append(dirname)
    try:
        result = run_cmd(cwd, clone_cmd, env={"LC_ALL": "C"})
    except CommandResultError as exc:
        raise GitCloningError(repo, revision, exc)

    if subdirectory:
        cwd_subdir = os.path.join(cwd, dirname or "")
        clone_cmd_subdir = ["git", "sparse-checkout", "set", subdirectory]
        try:
            run_cmd(cwd_subdir, clone_cmd_subdir)
        except CommandResultError as exc:
            raise GitCloningError(repo, revision, exc)
    result = run_cmd(cwd, clone_cmd, env={"LC_ALL": "C"})

    if remove_git_dir:
        rmdir(os.path.join(dirname, ".git"))
@@ -76,36 +31,32 @@ def list_tags(cwd):
    return tags


def _checkout(cwd, repo, revision):
    fire_event(GitProgressCheckoutRevision(revision=revision))
def _checkout(cwd, repo, branch):
    logger.debug(" Checking out branch {}.".format(branch))

    fetch_cmd = ["git", "fetch", "origin", "--depth", "1"]
    run_cmd(cwd, ["git", "remote", "set-branches", "origin", branch])
    run_cmd(cwd, ["git", "fetch", "--tags", "--depth", "1", "origin", branch])

    if _is_commit(revision):
        run_cmd(cwd, fetch_cmd + [revision])
    else:
        run_cmd(cwd, ["git", "remote", "set-branches", "origin", revision])
        run_cmd(cwd, fetch_cmd + ["--tags", revision])
    tags = list_tags(cwd)

    if _is_commit(revision):
        spec = revision
    # Prefer tags to branches if one exists
    elif revision in list_tags(cwd):
        spec = "tags/{}".format(revision)
    if branch in tags:
        spec = "tags/{}".format(branch)
    else:
        spec = "origin/{}".format(revision)
        spec = "origin/{}".format(branch)

    out, err = run_cmd(cwd, ["git", "reset", "--hard", spec], env={"LC_ALL": "C"})
    return out, err


def checkout(cwd, repo, revision=None):
    if revision is None:
        revision = "HEAD"
def checkout(cwd, repo, branch=None):
    if branch is None:
        branch = "HEAD"
    try:
        return _checkout(cwd, repo, revision)
    except CommandResultError as exc:
        raise GitCheckoutError(repo=repo, revision=revision, error=exc)
        return _checkout(cwd, repo, branch)
    except dbt.exceptions.CommandResultError as exc:
        stderr = exc.stderr.decode("utf-8").strip()
        dbt.exceptions.bad_package_spec(repo, branch, stderr)


def get_current_sha(cwd):
@@ -118,46 +69,40 @@ def remove_remote(cwd):
    return run_cmd(cwd, ["git", "remote", "rm", "origin"], env={"LC_ALL": "C"})


def clone_and_checkout(
    repo, cwd, dirname=None, remove_git_dir=False, revision=None, subdirectory=None
):
def clone_and_checkout(repo, cwd, dirname=None, remove_git_dir=False, branch=None):
    exists = None
    try:
        _, err = clone(
            repo,
            cwd,
            dirname=dirname,
            remove_git_dir=remove_git_dir,
            subdirectory=subdirectory,
        )
    except CommandResultError as exc:
        err = exc.stderr
        _, err = clone(repo, cwd, dirname=dirname, remove_git_dir=remove_git_dir)
    except dbt.exceptions.CommandResultError as exc:
        err = exc.stderr.decode("utf-8")
        exists = re.match("fatal: destination path '(.+)' already exists", err)
        if not exists:
            raise UnknownGitCloningProblemError(repo)
        if not exists:  # something else is wrong, raise it
            raise

    directory = None
    start_sha = None
    if exists:
        directory = exists.group(1)
        fire_event(GitProgressUpdatingExistingDependency(dir=directory))
        logger.debug("Updating existing dependency {}.", directory)
    else:
        matches = re.match("Cloning into '(.+)'", err.decode("utf-8"))
        if matches is None:
            raise DbtRuntimeError(f'Error cloning {repo} - never saw "Cloning into ..." from git')
            raise dbt.exceptions.RuntimeException(
                f'Error cloning {repo} - never saw "Cloning into ..." from git'
            )
        directory = matches.group(1)
        fire_event(GitProgressPullingNewDependency(dir=directory))
        logger.debug("Pulling new dependency {}.", directory)
    full_path = os.path.join(cwd, directory)
    start_sha = get_current_sha(full_path)
    checkout(full_path, repo, revision)
    checkout(full_path, repo, branch)
    end_sha = get_current_sha(full_path)
    if exists:
        if start_sha == end_sha:
            fire_event(GitNothingToDo(sha=start_sha[:7]))
            logger.debug(" Already at {}, nothing to do.", start_sha[:7])
        else:
            fire_event(
                GitProgressUpdatedCheckoutRange(start_sha=start_sha[:7], end_sha=end_sha[:7])
            logger.debug(
                " Updated checkout from {} to {}.", start_sha[:7], end_sha[:7]
            )
    else:
        fire_event(GitProgressCheckedOutAt(end_sha=end_sha[:7]))
        return os.path.join(directory, subdirectory or "")
        logger.debug(" Checked out at {}.", end_sha[:7])
    return directory

@@ -7,7 +7,19 @@ import threading
|
||||
from ast import literal_eval
|
||||
from contextlib import contextmanager
|
||||
from itertools import chain, islice
|
||||
from typing import List, Union, Set, Optional, Dict, Any, Iterator, Type, NoReturn, Tuple, Callable
|
||||
from typing import (
|
||||
List,
|
||||
Union,
|
||||
Set,
|
||||
Optional,
|
||||
Dict,
|
||||
Any,
|
||||
Iterator,
|
||||
Type,
|
||||
NoReturn,
|
||||
Tuple,
|
||||
Callable,
|
||||
)
|
||||
|
||||
import jinja2
|
||||
import jinja2.ext
|
||||
@@ -20,31 +32,22 @@ from dbt.utils import (
|
||||
get_dbt_macro_name,
|
||||
get_docs_macro_name,
|
||||
get_materialization_macro_name,
|
||||
get_test_macro_name,
|
||||
deep_map_render,
|
||||
deep_map,
|
||||
)
|
||||
|
||||
from dbt.clients._jinja_blocks import BlockIterator, BlockData, BlockTag
|
||||
from dbt.contracts.graph.nodes import GenericTestNode
|
||||
|
||||
from dbt.contracts.graph.compiled import CompiledSchemaTestNode
|
||||
from dbt.contracts.graph.parsed import ParsedSchemaTestNode
|
||||
from dbt.exceptions import (
|
||||
CaughtMacroError,
|
||||
CaughtMacroErrorWithNodeError,
|
||||
CompilationError,
|
||||
DbtInternalError,
|
||||
MaterializationArgError,
|
||||
JinjaRenderingError,
|
||||
InternalException,
|
||||
raise_compiler_error,
|
||||
CompilationException,
|
||||
invalid_materialization_argument,
|
||||
MacroReturn,
|
||||
MaterializtionMacroNotUsedError,
|
||||
NoSupportedLanguagesFoundError,
|
||||
UndefinedCompilationError,
|
||||
UndefinedMacroError,
|
||||
JinjaRenderingException,
|
||||
)
|
||||
from dbt import flags
|
||||
from dbt.node_types import ModelLanguage
|
||||
|
||||
|
||||
SUPPORTED_LANG_ARG = jinja2.nodes.Name("supported_languages", "param")
|
||||
from dbt.logger import GLOBAL_LOGGER as logger # noqa
|
||||
|
||||
|
||||
def _linecache_inject(source, write):
|
||||
@@ -67,7 +70,12 @@ def _linecache_inject(source, write):
|
||||
filename = rnd.decode("ascii")
|
||||
|
||||
# put ourselves in the cache
|
||||
cache_entry = (len(source), None, [line + "\n" for line in source.splitlines()], filename)
|
||||
cache_entry = (
|
||||
len(source),
|
||||
None,
|
||||
[line + "\n" for line in source.splitlines()],
|
||||
filename,
|
||||
)
|
||||
# linecache does in fact have an attribute `cache`, thanks
|
||||
linecache.cache[filename] = cache_entry # type: ignore
|
||||
return filename
|
||||
@@ -111,7 +119,7 @@ class NativeSandboxEnvironment(MacroFuzzEnvironment):
|
||||
|
||||
|
||||
class TextMarker(str):
|
||||
"""A special native-env marker that indicates a value is text and is
|
||||
"""A special native-env marker that indicates that a value is text and is
|
||||
not to be evaluated. Use this to prevent your numbery-strings from becoming
|
||||
numbers!
|
||||
"""
|
||||
@@ -161,15 +169,19 @@ def quoted_native_concat(nodes):
|
||||
except (ValueError, SyntaxError, MemoryError):
|
||||
result = raw
|
||||
if isinstance(raw, BoolMarker) and not isinstance(result, bool):
|
||||
raise JinjaRenderingError(f"Could not convert value '{raw!s}' into type 'bool'")
|
||||
raise JinjaRenderingException(
|
||||
f"Could not convert value '{raw!s}' into type 'bool'"
|
||||
)
|
||||
if isinstance(raw, NumberMarker) and not _is_number(result):
|
||||
raise JinjaRenderingError(f"Could not convert value '{raw!s}' into type 'number'")
|
||||
raise JinjaRenderingException(
|
||||
f"Could not convert value '{raw!s}' into type 'number'"
|
||||
)
|
||||
|
||||
return result
|
||||
|
||||
|
||||
class NativeSandboxTemplate(jinja2.nativetypes.NativeTemplate): # mypy: ignore
|
||||
environment_class = NativeSandboxEnvironment # type: ignore
|
||||
environment_class = NativeSandboxEnvironment
|
||||
|
||||
def render(self, *args, **kwargs):
|
||||
"""Render the template to produce a native Python type. If the
|
||||
@@ -241,12 +253,12 @@ class BaseMacroGenerator:
|
||||
try:
|
||||
yield
|
||||
except (TypeError, jinja2.exceptions.TemplateRuntimeError) as e:
|
||||
raise CaughtMacroError(e)
|
||||
raise_compiler_error(str(e))
|
||||
|
||||
def call_macro(self, *args, **kwargs):
|
||||
# called from __call__ methods
|
||||
if self.context is None:
|
||||
raise DbtInternalError("Context is still None in call_macro!")
|
||||
raise InternalException("Context is still None in call_macro!")
|
||||
assert self.context is not None
|
||||
|
||||
macro = self.get_macro()
|
||||
@@ -273,7 +285,7 @@ class MacroStack(threading.local):
|
||||
def pop(self, name):
|
||||
got = self.call_stack.pop()
|
||||
if got != name:
|
||||
raise DbtInternalError(f"popped {got}, expected {name}")
|
||||
raise InternalException(f"popped {got}, expected {name}")
|
||||
|
||||
|
||||
class MacroGenerator(BaseMacroGenerator):
|
||||
@@ -300,8 +312,8 @@ class MacroGenerator(BaseMacroGenerator):
|
||||
try:
|
||||
yield
|
||||
except (TypeError, jinja2.exceptions.TemplateRuntimeError) as e:
|
||||
raise CaughtMacroErrorWithNodeError(exc=e, node=self.macro)
|
||||
except CompilationError as e:
|
||||
raise_compiler_error(str(e), self.macro)
|
||||
except CompilationException as e:
|
||||
e.stack.append(self.macro)
|
||||
raise e
|
||||
|
||||
@@ -309,13 +321,13 @@ class MacroGenerator(BaseMacroGenerator):
|
||||
@contextmanager
|
||||
def track_call(self):
|
||||
# This is only called from __call__
|
||||
if self.stack is None:
|
||||
if self.stack is None or self.node is None:
|
||||
yield
|
||||
else:
|
||||
unique_id = self.macro.unique_id
|
||||
depth = self.stack.depth
|
||||
# only mark depth=0 as a dependency, when creating this dependency we don't pass in stack
|
||||
if depth == 0 and self.node:
|
||||
# only mark depth=0 as a dependency
|
||||
if depth == 0:
|
||||
self.node.depends_on.add_macro(unique_id)
|
||||
self.stack.push(unique_id)
|
||||
try:
|
||||
@@ -372,23 +384,14 @@ class MaterializationExtension(jinja2.ext.Extension):
                 value = parser.parse_expression()
                 adapter_name = value.value
 
-            elif target.name == "supported_languages":
-                target.set_ctx("param")
-                node.args.append(target)
-                parser.stream.expect("assign")
-                languages = parser.parse_expression()
-                node.defaults.append(languages)
-
             else:
-                raise MaterializationArgError(materialization_name, target.name)
-
-        if SUPPORTED_LANG_ARG not in node.args:
-            node.args.append(SUPPORTED_LANG_ARG)
-            node.defaults.append(jinja2.nodes.List([jinja2.nodes.Const("sql")]))
+                invalid_materialization_argument(materialization_name, target.name)
 
         node.name = get_materialization_macro_name(materialization_name, adapter_name)
 
-        node.body = parser.parse_statements(("name:endmaterialization",), drop_needle=True)
+        node.body = parser.parse_statements(
+            ("name:endmaterialization",), drop_needle=True
+        )
 
         return node
@@ -407,19 +410,6 @@ class DocumentationExtension(jinja2.ext.Extension):
         return node
 
 
-class TestExtension(jinja2.ext.Extension):
-    tags = ["test"]
-
-    def parse(self, parser):
-        node = jinja2.nodes.Macro(lineno=next(parser.stream).lineno)
-        test_name = parser.parse_assign_target(name_only=True).name
-
-        parser.parse_signature(node)
-        node.name = get_test_macro_name(test_name)
-        node.body = parser.parse_statements(("name:endtest",), drop_needle=True)
-        return node
-
-
 def _is_dunder_name(name):
     return name.startswith("__") and name.endswith("__")
@@ -444,7 +434,9 @@ def create_undefined(node=None):
         def __getattr__(self, name):
             if name == "name" or _is_dunder_name(name):
                 raise AttributeError(
-                    "'{}' object has no attribute '{}'".format(type(self).__name__, name)
+                    "'{}' object has no attribute '{}'".format(
+                        type(self).__name__, name
+                    )
                 )
 
             self.name = name
@@ -455,7 +447,7 @@ def create_undefined(node=None):
             return self
 
         def __reduce__(self):
-            raise UndefinedCompilationError(name=self.name, node=node)
+            raise_compiler_error(f"{self.name} is undefined", node=node)
 
     return Undefined
@@ -490,7 +482,6 @@ def get_environment(
 
     args["extensions"].append(MaterializationExtension)
     args["extensions"].append(DocumentationExtension)
-    args["extensions"].append(TestExtension)
 
     env_cls: Type[jinja2.Environment]
     text_filter: Type
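With TestExtension dropped, only the materialization and documentation extensions remain registered here. The registration itself is plain jinja2: the environment accepts extension classes or dotted-path strings through its extensions argument, for example with a built-in extension:

    from jinja2 import Environment

    # jinja2.ext.do enables the {% do %} statement; dbt's custom
    # extensions above are registered through the same hook.
    env = Environment(extensions=["jinja2.ext.do"])
    assert env.from_string("{% do [].append(1) %}ok").render() == "ok"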
@@ -513,10 +504,10 @@ def catch_jinja(node=None) -> Iterator[None]:
         yield
     except jinja2.exceptions.TemplateSyntaxError as e:
         e.translated = False
-        raise CompilationError(str(e), node) from e
+        raise CompilationException(str(e), node) from e
     except jinja2.exceptions.UndefinedError as e:
-        raise UndefinedMacroError(str(e), node) from e
-    except CompilationError as exc:
+        raise CompilationException(str(e), node) from e
+    except CompilationException as exc:
         exc.add_node(node)
         raise
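catch_jinja is the choke point where raw Jinja errors become dbt exceptions. A hedged usage sketch against the 0.19-era names on the `+` side (the helper function and template string are illustrative, not code from this diff):

    from dbt.clients.jinja import get_environment
    from dbt.exceptions import CompilationException

    def parse_or_raise(source: str):
        # A syntax error such as "{% if %}" raises TemplateSyntaxError,
        # which the handler above re-raises as CompilationException.
        with catch_jinja():
            return get_environment().from_string(source)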
@@ -578,7 +569,11 @@ def get_rendered(
     # If this is desirable in the native env as well, we could handle the
     # native=True case by passing the input string to ast.literal_eval, like
     # the native renderer does.
-    if not native and isinstance(string, str) and _HAS_RENDER_CHARS_PAT.search(string) is None:
+    if (
+        not native
+        and isinstance(string, str)
+        and _HAS_RENDER_CHARS_PAT.search(string) is None
+    ):
         return string
     template = get_template(
         string,
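The fast path reformatted here rests on one observation: a string containing no Jinja delimiters renders to itself, so the renderer can be skipped entirely. A standalone sketch of the check (the regex is an assumed stand-in, not dbt's exact _HAS_RENDER_CHARS_PAT):

    import re

    # Any of {{ {% {# or their closers means rendering may change the string.
    RENDER_CHARS = re.compile(r"({[{%#])|([#%}]})")

    def needs_rendering(s: str) -> bool:
        return RENDER_CHARS.search(s) is not None

    assert not needs_rendering("select * from orders")   # returned verbatim
    assert needs_rendering("select * from {{ ref('orders') }}")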
@@ -599,7 +594,7 @@ def extract_toplevel_blocks(
     allowed_blocks: Optional[Set[str]] = None,
     collect_raw_data: bool = True,
 ) -> List[Union[BlockData, BlockTag]]:
-    """Extract the top-level blocks with matching block types from a jinja
+    """Extract the top level blocks with matching block types from a jinja
     file, with some special handling for block nesting.
 
     :param data: The data to extract blocks from.
@@ -618,12 +613,12 @@ def extract_toplevel_blocks(
     )
 
 
-GENERIC_TEST_KWARGS_NAME = "_dbt_generic_test_kwargs"
+SCHEMA_TEST_KWARGS_NAME = "_dbt_schema_test_kwargs"
 
 
 def add_rendered_test_kwargs(
     context: Dict[str, Any],
-    node: GenericTestNode,
+    node: Union[ParsedSchemaTestNode, CompiledSchemaTestNode],
     capture_macros: bool = False,
 ) -> None:
     """Render each of the test kwargs in the given context using the native
@@ -643,29 +638,11 @@ def add_rendered_test_kwargs(
             # curly braces to make rendering happy
            value = f"{{{{ {value} }}}}"
 
-        value = get_rendered(value, context, node, capture_macros=capture_macros, native=True)
+        value = get_rendered(
+            value, context, node, capture_macros=capture_macros, native=True
+        )
 
         return value
 
-    # The test_metadata.kwargs come from the test builder, and were set
-    # when the test node was created in _parse_generic_test.
-    kwargs = deep_map_render(_convert_function, node.test_metadata.kwargs)
-    context[GENERIC_TEST_KWARGS_NAME] = kwargs
-
-
-def get_supported_languages(node: jinja2.nodes.Macro) -> List[ModelLanguage]:
-    if "materialization" not in node.name:
-        raise MaterializtionMacroNotUsedError(node=node)
-
-    no_kwargs = not node.defaults
-    no_langs_found = SUPPORTED_LANG_ARG not in node.args
-
-    if no_kwargs or no_langs_found:
-        raise NoSupportedLanguagesFoundError(node=node)
-
-    lang_idx = node.args.index(SUPPORTED_LANG_ARG)
-    # indexing defaults from the end
-    # since supported_languages is a kwarg, and kwargs are always after args
-    return [
-        ModelLanguage[item.value] for item in node.defaults[-(len(node.args) - lang_idx)].items
-    ]
+    kwargs = deep_map(_convert_function, node.test_metadata.kwargs)
+    context[SCHEMA_TEST_KWARGS_NAME] = kwargs
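The brace-wrapping above is worth spelling out: doubled braces are literal braces in an f-string, so the wrap turns a bare kwarg expression into a renderable template. In two lines:

    value = "ref('orders')"
    wrapped = f"{{{{ {value} }}}}"
    assert wrapped == "{{ ref('orders') }}"  # now renderable by Jinja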
@@ -1,152 +0,0 @@
-import jinja2
-from dbt.clients.jinja import get_environment
-from dbt.exceptions import MacroNamespaceNotStringError, MacroNameNotStringError
-
-
-def statically_extract_macro_calls(string, ctx, db_wrapper=None):
-    # set 'capture_macros' to capture undefined
-    env = get_environment(None, capture_macros=True)
-    parsed = env.parse(string)
-
-    standard_calls = ["source", "ref", "config"]
-    possible_macro_calls = []
-    for func_call in parsed.find_all(jinja2.nodes.Call):
-        func_name = None
-        if hasattr(func_call, "node") and hasattr(func_call.node, "name"):
-            func_name = func_call.node.name
-        else:
-            # func_call for dbt.current_timestamp macro
-            # Call(
-            #     node=Getattr(
-            #         node=Name(
-            #             name='dbt_utils',
-            #             ctx='load'
-            #         ),
-            #         attr='current_timestamp',
-            #         ctx='load'
-            #     ),
-            #     args=[],
-            #     kwargs=[],
-            #     dyn_args=None,
-            #     dyn_kwargs=None
-            # )
-            if (
-                hasattr(func_call, "node")
-                and hasattr(func_call.node, "node")
-                and type(func_call.node.node).__name__ == "Name"
-                and hasattr(func_call.node, "attr")
-            ):
-                package_name = func_call.node.node.name
-                macro_name = func_call.node.attr
-                if package_name == "adapter":
-                    if macro_name == "dispatch":
-                        ad_macro_calls = statically_parse_adapter_dispatch(
-                            func_call, ctx, db_wrapper
-                        )
-                        possible_macro_calls.extend(ad_macro_calls)
-                    else:
-                        # This skips calls such as adapter.parse_index
-                        continue
-                else:
-                    func_name = f"{package_name}.{macro_name}"
-            else:
-                continue
-        if not func_name:
-            continue
-        if func_name in standard_calls:
-            continue
-        elif ctx.get(func_name):
-            continue
-        else:
-            if func_name not in possible_macro_calls:
-                possible_macro_calls.append(func_name)
-
-    return possible_macro_calls
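A sketch of how this deleted extractor was typically exercised; the template and result here are illustrative:

    calls = statically_extract_macro_calls(
        "{{ dbt_utils.star(ref('orders')) }}", ctx={}
    )
    # calls == ["dbt_utils.star"]; the nested ref() is filtered out as a
    # standard call, and names already resolvable via ctx are skipped too.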
-
-
-# Call(
-#     node=Getattr(
-#         node=Name(
-#             name='adapter',
-#             ctx='load'
-#         ),
-#         attr='dispatch',
-#         ctx='load'
-#     ),
-#     args=[
-#         Const(value='test_pkg_and_dispatch')
-#     ],
-#     kwargs=[
-#         Keyword(
-#             key='packages',
-#             value=Call(node=Getattr(node=Name(name='local_utils', ctx='load'),
-#             attr='_get_utils_namespaces', ctx='load'), args=[], kwargs=[],
-#             dyn_args=None, dyn_kwargs=None)
-#         )
-#     ],
-#     dyn_args=None,
-#     dyn_kwargs=None
-# )
-def statically_parse_adapter_dispatch(func_call, ctx, db_wrapper):
-    possible_macro_calls = []
-    # This captures an adapter.dispatch('<macro_name>') call.
-
-    func_name = None
-    # macro_name positional argument
-    if len(func_call.args) > 0:
-        func_name = func_call.args[0].value
-    if func_name:
-        possible_macro_calls.append(func_name)
-
-    # packages positional argument
-    macro_namespace = None
-    packages_arg = None
-    packages_arg_type = None
-
-    if len(func_call.args) > 1:
-        packages_arg = func_call.args[1]
-        # This can be a List or a Call
-        packages_arg_type = type(func_call.args[1]).__name__
-
-    # keyword arguments
-    if func_call.kwargs:
-        for kwarg in func_call.kwargs:
-            if kwarg.key == "macro_name":
-                # This will remain to enable static resolution
-                if type(kwarg.value).__name__ == "Const":
-                    func_name = kwarg.value.value
-                    possible_macro_calls.append(func_name)
-                else:
-                    raise MacroNameNotStringError(kwarg_value=kwarg.value.value)
-            elif kwarg.key == "macro_namespace":
-                # This will remain to enable static resolution
-                kwarg_type = type(kwarg.value).__name__
-                if kwarg_type == "Const":
-                    macro_namespace = kwarg.value.value
-                else:
-                    raise MacroNamespaceNotStringError(kwarg_type)
-
-    # positional arguments
-    if packages_arg:
-        if packages_arg_type == "List":
-            # This will remain to enable static resolution
-            packages = []
-            for item in packages_arg.items:
-                packages.append(item.value)
-        elif packages_arg_type == "Const":
-            # This will remain to enable static resolution
-            macro_namespace = packages_arg.value
-
-    if db_wrapper:
-        macro = db_wrapper.dispatch(func_name, macro_namespace=macro_namespace).macro
-        func_name = f"{macro.package_name}.{macro.name}"
-        possible_macro_calls.append(func_name)
-    else:  # this is only for test/unit/test_macro_calls.py
-        if macro_namespace:
-            packages = [macro_namespace]
-        else:
-            packages = []
-        for package_name in packages:
-            possible_macro_calls.append(f"{package_name}.{func_name}")
-
-    return possible_macro_calls
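The commented AST dumps above can be regenerated with nothing but jinja2 itself, which is handy when extending this kind of static analysis; a minimal sketch:

    import jinja2

    env = jinja2.Environment()
    tree = env.parse("{{ adapter.dispatch('current_timestamp', 'dbt')() }}")

    # find_all walks the template AST; an adapter.dispatch(...) call shows
    # up as a Call whose .node is Getattr(Name('adapter'), 'dispatch').
    for call in tree.find_all(jinja2.nodes.Call):
        print(type(call.node).__name__, getattr(call.node, "attr", None))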
@@ -1,21 +1,10 @@
-import functools
-from typing import Any, Dict, List
+from functools import wraps
 
 import requests
-from dbt.events.functions import fire_event
-from dbt.events.types import (
-    RegistryProgressGETRequest,
-    RegistryProgressGETResponse,
-    RegistryIndexProgressGETRequest,
-    RegistryIndexProgressGETResponse,
-    RegistryResponseUnexpectedType,
-    RegistryResponseMissingTopKeys,
-    RegistryResponseMissingNestedKeys,
-    RegistryResponseExtraNestedKeys,
-)
-from dbt.utils import memoized, _connection_exception_retry as connection_exception_retry
-from dbt import deprecations
-from dbt import semver
+
+from dbt.exceptions import RegistryException
+from dbt.utils import memoized
+from dbt.logger import GLOBAL_LOGGER as logger
 import os
+import time
 
 if os.getenv("DBT_PACKAGE_HUB_URL"):
     DEFAULT_REGISTRY_BASE_URL = os.getenv("DBT_PACKAGE_HUB_URL")
@@ -23,169 +12,61 @@ else:
     DEFAULT_REGISTRY_BASE_URL = "https://hub.getdbt.com/"
 
 
-def _get_url(name, registry_base_url=None):
+def _get_url(url, registry_base_url=None):
     if registry_base_url is None:
         registry_base_url = DEFAULT_REGISTRY_BASE_URL
-    url = "api/v1/{}.json".format(name)
 
     return "{}{}".format(registry_base_url, url)
 
 
-def _get_with_retries(package_name, registry_base_url=None):
-    get_fn = functools.partial(_get, package_name, registry_base_url)
-    return connection_exception_retry(get_fn, 5)
+def _wrap_exceptions(fn):
+    @wraps(fn)
+    def wrapper(*args, **kwargs):
+        max_attempts = 5
+        attempt = 0
+        while True:
+            attempt += 1
+            try:
+                return fn(*args, **kwargs)
+            except requests.exceptions.ConnectionError as exc:
+                if attempt < max_attempts:
+                    time.sleep(1)
+                    continue
+
+                raise RegistryException("Unable to connect to registry hub") from exc
+
+    return wrapper
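The `+` side trades the generic connection_exception_retry helper for a purpose-built decorator: up to five attempts, a fixed one-second sleep, then RegistryException. Applying it is just stacking the decorator (toy example; the wrapped function and path are illustrative):

    @_wrap_exceptions
    def fetch_index():
        # Transient ConnectionErrors are retried with a 1-second pause
        # before RegistryException is finally raised.
        return requests.get(_get_url("api/v1/index.json")).json()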
 
 
-def _get(package_name, registry_base_url=None):
-    url = _get_url(package_name, registry_base_url)
-    fire_event(RegistryProgressGETRequest(url=url))
-    # all exceptions from requests get caught in the retry logic so no need to wrap this here
-    resp = requests.get(url, timeout=30)
-    fire_event(RegistryProgressGETResponse(url=url, resp_code=resp.status_code))
+@_wrap_exceptions
+def _get(path, registry_base_url=None):
+    url = _get_url(path, registry_base_url)
+    logger.debug("Making package registry request: GET {}".format(url))
+    resp = requests.get(url)
+    logger.debug("Response from registry: GET {} {}".format(url, resp.status_code))
     resp.raise_for_status()
-
-    # The response should always be a dictionary. Anything else is unexpected, raise error.
-    # Raising this error will cause this function to retry (if called within _get_with_retries)
-    # and hopefully get a valid response. This seems to happen when there's an issue with the Hub.
-    # Since we control what we expect the HUB to return, this is safe.
-    # See https://github.com/dbt-labs/dbt-core/issues/4577
-    # and https://github.com/dbt-labs/dbt-core/issues/4849
-    response = resp.json()
-
-    if not isinstance(response, dict):  # This will also catch Nonetype
-        error_msg = (
-            f"Request error: Expected a response type of <dict> but got {type(response)} instead"
-        )
-        fire_event(RegistryResponseUnexpectedType(response=response))
-        raise requests.exceptions.ContentDecodingError(error_msg, response=resp)
-
-    # check for expected top level keys
-    expected_keys = {"name", "versions"}
-    if not expected_keys.issubset(response):
-        error_msg = (
-            f"Request error: Expected the response to contain keys {expected_keys} "
-            f"but is missing {expected_keys.difference(set(response))}"
-        )
-        fire_event(RegistryResponseMissingTopKeys(response=response))
-        raise requests.exceptions.ContentDecodingError(error_msg, response=resp)
-
-    # check for the keys we need nested under each version
-    expected_version_keys = {"name", "packages", "downloads"}
-    all_keys = set().union(*(response["versions"][d] for d in response["versions"]))
-    if not expected_version_keys.issubset(all_keys):
-        error_msg = (
-            "Request error: Expected the response for the version to contain keys "
-            f"{expected_version_keys} but is missing {expected_version_keys.difference(all_keys)}"
-        )
-        fire_event(RegistryResponseMissingNestedKeys(response=response))
-        raise requests.exceptions.ContentDecodingError(error_msg, response=resp)
-
-    # all version responses should contain identical keys.
-    has_extra_keys = set().difference(*(response["versions"][d] for d in response["versions"]))
-    if has_extra_keys:
-        error_msg = (
-            "Request error: Keys for all versions do not match. Found extra key(s) "
-            f"of {has_extra_keys}."
-        )
-        fire_event(RegistryResponseExtraNestedKeys(response=response))
-        raise requests.exceptions.ContentDecodingError(error_msg, response=resp)
-
-    return response
+    return resp.json()
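The deleted block on the `-` side defends against malformed Hub payloads. The shape it insisted on looks roughly like this (abridged; field values invented for illustration):

    # Minimal payload passing the removed checks: top-level "name" and
    # "versions", with every version carrying the same nested keys,
    # including "name", "packages" and "downloads".
    sample_response = {
        "name": "dbt_utils",
        "namespace": "dbt-labs",
        "versions": {
            "0.6.4": {
                "name": "dbt_utils",
                "packages": [],
                "downloads": {"tarball": "https://example.com/dbt_utils.tgz"},
            },
        },
    }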
 
 
-_get_cached = memoized(_get_with_retries)
-
-
-def package(package_name, registry_base_url=None) -> Dict[str, Any]:
-    # returns a dictionary of metadata for all versions of a package
-    response = _get_cached(package_name, registry_base_url)
-    # Either redirectnamespace or redirectname in the JSON response indicate a redirect
-    # redirectnamespace redirects based on package ownership
-    # redirectname redirects based on package name
-    # Both can be present at the same time, or neither. Fails gracefully to old name
-    if ("redirectnamespace" in response) or ("redirectname" in response):
-
-        if ("redirectnamespace" in response) and response["redirectnamespace"] is not None:
-            use_namespace = response["redirectnamespace"]
-        else:
-            use_namespace = response["namespace"]
-
-        if ("redirectname" in response) and response["redirectname"] is not None:
-            use_name = response["redirectname"]
-        else:
-            use_name = response["name"]
-
-        new_nwo = use_namespace + "/" + use_name
-        deprecations.warn("package-redirect", old_name=package_name, new_name=new_nwo)
-    return response["versions"]
-
-
-def package_version(package_name, version, registry_base_url=None) -> Dict[str, Any]:
-    # returns the metadata of a specific version of a package
-    response = package(package_name, registry_base_url)
-    return response[version]
-
-
-def is_compatible_version(package_spec, dbt_version) -> bool:
-    require_dbt_version = package_spec.get("require_dbt_version")
-    if not require_dbt_version:
-        # if version requirements are missing or empty, assume any version is compatible
-        return True
-    else:
-        # determine whether dbt_version satisfies this package's require-dbt-version config
-        if not isinstance(require_dbt_version, list):
-            require_dbt_version = [require_dbt_version]
-        supported_versions = [
-            semver.VersionSpecifier.from_version_string(v) for v in require_dbt_version
-        ]
-        return semver.versions_compatible(dbt_version, *supported_versions)
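The contract of the compatibility check being deleted: require_dbt_version may be a single specifier or a list, and the installed version must satisfy them all. A hedged sketch, assuming dbt_version arrives as a parsed VersionSpecifier, as it does at the call sites:

    from dbt import semver

    installed = semver.VersionSpecifier.from_version_string("0.19.1")

    assert is_compatible_version(
        {"require_dbt_version": [">=0.19.0", "<0.20.0"]}, installed
    )
    assert is_compatible_version({}, installed)  # no requirement: compatible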
 
 
-def get_compatible_versions(package_name, dbt_version, should_version_check) -> List["str"]:
-    # returns a list of all available versions of a package
-    response = package(package_name)
-
-    # if the user doesn't care about installing compatible versions, just return them all
-    if not should_version_check:
-        return list(response)
-
-    # otherwise, only return versions that are compatible with the installed version of dbt-core
-    else:
-        compatible_versions = [
-            pkg_version
-            for pkg_version, info in response.items()
-            if is_compatible_version(info, dbt_version)
-        ]
-        return compatible_versions
-
-
-def _get_index(registry_base_url=None):
-
-    url = _get_url("index", registry_base_url)
-    fire_event(RegistryIndexProgressGETRequest(url=url))
-    # all exceptions from requests get caught in the retry logic so no need to wrap this here
-    resp = requests.get(url, timeout=30)
-    fire_event(RegistryIndexProgressGETResponse(url=url, resp_code=resp.status_code))
-    resp.raise_for_status()
-
-    # The response should be a list. Anything else is unexpected, raise an error.
-    # Raising this error will cause this function to retry and hopefully get a valid response.
-
-    response = resp.json()
-
-    if not isinstance(response, list):  # This will also catch Nonetype
-        error_msg = (
-            f"Request error: The response type of {type(response)} is not valid: {resp.text}"
-        )
-        raise requests.exceptions.ContentDecodingError(error_msg, response=resp)
-
-    return response
-
-
-def index(registry_base_url=None) -> List[str]:
-    # this returns a list of all packages on the Hub
-    get_index_fn = functools.partial(_get_index, registry_base_url)
-    return connection_exception_retry(get_index_fn, 5)
+def index(registry_base_url=None):
+    return _get("api/v1/index.json", registry_base_url)
 
 
 index_cached = memoized(index)
 
 
+def packages(registry_base_url=None):
+    return _get("api/v1/packages.json", registry_base_url)
+
+
+def package(name, registry_base_url=None):
+    return _get("api/v1/{}.json".format(name), registry_base_url)
+
+
+def package_version(name, version, registry_base_url=None):
+    return _get("api/v1/{}/{}.json".format(name, version), registry_base_url)
+
+
+def get_available_versions(name):
+    response = package(name)
+    return list(response["versions"])
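Net effect of this file's rewrite: the 0.19-era client maps each helper straight onto a Hub URL and leans on _wrap_exceptions for resilience. The resulting request paths, under the default base URL:

    # index()                        -> GET .../api/v1/index.json
    # packages()                     -> GET .../api/v1/packages.json
    # package("dbt_utils")           -> GET .../api/v1/dbt_utils.json
    # package_version("dbt_utils", "0.6.4")
    #                                -> GET .../api/v1/dbt_utils/0.6.4.json
    print(_get_url("api/v1/dbt_utils.json"))
    # -> https://hub.getdbt.com/api/v1/dbt_utils.json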
Some files were not shown because too many files have changed in this diff.