Compare commits


1 commit

Author: Callum McCann
SHA1: 2da925aa25
Message: Adding entity node to core (#6648)
* first draft

* finishing first commit

* adding testing project

* adding changie

* cleaning

* removing blocks

* fixing proto error message

* updates to events

* fixing issues

* adding test dimension

* updating schemas

* updating manifest.json

* removing old versions from compatibility

* updating

* fixes

* fixing more bugs caught by tests

* updating tests
Date: 2023-01-31 09:03:06 -06:00
1243 changed files with 61539 additions and 161209 deletions


@@ -1,19 +1,13 @@
[bumpversion]
current_version = 1.10.0a1
parse = (?P<major>[\d]+) # major version number
\.(?P<minor>[\d]+) # minor version number
\.(?P<patch>[\d]+) # patch version number
(?P<prerelease> # optional pre-release - ex: a1, b2, rc25
(?P<prekind>a|b|rc) # pre-release type
(?P<num>[\d]+) # pre-release version number
current_version = 1.5.0a1
parse = (?P<major>\d+)
\.(?P<minor>\d+)
\.(?P<patch>\d+)
((?P<prekind>a|b|rc)
(?P<pre>\d+) # pre-release version num
)?
( # optional nightly release indicator
\.(?P<nightly>dev[0-9]+) # ex: .dev02142023
)? # expected matches: `1.15.0`, `1.5.0a11`, `1.5.0a1.dev123`, `1.5.0.dev123457`, expected failures: `1`, `1.5`, `1.5.2-a1`, `text1.5.0`
serialize =
{major}.{minor}.{patch}{prekind}{num}.{nightly}
{major}.{minor}.{patch}.{nightly}
{major}.{minor}.{patch}{prekind}{num}
{major}.{minor}.{patch}{prekind}{pre}
{major}.{minor}.{patch}
commit = False
tag = False
@@ -27,11 +21,19 @@ values =
rc
final
[bumpversion:part:num]
[bumpversion:part:pre]
first_value = 1
[bumpversion:part:nightly]
[bumpversion:file:core/setup.py]
[bumpversion:file:core/dbt/version.py]
[bumpversion:file:plugins/postgres/setup.py]
[bumpversion:file:plugins/postgres/dbt/adapters/postgres/__version__.py]
[bumpversion:file:docker/Dockerfile]
[bumpversion:file:tests/adapter/setup.py]
[bumpversion:file:tests/adapter/dbt/tests/adapter/__version__.py]
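As a quick sanity check of the simplified `parse` pattern above, here is a minimal Python sketch that reproduces the expected matches and failures listed in the inline comment. It assumes the pattern must consume the whole string (which is what the expected failure for `text1.5.0` implies):

import re

# Simplified version pattern from the new .bumpversion.cfg above.
VERSION_RE = re.compile(
    r"(?P<major>\d+)"
    r"\.(?P<minor>\d+)"
    r"\.(?P<patch>\d+)"
    r"((?P<prekind>a|b|rc)(?P<pre>\d+))?"  # optional pre-release, e.g. a1, rc25
    r"(\.(?P<nightly>dev[0-9]+))?"         # optional nightly, e.g. .dev02142023
)

for candidate in ["1.15.0", "1.5.0a11", "1.5.0a1.dev123", "1", "1.5.2-a1", "text1.5.0"]:
    m = VERSION_RE.fullmatch(candidate)
    print(candidate, "->", m.groupdict() if m else "no match")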


@@ -3,9 +3,6 @@
For information on prior major and minor releases, see their changelogs:
* [1.7](https://github.com/dbt-labs/dbt-core/blob/1.7.latest/CHANGELOG.md)
* [1.6](https://github.com/dbt-labs/dbt-core/blob/1.6.latest/CHANGELOG.md)
* [1.5](https://github.com/dbt-labs/dbt-core/blob/1.5.latest/CHANGELOG.md)
* [1.4](https://github.com/dbt-labs/dbt-core/blob/1.4.latest/CHANGELOG.md)
* [1.3](https://github.com/dbt-labs/dbt-core/blob/1.3.latest/CHANGELOG.md)
* [1.2](https://github.com/dbt-labs/dbt-core/blob/1.2.latest/CHANGELOG.md)


@@ -1,6 +1,6 @@
# dbt Core Changelog
- This file provides a full account of all changes to `dbt-core`
- This file provides a full account of all changes to `dbt-core` and `dbt-postgres`
- Changes are listed under the (pre)release in which they first appear. Subsequent releases include changes from previous releases.
- "Breaking changes" listed under a version may require action from end users or external maintainers when upgrading to that version.
- Do not edit this file directly. This file is auto-generated using [changie](https://github.com/miniscruff/changie). For details on how to document a change, see [the contributing guide](https://github.com/dbt-labs/dbt-core/blob/main/CONTRIBUTING.md#adding-changelog-entry)


@@ -0,0 +1,6 @@
kind: Features
body: Adding the entity node
time: 2023-01-18T13:48:04.487817-06:00
custom:
Author: callum-mcdata
Issue: "6627"


@@ -1,6 +0,0 @@
kind: Features
body: Add new hard_deletes="new_record" mode for snapshots.
time: 2024-11-04T12:00:53.95191-05:00
custom:
Author: peterallenwebb
Issue: "10235"


@@ -1,6 +0,0 @@
kind: Features
body: Add `batch` context object to model jinja context
time: 2024-11-21T12:56:30.715473-06:00
custom:
Author: QMalcolm
Issue: "11025"


@@ -1,6 +0,0 @@
kind: Fixes
body: dbt retry does not respect --threads
time: 2024-08-22T12:21:32.358066+05:30
custom:
Author: donjin-master
Issue: "10584"


@@ -1,6 +0,0 @@
kind: Fixes
body: Catch DbtRuntimeError for hooks
time: 2024-11-21T18:17:39.753235Z
custom:
Author: aranke
Issue: "11012"


@@ -0,0 +1,6 @@
kind: Under the Hood
body: Fix use of ConnectionReused logging event
time: 2023-01-13T13:25:13.023168-05:00
custom:
Author: gshank
Issue: "6168"


@@ -0,0 +1,6 @@
kind: Under the Hood
body: Update deprecated github action command
time: 2023-01-17T11:17:37.046095-06:00
custom:
Author: davidbloss
Issue: "6153"


@@ -4,7 +4,6 @@ headerPath: header.tpl.md
versionHeaderPath: ""
changelogPath: CHANGELOG.md
versionExt: md
envPrefix: "CHANGIE_"
versionFormat: '## dbt-core {{.Version}} - {{.Time.Format "January 02, 2006"}}'
kindFormat: '### {{.Kind}}'
changeFormat: |-
@@ -31,7 +30,43 @@ kinds:
- {{.Body}} ({{ range $index, $element := $IssueList }}{{if $index}}, {{end}}{{$element}}{{end}})
- label: Under the Hood
- label: Dependencies
changeFormat: |-
{{- $PRList := list }}
{{- $changes := splitList " " $.Custom.PR }}
{{- range $pullrequest := $changes }}
{{- $changeLink := "[#nbr](https://github.com/dbt-labs/dbt-core/pull/nbr)" | replace "nbr" $pullrequest }}
{{- $PRList = append $PRList $changeLink }}
{{- end -}}
- {{.Body}} ({{ range $index, $element := $PRList }}{{if $index}}, {{end}}{{$element}}{{end}})
skipGlobalChoices: true
additionalChoices:
- key: Author
label: GitHub Username(s) (separated by a single space if multiple)
type: string
minLength: 3
- key: PR
label: GitHub Pull Request Number (separated by a single space if multiple)
type: string
minLength: 1
- label: Security
changeFormat: |-
{{- $PRList := list }}
{{- $changes := splitList " " $.Custom.PR }}
{{- range $pullrequest := $changes }}
{{- $changeLink := "[#nbr](https://github.com/dbt-labs/dbt-core/pull/nbr)" | replace "nbr" $pullrequest }}
{{- $PRList = append $PRList $changeLink }}
{{- end -}}
- {{.Body}} ({{ range $index, $element := $PRList }}{{if $index}}, {{end}}{{$element}}{{end}})
skipGlobalChoices: true
additionalChoices:
- key: Author
label: GitHub Username(s) (separated by a single space if multiple)
type: string
minLength: 3
- key: PR
label: GitHub Pull Request Number (separated by a single space if multiple)
type: string
minLength: 1
newlines:
afterChangelogHeader: 1
@@ -52,36 +87,32 @@ custom:
footerFormat: |
{{- $contributorDict := dict }}
{{- /* ensure all names in this list are all lowercase for later matching purposes */}}
{{- $core_team := splitList " " .Env.CORE_TEAM }}
{{- /* ensure we always skip snyk and dependabot in addition to the core team */}}
{{- $maintainers := list "dependabot[bot]" "snyk-bot"}}
{{- range $team_member := $core_team }}
{{- $team_member_lower := lower $team_member }}
{{- $maintainers = append $maintainers $team_member_lower }}
{{- end }}
{{- /* any names added to this list should be all lowercase for later matching purposes */}}
{{- $core_team := list "michelleark" "peterallenwebb" "emmyoop" "nathaniel-may" "gshank" "leahwicz" "chenyulinx" "stu-k" "iknox-fa" "versusfacit" "mcknight-42" "jtcohen6" "aranke" "dependabot[bot]" "snyk-bot" "colin-rogers-dbt" }}
{{- range $change := .Changes }}
{{- $authorList := splitList " " $change.Custom.Author }}
{{- /* loop through all authors for a single changelog */}}
{{- range $author := $authorList }}
{{- $authorLower := lower $author }}
{{- /* we only want to include non-core team contributors */}}
{{- if not (has $authorLower $maintainers)}}
{{- if not (has $authorLower $core_team)}}
{{- $changeList := splitList " " $change.Custom.Author }}
{{- $IssueList := list }}
{{- /* Docs kind link back to dbt-docs instead of dbt-core issues */}}
{{- $changeLink := $change.Kind }}
{{- $changes := splitList " " $change.Custom.Issue }}
{{- range $issueNbr := $changes }}
{{- $changeLink := "[#nbr](https://github.com/dbt-labs/dbt-core/issues/nbr)" | replace "nbr" $issueNbr }}
{{- $IssueList = append $IssueList $changeLink }}
{{- if or (eq $change.Kind "Dependencies") (eq $change.Kind "Security") }}
{{- $changeLink = "[#nbr](https://github.com/dbt-labs/dbt-core/pull/nbr)" | replace "nbr" $change.Custom.PR }}
{{- else if eq $change.Kind "Docs"}}
{{- $changeLink = "[dbt-docs/#nbr](https://github.com/dbt-labs/dbt-docs/issues/nbr)" | replace "nbr" $change.Custom.Issue }}
{{- else }}
{{- $changeLink = "[#nbr](https://github.com/dbt-labs/dbt-core/issues/nbr)" | replace "nbr" $change.Custom.Issue }}
{{- end }}
{{- /* check if this contributor has other changes associated with them already */}}
{{- if hasKey $contributorDict $author }}
{{- $contributionList := get $contributorDict $author }}
{{- $contributionList = concat $contributionList $IssueList }}
{{- $contributionList = append $contributionList $changeLink }}
{{- $contributorDict := set $contributorDict $author $contributionList }}
{{- else }}
{{- $contributionList := $IssueList }}
{{- $contributionList := list $changeLink }}
{{- $contributorDict := set $contributorDict $author $contributionList }}
{{- end }}
{{- end}}
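For readers less fluent in Go templates, a rough Python analogue of the contributor-grouping logic in the footerFormat above may help. The input data here is hypothetical; the real template iterates parsed changie entries and reads the core team from the CORE_TEAM environment variable:

# Hypothetical Python analogue of footerFormat: collect one change link
# per change under each non-core author.
core_team = {"aranke", "gshank"}  # assumption: lowercased names from $CORE_TEAM
maintainers = {"dependabot[bot]", "snyk-bot"} | core_team

changes = [  # hypothetical parsed changie entries
    {"Kind": "Fixes", "Custom": {"Author": "donjin-master", "Issue": "10584"}},
    {"Kind": "Docs", "Custom": {"Author": "someuser", "Issue": "123"}},
    {"Kind": "Dependencies", "Custom": {"Author": "someuser", "PR": "456"}},
]

contributor_dict = {}
for change in changes:
    for author in change["Custom"]["Author"].split(" "):
        if author.lower() in maintainers:
            continue  # only non-core contributors are credited
        kind, custom = change["Kind"], change["Custom"]
        if kind in ("Dependencies", "Security"):
            link = f"[#{custom['PR']}](https://github.com/dbt-labs/dbt-core/pull/{custom['PR']})"
        elif kind == "Docs":
            link = f"[dbt-docs/#{custom['Issue']}](https://github.com/dbt-labs/dbt-docs/issues/{custom['Issue']})"
        else:
            link = f"[#{custom['Issue']}](https://github.com/dbt-labs/dbt-core/issues/{custom['Issue']})"
        contributor_dict.setdefault(author, []).append(link)

print(contributor_dict)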


@@ -7,9 +7,6 @@ ignore =
W503 # makes Flake8 work like black
W504
E203 # makes Flake8 work like black
E704 # makes Flake8 work like black
E741
E501 # long line checking is done in black
exclude = test/
per-file-ignores =
*/__init__.py: F401

.gitattributes

@@ -1,6 +1,2 @@
core/dbt/task/docs/index.html binary
core/dbt/include/index.html binary
tests/functional/artifacts/data/state/*/manifest.json binary
core/dbt/docs/build/html/searchindex.js binary
core/dbt/docs/build/html/index.html binary
performance/runner/Cargo.lock binary
core/dbt/events/types_pb2.py binary

.github/CODEOWNERS

@@ -11,8 +11,65 @@
# As a default for areas with no assignment,
# the core team as a whole will be assigned
* @dbt-labs/core-team
* @dbt-labs/core
### ARTIFACTS
# Changes to GitHub configurations including Actions
/.github/ @leahwicz
/schemas/dbt @dbt-labs/cloud-artifacts
### LANGUAGE
# Language core modules
/core/dbt/config/ @dbt-labs/core-language
/core/dbt/context/ @dbt-labs/core-language
/core/dbt/contracts/ @dbt-labs/core-language
/core/dbt/deps/ @dbt-labs/core-language
/core/dbt/events/ @dbt-labs/core-language # structured logging
/core/dbt/parser/ @dbt-labs/core-language
# Language misc files
/core/dbt/dataclass_schema.py @dbt-labs/core-language
/core/dbt/hooks.py @dbt-labs/core-language
/core/dbt/node_types.py @dbt-labs/core-language
/core/dbt/semver.py @dbt-labs/core-language
### EXECUTION
# Execution core modules
/core/dbt/graph/ @dbt-labs/core-execution
/core/dbt/task/ @dbt-labs/core-execution
# Execution misc files
/core/dbt/compilation.py @dbt-labs/core-execution
/core/dbt/flags.py @dbt-labs/core-execution
/core/dbt/lib.py @dbt-labs/core-execution
/core/dbt/main.py @dbt-labs/core-execution
/core/dbt/profiler.py @dbt-labs/core-execution
/core/dbt/selected_resources.py @dbt-labs/core-execution
/core/dbt/tracking.py @dbt-labs/core-execution
/core/dbt/version.py @dbt-labs/core-execution
### ADAPTERS
# Adapter interface ("base" + "sql" adapter defaults, cache)
/core/dbt/adapters @dbt-labs/core-adapters
# Global project (default macros + materializations), starter project
/core/dbt/include @dbt-labs/core-adapters
# Postgres plugin
/plugins/ @dbt-labs/core-adapters
# Functional tests for adapter plugins
/tests/adapter @dbt-labs/core-adapters
### TESTS
# Overlapping ownership for vast majority of unit + functional tests
# Perf regression testing framework
# This excludes the test project files itself since those aren't specific
# framework changes (excluded by not setting an owner next to it- no owner)
/performance @nathaniel-may
/performance/projects


@@ -1,18 +0,0 @@
name: 📄 Code docs
description: Report an issue for markdown files within this repo, such as README, ARCHITECTURE, etc.
title: "[Code docs] <title>"
labels: ["triage"]
body:
- type: markdown
attributes:
value: |
Thanks for taking the time to fill out this code docs issue!
- type: textarea
attributes:
label: Please describe the issue and your proposals.
description: |
Links? References? Anything that will give us more context about the issue you are encountering!
Tip: You can attach images by clicking this area to highlight it and then dragging files in.
validations:
required: false


@@ -1,8 +1,5 @@
blank_issues_enabled: false
contact_links:
- name: Documentation
url: https://github.com/dbt-labs/docs.getdbt.com/issues/new/choose
about: Problems and issues with dbt product documentation hosted on docs.getdbt.com. Issues for markdown files within this repo, such as README, should be opened using the "Code docs" template.
- name: Ask the community for help
url: https://github.com/dbt-labs/docs.getdbt.com/discussions
about: Need help troubleshooting? Check out our guide on how to ask


@@ -1,67 +0,0 @@
name: 🛠️ Implementation
description: This is an implementation ticket intended for use by the maintainers of dbt-core
title: "[<project>] <title>"
labels: ["user docs"]
body:
- type: markdown
attributes:
value: This is an implementation ticket intended for use by the maintainers of dbt-core
- type: checkboxes
attributes:
label: Housekeeping
description: >
A couple friendly reminders:
1. Remove the `user docs` label if the scope of this work does not require changes to https://docs.getdbt.com/docs: no end-user interface (e.g. yml spec, CLI, error messages, etc) or functional changes
2. Link any blocking issues in the "Blocked on" field under the "Core devs & maintainers" project.
options:
- label: I am a maintainer of dbt-core
required: true
- type: textarea
attributes:
label: Short description
description: |
Describe the scope of the ticket, a high-level implementation approach and any tradeoffs to consider
validations:
required: true
- type: textarea
attributes:
label: Acceptance criteria
description: |
What is the definition of done for this ticket? Include any relevant edge cases and/or test cases
validations:
required: true
- type: textarea
attributes:
label: Suggested Tests
description: |
Provide scenarios to test. Link to existing similar tests if appropriate.
placeholder: |
1. Test with no version specified in the schema file and use selection logic on a versioned model for a specific version. Expect pass.
2. Test with a version specified in the schema file that is not valid. Expect ParsingError.
validations:
required: true
- type: textarea
attributes:
label: Impact to Other Teams
description: |
Will this change impact other teams? Include details of the kinds of changes required (new tests, code changes, related tickets) and _add the relevant `Impact:[team]` label_.
placeholder: |
Example: This change impacts `dbt-redshift` because the tests will need to be modified. The `Impact:[Adapter]` label has been added.
validations:
required: true
- type: textarea
attributes:
label: Will backports be required?
description: |
Will this change need to be backported to previous versions? Add details, possible blockers to backporting and _add the relevant backport labels `backport 1.x.latest`_
placeholder: |
Example: Backport to 1.6.latest, 1.5.latest and 1.4.latest. Since 1.4 isn't using click, the backport may be complicated. The `backport 1.6.latest`, `backport 1.5.latest` and `backport 1.4.latest` labels have been added.
validations:
required: true
- type: textarea
attributes:
label: Context
description: |
Provide the "why", motivation, and alternative approaches considered -- linking to previous refinement issues, spikes and documentation as appropriate
validations:
required: false

.github/_README.md

@@ -47,8 +47,7 @@ ___
### How to re-run jobs
- From the UI you can rerun from failure
- You can retrigger the cla check by commenting on the PR with `@cla-bot check`
- Some actions cannot be rerun in the GitHub UI. Namely the snyk checks and the cla check. Snyk checks are rerun by closing and reopening the PR. You can retrigger the cla check by commenting on the PR with `@cla-bot check`
___
@@ -198,7 +197,7 @@ ___
```yaml
- name: Configure AWS credentials from Test account
uses: aws-actions/configure-aws-credentials@v2
uses: aws-actions/configure-aws-credentials@v1
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}


@@ -35,7 +35,7 @@ jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v1
- name: Wrangle latest tag
id: is_latest
uses: ./.github/actions/latest-wrangler


@@ -1,21 +1,20 @@
name: "GitHub package `latest` tag wrangler for containers"
description: "Determines if the published image should include `latest` tags"
name: "Github package 'latest' tag wrangler for containers"
description: "Determines wether or not a given dbt container should be given a bare 'latest' tag (I.E. dbt-core:latest)"
inputs:
package_name:
description: "Package being published (i.e. `dbt-core`, `dbt-redshift`, etc.)"
description: "Package to check (I.E. dbt-core, dbt-redshift, etc)"
required: true
new_version:
description: "SemVer of the package being published (i.e. 1.7.2, 1.8.0a1, etc.)"
description: "Semver of the container being built (I.E. 1.0.4)"
required: true
github_token:
description: "Auth token for GitHub (must have view packages scope)"
gh_token:
description: "Auth token for github (must have view packages scope)"
required: true
outputs:
tags:
description: "A list of tags to associate with this version"
latest:
description: "Wether or not built container should be tagged latest (bool)"
minor_latest:
description: "Wether or not built container should be tagged minor.latest (bool)"
runs:
using: "docker"
image: "Dockerfile"


@@ -13,7 +13,7 @@ jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v1
- name: Wrangle latest tag
id: is_latest
uses: ./.github/actions/latest-wrangler


@@ -1,71 +1,98 @@
import os
from packaging.version import Version, parse
import requests
import sys
from typing import List
def main():
package_name: str = os.environ["INPUT_PACKAGE_NAME"]
new_version: Version = parse(os.environ["INPUT_NEW_VERSION"])
github_token: str = os.environ["INPUT_GITHUB_TOKEN"]
response = _package_metadata(package_name, github_token)
published_versions = _published_versions(response)
new_version_tags = _new_version_tags(new_version, published_versions)
_register_tags(new_version_tags, package_name)
def _package_metadata(package_name: str, github_token: str) -> requests.Response:
url = f"https://api.github.com/orgs/dbt-labs/packages/container/{package_name}/versions"
return requests.get(url, auth=("", github_token))
def _published_versions(response: requests.Response) -> List[Version]:
package_metadata = response.json()
return [
parse(tag)
for version in package_metadata
for tag in version["metadata"]["container"]["tags"]
if "latest" not in tag
]
def _new_version_tags(new_version: Version, published_versions: List[Version]) -> List[str]:
# the package version is always a tag
tags = [str(new_version)]
# pre-releases don't get tagged with `latest`
if new_version.is_prerelease:
return tags
if new_version > max(published_versions):
tags.append("latest")
published_patches = [
version
for version in published_versions
if version.major == new_version.major and version.minor == new_version.minor
]
if new_version > max(published_patches):
tags.append(f"{new_version.major}.{new_version.minor}.latest")
return tags
def _register_tags(tags: List[str], package_name: str) -> None:
fully_qualified_tags = ",".join([f"ghcr.io/dbt-labs/{package_name}:{tag}" for tag in tags])
github_output = os.environ.get("GITHUB_OUTPUT")
with open(github_output, "at", encoding="utf-8") as gh_output:
gh_output.write(f"fully_qualified_tags={fully_qualified_tags}")
def _validate_response(response: requests.Response) -> None:
message = response.json()["message"]
if response.status_code != 200:
print(f"Call to GitHub API failed: {response.status_code} - {message}")
sys.exit(1)
import requests
from distutils.util import strtobool
from typing import Union
from packaging.version import parse, Version
if __name__ == "__main__":
main()
# get inputs
package = os.environ["INPUT_PACKAGE"]
new_version = parse(os.environ["INPUT_NEW_VERSION"])
gh_token = os.environ["INPUT_GH_TOKEN"]
halt_on_missing = strtobool(os.environ.get("INPUT_HALT_ON_MISSING", "False"))
# get package metadata from github
package_request = requests.get(
f"https://api.github.com/orgs/dbt-labs/packages/container/{package}/versions",
auth=("", gh_token),
)
package_meta = package_request.json()
# Log info if we don't get a 200
if package_request.status_code != 200:
print(f"Call to GH API failed: {package_request.status_code} {package_meta['message']}")
# Make an early exit if there is no matching package in github
if package_request.status_code == 404:
if halt_on_missing:
sys.exit(1)
# everything is the latest if the package doesn't exist
github_output = os.environ.get("GITHUB_OUTPUT")
with open(github_output, "at", encoding="utf-8") as gh_output:
gh_output.write("latest=True")
gh_output.write("minor_latest=True")
sys.exit(0)
# TODO: verify package meta is "correct"
# https://github.com/dbt-labs/dbt-core/issues/4640
# map versions and tags
version_tag_map = {
version["id"]: version["metadata"]["container"]["tags"] for version in package_meta
}
# is pre-release
pre_rel = True if any(x in str(new_version) for x in ["a", "b", "rc"]) else False
# semver of current latest
for version, tags in version_tag_map.items():
if "latest" in tags:
# N.B. This seems counterintuitive, but we expect any version tagged
# 'latest' to have exactly three associated tags:
# latest, major.minor.latest, and major.minor.patch.
# Subtracting everything that contains the string 'latest' gets us
# the major.minor.patch which is what's needed for comparison.
current_latest = parse([tag for tag in tags if "latest" not in tag][0])
else:
current_latest = False
# semver of current_minor_latest
for version, tags in version_tag_map.items():
if f"{new_version.major}.{new_version.minor}.latest" in tags:
# Similar to above, only now we expect exactly two tags:
# major.minor.patch and major.minor.latest
current_minor_latest = parse([tag for tag in tags if "latest" not in tag][0])
else:
current_minor_latest = False
def is_latest(
pre_rel: bool, new_version: Version, remote_latest: Union[bool, Version]
) -> bool:
"""Determine if a given contaier should be tagged 'latest' based on:
- it's pre-release status
- it's version
- the version of a previously identified container tagged 'latest'
:param pre_rel: Wether or not the version of the new container is a pre-release
:param new_version: The version of the new container
:param remote_latest: The version of the previously identified container that's
already tagged latest or False
"""
# is a pre-release = not latest
if pre_rel:
return False
# + no latest tag found = is latest
if not remote_latest:
return True
# + if remote version is lower than current = is latest, else not latest
return True if remote_latest <= new_version else False
latest = is_latest(pre_rel, new_version, current_latest)
minor_latest = is_latest(pre_rel, new_version, current_minor_latest)
github_output = os.environ.get("GITHUB_OUTPUT")
with open(github_output, "at", encoding="utf-8") as gh_output:
gh_output.write(f"latest={latest}")
gh_output.write(f"minor_latest={minor_latest}")


@@ -5,15 +5,6 @@ runs:
steps:
- shell: bash
run: |
sudo apt-get --purge remove postgresql postgresql-*
sudo apt update -y
sudo apt install gnupg2 wget vim -y
sudo sh -c 'echo "deb https://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list'
curl -fsSL https://www.postgresql.org/media/keys/ACCC4CF8.asc|sudo gpg --dearmor -o /etc/apt/trusted.gpg.d/postgresql.gpg
sudo apt update -y
sudo apt install postgresql-16
sudo apt-get -y install postgresql postgresql-contrib
sudo systemctl start postgresql
sudo systemctl enable postgresql
sudo systemctl start postgresql.service
pg_isready
sudo -u postgres bash ${{ github.action_path }}/setup_db.sh


@@ -5,9 +5,7 @@ runs:
steps:
- shell: bash
run: |
brew install postgresql@16
brew link postgresql@16 --force
brew services start postgresql@16
brew services start postgresql
echo "Check PostgreSQL service is running"
i=10
COMMAND='pg_isready'


@@ -5,22 +5,8 @@ runs:
steps:
- shell: pwsh
run: |
Write-Host -Object "Installing PostgreSQL 16 as windows service..."
$installerArgs = @("--install_runtimes 0", "--superpassword root", "--enable_acledit 1", "--unattendedmodeui none", "--mode unattended")
$filePath = Invoke-DownloadWithRetry -Url "https://get.enterprisedb.com/postgresql/postgresql-16.1-1-windows-x64.exe" -Path "$env:PGROOT/postgresql-16.1-1-windows-x64.exe"
Start-Process -FilePath $filePath -ArgumentList $installerArgs -Wait -PassThru
Write-Host -Object "Validating PostgreSQL 16 Install..."
Get-Service -Name postgresql*
$pgReady = Start-Process -FilePath "$env:PGBIN\pg_isready" -Wait -PassThru
$exitCode = $pgReady.ExitCode
if ($exitCode -ne 0) {
Write-Host -Object "PostgreSQL is not ready. Exitcode: $exitCode"
exit $exitCode
}
Write-Host -Object "Starting PostgreSQL 16 Service..."
$pgService = Get-Service -Name postgresql-x64-16
$pgService = Get-Service -Name postgresql*
Set-Service -InputObject $pgService -Status running -StartupType automatic
Start-Process -FilePath "$env:PGBIN\pg_isready" -Wait -PassThru
$env:Path += ";$env:PGBIN"
bash ${{ github.action_path }}/setup_db.sh


@@ -11,6 +11,11 @@ updates:
schedule:
interval: "daily"
rebase-strategy: "disabled"
- package-ecosystem: "pip"
directory: "/plugins/postgres"
schedule:
interval: "daily"
rebase-strategy: "disabled"
# docker dependencies
- package-ecosystem: "docker"
@@ -23,10 +28,3 @@ updates:
schedule:
interval: "weekly"
rebase-strategy: "disabled"
# github dependencies
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "weekly"
rebase-strategy: "disabled"


@@ -1,33 +1,23 @@
Resolves #
resolves #
<!---
Include the number of the issue addressed by this PR above, if applicable.
Include the number of the issue addressed by this PR above if applicable.
PRs for code changes without an associated issue *will not be merged*.
See CONTRIBUTING.md for more information.
Add the `user docs` label to this PR if it will need docs changes. An
issue will get opened in docs.getdbt.com upon successful merge of this PR.
-->
### Problem
### Description
<!---
Describe the problem this PR is solving. What is the application state
before this PR is merged?
-->
### Solution
<!---
Describe the way this PR solves the above problem. Add as much detail as you
can to help reviewers understand your changes. Include any alternatives and
tradeoffs you considered.
Describe the Pull Request here. Add any references and info to help reviewers
understand your changes. Include any tradeoffs you considered.
-->
### Checklist
- [ ] I have read [the contributing guide](https://github.com/dbt-labs/dbt-core/blob/main/CONTRIBUTING.md) and understand what's expected of me.
- [ ] I have run this code in development, and it appears to resolve the stated issue.
- [ ] This PR includes tests, or tests are not required or relevant for this PR.
- [ ] This PR has no interface changes (e.g., macros, CLI, logs, JSON artifacts, config files, adapter interface, etc.) or this PR has already received feedback and approval from Product or DX.
- [ ] This PR includes [type annotations](https://docs.python.org/3/library/typing.html) for new and modified functions.
- [ ] I have read [the contributing guide](https://github.com/dbt-labs/dbt-core/blob/main/CONTRIBUTING.md) and understand what's expected of me
- [ ] I have signed the [CLA](https://docs.getdbt.com/docs/contributor-license-agreements)
- [ ] I have run this code in development and it appears to resolve the stated issue
- [ ] This PR includes tests, or tests are not required/relevant for this PR
- [ ] I have [opened an issue to add/update docs](https://github.com/dbt-labs/docs.getdbt.com/issues/new/choose), or docs changes are not required/relevant for this PR
- [ ] I have run `changie new` to [create a changelog entry](https://github.com/dbt-labs/dbt-core/blob/main/CONTRIBUTING.md#adding-a-changelog-entry)


@@ -35,6 +35,6 @@ jobs:
github.event.pull_request.merged
&& contains(github.event.label.name, 'backport')
steps:
- uses: tibdex/backport@v2.0.4
- uses: tibdex/backport@v2.0.2
with:
github_token: ${{ secrets.GITHUB_TOKEN }}


@@ -41,6 +41,8 @@ jobs:
include:
- label: "dependencies"
changie_kind: "Dependencies"
- label: "snyk"
changie_kind: "Security"
runs-on: ubuntu-latest
steps:
@@ -48,7 +50,7 @@ jobs:
- name: Create and commit changelog on bot PR
if: ${{ contains(github.event.pull_request.labels.*.name, matrix.label) }}
id: bot_changelog
uses: emmyoop/changie_bot@v1.1.0
uses: emmyoop/changie_bot@v1.0.1
with:
GITHUB_TOKEN: ${{ secrets.FISHTOWN_BOT_PAT }}
commit_author_name: "Github Build Bot"
@@ -56,4 +58,4 @@ jobs:
commit_message: "Add automated changelog yaml from template for bot PR"
changie_kind: ${{ matrix.changie_kind }}
label: ${{ matrix.label }}
custom_changelog_string: "custom:\n Author: ${{ github.event.pull_request.user.login }}\n Issue: ${{ github.event.pull_request.number }}"
custom_changelog_string: "custom:\n Author: ${{ github.event.pull_request.user.login }}\n PR: ${{ github.event.pull_request.number }}"


@@ -2,8 +2,10 @@
# Checks that a file has been committed under the /.changes directory
# as a new CHANGELOG entry. Cannot check for a specific filename as
# it is dynamically generated by change type and timestamp.
# This workflow runs on pull_request_target because it requires
# secrets to post comments.
# This workflow should not require any secrets since it runs for PRs
# from forked repos.
# By default, secrets are not passed to workflows running from
# a forked repo.
# **why?**
# Ensure code change gets reflected in the CHANGELOG.
@@ -17,10 +19,8 @@
name: Check Changelog Entry
on:
pull_request_target:
pull_request:
types: [opened, reopened, labeled, unlabeled, synchronize]
paths-ignore: ['.changes/**', '.github/**', 'tests/**', '**.md', '**.yml']
workflow_dispatch:
defaults:


@@ -1,41 +0,0 @@
name: Check Artifact Changes
on:
pull_request:
types: [ opened, reopened, labeled, unlabeled, synchronize ]
paths-ignore: [ '.changes/**', '.github/**', 'tests/**', '**.md', '**.yml' ]
workflow_dispatch:
jobs:
check-artifact-changes:
runs-on: ubuntu-latest
if: ${{ !contains(github.event.pull_request.labels.*.name, 'artifact_minor_upgrade') }}
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Check for changes in core/dbt/artifacts
# https://github.com/marketplace/actions/paths-changes-filter
uses: dorny/paths-filter@v3
id: check_artifact_changes
with:
filters: |
artifacts_changed:
- 'core/dbt/artifacts/**'
list-files: shell
- name: Fail CI if artifacts have changed
if: steps.check_artifact_changes.outputs.artifacts_changed == 'true'
run: |
echo "CI failure: Artifact changes checked in core/dbt/artifacts directory."
echo "Files changed: ${{ steps.check_artifact_changes.outputs.artifacts_changed_files }}"
echo "To bypass this check, confirm that the change is not breaking (https://github.com/dbt-labs/dbt-core/blob/main/core/dbt/artifacts/README.md#breaking-changes) and add the 'artifact_minor_upgrade' label to the PR. Modifications and additions to all fields require updates to https://github.com/dbt-labs/dbt-jsonschema."
exit 1
- name: CI check passed
if: steps.check_artifact_changes.outputs.artifacts_changed == 'false'
run: |
echo "No prohibited artifact changes found in core/dbt/artifacts. CI check passed."


@@ -1,39 +0,0 @@
# **what?**
# Label a PR with a `community` label when a PR is opened by a user outside core/adapters
# **why?**
# To streamline triage and ensure that community contributions are recognized and prioritized
# **when?**
# When a PR is opened, not in draft or moved from draft to ready for review
name: Label community PRs
on:
# have to use pull_request_target since community PRs come from forks
pull_request_target:
types: [opened, ready_for_review]
defaults:
run:
shell: bash
permissions:
pull-requests: write # labels PRs
contents: read # reads team membership
jobs:
open_issues:
# If this PR already has the community label, no need to relabel it
# If this PR is opened and not draft, determine if it needs to be labeled
# if the PR is converted out of draft, determine if it needs to be labeled
if: |
(!contains(github.event.pull_request.labels.*.name, 'community') &&
(github.event.action == 'opened' && github.event.pull_request.draft == false ) ||
github.event.action == 'ready_for_review' )
uses: dbt-labs/actions/.github/workflows/label-community.yml@main
with:
github_team: 'core-group'
label: 'community'
secrets: inherit


@@ -1,41 +0,0 @@
# **what?**
# Cuts a new `*.latest` branch
# Also cleans up all files in `.changes/unreleased` and `.changes/previous version` on
# `main` and bumps `main` to the input version.
# **why?**
# Generally reduces the workload of engineers and reduces error. Allow automation.
# **when?**
# This will run when called manually.
name: Cut new release branch
on:
workflow_dispatch:
inputs:
version_to_bump_main:
description: 'The alpha version main should bump to (ex. 1.6.0a1)'
required: true
new_branch_name:
description: 'The full name of the new branch (ex. 1.5.latest)'
required: true
defaults:
run:
shell: bash
permissions:
contents: write
jobs:
cut_branch:
name: "Cut branch and clean up main for dbt-core"
uses: dbt-labs/actions/.github/workflows/cut-release-branch.yml@main
with:
version_to_bump_main: ${{ inputs.version_to_bump_main }}
new_branch_name: ${{ inputs.new_branch_name }}
PR_title: "Cleanup main after cutting new ${{ inputs.new_branch_name }} branch"
PR_body: "All adapter PRs will fail CI until the dbt-core PR has been merged due to release version conflicts."
secrets:
FISHTOWN_BOT_PAT: ${{ secrets.FISHTOWN_BOT_PAT }}


@@ -1,41 +0,0 @@
# **what?**
# Open an issue in docs.getdbt.com when an issue is labeled `user docs` and closed as completed
# **why?**
# To reduce barriers for keeping docs up to date
# **when?**
# When an issue is labeled `user docs` and is closed as completed. Can be labeled before or after the issue is closed.
name: Open issues in docs.getdbt.com repo when an issue is labeled
run-name: "Open an issue in docs.getdbt.com for issue #${{ github.event.issue.number }}"
on:
issues:
types: [labeled, closed]
defaults:
run:
shell: bash
permissions:
issues: write # comments on issues
jobs:
open_issues:
# we only want to run this when the issue is closed as completed and the label `user docs` has been assigned.
# If this logic does not exist in this workflow, it runs the
# risk of duplicate issues being created, since merge and label can both trigger this workflow and neither may have
# generated the comment before the other runs. This lives here instead of the shared workflow because this is where we
# decide if it should run or not.
if: |
(github.event.issue.state == 'closed' &&
github.event.issue.state_reason == 'completed' &&
contains( github.event.issue.labels.*.name, 'user docs'))
uses: dbt-labs/actions/.github/workflows/open-issue-in-repo.yml@main
with:
issue_repository: "dbt-labs/docs.getdbt.com"
issue_title: "[Core] Docs Changes Needed from ${{ github.event.repository.name }} Issue #${{ github.event.issue.number }}"
issue_body: "At a minimum, update body to include a link to the page on docs.getdbt.com requiring updates and what part(s) of the page you would like to see updated.\n Originating from this issue: https://github.com/dbt-labs/dbt-core/issues/${{ github.event.issue.number }}"
secrets: inherit


@@ -0,0 +1,165 @@
# **what?**
# On push, if anything in core/dbt/docs or core/dbt/cli has been
# created or modified, regenerate the CLI API docs using sphinx.
# **why?**
# We watch for changes in core/dbt/cli because the CLI API docs rely on click
# and all supporting flags/params to be generated. We watch for changes in
# core/dbt/docs since any changes to sphinx configuration or any of the
# .rst files there could result in a differently built final index.html file.
# **when?**
# Whenever a change has been pushed to a branch, and only if there is a diff
# between the PR branch and main's core/dbt/cli and or core/dbt/docs dirs.
# TODO: add bot comment to PR informing contributor that the docs have been committed
# TODO: figure out why github action triggered pushes cause github to fail to report
# the status of jobs
name: Generate CLI API docs
on:
pull_request:
permissions:
contents: write
pull-requests: write
env:
CLI_DIR: ${{ github.workspace }}/core/dbt/cli
DOCS_DIR: ${{ github.workspace }}/core/dbt/docs
DOCS_BUILD_DIR: ${{ github.workspace }}/core/dbt/docs/build
jobs:
check_gen:
name: check if generation needed
runs-on: ubuntu-latest
if: ${{ github.event.pull_request.head.repo.fork == false }}
outputs:
cli_dir_changed: ${{ steps.check_cli.outputs.cli_dir_changed }}
docs_dir_changed: ${{ steps.check_docs.outputs.docs_dir_changed }}
steps:
- name: "[DEBUG] print variables"
run: |
echo "env.CLI_DIR: ${{ env.CLI_DIR }}"
echo "env.DOCS_BUILD_DIR: ${{ env.DOCS_BUILD_DIR }}"
echo "env.DOCS_DIR: ${{ env.DOCS_DIR }}"
- name: git checkout
uses: actions/checkout@v3
with:
fetch-depth: 0
ref: ${{ github.head_ref }}
- name: set shas
id: set_shas
run: |
THIS_SHA=$(git rev-parse @)
LAST_SHA=$(git rev-parse @~1)
echo "this sha: $THIS_SHA"
echo "last sha: $LAST_SHA"
echo "this_sha=$THIS_SHA" >> $GITHUB_OUTPUT
echo "last_sha=$LAST_SHA" >> $GITHUB_OUTPUT
- name: check for changes in core/dbt/cli
id: check_cli
run: |
CLI_DIR_CHANGES=$(git diff \
${{ steps.set_shas.outputs.last_sha }} \
${{ steps.set_shas.outputs.this_sha }} \
-- ${{ env.CLI_DIR }})
if [ -n "$CLI_DIR_CHANGES" ]; then
echo "changes found"
echo $CLI_DIR_CHANGES
echo "cli_dir_changed=true" >> $GITHUB_OUTPUT
exit 0
fi
echo "cli_dir_changed=false" >> $GITHUB_OUTPUT
echo "no changes found"
- name: check for changes in core/dbt/docs
id: check_docs
if: steps.check_cli.outputs.cli_dir_changed == 'false'
run: |
DOCS_DIR_CHANGES=$(git diff --name-only \
${{ steps.set_shas.outputs.last_sha }} \
${{ steps.set_shas.outputs.this_sha }} \
-- ${{ env.DOCS_DIR }} ':!${{ env.DOCS_BUILD_DIR }}')
DOCS_BUILD_DIR_CHANGES=$(git diff --name-only \
${{ steps.set_shas.outputs.last_sha }} \
${{ steps.set_shas.outputs.this_sha }} \
-- ${{ env.DOCS_BUILD_DIR }})
if [ -n "$DOCS_DIR_CHANGES" ] && [ -z "$DOCS_BUILD_DIR_CHANGES" ]; then
echo "changes found"
echo $DOCS_DIR_CHANGES
echo "docs_dir_changed=true" >> $GITHUB_OUTPUT
exit 0
fi
echo "docs_dir_changed=false" >> $GITHUB_OUTPUT
echo "no changes found"
gen_docs:
name: generate docs
runs-on: ubuntu-latest
needs: [check_gen]
if: |
needs.check_gen.outputs.cli_dir_changed == 'true'
|| needs.check_gen.outputs.docs_dir_changed == 'true'
steps:
- name: "[DEBUG] print variables"
run: |
echo "env.DOCS_DIR: ${{ env.DOCS_DIR }}"
echo "github head_ref: ${{ github.head_ref }}"
- name: git checkout
uses: actions/checkout@v3
with:
ref: ${{ github.head_ref }}
- name: install python
uses: actions/setup-python@v4.3.0
with:
python-version: 3.8
- name: install dev requirements
run: |
python3 -m venv env
source env/bin/activate
python -m pip install --upgrade pip
pip install -r requirements.txt -r dev-requirements.txt
- name: generate docs
run: |
source env/bin/activate
cd ${{ env.DOCS_DIR }}
echo "cleaning existing docs"
make clean
echo "creating docs"
make html
- name: debug
run: |
echo ">>>>> status"
git status
echo ">>>>> remotes"
git remote -v
echo ">>>>> branch"
git branch -v
echo ">>>>> log"
git log --pretty=oneline | head -5
- name: commit docs
run: |
git config user.name 'Github Build Bot'
git config user.email 'buildbot@fishtownanalytics.com'
git commit -am "Add generated CLI API docs"
git push -u origin ${{ github.head_ref }}
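The check_gen job above boils down to "did anything under core/dbt/cli or core/dbt/docs change between the last two commits". A rough local equivalent in Python (a hypothetical helper, not part of the workflow) might look like this:

import subprocess

def changed(paths, base="HEAD~1", head="HEAD"):
    """True if `git diff --name-only` reports changes under any of `paths`."""
    result = subprocess.run(
        ["git", "diff", "--name-only", base, head, "--", *paths],
        capture_output=True, text=True, check=True,
    )
    return bool(result.stdout.strip())

# mirrors check_cli / check_docs (the docs build dir is excluded via pathspec magic)
print("cli changed: ", changed(["core/dbt/cli"]))
print("docs changed:", changed(["core/dbt/docs", ":!core/dbt/docs/build"]))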

.github/workflows/jira-creation.yml (new file)

@@ -0,0 +1,26 @@
# **what?**
# Mirrors issues into Jira. Includes the information: title,
# GitHub Issue ID and URL
# **why?**
# Jira is our tool for tracking and we need to see these issues in there
# **when?**
# On issue creation or when an issue is labeled `Jira`
name: Jira Issue Creation
on:
issues:
types: [opened, labeled]
permissions:
issues: write
jobs:
call-label-action:
uses: dbt-labs/jira-actions/.github/workflows/jira-creation.yml@main
secrets:
JIRA_BASE_URL: ${{ secrets.JIRA_BASE_URL }}
JIRA_USER_EMAIL: ${{ secrets.JIRA_USER_EMAIL }}
JIRA_API_TOKEN: ${{ secrets.JIRA_API_TOKEN }}

.github/workflows/jira-label.yml (new file)

@@ -0,0 +1,26 @@
# **what?**
# Calls mirroring Jira label Action. Includes adding a new label
# to an existing issue or removing a label as well
# **why?**
# Jira is our tool for tracking and we need to see these labels in there
# **when?**
# On labels being added or removed from issues
name: Jira Label Mirroring
on:
issues:
types: [labeled, unlabeled]
permissions:
issues: read
jobs:
call-label-action:
uses: dbt-labs/jira-actions/.github/workflows/jira-label.yml@main
secrets:
JIRA_BASE_URL: ${{ secrets.JIRA_BASE_URL }}
JIRA_USER_EMAIL: ${{ secrets.JIRA_USER_EMAIL }}
JIRA_API_TOKEN: ${{ secrets.JIRA_API_TOKEN }}

.github/workflows/jira-transition.yml (new file)

@@ -0,0 +1,27 @@
# **what?**
# Transition a Jira issue to a new state
# Only supports these GitHub Issue transitions:
# closed, deleted, reopened
# **why?**
# Jira needs to be kept up-to-date
# **when?**
# On issue closing, deletion, reopened
name: Jira Issue Transition
on:
issues:
types: [closed, deleted, reopened]
# no special access is needed
permissions: read-all
jobs:
call-label-action:
uses: dbt-labs/jira-actions/.github/workflows/jira-transition.yml@main
secrets:
JIRA_BASE_URL: ${{ secrets.JIRA_BASE_URL }}
JIRA_USER_EMAIL: ${{ secrets.JIRA_USER_EMAIL }}
JIRA_API_TOKEN: ${{ secrets.JIRA_API_TOKEN }}


@@ -33,11 +33,6 @@ defaults:
run:
shell: bash
# top-level adjustments can be made here
env:
# number of parallel processes to spawn for python integration testing
PYTHON_INTEGRATION_TEST_WORKERS: 5
jobs:
code-quality:
name: code-quality
@@ -47,20 +42,23 @@ jobs:
steps:
- name: Check out the repository
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Set up Python
uses: actions/setup-python@v5
uses: actions/setup-python@v4.3.0
with:
python-version: '3.9'
python-version: '3.8'
- name: Install python dependencies
run: |
python -m pip install --user --upgrade pip
python -m pip --version
make dev
make dev_req
python -m pip install pre-commit
pre-commit --version
python -m pip install mypy==0.942
mypy --version
python -m pip install -r requirements.txt
python -m pip install -r dev-requirements.txt
dbt --version
- name: Run pre-commit hooks
@@ -75,17 +73,18 @@ jobs:
strategy:
fail-fast: false
matrix:
python-version: [ "3.9", "3.10", "3.11", "3.12" ]
python-version: ["3.7", "3.8", "3.9", "3.10", "3.11"]
env:
TOXENV: "unit"
PYTEST_ADDOPTS: "-v --color=yes --csv unit_results.csv"
steps:
- name: Check out the repository
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
uses: actions/setup-python@v4.3.0
with:
python-version: ${{ matrix.python-version }}
@@ -96,12 +95,8 @@ jobs:
python -m pip install tox
tox --version
- name: Run unit tests
uses: nick-fields/retry@v3
with:
timeout_minutes: 10
max_attempts: 3
command: tox -e unit
- name: Run tox
run: tox
- name: Get current date
if: always()
@@ -110,80 +105,43 @@ jobs:
CURRENT_DATE=$(date +'%Y-%m-%dT%H_%M_%S') # no colons allowed for artifacts
echo "date=$CURRENT_DATE" >> $GITHUB_OUTPUT
- name: Upload Unit Test Coverage to Codecov
if: ${{ matrix.python-version == '3.11' }}
uses: codecov/codecov-action@v4
- uses: actions/upload-artifact@v2
if: always()
with:
token: ${{ secrets.CODECOV_TOKEN }}
flags: unit
integration-metadata:
name: integration test metadata generation
runs-on: ubuntu-latest
outputs:
split-groups: ${{ steps.generate-split-groups.outputs.split-groups }}
include: ${{ steps.generate-include.outputs.include }}
steps:
- name: generate split-groups
id: generate-split-groups
run: |
MATRIX_JSON="["
for B in $(seq 1 ${{ env.PYTHON_INTEGRATION_TEST_WORKERS }}); do
MATRIX_JSON+=$(sed 's/^/"/;s/$/"/' <<< "${B}")
done
MATRIX_JSON="${MATRIX_JSON//\"\"/\", \"}"
MATRIX_JSON+="]"
echo "split-groups=${MATRIX_JSON}"
echo "split-groups=${MATRIX_JSON}" >> $GITHUB_OUTPUT
- name: generate include
id: generate-include
run: |
INCLUDE=('"python-version":"3.9","os":"windows-latest"' '"python-version":"3.9","os":"macos-14"' )
INCLUDE_GROUPS="["
for include in ${INCLUDE[@]}; do
for group in $(seq 1 ${{ env.PYTHON_INTEGRATION_TEST_WORKERS }}); do
INCLUDE_GROUPS+=$(sed 's/$/, /' <<< "{\"split-group\":\"${group}\",${include}}")
done
done
INCLUDE_GROUPS=$(echo $INCLUDE_GROUPS | sed 's/,*$//g')
INCLUDE_GROUPS+="]"
echo "include=${INCLUDE_GROUPS}"
echo "include=${INCLUDE_GROUPS}" >> $GITHUB_OUTPUT
name: unit_results_${{ matrix.python-version }}-${{ steps.date.outputs.date }}.csv
path: unit_results.csv
integration:
name: (${{ matrix.split-group }}) integration test / python ${{ matrix.python-version }} / ${{ matrix.os }}
name: integration test / python ${{ matrix.python-version }} / ${{ matrix.os }}
runs-on: ${{ matrix.os }}
timeout-minutes: 30
needs:
- integration-metadata
timeout-minutes: 45
strategy:
fail-fast: false
matrix:
python-version: [ "3.9", "3.10", "3.11", "3.12" ]
python-version: ["3.7", "3.8", "3.9", "3.10", "3.11"]
os: [ubuntu-20.04]
split-group: ${{ fromJson(needs.integration-metadata.outputs.split-groups) }}
include: ${{ fromJson(needs.integration-metadata.outputs.include) }}
include:
- python-version: 3.8
os: windows-latest
- python-version: 3.8
os: macos-latest
env:
TOXENV: integration
PYTEST_ADDOPTS: "-v --color=yes -n4 --csv integration_results.csv"
DBT_INVOCATION_ENV: github-actions
DBT_TEST_USER_1: dbt_test_user_1
DBT_TEST_USER_2: dbt_test_user_2
DBT_TEST_USER_3: dbt_test_user_3
DD_CIVISIBILITY_AGENTLESS_ENABLED: true
DD_API_KEY: ${{ secrets.DATADOG_API_KEY }}
DD_SITE: datadoghq.com
DD_ENV: ci
DD_SERVICE: ${{ github.event.repository.name }}
steps:
- name: Check out the repository
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
uses: actions/setup-python@v4.3.0
with:
python-version: ${{ matrix.python-version }}
@@ -206,14 +164,8 @@ jobs:
python -m pip install tox
tox --version
- name: Run integration tests
uses: nick-fields/retry@v3
with:
timeout_minutes: 30
max_attempts: 3
command: tox -- --ddtrace
env:
PYTEST_ADDOPTS: ${{ format('--splits {0} --group {1}', env.PYTHON_INTEGRATION_TEST_WORKERS, matrix.split-group) }}
- name: Run tests
run: tox
- name: Get current date
if: always()
@@ -222,35 +174,17 @@ jobs:
CURRENT_DATE=$(date +'%Y-%m-%dT%H_%M_%S') # no colons allowed for artifacts
echo "date=$CURRENT_DATE" >> $GITHUB_OUTPUT
- uses: actions/upload-artifact@v4
- uses: actions/upload-artifact@v2
if: always()
with:
name: logs_${{ matrix.python-version }}_${{ matrix.os }}_${{ matrix.split-group }}_${{ steps.date.outputs.date }}
name: logs_${{ matrix.python-version }}_${{ matrix.os }}_${{ steps.date.outputs.date }}
path: ./logs
- name: Upload Integration Test Coverage to Codecov
if: ${{ matrix.python-version == '3.11' }}
uses: codecov/codecov-action@v4
- uses: actions/upload-artifact@v2
if: always()
with:
token: ${{ secrets.CODECOV_TOKEN }}
flags: integration
integration-report:
if: ${{ always() }}
name: Integration Test Suite
runs-on: ubuntu-latest
needs: integration
steps:
- name: "Integration Tests Failed"
if: ${{ contains(needs.integration.result, 'failure') || contains(needs.integration.result, 'cancelled') }}
# when this is true the next step won't execute
run: |
echo "::notice title='Integration test suite failed'"
exit 1
- name: "Integration Tests Passed"
run: |
echo "::notice title='Integration test suite passed'"
name: integration_results_${{ matrix.python-version }}_${{ matrix.os }}_${{ steps.date.outputs.date }}.csv
path: integration_results.csv
build:
name: build packages
@@ -259,12 +193,12 @@ jobs:
steps:
- name: Check out the repository
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Set up Python
uses: actions/setup-python@v5
uses: actions/setup-python@v4.3.0
with:
python-version: '3.9'
python-version: '3.8'
- name: Install python dependencies
run: |
@@ -297,7 +231,7 @@ jobs:
- name: Install source distributions
# ignore dbt-1.0.0, which intentionally raises an error when installed from source
run: |
find ./dist/*.gz -maxdepth 1 -type f | xargs python -m pip install --force-reinstall --find-links=dist/
find ./dist/dbt-[a-z]*.gz -maxdepth 1 -type f | xargs python -m pip install --force-reinstall --find-links=dist/
- name: Check source distributions
run: |


@@ -1,265 +0,0 @@
# **what?**
# This workflow models the performance characteristics of a point in time in dbt.
# It runs specific dbt commands on committed projects multiple times to create and
# commit information about the distribution to the current branch. For more information
# see the readme in the performance module at /performance/README.md.
#
# **why?**
# When developing new features, we can take quick performance samples and compare
# them against the committed baseline measurements produced by this workflow to detect
# some performance regressions at development time before they reach users.
#
# **when?**
# This is only run once directly after each release (for non-prereleases). If for some
# reason the results of a run are not satisfactory, it can also be triggered manually.
name: Model Performance Characteristics
on:
# runs after non-prereleases are published.
release:
types: [released]
# run manually from the actions tab
workflow_dispatch:
inputs:
release_id:
description: 'dbt version to model (must be non-prerelease in Pypi)'
type: string
required: true
env:
RUNNER_CACHE_PATH: performance/runner/target/release/runner
# both jobs need to write
permissions:
contents: write
pull-requests: write
jobs:
set-variables:
name: Setting Variables
runs-on: ubuntu-latest
outputs:
cache_key: ${{ steps.variables.outputs.cache_key }}
release_id: ${{ steps.semver.outputs.base-version }}
release_branch: ${{ steps.variables.outputs.release_branch }}
steps:
# explicitly checkout the performance runner from main regardless of which
# version we are modeling.
- name: Checkout
uses: actions/checkout@v4
with:
ref: main
- name: Parse version into parts
id: semver
uses: dbt-labs/actions/parse-semver@v1
with:
version: ${{ github.event.inputs.release_id || github.event.release.tag_name }}
# collect all the variables that need to be used in subsequent jobs
- name: Set variables
id: variables
run: |
# create a cache key that will be used in the next job. without this the
# next job would have to checkout from main and hash the files itself.
echo "cache_key=${{ runner.os }}-${{ hashFiles('performance/runner/Cargo.toml')}}-${{ hashFiles('performance/runner/src/*') }}" >> $GITHUB_OUTPUT
branch_name="${{steps.semver.outputs.major}}.${{steps.semver.outputs.minor}}.latest"
echo "release_branch=$branch_name" >> $GITHUB_OUTPUT
echo "release branch is inferred to be ${branch_name}"
latest-runner:
name: Build or Fetch Runner
runs-on: ubuntu-latest
needs: [set-variables]
env:
RUSTFLAGS: "-D warnings"
steps:
- name: '[DEBUG] print variables'
run: |
echo "all variables defined in set-variables"
echo "cache_key: ${{ needs.set-variables.outputs.cache_key }}"
echo "release_id: ${{ needs.set-variables.outputs.release_id }}"
echo "release_branch: ${{ needs.set-variables.outputs.release_branch }}"
# explicitly checkout the performance runner from main regardless of which
# version we are modeling.
- name: Checkout
uses: actions/checkout@v4
with:
ref: main
# attempts to access a previously cached runner
- uses: actions/cache@v4
id: cache
with:
path: ${{ env.RUNNER_CACHE_PATH }}
key: ${{ needs.set-variables.outputs.cache_key }}
- name: Fetch Rust Toolchain
if: steps.cache.outputs.cache-hit != 'true'
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
override: true
- name: Add fmt
if: steps.cache.outputs.cache-hit != 'true'
run: rustup component add rustfmt
- name: Cargo fmt
if: steps.cache.outputs.cache-hit != 'true'
uses: actions-rs/cargo@v1
with:
command: fmt
args: --manifest-path performance/runner/Cargo.toml --all -- --check
- name: Test
if: steps.cache.outputs.cache-hit != 'true'
uses: actions-rs/cargo@v1
with:
command: test
args: --manifest-path performance/runner/Cargo.toml
- name: Build (optimized)
if: steps.cache.outputs.cache-hit != 'true'
uses: actions-rs/cargo@v1
with:
command: build
args: --release --manifest-path performance/runner/Cargo.toml
# the cache action automatically caches this binary at the end of the job
model:
# depends on `latest-runner` as a separate job so that failures in this job do not prevent
# a successfully tested and built binary from being cached.
needs: [set-variables, latest-runner]
name: Model a release
runs-on: ubuntu-latest
steps:
- name: '[DEBUG] print variables'
run: |
echo "all variables defined in set-variables"
echo "cache_key: ${{ needs.set-variables.outputs.cache_key }}"
echo "release_id: ${{ needs.set-variables.outputs.release_id }}"
echo "release_branch: ${{ needs.set-variables.outputs.release_branch }}"
- name: Setup Python
uses: actions/setup-python@v5
with:
python-version: "3.9"
- name: Install dbt
run: pip install dbt-postgres==${{ needs.set-variables.outputs.release_id }}
- name: Install Hyperfine
run: wget https://github.com/sharkdp/hyperfine/releases/download/v1.11.0/hyperfine_1.11.0_amd64.deb && sudo dpkg -i hyperfine_1.11.0_amd64.deb
# explicitly checkout main to get the latest project definitions
- name: Checkout
uses: actions/checkout@v4
with:
ref: main
# this was built in the previous job so it will be there.
- name: Fetch Runner
uses: actions/cache@v4
id: cache
with:
path: ${{ env.RUNNER_CACHE_PATH }}
key: ${{ needs.set-variables.outputs.cache_key }}
- name: Move Runner
run: mv performance/runner/target/release/runner performance/app
- name: Change Runner Permissions
run: chmod +x ./performance/app
- name: '[DEBUG] ls baseline directory before run'
run: ls -R performance/baselines/
# `${{ github.workspace }}` is used to pass the absolute path
- name: Create directories
run: |
mkdir ${{ github.workspace }}/performance/tmp/
mkdir -p performance/baselines/${{ needs.set-variables.outputs.release_id }}/
# Run modeling with taking 20 samples
- name: Run Measurement
run: |
performance/app model -v ${{ needs.set-variables.outputs.release_id }} -b ${{ github.workspace }}/performance/baselines/ -p ${{ github.workspace }}/performance/projects/ -t ${{ github.workspace }}/performance/tmp/ -n 20
- name: '[DEBUG] ls baseline directory after run'
run: ls -R performance/baselines/
- uses: actions/upload-artifact@v4
with:
name: baseline
path: performance/baselines/${{ needs.set-variables.outputs.release_id }}/
create-pr:
name: Open PR for ${{ matrix.base-branch }}
# depends on `model` as a separate job so that the baseline can be committed to more than one branch
# i.e. release branch and main
needs: [set-variables, latest-runner, model]
runs-on: ubuntu-latest
strategy:
matrix:
include:
- base-branch: refs/heads/main
target-branch: performance-bot/main_${{ needs.set-variables.outputs.release_id }}_${{GITHUB.RUN_ID}}
- base-branch: refs/heads/${{ needs.set-variables.outputs.release_branch }}
target-branch: performance-bot/release_${{ needs.set-variables.outputs.release_id }}_${{GITHUB.RUN_ID}}
steps:
- name: '[DEBUG] print variables'
run: |
echo "all variables defined in set-variables"
echo "cache_key: ${{ needs.set-variables.outputs.cache_key }}"
echo "release_id: ${{ needs.set-variables.outputs.release_id }}"
echo "release_branch: ${{ needs.set-variables.outputs.release_branch }}"
- name: Checkout
uses: actions/checkout@v4
with:
ref: ${{ matrix.base-branch }}
- name: Create PR branch
run: |
git checkout -b ${{ matrix.target-branch }}
git push origin ${{ matrix.target-branch }}
git branch --set-upstream-to=origin/${{ matrix.target-branch }} ${{ matrix.target-branch }}
- uses: actions/download-artifact@v4
with:
name: baseline
path: performance/baselines/${{ needs.set-variables.outputs.release_id }}
- name: '[DEBUG] ls baselines after artifact download'
run: ls -R performance/baselines/
- name: Commit baseline
uses: EndBug/add-and-commit@v9
with:
add: 'performance/baselines/*'
author_name: 'Github Build Bot'
author_email: 'buildbot@fishtownanalytics.com'
message: 'adding performance baseline for ${{ needs.set-variables.outputs.release_id }}'
push: 'origin origin/${{ matrix.target-branch }}'
- name: Create Pull Request
uses: peter-evans/create-pull-request@v6
with:
author: 'Github Build Bot <buildbot@fishtownanalytics.com>'
base: ${{ matrix.base-branch }}
branch: '${{ matrix.target-branch }}'
title: 'Adding performance modeling for ${{needs.set-variables.outputs.release_id}} to ${{ matrix.base-branch }}'
body: 'Committing perf results for tracking for the ${{needs.set-variables.outputs.release_id}}'
labels: |
Skip Changelog
Performance
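For reference, a small sketch of the branch matrix above: each base branch gets its own `performance-bot` target branch keyed by the release id and run id. The release id, release branch, and run id values here are illustrative, not from a real run.

```python
# Hypothetical mirror of the create-pr matrix above; values are illustrative.
def pr_branch_matrix(release_id: str, release_branch: str, run_id: str):
    return [
        ("refs/heads/main", f"performance-bot/main_{release_id}_{run_id}"),
        (f"refs/heads/{release_branch}", f"performance-bot/release_{release_id}_{run_id}"),
    ]

for base, target in pr_branch_matrix("1.5.0", "1.5.latest", "123456789"):
    print(base, "->", target)
```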

@@ -1,97 +0,0 @@
# **what?**
# Nightly releases to GitHub and PyPI. This workflow produces the following outcomes:
# - generate and validate data for the nightly release (commit SHA, version number, release branch);
# - pass data to the release workflow;
# - the nightly release will be pushed to GitHub as a draft release;
# - the nightly build will be pushed to test PyPI;
#
# **why?**
# Ensure an automated and tested release process for nightly builds
#
# **when?**
# This workflow runs on schedule or can be run manually on demand.
name: Nightly Test Release to GitHub and PyPI
on:
workflow_dispatch: # for manual triggering
schedule:
- cron: 0 9 * * *
permissions:
contents: write # this is the permission that allows creating a new release
packages: write # this is the permission that allows Docker release
defaults:
run:
shell: bash
env:
RELEASE_BRANCH: "main"
jobs:
aggregate-release-data:
runs-on: ubuntu-latest
outputs:
version_number: ${{ steps.nightly-release-version.outputs.number }}
release_branch: ${{ steps.release-branch.outputs.name }}
steps:
- name: "Checkout ${{ github.repository }} Branch ${{ env.RELEASE_BRANCH }}"
uses: actions/checkout@v4
with:
ref: ${{ env.RELEASE_BRANCH }}
- name: "Get Current Version Number"
id: version-number-sources
run: |
current_version=`awk -F"current_version = " '{print $2}' .bumpversion.cfg | tr '\n' ' '`
echo "current_version=$current_version" >> $GITHUB_OUTPUT
- name: "Audit Version And Parse Into Parts"
id: semver
uses: dbt-labs/actions/parse-semver@v1.1.0
with:
version: ${{ steps.version-number-sources.outputs.current_version }}
- name: "Get Current Date"
id: current-date
run: echo "date=$(date +'%m%d%Y')" >> $GITHUB_OUTPUT
- name: "Generate Nightly Release Version Number"
id: nightly-release-version
run: |
number="${{ steps.semver.outputs.version }}.dev${{ steps.current-date.outputs.date }}"
echo "number=$number" >> $GITHUB_OUTPUT
- name: "Audit Nightly Release Version And Parse Into Parts"
uses: dbt-labs/actions/parse-semver@v1.1.0
with:
version: ${{ steps.nightly-release-version.outputs.number }}
- name: "Set Release Branch"
id: release-branch
run: |
echo "name=${{ env.RELEASE_BRANCH }}" >> $GITHUB_OUTPUT
log-outputs-aggregate-release-data:
runs-on: ubuntu-latest
needs: [aggregate-release-data]
steps:
- name: "[DEBUG] Log Outputs"
run: |
echo version_number: ${{ needs.aggregate-release-data.outputs.version_number }}
echo release_branch: ${{ needs.aggregate-release-data.outputs.release_branch }}
release-github-pypi:
needs: [aggregate-release-data]
uses: ./.github/workflows/release.yml
with:
target_branch: ${{ needs.aggregate-release-data.outputs.release_branch }}
version_number: ${{ needs.aggregate-release-data.outputs.version_number }}
test_run: true
nightly_release: true
secrets: inherit
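A minimal sketch of the nightly numbering scheme above, assuming the parse-semver `version` output is the full current version string: append `.dev` plus the current date formatted as `%m%d%Y`.

```python
# Minimal sketch of the nightly version numbering above; inputs are illustrative.
from datetime import date

def nightly_version(current_version: str, today: date) -> str:
    return f"{current_version}.dev{today.strftime('%m%d%Y')}"

print(nightly_version("1.5.0a1", date(2023, 2, 14)))  # 1.5.0a1.dev02142023
```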

@@ -1,7 +1,11 @@
# **what?**
# The purpose of this workflow is to trigger CI to run for each
# release branch and main branch on a regular cadence. If the CI workflow
# fails for a branch, it will post to #dev-core-alerts to raise awareness.
# fails for a branch, it will post to dev-core-alerts to raise awareness.
# The 'aurelien-baudet/workflow-dispatch' Action triggers the existing
# CI workflow file on the given branch to run so that even if we change the
# CI workflow file in the future, the one that is tailored for the given
# release branch will be used.
# **why?**
# Ensures release branches and main are always shippable and not broken.
@@ -24,8 +28,35 @@ on:
permissions: read-all
jobs:
run_tests:
uses: dbt-labs/actions/.github/workflows/release-branch-tests.yml@main
kick-off-ci:
name: Kick-off CI
runs-on: ubuntu-latest
strategy:
# must run CI one branch at a time because the workflow-dispatch Action polls the
# latest run for results and gets confused when we kick off multiple runs
# at once. There is a race condition, so we just run in sequential order.
max-parallel: 1
fail-fast: false
matrix:
branch: [1.0.latest, 1.1.latest, 1.2.latest, 1.3.latest, main]
steps:
- name: Call CI workflow for ${{ matrix.branch }} branch
id: trigger-step
uses: aurelien-baudet/workflow-dispatch@v2.1.1
with:
workflows_to_run: '["main.yml"]'
secrets: inherit
workflow: main.yml
ref: ${{ matrix.branch }}
token: ${{ secrets.FISHTOWN_BOT_PAT }}
- name: Post failure to Slack
uses: ravsamhq/notify-slack-action@v1
if: ${{ always() && !contains(steps.trigger-step.outputs.workflow-conclusion,'success') }}
with:
status: ${{ job.status }}
notification_title: 'dbt-core scheduled run of "${{ matrix.branch }}" branch not successful'
message_format: ':x: CI on branch "${{ matrix.branch }}" ${{ steps.trigger-step.outputs.workflow-conclusion }}'
footer: 'Linked failed CI run ${{ steps.trigger-step.outputs.workflow-url }}'
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_DEV_CORE_ALERTS }}

.github/workflows/release-docker.yml
@@ -0,0 +1,118 @@
# **what?**
# This workflow will generate a series of docker images for dbt and push them to the github container registry
# **why?**
# Docker images for dbt are used in a number of important places throughout the dbt ecosystem. This is how we keep those images up-to-date.
# **when?**
# This is triggered manually
# **next steps**
# - build this into the release workflow (or conversely, break out the different release methods into their own workflow files)
name: Docker release
permissions:
packages: write
on:
workflow_dispatch:
inputs:
package:
description: The package to release. _One_ of [dbt-core, dbt-redshift, dbt-bigquery, dbt-snowflake, dbt-spark, dbt-postgres]
required: true
version_number:
description: The release version number (i.e. 1.0.0b1). Do not include `latest` tags or a leading `v`!
required: true
jobs:
get_version_meta:
name: Get version meta
runs-on: ubuntu-latest
outputs:
major: ${{ steps.version.outputs.major }}
minor: ${{ steps.version.outputs.minor }}
patch: ${{ steps.version.outputs.patch }}
latest: ${{ steps.latest.outputs.latest }}
minor_latest: ${{ steps.latest.outputs.minor_latest }}
steps:
- uses: actions/checkout@v1
- name: Split version
id: version
run: |
IFS="." read -r MAJOR MINOR PATCH <<< ${{ github.event.inputs.version_number }}
echo "major=$MAJOR" >> $GITHUB_OUTPUT
echo "minor=$MINOR" >> $GITHUB_OUTPUT
echo "patch=$PATCH" >> $GITHUB_OUTPUT
- name: Is pkg 'latest'
id: latest
uses: ./.github/actions/latest-wrangler
with:
package: ${{ github.event.inputs.package }}
new_version: ${{ github.event.inputs.version_number }}
gh_token: ${{ secrets.GITHUB_TOKEN }}
halt_on_missing: False
setup_image_builder:
name: Set up docker image builder
runs-on: ubuntu-latest
needs: [get_version_meta]
steps:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
build_and_push:
name: Build images and push to GHCR
runs-on: ubuntu-latest
needs: [setup_image_builder, get_version_meta]
steps:
- name: Get docker build arg
id: build_arg
run: |
BUILD_ARG_NAME=$(echo ${{ github.event.inputs.package }} | sed 's/\-/_/g')
BUILD_ARG_VALUE=$(echo ${{ github.event.inputs.package }} | sed 's/postgres/core/g')
echo "build_arg_name=$BUILD_ARG_NAME" >> $GITHUB_OUTPUT
echo "build_arg_value=$BUILD_ARG_VALUE" >> $GITHUB_OUTPUT
- name: Log in to the GHCR
uses: docker/login-action@v1
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build and push MAJOR.MINOR.PATCH tag
uses: docker/build-push-action@v2
with:
file: docker/Dockerfile
push: True
target: ${{ github.event.inputs.package }}
build-args: |
${{ steps.build_arg.outputs.build_arg_name }}_ref=${{ steps.build_arg.outputs.build_arg_value }}@v${{ github.event.inputs.version_number }}
tags: |
ghcr.io/dbt-labs/${{ github.event.inputs.package }}:${{ github.event.inputs.version_number }}
- name: Build and push MINOR.latest tag
uses: docker/build-push-action@v2
if: ${{ needs.get_version_meta.outputs.minor_latest == 'True' }}
with:
file: docker/Dockerfile
push: True
target: ${{ github.event.inputs.package }}
build-args: |
${{ steps.build_arg.outputs.build_arg_name }}_ref=${{ steps.build_arg.outputs.build_arg_value }}@v${{ github.event.inputs.version_number }}
tags: |
ghcr.io/dbt-labs/${{ github.event.inputs.package }}:${{ needs.get_version_meta.outputs.major }}.${{ needs.get_version_meta.outputs.minor }}.latest
- name: Build and push latest tag
uses: docker/build-push-action@v2
if: ${{ needs.get_version_meta.outputs.latest == 'True' }}
with:
file: docker/Dockerfile
push: True
target: ${{ github.event.inputs.package }}
build-args: |
${{ steps.build_arg.outputs.build_arg_name }}_ref=${{ steps.build_arg.outputs.build_arg_value }}@v${{ github.event.inputs.version_number }}
tags: |
ghcr.io/dbt-labs/${{ github.event.inputs.package }}:latest
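A hedged Python rendering of the sed one-liners above: the build-arg name swaps `-` for `_`, and the value maps `dbt-postgres` onto the `dbt-core` ref, since the dbt-postgres image builds from dbt-core. Package and version values are illustrative.

```python
# Mirrors the build-arg derivation above, with the "_ref" suffix and "@v<version>"
# appended where the workflow appends them at usage time.
def docker_build_arg(package: str, version: str):
    arg_name = package.replace("-", "_") + "_ref"                  # e.g. dbt_postgres_ref
    arg_value = package.replace("postgres", "core") + f"@v{version}"
    return arg_name, arg_value

print(docker_build_arg("dbt-postgres", "1.0.1"))
# ('dbt_postgres_ref', 'dbt-core@v1.0.1')
```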

@@ -1,69 +1,24 @@
# **what?**
# Release workflow provides the following steps:
# - checkout the given commit;
# - validate version in sources and changelog file for given version;
# - bump the version and generate a changelog if needed;
# - merge all changes to the target branch if needed;
# - run unit and integration tests against given commit;
# - build and package that SHA;
# - release it to GitHub and PyPI with that specific build;
# - release it to Docker
#
# Take the given commit, run unit tests specifically on that sha, build and
# package it, and then release to GitHub and PyPi with that specific build
# **why?**
# Ensure an automated and tested release process
#
# **when?**
# This workflow can be run manually on demand or can be called by other workflows
name: "Release to GitHub, PyPI & Docker"
run-name: "Release ${{ inputs.version_number }} to GitHub, PyPI & Docker"
# **when?**
# This will only run manually with a given sha and version
name: Release to GitHub and PyPi
on:
workflow_dispatch:
inputs:
target_branch:
description: "The branch to release from"
type: string
sha:
description: 'The last commit sha in the release'
required: true
version_number:
description: "The release version number (i.e. 1.0.0b1)"
type: string
description: 'The release version number (i.e. 1.0.0b1)'
required: true
test_run:
description: "Test run (Publish release as draft)"
type: boolean
default: true
required: false
nightly_release:
description: "Nightly release to dev environment"
type: boolean
default: false
required: false
only_docker:
description: "Only release Docker image, skip GitHub & PyPI"
type: boolean
default: false
required: false
workflow_call:
inputs:
target_branch:
description: "The branch to release from"
type: string
required: true
version_number:
description: "The release version number (i.e. 1.0.0b1)"
type: string
required: true
test_run:
description: "Test run (Publish release as draft)"
type: boolean
default: true
required: false
nightly_release:
description: "Nightly release to dev environment"
type: boolean
default: false
required: false
permissions:
contents: write # this is the permission that allows creating a new release
@@ -73,198 +28,175 @@ defaults:
shell: bash
jobs:
job-setup:
name: Log Inputs
unit:
name: Unit test
runs-on: ubuntu-latest
outputs:
starting_sha: ${{ steps.set_sha.outputs.starting_sha }}
env:
TOXENV: "unit"
steps:
- name: "[DEBUG] Print Variables"
run: |
echo Inputs
echo The branch to release from: ${{ inputs.target_branch }}
echo The release version number: ${{ inputs.version_number }}
echo Test run: ${{ inputs.test_run }}
echo Nightly release: ${{ inputs.nightly_release }}
echo Only Docker: ${{ inputs.only_docker }}
- name: "Checkout target branch"
uses: actions/checkout@v4
- name: Check out the repository
uses: actions/checkout@v2
with:
ref: ${{ inputs.target_branch }}
# release-prep.yml really shouldn't take in the sha, but since core and all adapters
# depend on it now, this workaround lets us avoid inputting it manually, with the
# attendant risk of error. The changes always get merged into the head, so we can't
# use a specific commit for releases anyway.
- name: "Capture sha"
id: set_sha
run: |
echo "starting_sha=$(git rev-parse HEAD)" >> $GITHUB_OUTPUT
bump-version-generate-changelog:
name: Bump package version, Generate changelog
needs: [job-setup]
if: ${{ !inputs.only_docker }}
uses: dbt-labs/dbt-release/.github/workflows/release-prep.yml@main
persist-credentials: false
ref: ${{ github.event.inputs.sha }}
- name: Set up Python
uses: actions/setup-python@v2
with:
sha: ${{ needs.job-setup.outputs.starting_sha }}
version_number: ${{ inputs.version_number }}
target_branch: ${{ inputs.target_branch }}
env_setup_script_path: "scripts/env-setup.sh"
test_run: ${{ inputs.test_run }}
nightly_release: ${{ inputs.nightly_release }}
python-version: 3.8
secrets: inherit
- name: Install python dependencies
run: |
pip install --user --upgrade pip
pip install tox
pip --version
tox --version
log-outputs-bump-version-generate-changelog:
name: "[Log output] Bump package version, Generate changelog"
if: ${{ !failure() && !cancelled() && !inputs.only_docker }}
- name: Run tox
run: tox
needs: [bump-version-generate-changelog]
build:
name: build packages
runs-on: ubuntu-latest
steps:
- name: Print variables
run: |
echo Final SHA : ${{ needs.bump-version-generate-changelog.outputs.final_sha }}
echo Changelog path: ${{ needs.bump-version-generate-changelog.outputs.changelog_path }}
build-test-package:
name: Build, Test, Package
if: ${{ !failure() && !cancelled() && !inputs.only_docker }}
needs: [job-setup, bump-version-generate-changelog]
uses: dbt-labs/dbt-release/.github/workflows/build.yml@main
- name: Check out the repository
uses: actions/checkout@v2
with:
sha: ${{ needs.bump-version-generate-changelog.outputs.final_sha }}
version_number: ${{ inputs.version_number }}
changelog_path: ${{ needs.bump-version-generate-changelog.outputs.changelog_path }}
build_script_path: "scripts/build-dist.sh"
s3_bucket_name: "core-team-artifacts"
package_test_command: "dbt --version"
test_run: ${{ inputs.test_run }}
nightly_release: ${{ inputs.nightly_release }}
persist-credentials: false
ref: ${{ github.event.inputs.sha }}
secrets:
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
- name: Set up Python
uses: actions/setup-python@v2
with:
python-version: 3.8
- name: Install python dependencies
run: |
pip install --user --upgrade pip
pip install --upgrade setuptools wheel twine check-wheel-contents
pip --version
- name: Build distributions
run: ./scripts/build-dist.sh
- name: Show distributions
run: ls -lh dist/
- name: Check distribution descriptions
run: |
twine check dist/*
- name: Check wheel contents
run: |
check-wheel-contents dist/*.whl --ignore W007,W008
- uses: actions/upload-artifact@v2
with:
name: dist
path: |
dist/
!dist/dbt-${{github.event.inputs.version_number}}.tar.gz
test-build:
name: verify packages
needs: [build, unit]
runs-on: ubuntu-latest
steps:
- name: Set up Python
uses: actions/setup-python@v2
with:
python-version: 3.8
- name: Install python dependencies
run: |
pip install --user --upgrade pip
pip install --upgrade wheel
pip --version
- uses: actions/download-artifact@v2
with:
name: dist
path: dist/
- name: Show distributions
run: ls -lh dist/
- name: Install wheel distributions
run: |
find ./dist/*.whl -maxdepth 1 -type f | xargs pip install --force-reinstall --find-links=dist/
- name: Check wheel distributions
run: |
dbt --version
- name: Install source distributions
run: |
find ./dist/*.gz -maxdepth 1 -type f | xargs pip install --force-reinstall --find-links=dist/
- name: Check source distributions
run: |
dbt --version
github-release:
name: GitHub Release
if: ${{ !failure() && !cancelled() && !inputs.only_docker }}
needs: [bump-version-generate-changelog, build-test-package]
needs: test-build
uses: dbt-labs/dbt-release/.github/workflows/github-release.yml@main
runs-on: ubuntu-latest
steps:
- uses: actions/download-artifact@v2
with:
sha: ${{ needs.bump-version-generate-changelog.outputs.final_sha }}
version_number: ${{ inputs.version_number }}
changelog_path: ${{ needs.bump-version-generate-changelog.outputs.changelog_path }}
test_run: ${{ inputs.test_run }}
name: dist
path: '.'
# Need to set an output variable because env variables can't be taken as input
# This is needed for the next step with releasing to GitHub
- name: Find release type
id: release_type
env:
IS_PRERELEASE: ${{ contains(github.event.inputs.version_number, 'rc') || contains(github.event.inputs.version_number, 'b') }}
run: |
echo "isPrerelease=$IS_PRERELEASE" >> $GITHUB_OUTPUT
- name: Creating GitHub Release
uses: softprops/action-gh-release@v1
with:
name: dbt-core v${{github.event.inputs.version_number}}
tag_name: v${{github.event.inputs.version_number}}
prerelease: ${{ steps.release_type.outputs.isPrerelease }}
target_commitish: ${{github.event.inputs.sha}}
body: |
[Release notes](https://github.com/dbt-labs/dbt-core/blob/main/CHANGELOG.md)
files: |
dbt_postgres-${{github.event.inputs.version_number}}-py3-none-any.whl
dbt_core-${{github.event.inputs.version_number}}-py3-none-any.whl
dbt-postgres-${{github.event.inputs.version_number}}.tar.gz
dbt-core-${{github.event.inputs.version_number}}.tar.gz
pypi-release:
name: PyPI Release
name: Pypi release
needs: [github-release]
uses: dbt-labs/dbt-release/.github/workflows/pypi-release.yml@main
with:
version_number: ${{ inputs.version_number }}
test_run: ${{ inputs.test_run }}
secrets:
PYPI_API_TOKEN: ${{ secrets.PYPI_API_TOKEN }}
TEST_PYPI_API_TOKEN: ${{ secrets.TEST_PYPI_API_TOKEN }}
determine-docker-package:
# dbt-postgres exists within dbt-core for versions 1.7 and earlier but is a separate package for 1.8 and later.
# Determine whether we need to release dbt-core alone or both dbt-core and dbt-postgres.
name: Determine Docker Package
if: ${{ !failure() && !cancelled() }}
runs-on: ubuntu-latest
needs: [pypi-release]
outputs:
matrix: ${{ steps.determine-docker-package.outputs.matrix }}
needs: github-release
environment: PypiProd
steps:
- name: "Audit Version And Parse Into Parts"
id: semver
uses: dbt-labs/actions/parse-semver@v1.1.0
- uses: actions/download-artifact@v2
with:
version: ${{ inputs.version_number }}
name: dist
path: 'dist'
- name: "Determine Packages to Release"
id: determine-docker-package
run: |
if [ ${{ steps.semver.outputs.minor }} -ge 8 ]; then
json_output={\"package\":[\"dbt-core\"]}
else
json_output={\"package\":[\"dbt-core\",\"dbt-postgres\"]}
fi
echo "matrix=$json_output" >> $GITHUB_OUTPUT
docker-release:
name: "Docker Release for ${{ matrix.package }}"
needs: [determine-docker-package]
# We cannot release to Docker on a test run because the release uses the GitHub tag,
# and draft releases don't actually tag the commit, so Docker finds nothing to release.
if: ${{ !failure() && !cancelled() && (!inputs.test_run || inputs.only_docker) }}
strategy:
matrix: ${{fromJson(needs.determine-docker-package.outputs.matrix)}}
permissions:
packages: write
uses: dbt-labs/dbt-release/.github/workflows/release-docker.yml@main
- name: Publish distribution to PyPI
uses: pypa/gh-action-pypi-publish@v1.4.2
with:
package: ${{ matrix.package }}
version_number: ${{ inputs.version_number }}
test_run: ${{ inputs.test_run }}
slack-notification:
name: Slack Notification
if: ${{ failure() && (!inputs.test_run || inputs.nightly_release) }}
needs:
[
bump-version-generate-changelog,
build-test-package,
github-release,
pypi-release,
docker-release,
]
uses: dbt-labs/dbt-release/.github/workflows/slack-post-notification.yml@main
with:
status: "failure"
secrets:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_DEV_CORE_ALERTS }}
testing-slack-notification:
# sends notifications to #slackbot-test
name: Testing - Slack Notification
if: ${{ failure() && inputs.test_run && !inputs.nightly_release }}
needs:
[
bump-version-generate-changelog,
build-test-package,
github-release,
pypi-release,
docker-release,
]
uses: dbt-labs/dbt-release/.github/workflows/slack-post-notification.yml@main
with:
status: "failure"
secrets:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_TESTING_WEBHOOK_URL }}
password: ${{ secrets.PYPI_API_TOKEN }}
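A small sketch of the `determine-docker-package` decision shown earlier in this file: dbt-postgres ships from this repo only for 1.7 and earlier, so the Docker matrix shrinks from minor version 8 onward. Minor versions here are illustrative inputs.

```python
# Mirrors the matrix JSON built by the determine-docker-package step above.
import json

def docker_release_matrix(minor: int) -> str:
    packages = ["dbt-core"] if minor >= 8 else ["dbt-core", "dbt-postgres"]
    return json.dumps({"package": packages})

print(docker_release_matrix(7))  # {"package": ["dbt-core", "dbt-postgres"]}
print(docker_release_matrix(8))  # {"package": ["dbt-core"]}
```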

@@ -1,30 +0,0 @@
# **what?**
# Clean up branches left over from automation and testing. Also clean up
# draft releases from release testing.
# **why?**
# The automations are leaving behind branches and releases that clutter
# the repository. Sometimes we need them to debug processes, so we don't
# want them deleted immediately. This runs on Saturday to avoid running
# at the same time as an actual release, which could break that release
# mid-run.
# **when?**
# Mainly on a schedule of 12:00 Saturday.
# Manual trigger can also run on demand
name: Repository Cleanup
on:
schedule:
- cron: '0 12 * * SAT' # At 12:00 on Saturday - details in `why` above
workflow_dispatch: # for manual triggering
permissions:
contents: write
jobs:
cleanup-repo:
uses: dbt-labs/actions/.github/workflows/repository-cleanup.yml@main
secrets: inherit

@@ -13,63 +13,48 @@
name: Artifact Schema Check
on:
pull_request:
types: [ opened, reopened, labeled, unlabeled, synchronize ]
paths-ignore: [ '.changes/**', '.github/**', 'tests/**', '**.md', '**.yml' ]
workflow_dispatch:
pull_request: #TODO: remove before merging
push:
branches:
- "develop"
- "*.latest"
- "releases/*"
# no special access is needed
permissions: read-all
env:
LATEST_SCHEMA_PATH: ${{ github.workspace }}/new_schemas
SCHEMA_DIFF_ARTIFACT: ${{ github.workspace }}/schema_changes.txt
SCHEMA_DIFF_ARTIFACT: ${{ github.workspace }}/schema_changes.txt
DBT_REPO_DIRECTORY: ${{ github.workspace }}/dbt
SCHEMA_REPO_DIRECTORY: ${{ github.workspace }}/schemas.getdbt.com
jobs:
checking-schemas:
name: "Post-merge schema changes required"
name: "Checking schemas"
runs-on: ubuntu-latest
steps:
- name: Set up Python
uses: actions/setup-python@v5
uses: actions/setup-python@v2
with:
python-version: 3.9
python-version: 3.8
- name: Checkout dbt repo
uses: actions/checkout@v4
uses: actions/checkout@v2.3.4
with:
path: ${{ env.DBT_REPO_DIRECTORY }}
- name: Check for changes in core/dbt/artifacts
# https://github.com/marketplace/actions/paths-changes-filter
uses: dorny/paths-filter@v3
id: check_artifact_changes
with:
filters: |
artifacts_changed:
- 'core/dbt/artifacts/**'
list-files: shell
working-directory: ${{ env.DBT_REPO_DIRECTORY }}
- name: Succeed if no artifacts have changed
if: steps.check_artifact_changes.outputs.artifacts_changed == 'false'
run: |
echo "No artifact changes found in core/dbt/artifacts. CI check passed."
- name: Checkout schemas.getdbt.com repo
if: steps.check_artifact_changes.outputs.artifacts_changed == 'true'
uses: actions/checkout@v4
uses: actions/checkout@v2.3.4
with:
repository: dbt-labs/schemas.getdbt.com
ref: 'main'
ssh-key: ${{ secrets.SCHEMA_SSH_PRIVATE_KEY }}
path: ${{ env.SCHEMA_REPO_DIRECTORY }}
- name: Generate current schema
if: steps.check_artifact_changes.outputs.artifacts_changed == 'true'
run: |
cd ${{ env.DBT_REPO_DIRECTORY }}
python3 -m venv env
@@ -80,17 +65,26 @@ jobs:
# Copy generated schema files into the schemas.getdbt.com repo
# Do a git diff to find any changes
# Ignore any lines with date-like (yyyy-mm-dd) or version-like (x.y.z) changes
# Ignore any date-only or version-only changes, though
- name: Compare schemas
if: steps.check_artifact_changes.outputs.artifacts_changed == 'true'
run: |
cp -r ${{ env.LATEST_SCHEMA_PATH }}/dbt ${{ env.SCHEMA_REPO_DIRECTORY }}
cd ${{ env.SCHEMA_REPO_DIRECTORY }}
git diff -I='*[0-9]{4}-[0-9]{2}-[0-9]{2}' -I='*[0-9]+\.[0-9]+\.[0-9]+' --exit-code > ${{ env.SCHEMA_DIFF_ARTIFACT }}
diff_results=$(git diff -I='*[0-9]{4}-(0[1-9]|1[0-2])-(0[1-9]|[1-2][0-9]|3[0-1])T' \
-I='*[0-9]{1}.[0-9]{2}.[0-9]{1}(rc[0-9]|b[0-9]| )' --compact-summary)
if [[ -n "$diff_results" ]]; then
echo "$diff_results"
echo "Schema changes detected!"
git diff -I='*[0-9]{4}-(0[1-9]|1[0-2])-(0[1-9]|[1-2][0-9]|3[0-1])T' \
-I='*[0-9]{1}.[0-9]{2}.[0-9]{1}(rc[0-9]|b[0-9]| )' > ${{ env.SCHEMA_DIFF_ARTIFACT }}
exit 1
else
echo "No schema changes detected"
fi
- name: Upload schema diff
uses: actions/upload-artifact@v4
if: ${{ failure() && steps.check_artifact_changes.outputs.artifacts_changed == 'true' }}
uses: actions/upload-artifact@v2.2.4
if: ${{ failure() }}
with:
name: 'schema_changes.txt'
name: 'schema_changes.txt'
path: '${{ env.SCHEMA_DIFF_ARTIFACT }}'
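A loose Python approximation of the `git diff -I` filters above: drop changed lines whose only difference is a timestamp-like or version-like string. The patterns are a generalization of the workflow's regexes, not an exact copy.

```python
# Approximates the schema-diff noise filters above; sample inputs are illustrative.
import re

IGNORED = [
    re.compile(r"\d{4}-(0[1-9]|1[0-2])-(0[1-9]|[1-2]\d|3[0-1])T"),  # date-like
    re.compile(r"\d+\.\d+\.\d+(rc\d|b\d)?"),                        # version-like
]

def is_ignorable(diff_line: str) -> bool:
    return any(p.search(diff_line) for p in IGNORED)

print(is_ignorable('+  "generated_at": "2023-01-31T09:03:06Z"'))  # True
print(is_ignorable('+  "new_field": {"type": "string"}'))         # False
```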

@@ -18,41 +18,11 @@ on:
permissions: read-all
# top-level adjustments can be made here
env:
# number of parallel processes to spawn for python testing
PYTHON_INTEGRATION_TEST_WORKERS: 5
jobs:
integration-metadata:
name: integration test metadata generation
runs-on: ubuntu-latest
outputs:
split-groups: ${{ steps.generate-split-groups.outputs.split-groups }}
steps:
- name: generate split-groups
id: generate-split-groups
run: |
MATRIX_JSON="["
for B in $(seq 1 ${{ env.PYTHON_INTEGRATION_TEST_WORKERS }}); do
MATRIX_JSON+=$(sed 's/^/"/;s/$/"/' <<< "${B}")
done
MATRIX_JSON="${MATRIX_JSON//\"\"/\", \"}"
MATRIX_JSON+="]"
echo "split-groups=${MATRIX_JSON}" >> $GITHUB_OUTPUT
# run the performance measurements on the current or default branch
test-schema:
name: Test Log Schema
runs-on: ubuntu-20.04
timeout-minutes: 30
needs:
- integration-metadata
strategy:
fail-fast: false
matrix:
split-group: ${{ fromJson(needs.integration-metadata.outputs.split-groups) }}
env:
# turns warnings into errors
RUSTFLAGS: "-D warnings"
@@ -60,8 +30,6 @@ jobs:
LOG_DIR: "/home/runner/work/dbt-core/dbt-core/logs"
# tells integration tests to output into json format
DBT_LOG_FORMAT: "json"
# tell eventmgr to convert logging events into bytes
DBT_TEST_BINARY_SERIALIZATION: "true"
# Additional test users
DBT_TEST_USER_1: dbt_test_user_1
DBT_TEST_USER_2: dbt_test_user_2
@@ -69,14 +37,14 @@ jobs:
steps:
- name: checkout dev
uses: actions/checkout@v4
uses: actions/checkout@v2
with:
persist-credentials: false
- name: Setup Python
uses: actions/setup-python@v5
uses: actions/setup-python@v2.2.2
with:
python-version: "3.9"
python-version: "3.8"
- name: Install python dependencies
run: |
@@ -94,19 +62,4 @@ jobs:
# Integration tests generate a ton of logs in different files; the next step will find them all.
# We actually care whether these pass, because the normal test run doesn't usually include many JSON log outputs.
- name: Run integration tests
uses: nick-fields/retry@v3
with:
timeout_minutes: 30
max_attempts: 3
command: tox -e integration -- -nauto
env:
PYTEST_ADDOPTS: ${{ format('--splits {0} --group {1}', env.PYTHON_INTEGRATION_TEST_WORKERS, matrix.split-group) }}
test-schema-report:
name: Log Schema Test Suite
runs-on: ubuntu-latest
needs: test-schema
steps:
- name: "[Notification] Log test suite passes"
run: |
echo "::notice title="Log test suite passes""
run: tox -e integration -- -nauto
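A minimal sketch of the split-groups generation shown earlier in this file: the bash loop builds a JSON array of worker indices (`"1"` through `N`) that feeds the pytest split matrix.

```python
# Mirrors the MATRIX_JSON construction in the integration-metadata job above.
import json

def split_groups(workers: int) -> str:
    return json.dumps([str(i) for i in range(1, workers + 1)])

print(split_groups(5))  # ["1", "2", "3", "4", "5"]
```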

@@ -1,154 +0,0 @@
# **what?**
# This workflow runs the test(s) at the input path a given number of times to determine whether they are flaky. You can test with any supported OS/Python combination.
# Runs are split into 10 batches to allow more test iterations, faster.
# **why?**
# To check whether a test is flaky and whether a previously flaky test has been fixed. This allows easy testing across supported Python versions and OS combinations.
# **when?**
# This is triggered manually from dbt-core.
name: Flaky Tester
on:
workflow_dispatch:
inputs:
branch:
description: 'Branch to check out'
type: string
required: true
default: 'main'
test_path:
description: 'Path to single test to run (ex: tests/functional/retry/test_retry.py::TestRetry::test_fail_fast)'
type: string
required: true
default: 'tests/functional/...'
python_version:
description: 'Version of Python to Test Against'
type: choice
options:
- '3.9'
- '3.10'
- '3.11'
os:
description: 'OS to run test in'
type: choice
options:
- 'ubuntu-latest'
- 'macos-14'
- 'windows-latest'
num_runs_per_batch:
description: 'Max number of times to run the test per batch. We always run 10 batches.'
type: number
required: true
default: '50'
permissions: read-all
defaults:
run:
shell: bash
jobs:
debug:
runs-on: ubuntu-latest
steps:
- name: "[DEBUG] Output Inputs"
run: |
echo "Branch: ${{ inputs.branch }}"
echo "test_path: ${{ inputs.test_path }}"
echo "python_version: ${{ inputs.python_version }}"
echo "os: ${{ inputs.os }}"
echo "num_runs_per_batch: ${{ inputs.num_runs_per_batch }}"
pytest:
runs-on: ${{ inputs.os }}
strategy:
# run all batches, even if one fails. This informs how flaky the test may be.
fail-fast: false
# using a matrix to speed up the jobs since the matrix will run in parallel when runners are available
matrix:
batch: ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"]
env:
PYTEST_ADDOPTS: "-v --color=yes -n4 --csv integration_results.csv"
DBT_TEST_USER_1: dbt_test_user_1
DBT_TEST_USER_2: dbt_test_user_2
DBT_TEST_USER_3: dbt_test_user_3
DD_CIVISIBILITY_AGENTLESS_ENABLED: true
DD_API_KEY: ${{ secrets.DATADOG_API_KEY }}
DD_SITE: datadoghq.com
DD_ENV: ci
DD_SERVICE: ${{ github.event.repository.name }}
steps:
- name: "Checkout code"
uses: actions/checkout@v4
with:
ref: ${{ inputs.branch }}
- name: "Setup Python"
uses: actions/setup-python@v5
with:
python-version: "${{ inputs.python_version }}"
- name: "Setup Dev Environment"
run: make dev
- name: "Set up postgres (linux)"
if: inputs.os == 'ubuntu-latest'
run: make setup-db
# macOS and Windows don't use make due to Docker limitations on those GitHub runners
- name: "Set up postgres (macos)"
if: inputs.os == 'macos-14'
uses: ./.github/actions/setup-postgres-macos
- name: "Set up postgres (windows)"
if: inputs.os == 'windows-latest'
uses: ./.github/actions/setup-postgres-windows
- name: "Test Command"
id: command
run: |
test_command="python -m pytest ${{ inputs.test_path }}"
echo "test_command=$test_command" >> $GITHUB_OUTPUT
- name: "Run test ${{ inputs.num_runs_per_batch }} times"
id: pytest
run: |
set +e
for ((i=1; i<=${{ inputs.num_runs_per_batch }}; i++))
do
echo "Running pytest iteration $i..."
python -m pytest --ddtrace ${{ inputs.test_path }}
exit_code=$?
if [[ $exit_code -eq 0 ]]; then
success=$((success + 1))
echo "Iteration $i: Success"
else
failure=$((failure + 1))
echo "Iteration $i: Failure"
fi
echo
echo "==========================="
echo "Successful runs: $success"
echo "Failed runs: $failure"
echo "==========================="
echo
done
echo "failure=$failure" >> $GITHUB_OUTPUT
- name: "Success and Failure Summary: ${{ inputs.os }}/Python ${{ inputs.python_version }}"
run: |
echo "Batch: ${{ matrix.batch }}"
echo "Successful runs: ${{ steps.pytest.outputs.success }}"
echo "Failed runs: ${{ steps.pytest.outputs.failure }}"
- name: "Error for Failures"
if: ${{ steps.pytest.outputs.failure }}
run: |
echo "Batch ${{ matrix.batch }} failed ${{ steps.pytest.outputs.failure }} of ${{ inputs.num_runs_per_batch }} tests"
exit 1
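A rough Python equivalent of the bash batch loop above: run the test N times, tally successes and failures, and let the caller error out if any run failed. The function name is illustrative.

```python
# Mirrors the flaky-tester iteration loop above.
import subprocess

def run_batch(test_path: str, runs: int):
    success = failure = 0
    for i in range(1, runs + 1):
        # Equivalent of `python -m pytest <test_path>` per iteration.
        result = subprocess.run(["python", "-m", "pytest", test_path])
        if result.returncode == 0:
            success += 1
        else:
            failure += 1
        print(f"Iteration {i}: {'Success' if result.returncode == 0 else 'Failure'}")
    return success, failure
```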

.github/workflows/test/.actrc
@@ -0,0 +1 @@
-P ubuntu-latest=ghcr.io/catthehacker/ubuntu:act-latest

.github/workflows/test/.gitignore
@@ -0,0 +1 @@
.secrets

@@ -0,0 +1 @@
GITHUB_TOKEN=GH_PERSONAL_ACCESS_TOKEN_GOES_HERE

@@ -0,0 +1,6 @@
{
"inputs": {
"version_number": "1.0.1",
"package": "dbt-postgres"
}
}

@@ -24,8 +24,10 @@ permissions:
jobs:
triage_label:
if: contains(github.event.issue.labels.*.name, 'awaiting_response')
uses: dbt-labs/actions/.github/workflows/swap-labels.yml@main
runs-on: ubuntu-latest
steps:
- name: initial labeling
uses: andymckay/labeler@master
with:
add_label: "triage"
remove_label: "awaiting_response"
secrets: inherit
add-labels: "triage"
remove-labels: "awaiting_response"

.github/workflows/version-bump.yml
@@ -0,0 +1,125 @@
# **what?**
# This workflow takes the new version number to bump to. With that, it
# runs bumpversion to update the version number everywhere in the
# code base and then runs changie to create the corresponding changelog.
# A PR will be created with the changes, which can be reviewed before merging.
# **why?**
# This is to aid in releasing dbt and making sure we have updated
# the version in all places and generated the changelog.
# **when?**
# This is triggered manually
name: Version Bump
on:
workflow_dispatch:
inputs:
version_number:
description: 'The version number to bump to (ex. 1.2.0, 1.3.0b1)'
required: true
permissions:
contents: write
pull-requests: write
jobs:
bump:
runs-on: ubuntu-latest
steps:
- name: "[DEBUG] Print Variables"
run: |
echo "all variables defined as inputs"
echo The version_number: ${{ github.event.inputs.version_number }}
- name: Check out the repository
uses: actions/checkout@v2
- uses: actions/setup-python@v2
with:
python-version: "3.8"
- name: Install python dependencies
run: |
python3 -m venv env
source env/bin/activate
pip install --upgrade pip
- name: Add Homebrew to PATH
run: |
echo "/home/linuxbrew/.linuxbrew/bin:/home/linuxbrew/.linuxbrew/sbin" >> $GITHUB_PATH
- name: Install Homebrew packages
run: |
brew install pre-commit
brew tap miniscruff/changie https://github.com/miniscruff/changie
brew install changie
- name: Audit Version and Parse Into Parts
id: semver
uses: dbt-labs/actions/parse-semver@v1
with:
version: ${{ github.event.inputs.version_number }}
- name: Set branch value
id: variables
run: |
echo "BRANCH_NAME=prep-release/${{ github.event.inputs.version_number }}_$GITHUB_RUN_ID" >> $GITHUB_OUTPUT
- name: Create PR branch
run: |
git checkout -b ${{ steps.variables.outputs.BRANCH_NAME }}
git push origin ${{ steps.variables.outputs.BRANCH_NAME }}
git branch --set-upstream-to=origin/${{ steps.variables.outputs.BRANCH_NAME }} ${{ steps.variables.outputs.BRANCH_NAME }}
- name: Bump version
run: |
source env/bin/activate
pip install -r dev-requirements.txt
env/bin/bumpversion --allow-dirty --new-version ${{ github.event.inputs.version_number }} major
git status
- name: Run changie
run: |
if [[ ${{ steps.semver.outputs.is-pre-release }} -eq 1 ]]
then
changie batch ${{ steps.semver.outputs.base-version }} --move-dir '${{ steps.semver.outputs.base-version }}' --prerelease '${{ steps.semver.outputs.pre-release }}'
else
changie batch ${{ steps.semver.outputs.base-version }} --include '${{ steps.semver.outputs.base-version }}' --remove-prereleases
fi
changie merge
git status
# this step will fail on whitespace errors but also correct them
- name: Remove trailing whitespace
continue-on-error: true
run: |
pre-commit run trailing-whitespace --files .bumpversion.cfg CHANGELOG.md .changes/*
git status
# this step will fail on newline errors but also correct them
- name: Removing extra newlines
continue-on-error: true
run: |
pre-commit run end-of-file-fixer --files .bumpversion.cfg CHANGELOG.md .changes/*
git status
- name: Commit version bump to branch
uses: EndBug/add-and-commit@v7
with:
author_name: 'Github Build Bot'
author_email: 'buildbot@fishtownanalytics.com'
message: 'Bumping version to ${{ github.event.inputs.version_number }} and generate CHANGELOG'
branch: '${{ steps.variables.outputs.BRANCH_NAME }}'
push: 'origin origin/${{ steps.variables.outputs.BRANCH_NAME }}'
- name: Create Pull Request
uses: peter-evans/create-pull-request@v3
with:
author: 'Github Build Bot <buildbot@fishtownanalytics.com>'
base: ${{github.ref}}
title: 'Bumping version to ${{ github.event.inputs.version_number }} and generate changelog'
branch: '${{ steps.variables.outputs.BRANCH_NAME }}'
labels: |
Skip Changelog
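A hedged sketch of the changie branching above: pre-releases are batched into a per-version prerelease directory, while final releases fold prereleases back into the base version. The function is illustrative; the real work happens in the shell step.

```python
# Mirrors the "Run changie" conditional above as a command builder.
from typing import Optional

def changie_command(base_version: str, prerelease: Optional[str]):
    if prerelease:
        return ["changie", "batch", base_version,
                "--move-dir", base_version, "--prerelease", prerelease]
    return ["changie", "batch", base_version,
            "--include", base_version, "--remove-prereleases"]

print(changie_command("1.5.0", "b1"))
print(changie_command("1.5.0", None))
```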

.gitignore
@@ -11,7 +11,6 @@ __pycache__/
env*/
dbt_env/
build/
!tests/functional/build
!core/dbt/docs/build
develop-eggs/
dist/
@@ -29,8 +28,6 @@ var/
.mypy_cache/
.dmypy.json
logs/
.user.yml
profiles.yml
# PyInstaller
# Usually these files are written by a python script from a template
@@ -54,12 +51,8 @@ coverage.xml
*,cover
.hypothesis/
test.env
makefile.test.env
*.pytest_cache/
# Unit test artifacts
index.html
# Translations
*.mo
@@ -108,6 +101,3 @@ venv/
# poetry
poetry.lock
# asdf
.tool-versions

@@ -1,4 +0,0 @@
[settings]
profile=black
extend_skip_glob=.github/*,third-party-stubs/*,scripts/*
known_first_party=dbt,dbt_adapters,dbt_common,dbt_extractor,dbt_semantic_interfaces

@@ -1,9 +1,10 @@
# Configuration for pre-commit hooks (see https://pre-commit.com/).
# Eventually the hooks described here will be run as tests before merging each PR.
exclude: ^(core/dbt/docs/build/|core/dbt/common/events/types_pb2.py|core/dbt/events/core_types_pb2.py|core/dbt/adapters/events/adapter_types_pb2.py)
# TODO: remove global exclusion of tests when testing overhaul is complete
exclude: ^(test/|core/dbt/docs/build/)
# Force all unspecified python hooks to run python 3.9
# Force all unspecified python hooks to run python 3.8
default_language_version:
python: python3
@@ -15,19 +16,12 @@ repos:
args: [--unsafe]
- id: check-json
- id: end-of-file-fixer
exclude: schemas/dbt/manifest/
- id: trailing-whitespace
exclude_types:
- "markdown"
- id: check-case-conflict
- repo: https://github.com/pycqa/isort
# rev must match what's in dev-requirements.txt
rev: 5.13.2
hooks:
- id: isort
- repo: https://github.com/psf/black
# rev must match what's in dev-requirements.txt
rev: 24.3.0
rev: 22.3.0
hooks:
- id: black
- id: black
@@ -37,7 +31,6 @@ repos:
- "--check"
- "--diff"
- repo: https://github.com/pycqa/flake8
# rev must match what's in dev-requirements.txt
rev: 4.0.1
hooks:
- id: flake8
@@ -45,8 +38,7 @@ repos:
alias: flake8-check
stages: [manual]
- repo: https://github.com/pre-commit/mirrors-mypy
# rev must match what's in dev-requirements.txt
rev: v1.4.1
rev: v0.942
hooks:
- id: mypy
# N.B.: Mypy is... a bit fragile.

@@ -26,13 +26,12 @@ Legacy tests are found in the 'test' directory:
The "tasks" map to top-level dbt commands. So `dbt run` => task.run.RunTask, etc. Some are more like abstract base classes (GraphRunnableTask, for example) but all the concrete types outside of task should map to tasks. Currently one executes at a time. The tasks kick off their “Runners” and those do execute in parallel. The parallelism is managed via a thread pool, in GraphRunnableTask.
core/dbt/task/docs/index.html
core/dbt/include/index.html
This is the docs website code. It comes from the dbt-docs repository, and is generated when a release is packaged.
## Adapters
dbt uses an adapter-plugin pattern to extend support to different databases, warehouses, query engines, etc.
Note: dbt-postgres used to exist in dbt-core but is now in [its own repo](https://github.com/dbt-labs/dbt-postgres)
dbt uses an adapter-plugin pattern to extend support to different databases, warehouses, query engines, etc. For testing and development purposes, the dbt-postgres plugin lives alongside the dbt-core codebase, in the [`plugins`](plugins) subdirectory. Like other adapter plugins, it is a self-contained codebase and package that builds on top of dbt-core.
Each adapter is a mix of python, Jinja2, and SQL. The adapter code also makes heavy use of Jinja2 to wrap modular chunks of SQL functionality, define default implementations, and allow plugins to override it.

@@ -1,6 +1,6 @@
# dbt Core Changelog
- This file provides a full account of all changes to `dbt-core`
- This file provides a full account of all changes to `dbt-core` and `dbt-postgres`
- Changes are listed under the (pre)release in which they first appear. Subsequent releases include changes from previous releases.
- "Breaking changes" listed under a version may require action from end users or external maintainers when upgrading to that version.
- Do not edit this file directly. This file is auto-generated using [changie](https://github.com/miniscruff/changie). For details on how to document a change, see [the contributing guide](https://github.com/dbt-labs/dbt-core/blob/main/CONTRIBUTING.md#adding-changelog-entry)
@@ -10,11 +10,6 @@
For information on prior major and minor releases, see their changelogs:
* [1.9](https://github.com/dbt-labs/dbt-core/blob/1.9.latest/CHANGELOG.md)
* [1.8](https://github.com/dbt-labs/dbt-core/blob/1.8.latest/CHANGELOG.md)
* [1.7](https://github.com/dbt-labs/dbt-core/blob/1.7.latest/CHANGELOG.md)
* [1.6](https://github.com/dbt-labs/dbt-core/blob/1.6.latest/CHANGELOG.md)
* [1.5](https://github.com/dbt-labs/dbt-core/blob/1.5.latest/CHANGELOG.md)
* [1.4](https://github.com/dbt-labs/dbt-core/blob/1.4.latest/CHANGELOG.md)
* [1.3](https://github.com/dbt-labs/dbt-core/blob/1.3.latest/CHANGELOG.md)
* [1.2](https://github.com/dbt-labs/dbt-core/blob/1.2.latest/CHANGELOG.md)

@@ -5,12 +5,11 @@
1. [About this document](#about-this-document)
2. [Getting the code](#getting-the-code)
3. [Setting up an environment](#setting-up-an-environment)
4. [Running dbt-core in development](#running-dbt-core-in-development)
4. [Running `dbt` in development](#running-dbt-core-in-development)
5. [Testing dbt-core](#testing)
6. [Debugging](#debugging)
7. [Adding or modifying a changelog entry](#adding-or-modifying-a-changelog-entry)
7. [Adding a changelog entry](#adding-a-changelog-entry)
8. [Submitting a Pull Request](#submitting-a-pull-request)
9. [Troubleshooting Tips](#troubleshooting-tips)
## About this document
@@ -22,10 +21,10 @@ If you get stuck, we're happy to help! Drop us a line in the `#dbt-core-developm
### Notes
- **Adapters:** Is your issue or proposed code change related to a specific [database adapter](https://docs.getdbt.com/docs/available-adapters)? If so, please open issues, PRs, and discussions in that adapter's repository instead.
- **Adapters:** Is your issue or proposed code change related to a specific [database adapter](https://docs.getdbt.com/docs/available-adapters)? If so, please open issues, PRs, and discussions in that adapter's repository instead. The sole exception is Postgres; the `dbt-postgres` plugin lives in this repository (`dbt-core`).
- **CLA:** Please note that anyone contributing code to `dbt-core` must sign the [Contributor License Agreement](https://docs.getdbt.com/docs/contributor-license-agreements). If you are unable to sign the CLA, the `dbt-core` maintainers will unfortunately be unable to merge any of your Pull Requests. We welcome you to participate in discussions, open issues, and comment on existing ones.
- **Branches:** All pull requests from community contributors should target the `main` branch (default). If the change is needed as a patch for a minor version of dbt that has already been released (or is already a release candidate), a maintainer will backport the changes in your PR to the relevant "latest" release branch (`1.0.latest`, `1.1.latest`, ...). If an issue fix applies to a release branch, that fix should be first committed to the development branch and then to the release branch (rarely release-branch fixes may not apply to `main`).
- **Releases**: Before releasing a new minor version of Core, we prepare a series of alphas and release candidates to allow users (especially employees of dbt Labs!) to test the new version in live environments. This is an important quality assurance step, as it exposes the new code to a wide variety of complicated deployments and can surface bugs before official release. Releases are accessible via our [supported installation methods](https://docs.getdbt.com/docs/core/installation-overview#install-dbt-core).
- **Releases**: Before releasing a new minor version of Core, we prepare a series of alphas and release candidates to allow users (especially employees of dbt Labs!) to test the new version in live environments. This is an important quality assurance step, as it exposes the new code to a wide variety of complicated deployments and can surface bugs before official release. Releases are accessible via pip, homebrew, and dbt Cloud.
## Getting the code
@@ -45,7 +44,9 @@ If you are not a member of the `dbt-labs` GitHub organization, you can contribut
### dbt Labs contributors
If you are a member of the `dbt-labs` GitHub organization, you will have push access to the `dbt-core` repo. Rather than forking `dbt-core` to make your changes, just clone the repository, check out a new branch, and push directly to that branch.
If you are a member of the `dbt-labs` GitHub organization, you will have push access to the `dbt-core` repo. Rather than forking `dbt-core` to make your changes, just clone the repository, check out a new branch, and push directly to that branch. Branch names should be prefixed with `CT-XXX/` where:
* CT stands for 'core team'
* XXX stands for a JIRA ticket number
## Setting up an environment
@@ -55,7 +56,7 @@ There are some tools that will be helpful to you in developing locally. While th
These are the tools used in `dbt-core` development and testing:
- [`tox`](https://tox.readthedocs.io/en/latest/) to manage virtualenvs across python versions. We currently target the latest patch releases for Python 3.8, 3.9, 3.10 and 3.11
- [`tox`](https://tox.readthedocs.io/en/latest/) to manage virtualenvs across python versions. We currently target the latest patch releases for Python 3.7, 3.8, 3.9, 3.10 and 3.11
- [`pytest`](https://docs.pytest.org/en/latest/) to define, discover, and run tests
- [`flake8`](https://flake8.pycqa.org/en/latest/) for code linting
- [`black`](https://github.com/psf/black) for code formatting
@@ -112,7 +113,7 @@ When installed in this way, any changes you make to your local copy of the sourc
With your virtualenv activated, the `dbt` script should point back to the source code you've cloned on your machine. You can verify this by running `which dbt`. This command should show you a path to an executable in your virtualenv.
Configure your [profile](https://docs.getdbt.com/docs/configure-your-profile) as necessary to connect to your target databases. It may be a good idea to add a new profile pointing to a local Postgres instance, or a specific test sandbox within your data warehouse if appropriate. Make sure to create a profile before running integration tests.
Configure your [profile](https://docs.getdbt.com/docs/configure-your-profile) as necessary to connect to your target databases. It may be a good idea to add a new profile pointing to a local Postgres instance, or a specific test sandbox within your data warehouse if appropriate.
## Testing
@@ -162,7 +163,7 @@ suites.
#### `tox`
[`tox`](https://tox.readthedocs.io/en/latest/) takes care of managing virtualenvs and installing dependencies in order to run tests. You can also run tests in parallel; for example, you can run unit tests for Python 3.8, Python 3.9, Python 3.10 and Python 3.11 checks in parallel with `tox -p`. Also, you can run unit tests for specific python versions with `tox -e py38`. The configuration for these tests is located in `tox.ini`.
[`tox`](https://tox.readthedocs.io/en/latest/) takes care of managing virtualenvs and installing dependencies in order to run tests. You can also run tests in parallel; for example, you can run unit tests for Python 3.7, Python 3.8, Python 3.9, Python 3.10 and Python 3.11 checks in parallel with `tox -p`. Also, you can run unit tests for specific python versions with `tox -e py37`. The configuration for these tests is located in `tox.ini`.
#### `pytest`
@@ -170,10 +171,12 @@ Finally, you can also run a specific test or group of tests using [`pytest`](htt
```sh
# run all unit tests in a file
python3 -m pytest tests/unit/test_invocation_id.py
python3 -m pytest test/unit/test_graph.py
# run a specific unit test
python3 -m pytest tests/unit/test_invocation_id.py::TestInvocationId::test_invocation_id
# run specific Postgres functional tests
python3 -m pytest test/unit/test_graph.py::GraphTest::test__dependency_list
# run specific Postgres integration tests (old way)
python3 -m pytest -m profile_postgres test/integration/074_postgres_unlogged_table_tests
# run specific Postgres integration tests (new way)
python3 -m pytest tests/functional/sources
```
@@ -182,8 +185,9 @@ python3 -m pytest tests/functional/sources
### Unit, Integration, Functional?
Here are some general rules for adding tests:
* unit tests (`tests/unit`) don't need to access a database; "pure Python" tests should be written as unit tests
* functional tests (`tests/functional`) cover anything that interacts with a database, namely adapters
* unit tests (`test/unit` & `tests/unit`) don't need to access a database; "pure Python" tests should be written as unit tests
* functional tests (`test/integration` & `tests/functional`) cover anything that interacts with a database, namely adapters
* *everything in* `test/*` *is being steadily migrated to* `tests/*`
## Debugging
@@ -220,12 +224,10 @@ You don't need to worry about which `dbt-core` version your change will go into.
## Submitting a Pull Request
Code can be merged into the current development branch `main` by opening a pull request. If the proposal looks like it's on the right track, then a `dbt-core` maintainer will triage the PR and label it as `ready_for_review`. From this point, two code reviewers will be assigned with the aim of responding to any updates to the PR within about one week. They may suggest code revision for style or clarity, or request that you add unit or integration test(s). These are good things! We believe that, with a little bit of help, anyone can contribute high-quality code. Once merged, your contribution will be available for the next release of `dbt-core`.
Code can be merged into the current development branch `main` by opening a pull request. A `dbt-core` maintainer will review your PR. They may suggest code revision for style or clarity, or request that you add unit or integration test(s). These are good things! We believe that, with a little bit of help, anyone can contribute high-quality code.
Automated tests run via GitHub Actions. If you're a first-time contributor, all tests (including code checks and unit tests) will require a maintainer to approve. Changes in the `dbt-core` repository trigger integration tests against Postgres. dbt Labs also provides CI environments in which to test changes to other adapters, triggered by PRs in those adapters' repositories, as well as periodic maintenance checks of each adapter in concert with the latest `dbt-core` code changes.
Once all tests are passing and your PR has been approved, a `dbt-core` maintainer will merge your changes into the active development branch. And that's it! Happy developing :tada:
## Troubleshooting Tips
Sometimes, the contributor license agreement auto-check bot doesn't find a user's entry in its roster. If you need to force a rerun, add `@cla-bot check` in a comment on the pull request.

@@ -9,7 +9,7 @@ ENV DEBIAN_FRONTEND noninteractive
RUN apt-get update \
&& apt-get install -y --no-install-recommends \
software-properties-common gpg-agent \
software-properties-common \
&& add-apt-repository ppa:git-core/ppa -y \
&& apt-get dist-upgrade -y \
&& apt-get install -y --no-install-recommends \
@@ -30,9 +30,19 @@ RUN apt-get update \
unixodbc-dev \
&& add-apt-repository ppa:deadsnakes/ppa \
&& apt-get install -y \
python-is-python3 \
python-dev-is-python3 \
python \
python-dev \
python3-pip \
python3.6 \
python3.6-dev \
python3-pip \
python3.6-venv \
python3.7 \
python3.7-dev \
python3.7-venv \
python3.8 \
python3.8-dev \
python3.8-venv \
python3.9 \
python3.9-dev \
python3.9-venv \

@@ -6,26 +6,18 @@ ifeq ($(USE_DOCKER),true)
DOCKER_CMD := docker-compose run --rm test
endif
#
# To override CI_flags, create a file at this repo's root dir named `makefile.test.env`. Fill it
# with any ENV_VAR overrides required by your test environment, e.g.
# DBT_TEST_USER_1=user
# LOG_DIR="dir with a space in it"
#
# Warning: restrict each line to one variable only.
#
ifeq (./makefile.test.env,$(wildcard ./makefile.test.env))
include ./makefile.test.env
endif
LOGS_DIR := ./logs
# Optional flag to invoke tests using our CI env.
# But we always want these active for structured
# log testing.
CI_FLAGS =\
DBT_TEST_USER_1=$(if $(DBT_TEST_USER_1),$(DBT_TEST_USER_1),dbt_test_user_1)\
DBT_TEST_USER_2=$(if $(DBT_TEST_USER_2),$(DBT_TEST_USER_2),dbt_test_user_2)\
DBT_TEST_USER_3=$(if $(DBT_TEST_USER_3),$(DBT_TEST_USER_3),dbt_test_user_3)\
RUSTFLAGS=$(if $(RUSTFLAGS),$(RUSTFLAGS),"-D warnings")\
LOG_DIR=$(if $(LOG_DIR),$(LOG_DIR),./logs)\
DBT_LOG_FORMAT=$(if $(DBT_LOG_FORMAT),$(DBT_LOG_FORMAT),json)
DBT_TEST_USER_1=dbt_test_user_1\
DBT_TEST_USER_2=dbt_test_user_2\
DBT_TEST_USER_3=dbt_test_user_3\
RUSTFLAGS="-D warnings"\
LOG_DIR=./logs\
DBT_LOG_FORMAT=json
.PHONY: dev_req
dev_req: ## Installs dbt-* packages in develop mode along with only development dependencies.
@@ -37,16 +29,6 @@ dev: dev_req ## Installs dbt-* packages in develop mode along with development d
@\
pre-commit install
.PHONY: dev-uninstall
dev-uninstall: ## Uninstall all packages in venv except for build tools
@\
pip freeze | grep -v "^-e" | cut -d "@" -f1 | xargs pip uninstall -y; \
pip uninstall -y dbt-core
.PHONY: core_proto_types
core_proto_types: ## generates google protobuf python file from core_types.proto
protoc -I=./core/dbt/events --python_out=./core/dbt/events ./core/dbt/events/core_types.proto
.PHONY: mypy
mypy: .env ## Runs mypy against staged changes for static type checking.
@\
@@ -82,21 +64,21 @@ test: .env ## Runs unit tests with py and code checks against staged changes.
$(DOCKER_CMD) pre-commit run mypy-check --hook-stage manual | grep -v "INFO"
.PHONY: integration
integration: .env ## Runs core integration tests using postgres with py-integration
integration: .env ## Runs postgres integration tests with py-integration
@\
$(CI_FLAGS) $(DOCKER_CMD) tox -e py-integration -- -nauto
$(if $(USE_CI_FLAGS), $(CI_FLAGS)) $(DOCKER_CMD) tox -e py-integration -- -nauto
.PHONY: integration-fail-fast
integration-fail-fast: .env ## Runs core integration tests using postgres with py-integration in "fail fast" mode.
integration-fail-fast: .env ## Runs postgres integration tests with py-integration in "fail fast" mode.
@\
$(DOCKER_CMD) tox -e py-integration -- -x -nauto
.PHONY: interop
interop: clean
@\
mkdir $(LOG_DIR) && \
mkdir $(LOGS_DIR) && \
$(CI_FLAGS) $(DOCKER_CMD) tox -e py-integration -- -nauto && \
LOG_DIR=$(LOG_DIR) cargo run --manifest-path test/interop/log_parsing/Cargo.toml
LOG_DIR=$(LOGS_DIR) cargo run --manifest-path test/interop/log_parsing/Cargo.toml
.PHONY: setup-db
setup-db: ## Setup Postgres database with docker-compose for system testing.
@@ -144,7 +126,3 @@ help: ## Show this help message.
@echo
@echo 'options:'
@echo 'use USE_DOCKER=true to run target in a docker container'
.PHONY: json_schema
json_schema: ## Update generated JSON schema using code changes.
scripts/collect-artifact-schema.py --path schemas
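The `$(if $(VAR),$(VAR),default)` pattern in CI_FLAGS above is "use the override when set and non-empty, else a default". A hypothetical Python analogue, for readers less familiar with Make:

```python
# Analogue of Make's $(if $(VAR),$(VAR),default); variable names match CI_FLAGS.
import os

def ci_flag(name: str, default: str) -> str:
    # Empty-string overrides fall back to the default, as in Make's $(if ...).
    return os.environ.get(name) or default

print(ci_flag("DBT_TEST_USER_1", "dbt_test_user_1"))
```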

@@ -21,7 +21,7 @@ These select statements, or "models", form a dbt project. Models frequently buil
## Getting started
- [Install dbt Core](https://docs.getdbt.com/docs/get-started/installation) or explore the [dbt Cloud CLI](https://docs.getdbt.com/docs/cloud/cloud-cli-installation), a command-line interface powered by [dbt Cloud](https://docs.getdbt.com/docs/cloud/about-cloud/dbt-cloud-features) that enhances collaboration.
- [Install dbt](https://docs.getdbt.com/docs/installation)
- Read the [introduction](https://docs.getdbt.com/docs/introduction/) and [viewpoint](https://docs.getdbt.com/docs/about/viewpoint/)
## Join the dbt Community
@@ -31,7 +31,7 @@ These select statements, or "models", form a dbt project. Models frequently buil
## Reporting bugs and contributing code
- Want to report a bug or request a feature? Let us know and open [an issue](https://github.com/dbt-labs/dbt-core/issues/new/choose)
- Want to report a bug or request a feature? Let us know on [Slack](http://community.getdbt.com/), or open [an issue](https://github.com/dbt-labs/dbt-core/issues/new)
- Want to help us build dbt? Check out the [Contributing Guide](https://github.com/dbt-labs/dbt-core/blob/HEAD/CONTRIBUTING.md)
## Code of Conduct

@@ -1 +0,0 @@
[About dbt Core versions](https://docs.getdbt.com/docs/dbt-versions/core)

@@ -1,39 +0,0 @@
ignore:
- ".github"
- ".changes"
coverage:
status:
project:
default:
target: auto
threshold: 0.1% # Reduce noise by ignoring rounding errors in coverage drops
patch:
default:
target: auto
threshold: 80%
comment:
layout: "header, diff, flags, components" # show component info in the PR comment
component_management:
default_rules: # default rules that will be inherited by all components
statuses:
- type: project # in this case every component that doesn't have a status defined will have a project type one
target: auto
threshold: 0.1%
- type: patch
target: 80%
individual_components:
- component_id: unittests
name: "Unit Tests"
flag_regexes:
- "unit"
statuses:
- type: patch
target: 80%
threshold: 5%
- component_id: integrationtests
name: "Integration Tests"
flag_regexes:
- "integration"

@@ -1,3 +1,2 @@
recursive-include dbt/include *.py *.sql *.yml *.html *.md .gitkeep .gitignore
include dbt/py.typed
recursive-include dbt/task/docs *.html

@@ -22,6 +22,8 @@
### links.py
### logger.py
### main.py
### node_types.py


@@ -0,0 +1,30 @@
# Adapters README
The Adapters module is responsible for defining database connection methods, caching information from databases, and defining how relations are represented. It also provides the two major adapter implementations - base and sql.
# Directories
## `base`
Defines the base implementation Adapters can use to build out full functionality.
## `sql`
Defines a SQL implementation for adapters that inherits from the base implementation above and ships with premade methods and macros that can be overridden as needed per adapter. (This is the most common type of adapter.)
# Files
## `cache.py`
Caches information from the database.
## `factory.py`
Defines how we generate adapter objects.
## `protocol.py`
Defines interfaces for the various adapter objects, which helps mypy correctly resolve methods.
## `reference_keys.py`
Configures naming scheme for cache elements to be universal.
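A concrete adapter plugin ties these pieces together by exposing an `AdapterPlugin` (defined in `base/plugin.py`). Below is a minimal wiring sketch; `MyAdapter`, `MyCredentials`, and `PACKAGE_PATH` are illustrative names, not real dbt symbols:

```python
from dbt.adapters.base import AdapterPlugin

# MyAdapter subclasses BaseAdapter (or SQLAdapter, from `sql`);
# MyCredentials subclasses Credentials. Both are hypothetical.
Plugin = AdapterPlugin(
    adapter=MyAdapter,
    credentials=MyCredentials,
    include_path=PACKAGE_PATH,  # root of the plugin's bundled dbt project
)
```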


@@ -0,0 +1,7 @@
# N.B.
# This will add to the package's __path__ all subdirectories of directories on sys.path that are named after the package, which effectively combines both modules into a single namespace (dbt.adapters)
# The matching statement is in plugins/postgres/dbt/adapters/__init__.py
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
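A sketch of the matching file referenced in the comment above: the plugin side carries the same two statements, so both distributions contribute to the shared `dbt.adapters` package path.

```python
# plugins/postgres/dbt/adapters/__init__.py (mirror of the statement above)
from pkgutil import extend_path

__path__ = extend_path(__path__, __name__)
```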


@@ -0,0 +1,10 @@
## Base adapters
### impl.py
The class `BaseAdapter` in [base/impl.py](https://github.com/dbt-labs/dbt-core/blob/main/core/dbt/adapters/base/impl.py) is a (mostly) abstract class that adapter classes inherit from. The base class scaffolds out the methods that every adapter project usually should implement for smooth communication between dbt and the database.
Some target databases require more or fewer methods; it all depends on the warehouse's feature set.
Look into the class for function-level comments.


@@ -0,0 +1,14 @@
# these are all just exports, #noqa them so flake8 will be happy
# TODO: Should we still include this in the `adapters` namespace?
from dbt.contracts.connection import Credentials # noqa
from dbt.adapters.base.meta import available # noqa
from dbt.adapters.base.connections import BaseConnectionManager # noqa
from dbt.adapters.base.relation import ( # noqa
BaseRelation,
RelationType,
SchemaSearchMap,
)
from dbt.adapters.base.column import Column # noqa
from dbt.adapters.base.impl import AdapterConfig, BaseAdapter, PythonJobHelper # noqa
from dbt.adapters.base.plugin import AdapterPlugin # noqa


@@ -0,0 +1,160 @@
from dataclasses import dataclass
import re
from typing import Dict, ClassVar, Any, Optional
from dbt.exceptions import DbtRuntimeError
@dataclass
class Column:
TYPE_LABELS: ClassVar[Dict[str, str]] = {
"STRING": "TEXT",
"TIMESTAMP": "TIMESTAMP",
"FLOAT": "FLOAT",
"INTEGER": "INT",
"BOOLEAN": "BOOLEAN",
}
column: str
dtype: str
char_size: Optional[int] = None
numeric_precision: Optional[Any] = None
numeric_scale: Optional[Any] = None
@classmethod
def translate_type(cls, dtype: str) -> str:
return cls.TYPE_LABELS.get(dtype.upper(), dtype)
@classmethod
def create(cls, name, label_or_dtype: str) -> "Column":
column_type = cls.translate_type(label_or_dtype)
return cls(name, column_type)
@property
def name(self) -> str:
return self.column
@property
def quoted(self) -> str:
return '"{}"'.format(self.column)
@property
def data_type(self) -> str:
if self.is_string():
return self.string_type(self.string_size())
elif self.is_numeric():
return self.numeric_type(self.dtype, self.numeric_precision, self.numeric_scale)
else:
return self.dtype
def is_string(self) -> bool:
return self.dtype.lower() in ["text", "character varying", "character", "varchar"]
def is_number(self):
return any([self.is_integer(), self.is_numeric(), self.is_float()])
def is_float(self):
return self.dtype.lower() in [
# floats
"real",
"float4",
"float",
"double precision",
"float8",
]
def is_integer(self) -> bool:
return self.dtype.lower() in [
# real types
"smallint",
"integer",
"bigint",
"smallserial",
"serial",
"bigserial",
# aliases
"int2",
"int4",
"int8",
"serial2",
"serial4",
"serial8",
]
def is_numeric(self) -> bool:
return self.dtype.lower() in ["numeric", "decimal"]
def string_size(self) -> int:
if not self.is_string():
raise DbtRuntimeError("Called string_size() on non-string field!")
if self.dtype == "text" or self.char_size is None:
# char_size should never be None. Handle it reasonably just in case
return 256
else:
return int(self.char_size)
def can_expand_to(self, other_column: "Column") -> bool:
"""returns True if this column can be expanded to the size of the
other column"""
if not self.is_string() or not other_column.is_string():
return False
return other_column.string_size() > self.string_size()
def literal(self, value: Any) -> str:
return "{}::{}".format(value, self.data_type)
@classmethod
def string_type(cls, size: int) -> str:
return "character varying({})".format(size)
@classmethod
def numeric_type(cls, dtype: str, precision: Any, scale: Any) -> str:
# This could be decimal(...), numeric(...), number(...)
# Just use whatever was fed in here -- don't try to get too clever
if precision is None or scale is None:
return dtype
else:
return "{}({},{})".format(dtype, precision, scale)
def __repr__(self) -> str:
return "<Column {} ({})>".format(self.name, self.data_type)
@classmethod
def from_description(cls, name: str, raw_data_type: str) -> "Column":
match = re.match(r"([^(]+)(\([^)]+\))?", raw_data_type)
if match is None:
raise DbtRuntimeError(f'Could not interpret data type "{raw_data_type}"')
data_type, size_info = match.groups()
char_size = None
numeric_precision = None
numeric_scale = None
if size_info is not None:
# strip out the parentheses
size_info = size_info[1:-1]
parts = size_info.split(",")
if len(parts) == 1:
try:
char_size = int(parts[0])
except ValueError:
raise DbtRuntimeError(
f'Could not interpret data_type "{raw_data_type}": '
f'could not convert "{parts[0]}" to an integer'
)
elif len(parts) == 2:
try:
numeric_precision = int(parts[0])
except ValueError:
raise DbtRuntimeError(
f'Could not interpret data_type "{raw_data_type}": '
f'could not convert "{parts[0]}" to an integer'
)
try:
numeric_scale = int(parts[1])
except ValueError:
raise DbtRuntimeError(
f'Could not interpret data_type "{raw_data_type}": '
f'could not convert "{parts[1]}" to an integer'
)
return cls(name, data_type, char_size, numeric_precision, numeric_scale)
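A quick sketch of how this class behaves, exercising only the methods defined above:

```python
from dbt.adapters.base import Column

email = Column.from_description("email", "character varying(26)")
email.is_string()           # True
email.string_size()         # 26
email.data_type             # 'character varying(26)'

amount = Column.from_description("amount", "numeric(10,2)")
amount.data_type            # 'numeric(10,2)'

wider = Column.from_description("email", "character varying(64)")
email.can_expand_to(wider)  # True: both are strings and 64 > 26
```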


@@ -0,0 +1,414 @@
import abc
import os
from time import sleep
import sys
import traceback
# multiprocessing.RLock is a function returning this type
from multiprocessing.synchronize import RLock
from threading import get_ident
from typing import (
Any,
Dict,
Tuple,
Hashable,
Optional,
ContextManager,
List,
Type,
Union,
Iterable,
Callable,
)
import agate
import dbt.exceptions
from dbt.contracts.connection import (
Connection,
Identifier,
ConnectionState,
AdapterRequiredConfig,
LazyHandle,
AdapterResponse,
)
from dbt.contracts.graph.manifest import Manifest
from dbt.adapters.base.query_headers import (
MacroQueryStringSetter,
)
from dbt.events import AdapterLogger
from dbt.events.functions import fire_event
from dbt.events.types import (
NewConnection,
ConnectionReused,
ConnectionLeftOpenInCleanup,
ConnectionLeftOpen,
ConnectionClosedInCleanup,
ConnectionClosed,
Rollback,
RollbackFailed,
)
from dbt.events.contextvars import get_node_info
from dbt import flags
from dbt.utils import cast_to_str
SleepTime = Union[int, float] # As taken by time.sleep.
AdapterHandle = Any # Adapter connection handle objects can be any class.
class BaseConnectionManager(metaclass=abc.ABCMeta):
"""Methods to implement:
- exception_handler
- cancel_open
- open
- begin
- commit
- clear_transaction
- execute
You must also set the 'TYPE' class attribute with a class-unique constant
string.
"""
TYPE: str = NotImplemented
def __init__(self, profile: AdapterRequiredConfig):
self.profile = profile
self.thread_connections: Dict[Hashable, Connection] = {}
self.lock: RLock = flags.MP_CONTEXT.RLock()
self.query_header: Optional[MacroQueryStringSetter] = None
def set_query_header(self, manifest: Manifest) -> None:
self.query_header = MacroQueryStringSetter(self.profile, manifest)
@staticmethod
def get_thread_identifier() -> Hashable:
# note that get_ident() may be re-used, but we should never experience
# that within a single process
return (os.getpid(), get_ident())
def get_thread_connection(self) -> Connection:
key = self.get_thread_identifier()
with self.lock:
if key not in self.thread_connections:
raise dbt.exceptions.InvalidConnectionError(key, list(self.thread_connections))
return self.thread_connections[key]
def set_thread_connection(self, conn: Connection) -> None:
key = self.get_thread_identifier()
if key in self.thread_connections:
raise dbt.exceptions.DbtInternalError(
"In set_thread_connection, existing connection exists for {}"
)
self.thread_connections[key] = conn
def get_if_exists(self) -> Optional[Connection]:
key = self.get_thread_identifier()
with self.lock:
return self.thread_connections.get(key)
def clear_thread_connection(self) -> None:
key = self.get_thread_identifier()
with self.lock:
if key in self.thread_connections:
del self.thread_connections[key]
def clear_transaction(self) -> None:
"""Clear any existing transactions."""
conn = self.get_thread_connection()
if conn is not None:
if conn.transaction_open:
self._rollback(conn)
self.begin()
self.commit()
def rollback_if_open(self) -> None:
conn = self.get_if_exists()
if conn is not None and conn.handle and conn.transaction_open:
self._rollback(conn)
@abc.abstractmethod
def exception_handler(self, sql: str) -> ContextManager:
"""Create a context manager that handles exceptions caused by database
interactions.
:param str sql: The SQL string that the block inside the context
manager is executing.
:return: A context manager that handles exceptions raised by the
underlying database.
"""
raise dbt.exceptions.NotImplementedError(
"`exception_handler` is not implemented for this adapter!"
)
def set_connection_name(self, name: Optional[str] = None) -> Connection:
"""Called by 'acquire_connection' in BaseAdapter, which is called by
'connection_named', called by 'connection_for(node)'.
Creates a connection for this thread if one doesn't already
exist, and will rename an existing connection."""
conn_name: str = "master" if name is None else name
# Get a connection for this thread
conn = self.get_if_exists()
if conn and conn.name == conn_name and conn.state == "open":
# Found a connection and nothing to do, so just return it
return conn
if conn is None:
# Create a new connection
conn = Connection(
type=Identifier(self.TYPE),
name=conn_name,
state=ConnectionState.INIT,
transaction_open=False,
handle=None,
credentials=self.profile.credentials,
)
conn.handle = LazyHandle(self.open)
# Add the connection to thread_connections for this thread
self.set_thread_connection(conn)
fire_event(
NewConnection(conn_name=conn_name, conn_type=self.TYPE, node_info=get_node_info())
)
else: # existing connection either wasn't open or didn't have the right name
if conn.state != "open":
conn.handle = LazyHandle(self.open)
if conn.name != conn_name:
orig_conn_name: str = conn.name or ""
conn.name = conn_name
fire_event(ConnectionReused(orig_conn_name=orig_conn_name, conn_name=conn_name))
return conn
@classmethod
def retry_connection(
cls,
connection: Connection,
connect: Callable[[], AdapterHandle],
logger: AdapterLogger,
retryable_exceptions: Iterable[Type[Exception]],
retry_limit: int = 1,
retry_timeout: Union[Callable[[int], SleepTime], SleepTime] = 1,
_attempts: int = 0,
) -> Connection:
"""Given a Connection, set its handle by calling connect.
The calls to connect will be retried up to retry_limit times to deal with transient
connection errors. By default, one retry will be attempted if retryable_exceptions is set.
:param Connection connection: An instance of a Connection that needs a handle to be set,
usually when attempting to open it.
:param connect: A callable that returns the appropriate connection handle for a
given adapter. This callable will be retried retry_limit times if a subclass of any
Exception in retryable_exceptions is raised by connect.
:type connect: Callable[[], AdapterHandle]
:param AdapterLogger logger: A logger to emit messages on retry attempts or errors. When
handling expected errors, we call debug, and call warning on unexpected errors or when
all retry attempts have been exhausted.
:param retryable_exceptions: An iterable of exception classes that if raised by
connect should trigger a retry.
:type retryable_exceptions: Iterable[Type[Exception]]
:param int retry_limit: How many times to retry the call to connect. If this limit
is exceeded before a successful call, a FailedToConnectError will be raised.
Must be non-negative.
:param retry_timeout: Time to wait between attempts to connect. Can also take a
Callable that takes the number of attempts so far, beginning at 0, and returns an int
or float to be passed to time.sleep.
:type retry_timeout: Union[Callable[[int], SleepTime], SleepTime] = 1
:param int _attempts: Parameter used to keep track of the number of attempts in calling the
connect function across recursive calls. Passed as an argument to retry_timeout if it
is a Callable. This parameter should not be set by the initial caller.
:raises dbt.exceptions.FailedToConnectError: Upon exhausting all retry attempts without
successfully acquiring a handle.
:return: The given connection with its appropriate state and handle attributes set
depending on whether we successfully acquired a handle or not.
"""
timeout = retry_timeout(_attempts) if callable(retry_timeout) else retry_timeout
if timeout < 0:
raise dbt.exceptions.FailedToConnectError(
"retry_timeout cannot be negative or return a negative time."
)
if retry_limit < 0 or retry_limit > sys.getrecursionlimit():
# This guard is not perfect; other code may raise the recursion limit (e.g. built-ins).
connection.handle = None
connection.state = ConnectionState.FAIL
raise dbt.exceptions.FailedToConnectError("retry_limit cannot be negative")
try:
connection.handle = connect()
connection.state = ConnectionState.OPEN
return connection
except tuple(retryable_exceptions) as e:
if retry_limit <= 0:
connection.handle = None
connection.state = ConnectionState.FAIL
raise dbt.exceptions.FailedToConnectError(str(e))
logger.debug(
f"Got a retryable error when attempting to open a {cls.TYPE} connection.\n"
f"{retry_limit} attempts remaining. Retrying in {timeout} seconds.\n"
f"Error:\n{e}"
)
sleep(timeout)
return cls.retry_connection(
connection=connection,
connect=connect,
logger=logger,
retry_limit=retry_limit - 1,
retry_timeout=retry_timeout,
retryable_exceptions=retryable_exceptions,
_attempts=_attempts + 1,
)
except Exception as e:
connection.handle = None
connection.state = ConnectionState.FAIL
raise dbt.exceptions.FailedToConnectError(str(e))
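# Example usage (a sketch, not taken from an adapter in this repo): an
# adapter's `open` might delegate to retry_connection with an exponential
# backoff. `some_driver` stands in for a real DB-API module:
#
#     connection = cls.retry_connection(
#         connection,
#         connect=lambda: some_driver.connect(host=host, port=port),  # hypothetical
#         logger=logger,
#         retryable_exceptions=(some_driver.OperationalError,),
#         retry_limit=5,
#         retry_timeout=lambda attempt: 2**attempt,  # 1s, 2s, 4s, ... between tries
#     )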
@abc.abstractmethod
def cancel_open(self) -> Optional[List[str]]:
"""Cancel all open connections on the adapter. (passable)"""
raise dbt.exceptions.NotImplementedError(
"`cancel_open` is not implemented for this adapter!"
)
@classmethod
@abc.abstractmethod
def open(cls, connection: Connection) -> Connection:
"""Open the given connection on the adapter and return it.
This may mutate the given connection (in particular, its state and its
handle).
This should be thread-safe, or hold the lock if necessary. The given
connection should not be in either in_use or available.
"""
raise dbt.exceptions.NotImplementedError("`open` is not implemented for this adapter!")
def release(self) -> None:
with self.lock:
conn = self.get_if_exists()
if conn is None:
return
try:
# always close the connection. close() calls _rollback() if there
# is an open transaction
self.close(conn)
except Exception:
# if rollback or close failed, remove our busted connection
self.clear_thread_connection()
raise
def cleanup_all(self) -> None:
with self.lock:
for connection in self.thread_connections.values():
if connection.state not in {"closed", "init"}:
fire_event(ConnectionLeftOpenInCleanup(conn_name=cast_to_str(connection.name)))
else:
fire_event(ConnectionClosedInCleanup(conn_name=cast_to_str(connection.name)))
self.close(connection)
# garbage collect these connections
self.thread_connections.clear()
@abc.abstractmethod
def begin(self) -> None:
"""Begin a transaction. (passable)"""
raise dbt.exceptions.NotImplementedError("`begin` is not implemented for this adapter!")
@abc.abstractmethod
def commit(self) -> None:
"""Commit a transaction. (passable)"""
raise dbt.exceptions.NotImplementedError("`commit` is not implemented for this adapter!")
@classmethod
def _rollback_handle(cls, connection: Connection) -> None:
"""Perform the actual rollback operation."""
try:
connection.handle.rollback()
except Exception:
fire_event(
RollbackFailed(
conn_name=cast_to_str(connection.name),
exc_info=traceback.format_exc(),
node_info=get_node_info(),
)
)
@classmethod
def _close_handle(cls, connection: Connection) -> None:
"""Perform the actual close operation."""
# On windows, sometimes connection handles don't have a close() attr.
if hasattr(connection.handle, "close"):
fire_event(
ConnectionClosed(conn_name=cast_to_str(connection.name), node_info=get_node_info())
)
connection.handle.close()
else:
fire_event(
ConnectionLeftOpen(
conn_name=cast_to_str(connection.name), node_info=get_node_info()
)
)
@classmethod
def _rollback(cls, connection: Connection) -> None:
"""Roll back the given connection."""
if connection.transaction_open is False:
raise dbt.exceptions.DbtInternalError(
f"Tried to rollback transaction on connection "
f'"{connection.name}", but it does not have one open!'
)
fire_event(Rollback(conn_name=cast_to_str(connection.name), node_info=get_node_info()))
cls._rollback_handle(connection)
connection.transaction_open = False
@classmethod
def close(cls, connection: Connection) -> Connection:
# if the connection is in closed or init, there's nothing to do
if connection.state in {ConnectionState.CLOSED, ConnectionState.INIT}:
return connection
if connection.transaction_open and connection.handle:
fire_event(Rollback(conn_name=cast_to_str(connection.name), node_info=get_node_info()))
cls._rollback_handle(connection)
connection.transaction_open = False
cls._close_handle(connection)
connection.state = ConnectionState.CLOSED
return connection
def commit_if_has_connection(self) -> None:
"""If the named connection exists, commit the current transaction."""
connection = self.get_if_exists()
if connection:
self.commit()
def _add_query_comment(self, sql: str) -> str:
if self.query_header is None:
return sql
return self.query_header.add(sql)
@abc.abstractmethod
def execute(
self, sql: str, auto_begin: bool = False, fetch: bool = False
) -> Tuple[AdapterResponse, agate.Table]:
"""Execute the given SQL.
:param str sql: The sql to execute.
:param bool auto_begin: If set, and dbt is not currently inside a
transaction, automatically begin one.
:param bool fetch: If set, fetch results.
:return: A tuple of the query status and results (empty if fetch=False).
:rtype: Tuple[AdapterResponse, agate.Table]
"""
raise dbt.exceptions.NotImplementedError("`execute` is not implemented for this adapter!")

File diff suppressed because it is too large


@@ -0,0 +1,128 @@
import abc
from functools import wraps
from typing import Callable, Optional, Any, FrozenSet, Dict, Set
from dbt.deprecations import warn, renamed_method
Decorator = Callable[[Any], Callable]
class _Available:
def __call__(self, func: Callable) -> Callable:
func._is_available_ = True # type: ignore
return func
def parse(self, parse_replacement: Callable) -> Decorator:
"""A decorator factory to indicate that a method on the adapter will be
exposed to the database wrapper, and will be stubbed out at parse time
with the given function.
@available.parse()
def my_method(self, a, b):
if something:
return None
return big_expensive_db_query()
@available.parse(lambda *args, **kwargs: {})
def my_other_method(self, a, b):
x = {}
x.update(big_expensive_db_query())
return x
"""
def inner(func):
func._parse_replacement_ = parse_replacement
return self(func)
return inner
def deprecated(
self, supported_name: str, parse_replacement: Optional[Callable] = None
) -> Decorator:
"""A decorator that marks a function as available, but also prints a
deprecation warning. Use like
@available.deprecated('my_new_method')
def my_old_method(self, arg):
args = compatibility_shim(arg)
return self.my_new_method(*args)
@available.deprecated('my_new_slow_method', lambda *a, **k: (0, ''))
def my_old_slow_method(self, arg):
args = compatibility_shim(arg)
return self.my_new_slow_method(*args)
To make `adapter.my_old_method` available but also print out a warning
on use directing users to `my_new_method`.
The optional parse_replacement, if provided, will provide a parse-time
replacement for the actual method (see `available.parse`).
"""
def wrapper(func):
func_name = func.__name__
renamed_method(func_name, supported_name)
@wraps(func)
def inner(*args, **kwargs):
warn("adapter:{}".format(func_name))
return func(*args, **kwargs)
if parse_replacement:
available_function = self.parse(parse_replacement)
else:
available_function = self
return available_function(inner)
return wrapper
def parse_none(self, func: Callable) -> Callable:
wrapper = self.parse(lambda *a, **k: None)
return wrapper(func)
def parse_list(self, func: Callable) -> Callable:
wrapper = self.parse(lambda *a, **k: [])
return wrapper(func)
available = _Available()
class AdapterMeta(abc.ABCMeta):
_available_: FrozenSet[str]
_parse_replacements_: Dict[str, Callable]
def __new__(mcls, name, bases, namespace, **kwargs):
# mypy does not like the `**kwargs`. But `ABCMeta` itself takes
# `**kwargs` in its argspec here (and passes them to `type.__new__`).
# I'm not sure there is any benefit to it after poking around a bit,
# but having it doesn't hurt on the python side (and omitting it could
# hurt for obscure metaclass reasons, for all I know)
cls = abc.ABCMeta.__new__(mcls, name, bases, namespace, **kwargs) # type: ignore
# this is very much inspired by ABCMeta's own implementation
# dict mapping the method name to whether the model name should be
# injected into the arguments. All methods in here are exposed to the
# context.
available: Set[str] = set()
replacements: Dict[str, Any] = {}
# collect base class data first
for base in bases:
available.update(getattr(base, "_available_", set()))
replacements.update(getattr(base, "_parse_replacements_", set()))
# override with local data if it exists
for name, value in namespace.items():
if getattr(value, "_is_available_", False):
available.add(name)
parse_replacement = getattr(value, "_parse_replacement_", None)
if parse_replacement is not None:
replacements[name] = parse_replacement
cls._available_ = frozenset(available)
# should this be a namedtuple so it will be immutable like _available_?
cls._parse_replacements_ = replacements
return cls
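A sketch of how `available` and `AdapterMeta` cooperate (the class and method names here are illustrative, not methods defined in this file):

```python
from dbt.adapters.base.meta import AdapterMeta, available


class DemoAdapter(metaclass=AdapterMeta):
    @available
    def drop_relation(self, relation):
        ...

    @available.parse_none
    def get_columns_in_relation(self, relation):
        ...

    def _helper(self):  # not exposed to the jinja context
        ...


print(DemoAdapter._available_)
# frozenset({'drop_relation', 'get_columns_in_relation'})  (order may vary)
print(DemoAdapter._parse_replacements_["get_columns_in_relation"]())
# None -- the parse-time stub runs instead of touching the database
```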


@@ -0,0 +1,42 @@
from typing import List, Optional, Type
from dbt.adapters.base import Credentials
from dbt.exceptions import CompilationError
from dbt.adapters.protocol import AdapterProtocol
def project_name_from_path(include_path: str) -> str:
# avoid an import cycle
from dbt.config.project import Project
partial = Project.partial_load(include_path)
if partial.project_name is None:
raise CompilationError(f"Invalid project at {include_path}: name not set!")
return partial.project_name
class AdapterPlugin:
"""Defines the basic requirements for a dbt adapter plugin.
:param include_path: The path to this adapter plugin's root
:param dependencies: A list of adapter names that this adapter depends
upon.
"""
def __init__(
self,
adapter: Type[AdapterProtocol],
credentials: Type[Credentials],
include_path: str,
dependencies: Optional[List[str]] = None,
):
self.adapter: Type[AdapterProtocol] = adapter
self.credentials: Type[Credentials] = credentials
self.include_path: str = include_path
self.project_name: str = project_name_from_path(include_path)
self.dependencies: List[str]
if dependencies is None:
self.dependencies = []
else:
self.dependencies = dependencies


@@ -0,0 +1,102 @@
from threading import local
from typing import Optional, Callable, Dict, Any
from dbt.clients.jinja import QueryStringGenerator
from dbt.context.manifest import generate_query_header_context
from dbt.contracts.connection import AdapterRequiredConfig, QueryComment
from dbt.contracts.graph.nodes import ResultNode
from dbt.contracts.graph.manifest import Manifest
from dbt.exceptions import DbtRuntimeError
class NodeWrapper:
def __init__(self, node):
self._inner_node = node
def __getattr__(self, name):
return getattr(self._inner_node, name, "")
class _QueryComment(local):
"""A thread-local class storing thread-specific state information for
connection management, namely:
- the current thread's query comment.
- a source_name indicating what set the current thread's query comment
"""
def __init__(self, initial):
self.query_comment: Optional[str] = initial
self.append = False
def add(self, sql: str) -> str:
if not self.query_comment:
return sql
if self.append:
# replace last ';' with '<comment>;'
sql = sql.rstrip()
if sql[-1] == ";":
sql = sql[:-1]
return "{}\n/* {} */;".format(sql, self.query_comment.strip())
return "{}\n/* {} */".format(sql, self.query_comment.strip())
return "/* {} */\n{}".format(self.query_comment.strip(), sql)
def set(self, comment: Optional[str], append: bool):
if isinstance(comment, str) and "*/" in comment:
# tell the user "no" so they don't hurt themselves by writing
# garbage
raise DbtRuntimeError(f'query comment contains illegal value "*/": {comment}')
self.query_comment = comment
self.append = append
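# A short illustration of the prepend/append behavior above (a sketch):
#
#     qc = _QueryComment(None)
#     qc.set("executed by dbt", append=False)
#     qc.add("select 1")    # -> '/* executed by dbt */\nselect 1'
#
#     qc.set("executed by dbt", append=True)
#     qc.add("select 1;")   # -> 'select 1\n/* executed by dbt */;'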
QueryStringFunc = Callable[[str, Optional[NodeWrapper]], str]
class MacroQueryStringSetter:
def __init__(self, config: AdapterRequiredConfig, manifest: Manifest):
self.manifest = manifest
self.config = config
comment_macro = self._get_comment_macro()
self.generator: QueryStringFunc = lambda name, model: ""
# if the comment value was None or the empty string, just skip it
if comment_macro:
assert isinstance(comment_macro, str)
macro = "\n".join(
(
"{%- macro query_comment_macro(connection_name, node) -%}",
comment_macro,
"{% endmacro %}",
)
)
ctx = self._get_context()
self.generator = QueryStringGenerator(macro, ctx)
self.comment = _QueryComment(None)
self.reset()
def _get_comment_macro(self) -> Optional[str]:
return self.config.query_comment.comment
def _get_context(self) -> Dict[str, Any]:
return generate_query_header_context(self.config, self.manifest)
def add(self, sql: str) -> str:
return self.comment.add(sql)
def reset(self):
self.set("master", None)
def set(self, name: str, node: Optional[ResultNode]):
wrapped: Optional[NodeWrapper] = None
if node is not None:
wrapped = NodeWrapper(node)
comment_str = self.generator(name, wrapped)
append = False
if isinstance(self.config.query_comment, QueryComment):
append = self.config.query_comment.append
self.comment.set(comment_str, append)


@@ -0,0 +1,457 @@
from collections.abc import Hashable
from dataclasses import dataclass, field
from typing import Optional, TypeVar, Any, Type, Dict, Iterator, Tuple, Set
from dbt.contracts.graph.nodes import SourceDefinition, ManifestNode, ResultNode, ParsedNode
from dbt.contracts.relation import (
RelationType,
ComponentName,
HasQuoting,
FakeAPIObject,
Policy,
Path,
)
from dbt.exceptions import (
ApproximateMatchError,
DbtInternalError,
MultipleDatabasesNotAllowedError,
)
from dbt.node_types import NodeType
from dbt.utils import filter_null_values, deep_merge, classproperty
import dbt.exceptions
Self = TypeVar("Self", bound="BaseRelation")
@dataclass(frozen=True, eq=False, repr=False)
class BaseRelation(FakeAPIObject, Hashable):
path: Path
type: Optional[RelationType] = None
quote_character: str = '"'
# Python 3.11 requires that these use default_factory instead of simple default
# ValueError: mutable default <class 'dbt.contracts.relation.Policy'> for field include_policy is not allowed: use default_factory
include_policy: Policy = field(default_factory=lambda: Policy())
quote_policy: Policy = field(default_factory=lambda: Policy())
dbt_created: bool = False
def _is_exactish_match(self, field: ComponentName, value: str) -> bool:
if self.dbt_created and self.quote_policy.get_part(field) is False:
return self.path.get_lowered_part(field) == value.lower()
else:
return self.path.get_part(field) == value
@classmethod
def _get_field_named(cls, field_name):
for f, _ in cls._get_fields():
if f.name == field_name:
return f
# this should be unreachable
raise ValueError(f"BaseRelation has no {field_name} field!")
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.to_dict(omit_none=True) == other.to_dict(omit_none=True)
@classmethod
def get_default_quote_policy(cls) -> Policy:
return cls._get_field_named("quote_policy").default_factory()
@classmethod
def get_default_include_policy(cls) -> Policy:
return cls._get_field_named("include_policy").default_factory()
def get(self, key, default=None):
"""Override `.get` to return a metadata object so we don't break
dbt_utils.
"""
if key == "metadata":
return {"type": self.__class__.__name__}
return super().get(key, default)
def matches(
self,
database: Optional[str] = None,
schema: Optional[str] = None,
identifier: Optional[str] = None,
) -> bool:
search = filter_null_values(
{
ComponentName.Database: database,
ComponentName.Schema: schema,
ComponentName.Identifier: identifier,
}
)
if not search:
# nothing was passed in
raise dbt.exceptions.DbtRuntimeError(
"Tried to match relation, but no search path was passed!"
)
exact_match = True
approximate_match = True
for k, v in search.items():
if not self._is_exactish_match(k, v):
exact_match = False
if str(self.path.get_lowered_part(k)).strip(self.quote_character) != v.lower().strip(
self.quote_character
):
approximate_match = False # type: ignore[union-attr]
if approximate_match and not exact_match:
target = self.create(database=database, schema=schema, identifier=identifier)
raise ApproximateMatchError(target, self)
return exact_match
def replace_path(self, **kwargs):
return self.replace(path=self.path.replace(**kwargs))
def quote(
self: Self,
database: Optional[bool] = None,
schema: Optional[bool] = None,
identifier: Optional[bool] = None,
) -> Self:
policy = filter_null_values(
{
ComponentName.Database: database,
ComponentName.Schema: schema,
ComponentName.Identifier: identifier,
}
)
new_quote_policy = self.quote_policy.replace_dict(policy)
return self.replace(quote_policy=new_quote_policy)
def include(
self: Self,
database: Optional[bool] = None,
schema: Optional[bool] = None,
identifier: Optional[bool] = None,
) -> Self:
policy = filter_null_values(
{
ComponentName.Database: database,
ComponentName.Schema: schema,
ComponentName.Identifier: identifier,
}
)
new_include_policy = self.include_policy.replace_dict(policy)
return self.replace(include_policy=new_include_policy)
def information_schema(self, view_name=None) -> "InformationSchema":
# some of our data comes from jinja, where things can be `Undefined`.
if not isinstance(view_name, str):
view_name = None
# Kick the user-supplied schema out of the information schema relation
# Instead address this as <database>.information_schema by default
info_schema = InformationSchema.from_relation(self, view_name)
return info_schema.incorporate(path={"schema": None})
def information_schema_only(self) -> "InformationSchema":
return self.information_schema()
def without_identifier(self) -> "BaseRelation":
"""Return a form of this relation that only has the database and schema
set to included. To get the appropriately-quoted form the schema out of
the result (for use as part of a query), use `.render()`. To get the
raw database or schema name, use `.database` or `.schema`.
The hash of the returned object is the result of render().
"""
return self.include(identifier=False).replace_path(identifier=None)
def _render_iterator(self) -> Iterator[Tuple[Optional[ComponentName], Optional[str]]]:
for key in ComponentName:
path_part: Optional[str] = None
if self.include_policy.get_part(key):
path_part = self.path.get_part(key)
if path_part is not None and self.quote_policy.get_part(key):
path_part = self.quoted(path_part)
yield key, path_part
def render(self) -> str:
# if there is nothing set, this will return the empty string.
return ".".join(part for _, part in self._render_iterator() if part is not None)
def quoted(self, identifier):
return "{quote_char}{identifier}{quote_char}".format(
quote_char=self.quote_character,
identifier=identifier,
)
@classmethod
def create_from_source(cls: Type[Self], source: SourceDefinition, **kwargs: Any) -> Self:
source_quoting = source.quoting.to_dict(omit_none=True)
source_quoting.pop("column", None)
quote_policy = deep_merge(
cls.get_default_quote_policy().to_dict(omit_none=True),
source_quoting,
kwargs.get("quote_policy", {}),
)
return cls.create(
database=source.database,
schema=source.schema,
identifier=source.identifier,
quote_policy=quote_policy,
**kwargs,
)
@staticmethod
def add_ephemeral_prefix(name: str):
return f"__dbt__cte__{name}"
@classmethod
def create_ephemeral_from_node(
cls: Type[Self],
config: HasQuoting,
node: ManifestNode,
) -> Self:
# Note that ephemeral models are based on the name.
identifier = cls.add_ephemeral_prefix(node.name)
return cls.create(
type=cls.CTE,
identifier=identifier,
).quote(identifier=False)
@classmethod
def create_from_node(
cls: Type[Self],
config: HasQuoting,
node: ManifestNode,
quote_policy: Optional[Dict[str, bool]] = None,
**kwargs: Any,
) -> Self:
if quote_policy is None:
quote_policy = {}
quote_policy = dbt.utils.merge(config.quoting, quote_policy)
return cls.create(
database=node.database,
schema=node.schema,
identifier=node.alias,
quote_policy=quote_policy,
**kwargs,
)
@classmethod
def create_from(
cls: Type[Self],
config: HasQuoting,
node: ResultNode,
**kwargs: Any,
) -> Self:
if node.resource_type == NodeType.Source:
if not isinstance(node, SourceDefinition):
raise DbtInternalError(
"type mismatch, expected SourceDefinition but got {}".format(type(node))
)
return cls.create_from_source(node, **kwargs)
else:
# Can't use ManifestNode here because of parameterized generics
if not isinstance(node, (ParsedNode)):
raise DbtInternalError(
f"type mismatch, expected ManifestNode but got {type(node)}"
)
return cls.create_from_node(config, node, **kwargs)
@classmethod
def create(
cls: Type[Self],
database: Optional[str] = None,
schema: Optional[str] = None,
identifier: Optional[str] = None,
type: Optional[RelationType] = None,
**kwargs,
) -> Self:
kwargs.update(
{
"path": {
"database": database,
"schema": schema,
"identifier": identifier,
},
"type": type,
}
)
return cls.from_dict(kwargs)
def __repr__(self) -> str:
return "<{} {}>".format(self.__class__.__name__, self.render())
def __hash__(self) -> int:
return hash(self.render())
def __str__(self) -> str:
return self.render()
@property
def database(self) -> Optional[str]:
return self.path.database
@property
def schema(self) -> Optional[str]:
return self.path.schema
@property
def identifier(self) -> Optional[str]:
return self.path.identifier
@property
def table(self) -> Optional[str]:
return self.path.identifier
# Here for compatibility with old Relation interface
@property
def name(self) -> Optional[str]:
return self.identifier
@property
def is_table(self) -> bool:
return self.type == RelationType.Table
@property
def is_cte(self) -> bool:
return self.type == RelationType.CTE
@property
def is_view(self) -> bool:
return self.type == RelationType.View
@classproperty
def Table(cls) -> str:
return str(RelationType.Table)
@classproperty
def CTE(cls) -> str:
return str(RelationType.CTE)
@classproperty
def View(cls) -> str:
return str(RelationType.View)
@classproperty
def External(cls) -> str:
return str(RelationType.External)
@classproperty
def get_relation_type(cls) -> Type[RelationType]:
return RelationType
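# Illustration (a sketch of the API above; output shown assuming the default
# policies, which include and quote all three path components):
#
#     rel = BaseRelation.create(database="db", schema="analytics", identifier="users")
#     rel.render()                          # '"db"."analytics"."users"'
#     rel.quote(identifier=False).render()  # '"db"."analytics".users'
#     rel.include(database=False).render()  # '"analytics"."users"'
#     rel.without_identifier().render()     # '"db"."analytics"'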
Info = TypeVar("Info", bound="InformationSchema")
@dataclass(frozen=True, eq=False, repr=False)
class InformationSchema(BaseRelation):
information_schema_view: Optional[str] = None
def __post_init__(self):
if not isinstance(self.information_schema_view, (type(None), str)):
raise dbt.exceptions.CompilationError(
"Got an invalid name: {}".format(self.information_schema_view)
)
@classmethod
def get_path(cls, relation: BaseRelation, information_schema_view: Optional[str]) -> Path:
return Path(
database=relation.database,
schema=relation.schema,
identifier="INFORMATION_SCHEMA",
)
@classmethod
def get_include_policy(
cls,
relation,
information_schema_view: Optional[str],
) -> Policy:
return relation.include_policy.replace(
database=relation.database is not None,
schema=False,
identifier=True,
)
@classmethod
def get_quote_policy(
cls,
relation,
information_schema_view: Optional[str],
) -> Policy:
return relation.quote_policy.replace(
identifier=False,
)
@classmethod
def from_relation(
cls: Type[Info],
relation: BaseRelation,
information_schema_view: Optional[str],
) -> Info:
include_policy = cls.get_include_policy(relation, information_schema_view)
quote_policy = cls.get_quote_policy(relation, information_schema_view)
path = cls.get_path(relation, information_schema_view)
return cls(
type=RelationType.View,
path=path,
include_policy=include_policy,
quote_policy=quote_policy,
information_schema_view=information_schema_view,
)
def _render_iterator(self):
for k, v in super()._render_iterator():
yield k, v
yield None, self.information_schema_view
class SchemaSearchMap(Dict[InformationSchema, Set[Optional[str]]]):
"""A utility class to keep track of what information_schema tables to
search for what schemas. The schema values are all lowercased to avoid
duplication.
"""
def add(self, relation: BaseRelation):
key = relation.information_schema_only()
if key not in self:
self[key] = set()
schema: Optional[str] = None
if relation.schema is not None:
schema = relation.schema.lower()
self[key].add(schema)
def search(self) -> Iterator[Tuple[InformationSchema, Optional[str]]]:
for information_schema_name, schemas in self.items():
for schema in schemas:
yield information_schema_name, schema
def flatten(self, allow_multiple_databases: bool = False):
new = self.__class__()
# make sure we don't have multiple databases if allow_multiple_databases is set to False
if not allow_multiple_databases:
seen = {r.database.lower() for r in self if r.database}
if len(seen) > 1:
raise MultipleDatabasesNotAllowedError(seen)
for information_schema_name, schema in self.search():
path = {"database": information_schema_name.database, "schema": schema}
new.add(
information_schema_name.incorporate(
path=path,
quote_policy={"database": False},
include_policy={"database": False},
)
)
return new
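A small sketch of the search-map workflow (the relation values are illustrative):

```python
from dbt.adapters.base.relation import BaseRelation, SchemaSearchMap

users = BaseRelation.create(database="db", schema="analytics", identifier="users")
events = BaseRelation.create(database="db", schema="STAGING", identifier="events")

lookup = SchemaSearchMap()
lookup.add(users)
lookup.add(events)  # schema names are lowercased on insert

for info_schema, schema in lookup.search():
    print(info_schema.render(), schema)
# one pair per distinct schema, roughly:
#   "db".INFORMATION_SCHEMA analytics
#   "db".INFORMATION_SCHEMA staging
```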

core/dbt/adapters/cache.py (new file, 522 lines)

@@ -0,0 +1,522 @@
import threading
from copy import deepcopy
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple
from dbt.adapters.reference_keys import (
_make_ref_key,
_make_ref_key_msg,
_make_msg_from_ref_key,
_ReferenceKey,
)
from dbt.exceptions import (
DependentLinkNotCachedError,
NewNameAlreadyInCacheError,
NoneRelationFoundError,
ReferencedLinkNotCachedError,
TruncatedModelNameCausedCollisionError,
)
from dbt.events.functions import fire_event, fire_event_if
from dbt.events.types import CacheAction, CacheDumpGraph
import dbt.flags as flags
from dbt.utils import lowercase
def dot_separated(key: _ReferenceKey) -> str:
"""Return the key in dot-separated string form.
:param _ReferenceKey key: The key to stringify.
"""
return ".".join(map(str, key))
class _CachedRelation:
"""Nothing about _CachedRelation is guaranteed to be thread-safe!
:attr str schema: The schema of this relation.
:attr str identifier: The identifier of this relation.
:attr Dict[_ReferenceKey, _CachedRelation] referenced_by: The relations
that refer to this relation.
:attr BaseRelation inner: The underlying dbt relation.
"""
def __init__(self, inner):
self.referenced_by = {}
self.inner = inner
def __str__(self) -> str:
return ("_CachedRelation(database={}, schema={}, identifier={}, inner={})").format(
self.database, self.schema, self.identifier, self.inner
)
@property
def database(self) -> Optional[str]:
return lowercase(self.inner.database)
@property
def schema(self) -> Optional[str]:
return lowercase(self.inner.schema)
@property
def identifier(self) -> Optional[str]:
return lowercase(self.inner.identifier)
def __copy__(self):
new = self.__class__(self.inner)
new.__dict__.update(self.__dict__)
return new
def __deepcopy__(self, memo):
new = self.__class__(self.inner.incorporate())
new.__dict__.update(self.__dict__)
new.referenced_by = deepcopy(self.referenced_by, memo)
return new
def is_referenced_by(self, key):
return key in self.referenced_by
def key(self):
"""Get the _ReferenceKey that represents this relation
:return _ReferenceKey: A key for this relation.
"""
return _make_ref_key(self)
def add_reference(self, referrer: "_CachedRelation"):
"""Add a reference from referrer to self, indicating that if this node
were drop...cascaded, the referrer would be dropped as well.
:param _CachedRelation referrer: The node that refers to this node.
"""
self.referenced_by[referrer.key()] = referrer
def collect_consequences(self):
"""Recursively collect a set of _ReferenceKeys that would
consequentially get dropped if this were dropped via
"drop ... cascade".
:return Set[_ReferenceKey]: All the relations that would be dropped
"""
consequences = {self.key()}
for relation in self.referenced_by.values():
consequences.update(relation.collect_consequences())
return consequences
def release_references(self, keys):
"""Non-recursively indicate that an iterable of _ReferenceKey no longer
exist. Unknown keys are ignored.
:param Iterable[_ReferenceKey] keys: The keys to drop.
"""
keys = set(self.referenced_by) & set(keys)
for key in keys:
self.referenced_by.pop(key)
def rename(self, new_relation):
"""Rename this cached relation to new_relation.
Note that this will change the output of key(), all refs must be
updated!
:param _CachedRelation new_relation: The new name to apply to the
relation
"""
# Relations store this stuff inside their `path` dict. But they
# also store a table_name, and usually use it in their .render(),
# so we need to update that as well. It doesn't appear that
# table_name is ever anything but the identifier (via .create())
self.inner = self.inner.incorporate(
path={
"database": new_relation.inner.database,
"schema": new_relation.inner.schema,
"identifier": new_relation.inner.identifier,
},
)
def rename_key(self, old_key, new_key):
"""Rename a reference that may or may not exist. Only handles the
reference itself, so this is the other half of what `rename` does.
If old_key is not in referenced_by, this is a no-op.
:param _ReferenceKey old_key: The old key to be renamed.
:param _ReferenceKey new_key: The new key to rename to.
:raises InternalError: If the new key already exists.
"""
if new_key in self.referenced_by:
raise NewNameAlreadyInCacheError(old_key, new_key)
if old_key not in self.referenced_by:
return
value = self.referenced_by.pop(old_key)
self.referenced_by[new_key] = value
def dump_graph_entry(self):
"""Return a key/value pair representing this key and its referents.
:return List[str]: The dot-separated form of all referent keys.
"""
return [dot_separated(r) for r in self.referenced_by]
class RelationsCache:
"""A cache of the relations known to dbt. Keeps track of relationships
declared between tables and handles renames/drops as a real database would.
:attr Dict[_ReferenceKey, _CachedRelation] relations: The known relations.
:attr threading.RLock lock: The lock around relations, held during updates.
The adapters also hold this lock while filling the cache.
:attr Set[str] schemas: The set of known/cached schemas, all lowercased.
"""
def __init__(self) -> None:
self.relations: Dict[_ReferenceKey, _CachedRelation] = {}
self.lock = threading.RLock()
self.schemas: Set[Tuple[Optional[str], Optional[str]]] = set()
def add_schema(
self,
database: Optional[str],
schema: Optional[str],
) -> None:
"""Add a schema to the set of known schemas (case-insensitive)
:param database: The database name to add.
:param schema: The schema name to add.
"""
self.schemas.add((lowercase(database), lowercase(schema)))
def drop_schema(
self,
database: Optional[str],
schema: Optional[str],
) -> None:
"""Drop the given schema and remove it from the set of known schemas.
Then remove all its contents (and their dependents, etc) as well.
"""
key = (lowercase(database), lowercase(schema))
if key not in self.schemas:
return
# avoid iterating over self.relations while removing things by
# collecting the list first.
with self.lock:
to_remove = self._list_relations_in_schema(database, schema)
self._remove_all(to_remove)
# handle a drop_schema race by using discard() over remove()
self.schemas.discard(key)
def update_schemas(self, schemas: Iterable[Tuple[Optional[str], str]]):
"""Add multiple schemas to the set of known schemas (case-insensitive)
:param schemas: An iterable of the schema names to add.
"""
self.schemas.update((lowercase(d), s.lower()) for (d, s) in schemas)
def __contains__(self, schema_id: Tuple[Optional[str], str]):
"""A schema is 'in' the relations cache if it is in the set of cached
schemas.
:param schema_id: The db name and schema name to look up.
"""
db, schema = schema_id
return (lowercase(db), schema.lower()) in self.schemas
def dump_graph(self):
"""Dump a key-only representation of the schema to a dictionary. Every
known relation is a key with a value of a list of keys it is referenced
by.
"""
# we have to hold the lock for the entire dump, if other threads modify
# self.relations or any cache entry's referenced_by during iteration
# it's a runtime error!
with self.lock:
return {dot_separated(k): v.dump_graph_entry() for k, v in self.relations.items()}
def _setdefault(self, relation: _CachedRelation):
"""Add a relation to the cache, or return it if it already exists.
:param _CachedRelation relation: The relation to set or get.
:return _CachedRelation: The relation stored under the given relation's
key
"""
self.add_schema(relation.database, relation.schema)
key = relation.key()
return self.relations.setdefault(key, relation)
def _add_link(self, referenced_key, dependent_key):
"""Add a link between two relations to the database. Both the old and
new entries must alraedy exist in the database.
:param _ReferenceKey referenced_key: The key identifying the referenced
model (the one that if dropped will drop the dependent model).
:param _ReferenceKey dependent_key: The key identifying the dependent
model.
:raises InternalError: If either entry does not exist.
"""
referenced = self.relations.get(referenced_key)
# if the referenced relation was never cached, there is nothing to link
if referenced is None:
return
dependent = self.relations.get(dependent_key)
if dependent is None:
raise DependentLinkNotCachedError(dependent_key)
assert dependent is not None # we just raised!
referenced.add_reference(dependent)
# This is called in plugins/postgres/dbt/adapters/postgres/impl.py
def add_link(self, referenced, dependent):
"""Add a link between two relations to the database. If either relation
does not exist, it will be added as an "external" relation.
The dependent model refers _to_ the referenced model. So, given
arguments of (jake_test, bar, jake_test, foo):
both values are in the schema jake_test and foo is a view that refers
to bar, so "drop bar cascade" will drop foo and all of foo's
dependents.
:param BaseRelation referenced: The referenced model.
:param BaseRelation dependent: The dependent model.
:raises InternalError: If either entry does not exist.
"""
ref_key = _make_ref_key(referenced)
dep_key = _make_ref_key(dependent)
if (ref_key.database, ref_key.schema) not in self:
# if we have not cached the referenced schema at all, we must be
# referring to a table outside our control. There's no need to make
# a link - we will never drop the referenced relation during a run.
fire_event(
CacheAction(
ref_key=_make_msg_from_ref_key(ref_key),
ref_key_2=_make_msg_from_ref_key(dep_key),
)
)
return
if ref_key not in self.relations:
# Insert a dummy "external" relation.
referenced = referenced.replace(type=referenced.External)
self.add(referenced)
if dep_key not in self.relations:
# Insert a dummy "external" relation.
dependent = dependent.replace(type=referenced.External)
self.add(dependent)
fire_event(
CacheAction(
action="add_link",
ref_key=_make_msg_from_ref_key(dep_key),
ref_key_2=_make_msg_from_ref_key(ref_key),
)
)
with self.lock:
self._add_link(ref_key, dep_key)
def add(self, relation):
"""Add the relation inner to the cache, under the schema schema and
identifier identifier
:param BaseRelation relation: The underlying relation.
"""
cached = _CachedRelation(relation)
fire_event_if(
flags.LOG_CACHE_EVENTS,
lambda: CacheDumpGraph(before_after="before", action="adding", dump=self.dump_graph()),
)
fire_event(CacheAction(action="add_relation", ref_key=_make_ref_key_msg(cached)))
with self.lock:
self._setdefault(cached)
fire_event_if(
flags.LOG_CACHE_EVENTS,
lambda: CacheDumpGraph(before_after="after", action="adding", dump=self.dump_graph()),
)
def _remove_refs(self, keys):
"""Removes all references to all entries in keys. This does not
cascade!
:param Iterable[_ReferenceKey] keys: The keys to remove.
"""
# remove direct refs
for key in keys:
del self.relations[key]
# then remove all entries from each child
for cached in self.relations.values():
cached.release_references(keys)
def drop(self, relation):
"""Drop the named relation and cascade it appropriately to all
dependent relations.
Because dbt proactively does many `drop relation if exist ... cascade`
that are noops, nonexistent relation drops cause a debug log and no
other actions.
:param str schema: The schema of the relation to drop.
:param str identifier: The identifier of the relation to drop.
"""
dropped_key = _make_ref_key(relation)
dropped_key_msg = _make_ref_key_msg(relation)
fire_event(CacheAction(action="drop_relation", ref_key=dropped_key_msg))
with self.lock:
if dropped_key not in self.relations:
fire_event(CacheAction(action="drop_missing_relation", ref_key=dropped_key_msg))
return
consequences = self.relations[dropped_key].collect_consequences()
# convert from a list of _ReferenceKeys to a list of ReferenceKeyMsgs
consequence_msgs = [_make_msg_from_ref_key(key) for key in consequences]
fire_event(
CacheAction(
action="drop_cascade", ref_key=dropped_key_msg, ref_list=consequence_msgs
)
)
self._remove_refs(consequences)
def _rename_relation(self, old_key, new_relation):
"""Rename a relation named old_key to new_key, updating references.
Return whether or not there was a key to rename.
:param _ReferenceKey old_key: The existing key, to rename from.
:param _CachedRelation new_key: The new relation, to rename to.
"""
# On the database level, a rename updates all values that were
# previously referenced by old_name to be referenced by new_name.
# basically, the name changes but some underlying ID moves. Kind of
# like an object reference!
relation = self.relations.pop(old_key)
new_key = new_relation.key()
# relation has to rename its innards, so it needs the _CachedRelation.
relation.rename(new_relation)
# update all the relations that refer to it
for cached in self.relations.values():
if cached.is_referenced_by(old_key):
fire_event(
CacheAction(
action="update_reference",
ref_key=_make_ref_key_msg(old_key),
ref_key_2=_make_ref_key_msg(new_key),
ref_key_3=_make_ref_key_msg(cached.key()),
)
)
cached.rename_key(old_key, new_key)
self.relations[new_key] = relation
# also fixup the schemas!
self.add_schema(new_key.database, new_key.schema)
return True
def _check_rename_constraints(self, old_key, new_key):
"""Check the rename constraints, and return whether or not the rename
can proceed.
If the new key is already present, that is an error.
If the old key is absent, we debug log and return False, assuming it's
a temp table being renamed.
:param _ReferenceKey old_key: The existing key, to rename from.
:param _ReferenceKey new_key: The new key, to rename to.
:return bool: If the old relation exists for renaming.
:raises InternalError: If the new key is already present.
"""
if new_key in self.relations:
# Tell user when collision caused by model names truncated during
# materialization.
raise TruncatedModelNameCausedCollisionError(new_key, self.relations)
if old_key not in self.relations:
fire_event(
CacheAction(action="temporary_relation", ref_key=_make_msg_from_ref_key(old_key))
)
return False
return True
def rename(self, old, new):
"""Rename the old schema/identifier to the new schema/identifier and
update references.
If the new schema/identifier is already present, that is an error.
If the schema/identifier key is absent, we only debug log and return,
assuming it's a temp table being renamed.
:param BaseRelation old: The existing relation name information.
:param BaseRelation new: The new relation name information.
:raises InternalError: If the new key is already present.
"""
old_key = _make_ref_key(old)
new_key = _make_ref_key(new)
fire_event(
CacheAction(
action="rename_relation",
ref_key=_make_msg_from_ref_key(old_key),
ref_key_2=_make_msg_from_ref_key(new_key),
)
)
fire_event_if(
flags.LOG_CACHE_EVENTS,
lambda: CacheDumpGraph(before_after="before", action="rename", dump=self.dump_graph()),
)
with self.lock:
if self._check_rename_constraints(old_key, new_key):
self._rename_relation(old_key, _CachedRelation(new))
else:
self._setdefault(_CachedRelation(new))
fire_event_if(
flags.LOG_CACHE_EVENTS,
lambda: CacheDumpGraph(before_after="after", action="rename", dump=self.dump_graph()),
)
def get_relations(self, database: Optional[str], schema: Optional[str]) -> List[Any]:
"""Case-insensitively yield all relations matching the given schema.
:param str schema: The case-insensitive schema name to list from.
:return List[BaseRelation]: The list of relations with the given
schema
"""
database = lowercase(database)
schema = lowercase(schema)
with self.lock:
results = [
r.inner
for r in self.relations.values()
if (lowercase(r.schema) == schema and lowercase(r.database) == database)
]
if None in results:
raise NoneRelationFoundError()
return results
def clear(self):
"""Clear the cache"""
with self.lock:
self.relations.clear()
self.schemas.clear()
def _list_relations_in_schema(
self, database: Optional[str], schema: Optional[str]
) -> List[_CachedRelation]:
"""Get the relations in a schema. Callers should hold the lock."""
key = (lowercase(database), lowercase(schema))
to_remove: List[_CachedRelation] = []
for cachekey, relation in self.relations.items():
if (cachekey.database, cachekey.schema) == key:
to_remove.append(relation)
return to_remove
def _remove_all(self, to_remove: List[_CachedRelation]):
"""Remove all the listed relations. Ignore relations that have been
cascaded out.
"""
for relation in to_remove:
# it may have been cascaded out already
drop_key = _make_ref_key(relation)
if drop_key in self.relations:
self.drop(drop_key)


@@ -0,0 +1,223 @@
import threading
import traceback
from contextlib import contextmanager
from importlib import import_module
from pathlib import Path
from typing import Any, Dict, List, Optional, Set, Type
from dbt.adapters.base.plugin import AdapterPlugin
from dbt.adapters.protocol import AdapterConfig, AdapterProtocol, RelationProtocol
from dbt.contracts.connection import AdapterRequiredConfig, Credentials
from dbt.events.functions import fire_event
from dbt.events.types import AdapterImportError, PluginLoadError
from dbt.exceptions import DbtInternalError, DbtRuntimeError
from dbt.include.global_project import PACKAGE_PATH as GLOBAL_PROJECT_PATH
from dbt.include.global_project import PROJECT_NAME as GLOBAL_PROJECT_NAME
Adapter = AdapterProtocol
class AdapterContainer:
def __init__(self):
self.lock = threading.Lock()
self.adapters: Dict[str, Adapter] = {}
self.plugins: Dict[str, AdapterPlugin] = {}
# map package names to their include paths
self.packages: Dict[str, Path] = {
GLOBAL_PROJECT_NAME: Path(GLOBAL_PROJECT_PATH),
}
def get_plugin_by_name(self, name: str) -> AdapterPlugin:
with self.lock:
if name in self.plugins:
return self.plugins[name]
names = ", ".join(self.plugins.keys())
message = f"Invalid adapter type {name}! Must be one of {names}"
raise DbtRuntimeError(message)
def get_adapter_class_by_name(self, name: str) -> Type[Adapter]:
plugin = self.get_plugin_by_name(name)
return plugin.adapter
def get_relation_class_by_name(self, name: str) -> Type[RelationProtocol]:
adapter = self.get_adapter_class_by_name(name)
return adapter.Relation
def get_config_class_by_name(self, name: str) -> Type[AdapterConfig]:
adapter = self.get_adapter_class_by_name(name)
return adapter.AdapterSpecificConfigs
def load_plugin(self, name: str) -> Type[Credentials]:
# this doesn't need a lock: in the worst case we'll overwrite packages
# and adapter_type entries with the same value, as they're all
# singletons
try:
# mypy doesn't think modules have any attributes.
mod: Any = import_module("." + name, "dbt.adapters")
except ModuleNotFoundError as exc:
# if we failed to import the target module in particular, inform
# the user about it via a runtime error
if exc.name == "dbt.adapters." + name:
fire_event(AdapterImportError(exc=str(exc)))
raise DbtRuntimeError(f"Could not find adapter type {name}!")
# otherwise, the error had to have come from some underlying
# library. Log the stack trace.
fire_event(PluginLoadError(exc_info=traceback.format_exc()))
raise
plugin: AdapterPlugin = mod.Plugin
plugin_type = plugin.adapter.type()
if plugin_type != name:
raise DbtRuntimeError(
f"Expected to find adapter with type named {name}, got "
f"adapter with type {plugin_type}"
)
with self.lock:
            # other callers hold the lock while iterating over plugins, so take it when adding
self.plugins[name] = plugin
self.packages[plugin.project_name] = Path(plugin.include_path)
for dep in plugin.dependencies:
self.load_plugin(dep)
return plugin.credentials
def register_adapter(self, config: AdapterRequiredConfig) -> None:
adapter_name = config.credentials.type
adapter_type = self.get_adapter_class_by_name(adapter_name)
with self.lock:
if adapter_name in self.adapters:
# this shouldn't really happen...
return
adapter: Adapter = adapter_type(config) # type: ignore
self.adapters[adapter_name] = adapter
def lookup_adapter(self, adapter_name: str) -> Adapter:
return self.adapters[adapter_name]
def reset_adapters(self):
"""Clear the adapters. This is useful for tests, which change configs."""
with self.lock:
for adapter in self.adapters.values():
adapter.cleanup_connections()
self.adapters.clear()
def cleanup_connections(self):
"""Only clean up the adapter connections list without resetting the
actual adapters.
"""
with self.lock:
for adapter in self.adapters.values():
adapter.cleanup_connections()
def get_adapter_plugins(self, name: Optional[str]) -> List[AdapterPlugin]:
"""Iterate over the known adapter plugins. If a name is provided,
iterate in dependency order over the named plugin and its dependencies.
"""
if name is None:
return list(self.plugins.values())
plugins: List[AdapterPlugin] = []
seen: Set[str] = set()
plugin_names: List[str] = [name]
while plugin_names:
plugin_name = plugin_names[0]
plugin_names = plugin_names[1:]
try:
plugin = self.plugins[plugin_name]
except KeyError:
raise DbtInternalError(f"No plugin found for {plugin_name}") from None
plugins.append(plugin)
seen.add(plugin_name)
for dep in plugin.dependencies:
if dep not in seen:
plugin_names.append(dep)
return plugins
def get_adapter_package_names(self, name: Optional[str]) -> List[str]:
package_names: List[str] = [p.project_name for p in self.get_adapter_plugins(name)]
package_names.append(GLOBAL_PROJECT_NAME)
return package_names
def get_include_paths(self, name: Optional[str]) -> List[Path]:
paths = []
for package_name in self.get_adapter_package_names(name):
try:
path = self.packages[package_name]
except KeyError:
raise DbtInternalError(f"No internal package listing found for {package_name}")
paths.append(path)
return paths
def get_adapter_type_names(self, name: Optional[str]) -> List[str]:
return [p.adapter.type() for p in self.get_adapter_plugins(name)]
FACTORY: AdapterContainer = AdapterContainer()
def register_adapter(config: AdapterRequiredConfig) -> None:
FACTORY.register_adapter(config)
def get_adapter(config: AdapterRequiredConfig):
return FACTORY.lookup_adapter(config.credentials.type)
def get_adapter_by_type(adapter_type):
return FACTORY.lookup_adapter(adapter_type)
def reset_adapters():
"""Clear the adapters. This is useful for tests, which change configs."""
FACTORY.reset_adapters()
def cleanup_connections():
"""Only clean up the adapter connections list without resetting the actual
adapters.
"""
FACTORY.cleanup_connections()
def get_adapter_class_by_name(name: str) -> Type[AdapterProtocol]:
return FACTORY.get_adapter_class_by_name(name)
def get_config_class_by_name(name: str) -> Type[AdapterConfig]:
return FACTORY.get_config_class_by_name(name)
def get_relation_class_by_name(name: str) -> Type[RelationProtocol]:
return FACTORY.get_relation_class_by_name(name)
def load_plugin(name: str) -> Type[Credentials]:
return FACTORY.load_plugin(name)
def get_include_paths(name: Optional[str]) -> List[Path]:
return FACTORY.get_include_paths(name)
def get_adapter_package_names(name: Optional[str]) -> List[str]:
return FACTORY.get_adapter_package_names(name)
def get_adapter_type_names(name: Optional[str]) -> List[str]:
return FACTORY.get_adapter_type_names(name)
@contextmanager
def adapter_management():
reset_adapters()
try:
yield
finally:
cleanup_connections()
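A hedged sketch of how the module-level factory functions above fit together; `runtime_config` is assumed to be a real `AdapterRequiredConfig` (e.g. built from `profiles.yml`) and is not constructed here:

```python
from dbt.adapters.factory import (
    adapter_management,
    get_adapter,
    load_plugin,
    register_adapter,
)


def run_one_query(runtime_config):
    # adapter_management() resets adapters on entry and cleans up
    # connections on exit
    with adapter_management():
        load_plugin(runtime_config.credentials.type)  # e.g. "postgres"
        register_adapter(runtime_config)
        adapter = get_adapter(runtime_config)
        # connection_named is the usual BaseAdapter helper for binding a
        # thread-local connection; shown here as an assumption
        with adapter.connection_named("example"):
            response, table = adapter.execute("select 1", fetch=True)
    return response, table
```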


@@ -0,0 +1,158 @@
from dataclasses import dataclass
from typing import (
Type,
Hashable,
Optional,
ContextManager,
List,
Generic,
TypeVar,
Tuple,
Dict,
Any,
)
from typing_extensions import Protocol
import agate
from dbt.contracts.connection import Connection, AdapterRequiredConfig, AdapterResponse
from dbt.contracts.graph.nodes import ResultNode, ManifestNode
from dbt.contracts.graph.model_config import BaseConfig
from dbt.contracts.graph.manifest import Manifest
from dbt.contracts.relation import Policy, HasQuoting
from dbt.graph import Graph
@dataclass
class AdapterConfig(BaseConfig):
pass
class ConnectionManagerProtocol(Protocol):
TYPE: str
class ColumnProtocol(Protocol):
pass
Self = TypeVar("Self", bound="RelationProtocol")
class RelationProtocol(Protocol):
@classmethod
def get_default_quote_policy(cls) -> Policy:
...
@classmethod
def create_from(cls: Type[Self], config: HasQuoting, node: ResultNode) -> Self:
...
class CompilerProtocol(Protocol):
def compile(self, manifest: Manifest, write=True) -> Graph:
...
def compile_node(
self,
node: ManifestNode,
manifest: Manifest,
extra_context: Optional[Dict[str, Any]] = None,
) -> ManifestNode:
...
AdapterConfig_T = TypeVar("AdapterConfig_T", bound=AdapterConfig)
ConnectionManager_T = TypeVar("ConnectionManager_T", bound=ConnectionManagerProtocol)
Relation_T = TypeVar("Relation_T", bound=RelationProtocol)
Column_T = TypeVar("Column_T", bound=ColumnProtocol)
Compiler_T = TypeVar("Compiler_T", bound=CompilerProtocol)
# TODO CT-211
class AdapterProtocol( # type: ignore[misc]
Protocol,
Generic[
AdapterConfig_T,
ConnectionManager_T,
Relation_T,
Column_T,
Compiler_T,
],
):
# N.B. Technically these are ClassVars, but mypy doesn't support putting type vars in a
# ClassVar due to the restrictiveness of PEP-526
# See: https://github.com/python/mypy/issues/5144
AdapterSpecificConfigs: Type[AdapterConfig_T]
Column: Type[Column_T]
Relation: Type[Relation_T]
ConnectionManager: Type[ConnectionManager_T]
connections: ConnectionManager_T
def __init__(self, config: AdapterRequiredConfig):
...
@classmethod
def type(cls) -> str:
pass
def set_query_header(self, manifest: Manifest) -> None:
...
@staticmethod
def get_thread_identifier() -> Hashable:
...
def get_thread_connection(self) -> Connection:
...
def set_thread_connection(self, conn: Connection) -> None:
...
def get_if_exists(self) -> Optional[Connection]:
...
def clear_thread_connection(self) -> None:
...
def clear_transaction(self) -> None:
...
def exception_handler(self, sql: str) -> ContextManager:
...
def set_connection_name(self, name: Optional[str] = None) -> Connection:
...
def cancel_open(self) -> Optional[List[str]]:
...
def open(cls, connection: Connection) -> Connection:
...
def release(self) -> None:
...
def cleanup_all(self) -> None:
...
def begin(self) -> None:
...
def commit(self) -> None:
...
def close(cls, connection: Connection) -> Connection:
...
def commit_if_has_connection(self) -> None:
...
def execute(
self, sql: str, auto_begin: bool = False, fetch: bool = False
) -> Tuple[AdapterResponse, agate.Table]:
...
def get_compiler(self) -> Compiler_T:
...
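The classes above rely on structural typing: a concrete adapter never imports or subclasses these protocols, it simply has matching attributes and methods. A minimal, self-contained sketch of that mechanism (names are illustrative; this module imports `Protocol` from `typing_extensions` to support older Pythons, but `typing.Protocol` works on 3.8+):

```python
from typing import Protocol


class GreeterProtocol(Protocol):
    def greet(self, name: str) -> str:
        ...


class EnglishGreeter:  # note: no inheritance from GreeterProtocol
    def greet(self, name: str) -> str:
        return f"hello, {name}"


def run(greeter: GreeterProtocol) -> str:
    # mypy accepts EnglishGreeter here purely by shape
    return greeter.greet("dbt")


print(run(EnglishGreeter()))
```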


@@ -0,0 +1,40 @@
# this module exists to resolve circular imports with the events module
from collections import namedtuple
from typing import Any, Optional
from dbt.events.proto_types import ReferenceKeyMsg
_ReferenceKey = namedtuple("_ReferenceKey", "database schema identifier")
def lowercase(value: Optional[str]) -> Optional[str]:
if value is None:
return None
else:
return value.lower()
# For backwards compatibility. New code should use _make_ref_key
def _make_key(relation: Any) -> _ReferenceKey:
return _make_ref_key(relation)
def _make_ref_key(relation: Any) -> _ReferenceKey:
"""Make _ReferenceKeys with lowercase values for the cache so we don't have
to keep track of quoting
"""
# databases and schemas can both be None
return _ReferenceKey(
lowercase(relation.database), lowercase(relation.schema), lowercase(relation.identifier)
)
def _make_ref_key_msg(relation: Any):
return _make_msg_from_ref_key(_make_ref_key(relation))
def _make_msg_from_ref_key(ref_key: _ReferenceKey) -> ReferenceKeyMsg:
return ReferenceKeyMsg(
database=ref_key.database, schema=ref_key.schema, identifier=ref_key.identifier
)
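These helpers only require `.database`, `.schema`, and `.identifier` attributes, so any duck-typed object works. A quick sketch, assuming the definitions above are in scope:

```python
from types import SimpleNamespace

relation = SimpleNamespace(database="Analytics", schema="Staging", identifier="Orders")
print(_make_ref_key(relation))
# _ReferenceKey(database='analytics', schema='staging', identifier='orders')
```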


@@ -0,0 +1,3 @@
# these are all just exports, #noqa them so flake8 will be happy
from dbt.adapters.sql.connections import SQLConnectionManager # noqa
from dbt.adapters.sql.impl import SQLAdapter # noqa


@@ -0,0 +1,175 @@
import abc
import time
from typing import List, Optional, Tuple, Any, Iterable, Dict
import agate
import dbt.clients.agate_helper
import dbt.exceptions
from dbt.adapters.base import BaseConnectionManager
from dbt.contracts.connection import Connection, ConnectionState, AdapterResponse
from dbt.events.functions import fire_event
from dbt.events.types import ConnectionUsed, SQLQuery, SQLCommit, SQLQueryStatus
from dbt.events.contextvars import get_node_info
from dbt.utils import cast_to_str
class SQLConnectionManager(BaseConnectionManager):
"""The default connection manager with some common SQL methods implemented.
Methods to implement:
- exception_handler
- cancel
- get_response
- open
"""
@abc.abstractmethod
def cancel(self, connection: Connection):
"""Cancel the given connection."""
raise dbt.exceptions.NotImplementedError("`cancel` is not implemented for this adapter!")
def cancel_open(self) -> List[str]:
names = []
this_connection = self.get_if_exists()
with self.lock:
for connection in self.thread_connections.values():
if connection is this_connection:
continue
# if the connection failed, the handle will be None so we have
# nothing to cancel.
if connection.handle is not None and connection.state == ConnectionState.OPEN:
self.cancel(connection)
if connection.name is not None:
names.append(connection.name)
return names
def add_query(
self,
sql: str,
auto_begin: bool = True,
bindings: Optional[Any] = None,
abridge_sql_log: bool = False,
) -> Tuple[Connection, Any]:
connection = self.get_thread_connection()
if auto_begin and connection.transaction_open is False:
self.begin()
fire_event(
ConnectionUsed(
conn_type=self.TYPE,
conn_name=cast_to_str(connection.name),
node_info=get_node_info(),
)
)
with self.exception_handler(sql):
if abridge_sql_log:
log_sql = "{}...".format(sql[:512])
else:
log_sql = sql
fire_event(
SQLQuery(
conn_name=cast_to_str(connection.name), sql=log_sql, node_info=get_node_info()
)
)
pre = time.time()
cursor = connection.handle.cursor()
cursor.execute(sql, bindings)
fire_event(
SQLQueryStatus(
status=str(self.get_response(cursor)),
elapsed=round((time.time() - pre)),
node_info=get_node_info(),
)
)
return connection, cursor
@classmethod
@abc.abstractmethod
def get_response(cls, cursor: Any) -> AdapterResponse:
"""Get the status of the cursor."""
raise dbt.exceptions.NotImplementedError(
"`get_response` is not implemented for this adapter!"
)
@classmethod
def process_results(
cls, column_names: Iterable[str], rows: Iterable[Any]
) -> List[Dict[str, Any]]:
# TODO CT-211
unique_col_names = dict() # type: ignore[var-annotated]
# TODO CT-211
for idx in range(len(column_names)): # type: ignore[arg-type]
# TODO CT-211
col_name = column_names[idx] # type: ignore[index]
if col_name in unique_col_names:
unique_col_names[col_name] += 1
# TODO CT-211
column_names[idx] = f"{col_name}_{unique_col_names[col_name]}" # type: ignore[index] # noqa
else:
# TODO CT-211
unique_col_names[column_names[idx]] = 1 # type: ignore[index]
return [dict(zip(column_names, row)) for row in rows]
@classmethod
def get_result_from_cursor(cls, cursor: Any) -> agate.Table:
data: List[Any] = []
column_names: List[str] = []
if cursor.description is not None:
column_names = [col[0] for col in cursor.description]
rows = cursor.fetchall()
data = cls.process_results(column_names, rows)
return dbt.clients.agate_helper.table_from_data_flat(data, column_names)
def execute(
self, sql: str, auto_begin: bool = False, fetch: bool = False
) -> Tuple[AdapterResponse, agate.Table]:
sql = self._add_query_comment(sql)
_, cursor = self.add_query(sql, auto_begin)
response = self.get_response(cursor)
if fetch:
table = self.get_result_from_cursor(cursor)
else:
table = dbt.clients.agate_helper.empty_table()
return response, table
def add_begin_query(self):
return self.add_query("BEGIN", auto_begin=False)
def add_commit_query(self):
return self.add_query("COMMIT", auto_begin=False)
def begin(self):
connection = self.get_thread_connection()
if connection.transaction_open is True:
raise dbt.exceptions.DbtInternalError(
'Tried to begin a new transaction on connection "{}", but '
"it already had one open!".format(connection.name)
)
self.add_begin_query()
connection.transaction_open = True
return connection
def commit(self):
connection = self.get_thread_connection()
if connection.transaction_open is False:
raise dbt.exceptions.DbtInternalError(
'Tried to commit transaction on connection "{}", but '
"it does not have one open!".format(connection.name)
)
fire_event(SQLCommit(conn_name=connection.name, node_info=get_node_info()))
self.add_commit_query()
connection.transaction_open = False
return connection
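With the class above in scope, `process_results` deduplicates repeated column names by suffixing later occurrences with a counter; note the result is `id_2`, not `id_1`, because the counter is incremented before the rename:

```python
rows = [(1, 10), (2, 20)]
print(SQLConnectionManager.process_results(["id", "id"], rows))
# [{'id': 1, 'id_2': 10}, {'id': 2, 'id_2': 20}]
```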


@@ -0,0 +1,240 @@
import agate
from typing import Any, Optional, Tuple, Type, List
from dbt.contracts.connection import Connection
from dbt.exceptions import RelationTypeNullError
from dbt.adapters.base import BaseAdapter, available
from dbt.adapters.cache import _make_ref_key_msg
from dbt.adapters.sql import SQLConnectionManager
from dbt.events.functions import fire_event
from dbt.events.types import ColTypeChange, SchemaCreation, SchemaDrop
from dbt.adapters.base.relation import BaseRelation
LIST_RELATIONS_MACRO_NAME = "list_relations_without_caching"
GET_COLUMNS_IN_RELATION_MACRO_NAME = "get_columns_in_relation"
LIST_SCHEMAS_MACRO_NAME = "list_schemas"
CHECK_SCHEMA_EXISTS_MACRO_NAME = "check_schema_exists"
CREATE_SCHEMA_MACRO_NAME = "create_schema"
DROP_SCHEMA_MACRO_NAME = "drop_schema"
RENAME_RELATION_MACRO_NAME = "rename_relation"
TRUNCATE_RELATION_MACRO_NAME = "truncate_relation"
DROP_RELATION_MACRO_NAME = "drop_relation"
ALTER_COLUMN_TYPE_MACRO_NAME = "alter_column_type"
class SQLAdapter(BaseAdapter):
"""The default adapter with the common agate conversions and some SQL
methods was implemented. This adapter has a different much shorter list of
methods to implement, but some more macros that must be implemented.
To implement a macro, implement "${adapter_type}__${macro_name}". in the
adapter's internal project.
Methods to implement:
- date_function
Macros to implement:
- get_catalog
- list_relations_without_caching
- get_columns_in_relation
"""
ConnectionManager: Type[SQLConnectionManager]
connections: SQLConnectionManager
@available.parse(lambda *a, **k: (None, None))
def add_query(
self,
sql: str,
auto_begin: bool = True,
bindings: Optional[Any] = None,
abridge_sql_log: bool = False,
) -> Tuple[Connection, Any]:
"""Add a query to the current transaction. A thin wrapper around
ConnectionManager.add_query.
:param sql: The SQL query to add
:param auto_begin: If set and there is no transaction in progress,
begin a new one.
:param bindings: An optional list of bindings for the query.
:param abridge_sql_log: If set, limit the raw sql logged to 512
characters
"""
return self.connections.add_query(sql, auto_begin, bindings, abridge_sql_log)
@classmethod
def convert_text_type(cls, agate_table: agate.Table, col_idx: int) -> str:
return "text"
@classmethod
def convert_number_type(cls, agate_table: agate.Table, col_idx: int) -> str:
# TODO CT-211
decimals = agate_table.aggregate(agate.MaxPrecision(col_idx)) # type: ignore[attr-defined]
return "float8" if decimals else "integer"
@classmethod
def convert_boolean_type(cls, agate_table: agate.Table, col_idx: int) -> str:
return "boolean"
@classmethod
def convert_datetime_type(cls, agate_table: agate.Table, col_idx: int) -> str:
return "timestamp without time zone"
@classmethod
def convert_date_type(cls, agate_table: agate.Table, col_idx: int) -> str:
return "date"
@classmethod
def convert_time_type(cls, agate_table: agate.Table, col_idx: int) -> str:
return "time"
@classmethod
def is_cancelable(cls) -> bool:
return True
def expand_column_types(self, goal, current):
reference_columns = {c.name: c for c in self.get_columns_in_relation(goal)}
target_columns = {c.name: c for c in self.get_columns_in_relation(current)}
for column_name, reference_column in reference_columns.items():
target_column = target_columns.get(column_name)
if target_column is not None and target_column.can_expand_to(reference_column):
col_string_size = reference_column.string_size()
new_type = self.Column.string_type(col_string_size)
fire_event(
ColTypeChange(
orig_type=target_column.data_type,
new_type=new_type,
table=_make_ref_key_msg(current),
)
)
self.alter_column_type(current, column_name, new_type)
def alter_column_type(self, relation, column_name, new_column_type) -> None:
"""
1. Create a new column (w/ temp name and correct type)
2. Copy data over to it
3. Drop the existing column (cascade!)
4. Rename the new column to existing column
"""
kwargs = {
"relation": relation,
"column_name": column_name,
"new_column_type": new_column_type,
}
self.execute_macro(ALTER_COLUMN_TYPE_MACRO_NAME, kwargs=kwargs)
def drop_relation(self, relation):
if relation.type is None:
raise RelationTypeNullError(relation)
self.cache_dropped(relation)
self.execute_macro(DROP_RELATION_MACRO_NAME, kwargs={"relation": relation})
def truncate_relation(self, relation):
self.execute_macro(TRUNCATE_RELATION_MACRO_NAME, kwargs={"relation": relation})
def rename_relation(self, from_relation, to_relation):
self.cache_renamed(from_relation, to_relation)
kwargs = {"from_relation": from_relation, "to_relation": to_relation}
self.execute_macro(RENAME_RELATION_MACRO_NAME, kwargs=kwargs)
def get_columns_in_relation(self, relation):
return self.execute_macro(
GET_COLUMNS_IN_RELATION_MACRO_NAME, kwargs={"relation": relation}
)
def create_schema(self, relation: BaseRelation) -> None:
relation = relation.without_identifier()
fire_event(SchemaCreation(relation=_make_ref_key_msg(relation)))
kwargs = {
"relation": relation,
}
self.execute_macro(CREATE_SCHEMA_MACRO_NAME, kwargs=kwargs)
self.commit_if_has_connection()
# we can't update the cache here, as if the schema already existed we
# don't want to (incorrectly) say that it's empty
def drop_schema(self, relation: BaseRelation) -> None:
relation = relation.without_identifier()
fire_event(SchemaDrop(relation=_make_ref_key_msg(relation)))
kwargs = {
"relation": relation,
}
self.execute_macro(DROP_SCHEMA_MACRO_NAME, kwargs=kwargs)
self.commit_if_has_connection()
# we can update the cache here
self.cache.drop_schema(relation.database, relation.schema)
def list_relations_without_caching(
self,
schema_relation: BaseRelation,
) -> List[BaseRelation]:
kwargs = {"schema_relation": schema_relation}
results = self.execute_macro(LIST_RELATIONS_MACRO_NAME, kwargs=kwargs)
relations = []
quote_policy = {"database": True, "schema": True, "identifier": True}
for _database, name, _schema, _type in results:
try:
_type = self.Relation.get_relation_type(_type)
except ValueError:
_type = self.Relation.External
relations.append(
self.Relation.create(
database=_database,
schema=_schema,
identifier=name,
quote_policy=quote_policy,
type=_type,
)
)
return relations
def quote(self, identifier):
return '"{}"'.format(identifier)
def list_schemas(self, database: str) -> List[str]:
results = self.execute_macro(LIST_SCHEMAS_MACRO_NAME, kwargs={"database": database})
return [row[0] for row in results]
def check_schema_exists(self, database: str, schema: str) -> bool:
information_schema = self.Relation.create(
database=database,
schema=schema,
identifier="INFORMATION_SCHEMA",
quote_policy=self.config.quoting,
).information_schema()
kwargs = {"information_schema": information_schema, "schema": schema}
results = self.execute_macro(CHECK_SCHEMA_EXISTS_MACRO_NAME, kwargs=kwargs)
return results[0][0] > 0
# This is for use in the test suite
def run_sql_for_tests(self, sql, fetch, conn):
cursor = conn.handle.cursor()
try:
cursor.execute(sql)
if hasattr(conn.handle, "commit"):
conn.handle.commit()
if fetch == "one":
return cursor.fetchone()
elif fetch == "all":
return cursor.fetchall()
else:
return
except BaseException as e:
if conn.handle and not getattr(conn.handle, "closed", True):
conn.handle.rollback()
print(sql)
print(e)
raise
finally:
conn.transaction_open = False
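To make the division of labor concrete, here is a hedged sketch of the minimal Python surface a new warehouse plugin supplies on top of `SQLAdapter`; `MyAdapter` and `MyConnectionManager` are illustrative names, not a real plugin, and the heavy lifting lives in the adapter's `<adapter_type>__<macro_name>` macros:

```python
from dbt.adapters.sql import SQLAdapter, SQLConnectionManager


class MyConnectionManager(SQLConnectionManager):
    TYPE = "myadapter"
    # a real plugin also implements exception_handler, cancel, get_response,
    # and open (see the "Methods to implement" list on SQLConnectionManager)


class MyAdapter(SQLAdapter):
    ConnectionManager = MyConnectionManager

    @classmethod
    def date_function(cls) -> str:
        # the one required method: the warehouse's current-timestamp call
        return "now()"
```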


@@ -1,74 +0,0 @@
# dbt/artifacts
## Overview
This directory is meant to be a lightweight module that is independent (and upstream of) the rest of `dbt-core` internals.
Its primary responsibility is to define simple data classes that represent the versioned artifact schemas that dbt writes as JSON files throughout execution.
Eventually, this module may be released as a standalone package (e.g. `dbt-artifacts`) to support stable programmatic parsing of dbt artifacts.
`dbt/artifacts` is organized into artifact 'schemas' and 'resources'. Schemas represent the final serialized artifact objects, while resources represent smaller components within those schemas.
### dbt/artifacts/schemas
Each major version of a schema under `dbt/artifacts/schema` is defined in its corresponding `dbt/artifacts/schema/<artifact-name>/v<version>` directory. Before `dbt/artifacts` existed, artifact schemas were always modified in place, which is why older schema versions are missing class definitions.
Currently, there are four artifact schemas defined in `dbt/artifacts/schemas`:
| Artifact name | File | Class | Latest definition |
|---------------|------------------|----------------------------------|-----------------------------------|
| manifest | manifest.json | WritableManifest | dbt/artifacts/schema/manifest/v12 |
| catalog | catalog.json | CatalogArtifact | dbt/artifacts/schema/catalog/v1 |
| run | run_results.json | RunResultsArtifact | dbt/artifacts/schema/run/v5 |
| freshness | sources.json | FreshnessExecutionResultArtifact | dbt/artifacts/schema/freshness/v3 |
### dbt/artifacts/resources
All existing resources are defined under `dbt/artifacts/resources/v1`.
## Making changes to dbt/artifacts
### All changes
All changes to any fields will require a manual update to [dbt-jsonschema](https://github.com/dbt-labs/dbt-jsonschema) to ensure live checking continues to work.
### Non-breaking changes
Freely make incremental, non-breaking changes in-place to the latest major version of any artifact (minor or patch bumps). The only changes that are fully forward and backward compatible are:
* Adding a new field with a default
* Deleting a field with a default. This is compatible in terms of serialization and deserialization, but may still lead to surprising behaviour:
    * Artifact consumers relying on the field's existence will break (e.g. `manifest["deleted_field"]` will stop working unless the access was implemented safely)
    * Old code (e.g. in dbt-core) that relies on the value of the deleted field may behave surprisingly, given that only the default value will be set when instantiated from the new schema
These types of minor, non-breaking changes are tested by [tests/unit/artifacts/test_base_resource.py::TestMinorSchemaChange](https://github.com/dbt-labs/dbt-core/blob/main/tests/unit/artifacts/test_base_resource.py).
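For illustration, a toy resource showing both compatible change types (a hedged sketch, not a real dbt artifact class):

```python
from dataclasses import dataclass, field
from typing import List, Optional


@dataclass
class ToyResource:
    name: str
    # non-breaking addition: a new field with a default, so old payloads
    # (which lack it) still deserialize
    tags: List[str] = field(default_factory=list)
    # deleting an optional field like this one later would also serialize
    # cleanly, but consumers reading it by key would break
    description: Optional[str] = None
```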
#### Updating [schemas.getdbt.com](https://schemas.getdbt.com)
Non-breaking changes to artifact schemas require an update to the corresponding jsonschemas published to [schemas.getdbt.com](https://schemas.getdbt.com), which are defined in https://github.com/dbt-labs/schemas.getdbt.com. To do so:
Note: this must be done AFTER the core pull request is merged; otherwise we may end up with unresolvable conflicts and schemas that are invalid prior to the base pull request merge. You may create the schemas.getdbt.com pull request before merging the base pull request, but do not merge it until afterward.
1. Create a PR in https://github.com/dbt-labs/schemas.getdbt.com which reflects the schema changes to the artifact. The schema can be updated in-place for non-breaking changes. Example PR: https://github.com/dbt-labs/schemas.getdbt.com/pull/39
2. Merge the https://github.com/dbt-labs/schemas.getdbt.com PR
Note: Although `jsonschema` validation using the schemas in [schemas.getdbt.com](https://schemas.getdbt.com) is not encouraged or formally supported, `jsonschema` validation should still continue to work once the schemas are updated because they are forward-compatible and can therefore be used to validate previous minor versions of the schema.
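For example, a minimal validation sketch under those caveats; the URL pattern and the `v12` version are assumptions to adjust for the artifact and version you produce:

```python
import json
import urllib.request

import jsonschema  # pip install jsonschema

schema_url = "https://schemas.getdbt.com/dbt/manifest/v12.json"  # assumed pattern
with urllib.request.urlopen(schema_url) as resp:
    schema = json.load(resp)

with open("target/manifest.json") as f:
    manifest = json.load(f)

jsonschema.validate(instance=manifest, schema=schema)  # raises ValidationError on mismatch
```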
### Breaking changes
A breaking change is anything that:
* Deletes a required field
* Changes the name or type of an existing field
* Removes the default value of an existing field
These should be avoided wherever possible. When necessary, multiple breaking changes should be bundled together to minimize disruption across the ecosystem of tools that leverage dbt metadata.
When it comes time to make breaking changes, a new versioned artifact should be created as follows:
1. Create a new version directory and file that defines the new artifact schema under `dbt/artifacts/schemas/<artifact>/v<next-artifact-version>/<artifact>.py`
2. If any resources are having breaking changes introduced, create a new resource class that defines the new resource schema under `dbt/artifacts/resources/v<next-resource-version>/<resource>.py`
3. Implement upgrade paths on the new versioned artifact class so it can be constructed given a dictionary representation of any previous version of the same artifact
* TODO: link example once available
4. Implement downgrade paths on all previous versions of the artifact class so they can still be constructed given a dictionary representation of the new artifact schema
* TODO: link example once available
5. Update the 'latest' aliases to point to the new version of the artifact and/or resource:
* Artifact: `dbt/artifacts/schemas/<artifact>/__init__.py `
* Resource: `dbt/artifacts/resources/__init__.py `
Downstream consumers (e.g. `dbt-core`) importing from the latest alias are susceptible to breaking changes. Ideally, any incompatibilities should be caught by static type checking in those systems. However, it is always possible for consumers to pin imports to previous versions via `dbt.artifacts.schemas.<artifact>.v<prev-version>`.
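For example (a sketch; the `v12` module name is illustrative):

```python
# tracks the latest schema: concise, but exposed to breaking changes
from dbt.artifacts.schemas.manifest import WritableManifest

# pinned to a specific major version: insulated from future breaking changes
from dbt.artifacts.schemas.manifest.v12 import WritableManifest as WritableManifestV12
```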


@@ -1 +0,0 @@
from dbt.artifacts.exceptions.schemas import IncompatibleSchemaError


@@ -1,31 +0,0 @@
from typing import Optional
from dbt_common.exceptions import DbtRuntimeError
class IncompatibleSchemaError(DbtRuntimeError):
def __init__(self, expected: str, found: Optional[str] = None) -> None:
self.expected = expected
self.found = found
self.filename = "input file"
super().__init__(msg=self.get_message())
def add_filename(self, filename: str):
self.filename = filename
self.msg = self.get_message()
def get_message(self) -> str:
found_str = "nothing"
if self.found is not None:
found_str = f'"{self.found}"'
msg = (
f'Expected a schema version of "{self.expected}" in '
f"{self.filename}, but found {found_str}. Are you running with a "
f"different version of dbt?"
)
return msg
CODE = 10014
MESSAGE = "Incompatible Schema"


@@ -1,97 +0,0 @@
from dbt.artifacts.resources.base import BaseResource, Docs, FileHash, GraphResource
from dbt.artifacts.resources.v1.analysis import Analysis
# alias to latest resource definitions
from dbt.artifacts.resources.v1.components import (
ColumnInfo,
CompiledResource,
Contract,
DeferRelation,
DependsOn,
FreshnessThreshold,
HasRelationMetadata,
InjectedCTE,
NodeVersion,
ParsedResource,
ParsedResourceMandatory,
Quoting,
RefArgs,
Time,
)
from dbt.artifacts.resources.v1.config import (
Hook,
NodeAndTestConfig,
NodeConfig,
TestConfig,
)
from dbt.artifacts.resources.v1.documentation import Documentation
from dbt.artifacts.resources.v1.exposure import (
Exposure,
ExposureConfig,
ExposureType,
MaturityType,
)
from dbt.artifacts.resources.v1.generic_test import GenericTest, TestMetadata
from dbt.artifacts.resources.v1.group import Group
from dbt.artifacts.resources.v1.hook import HookNode
from dbt.artifacts.resources.v1.macro import Macro, MacroArgument, MacroDependsOn
from dbt.artifacts.resources.v1.metric import (
ConstantPropertyInput,
ConversionTypeParams,
CumulativeTypeParams,
Metric,
MetricConfig,
MetricInput,
MetricInputMeasure,
MetricTimeWindow,
MetricTypeParams,
)
from dbt.artifacts.resources.v1.model import Model, ModelConfig, TimeSpine
from dbt.artifacts.resources.v1.owner import Owner
from dbt.artifacts.resources.v1.saved_query import (
Export,
ExportConfig,
QueryParams,
SavedQuery,
SavedQueryConfig,
SavedQueryMandatory,
)
from dbt.artifacts.resources.v1.seed import Seed, SeedConfig
from dbt.artifacts.resources.v1.semantic_layer_components import (
FileSlice,
SourceFileMetadata,
WhereFilter,
WhereFilterIntersection,
)
from dbt.artifacts.resources.v1.semantic_model import (
Defaults,
Dimension,
DimensionTypeParams,
DimensionValidityParams,
Entity,
Measure,
MeasureAggregationParameters,
NodeRelation,
NonAdditiveDimension,
SemanticModel,
SemanticModelConfig,
)
from dbt.artifacts.resources.v1.singular_test import SingularTest
from dbt.artifacts.resources.v1.snapshot import Snapshot, SnapshotConfig
from dbt.artifacts.resources.v1.source_definition import (
ExternalPartition,
ExternalTable,
ParsedSourceMandatory,
SourceConfig,
SourceDefinition,
)
from dbt.artifacts.resources.v1.sql_operation import SqlOperation
from dbt.artifacts.resources.v1.unit_test_definition import (
UnitTestConfig,
UnitTestDefinition,
UnitTestFormat,
UnitTestInputFixture,
UnitTestNodeVersions,
UnitTestOutputFixture,
UnitTestOverrides,
)


@@ -1,67 +0,0 @@
import hashlib
from dataclasses import dataclass
from typing import List, Optional
from dbt.artifacts.resources.types import NodeType
from dbt_common.dataclass_schema import dbtClassMixin
@dataclass
class BaseResource(dbtClassMixin):
name: str
resource_type: NodeType
package_name: str
path: str
original_file_path: str
unique_id: str
@dataclass
class GraphResource(BaseResource):
fqn: List[str]
@dataclass
class FileHash(dbtClassMixin):
name: str # the hash type name
checksum: str # the hashlib.hash_type().hexdigest() of the file contents
@classmethod
def empty(cls):
return FileHash(name="none", checksum="")
@classmethod
def path(cls, path: str):
return FileHash(name="path", checksum=path)
def __eq__(self, other):
if not isinstance(other, FileHash):
return NotImplemented
if self.name == "none" or self.name != other.name:
return False
return self.checksum == other.checksum
    def compare(self, contents: str) -> bool:
        """Compare the given file contents with this hash."""
        if self.name == "none":
            return False
        # from_contents returns a FileHash, so compare hash objects rather than
        # the raw checksum string (FileHash.__eq__ rejects non-FileHash values)
        return self.from_contents(contents, name=self.name) == self
@classmethod
def from_contents(cls, contents: str, name="sha256") -> "FileHash":
"""Create a file hash from the given file contents. The hash is always
the utf-8 encoding of the contents given, because dbt only reads files
as utf-8.
"""
data = contents.encode("utf-8")
checksum = hashlib.new(name, data).hexdigest()
return cls(name=name, checksum=checksum)
@dataclass
class Docs(dbtClassMixin):
show: bool = True
node_color: Optional[str] = None
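A quick sketch of the `FileHash` round trip, with the class above in scope:

```python
fh = FileHash.from_contents("select 1 as id")
print(fh.name)                        # sha256
print(fh.compare("select 1 as id"))   # True
print(fh.compare("select 2 as id"))   # False
print(FileHash.empty() == fh)         # False: 'none' hashes never match
```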


@@ -1,77 +0,0 @@
from dbt_common.dataclass_schema import StrEnum
class AccessType(StrEnum):
Private = "private"
Protected = "protected"
Public = "public"
@classmethod
def is_valid(cls, item):
try:
cls(item)
except ValueError:
return False
return True
class NodeType(StrEnum):
Model = "model"
Analysis = "analysis"
Test = "test" # renamed to 'data_test'; preserved as 'test' here for back-compat
Snapshot = "snapshot"
Operation = "operation"
Seed = "seed"
# TODO: rm?
RPCCall = "rpc"
SqlOperation = "sql_operation"
Documentation = "doc"
Source = "source"
Macro = "macro"
Exposure = "exposure"
Metric = "metric"
Group = "group"
SavedQuery = "saved_query"
SemanticModel = "semantic_model"
Unit = "unit_test"
Fixture = "fixture"
def pluralize(self) -> str:
if self is self.Analysis:
return "analyses"
elif self is self.SavedQuery:
return "saved_queries"
elif self is self.Test:
return "data_tests"
return f"{self}s"
class RunHookType(StrEnum):
Start = "on-run-start"
End = "on-run-end"
class ModelLanguage(StrEnum):
python = "python"
sql = "sql"
class ModelHookType(StrEnum):
PreHook = "pre-hook"
PostHook = "post-hook"
class TimePeriod(StrEnum):
minute = "minute"
hour = "hour"
day = "day"
def plural(self) -> str:
return str(self) + "s"
class BatchSize(StrEnum):
hour = "hour"
day = "day"
month = "month"
year = "year"


@@ -1,10 +0,0 @@
from dataclasses import dataclass
from typing import Literal
from dbt.artifacts.resources.types import NodeType
from dbt.artifacts.resources.v1.components import CompiledResource
@dataclass
class Analysis(CompiledResource):
resource_type: Literal[NodeType.Analysis]


@@ -1,240 +0,0 @@
import time
from dataclasses import dataclass, field
from datetime import timedelta
from typing import Any, Dict, List, Optional, Union
from dbt.artifacts.resources.base import Docs, FileHash, GraphResource
from dbt.artifacts.resources.types import NodeType, TimePeriod
from dbt.artifacts.resources.v1.config import NodeConfig
from dbt_common.contracts.config.properties import AdditionalPropertiesMixin
from dbt_common.contracts.constraints import ColumnLevelConstraint
from dbt_common.contracts.util import Mergeable
from dbt_common.dataclass_schema import ExtensibleDbtClassMixin, dbtClassMixin
from dbt_semantic_interfaces.type_enums import TimeGranularity
NodeVersion = Union[str, float]
@dataclass
class MacroDependsOn(dbtClassMixin):
macros: List[str] = field(default_factory=list)
# 'in' on lists is O(n) so this is O(n^2) for # of macros
def add_macro(self, value: str):
if value not in self.macros:
self.macros.append(value)
@dataclass
class DependsOn(MacroDependsOn):
nodes: List[str] = field(default_factory=list)
def add_node(self, value: str):
if value not in self.nodes:
self.nodes.append(value)
@dataclass
class RefArgs(dbtClassMixin):
name: str
package: Optional[str] = None
version: Optional[NodeVersion] = None
@property
def positional_args(self) -> List[str]:
if self.package:
return [self.package, self.name]
else:
return [self.name]
@property
def keyword_args(self) -> Dict[str, Optional[NodeVersion]]:
if self.version:
return {"version": self.version}
else:
return {}
@dataclass
class ColumnInfo(AdditionalPropertiesMixin, ExtensibleDbtClassMixin):
"""Used in all ManifestNodes and SourceDefinition"""
name: str
description: str = ""
meta: Dict[str, Any] = field(default_factory=dict)
data_type: Optional[str] = None
constraints: List[ColumnLevelConstraint] = field(default_factory=list)
quote: Optional[bool] = None
tags: List[str] = field(default_factory=list)
_extra: Dict[str, Any] = field(default_factory=dict)
granularity: Optional[TimeGranularity] = None
@dataclass
class InjectedCTE(dbtClassMixin):
"""Used in CompiledNodes as part of ephemeral model processing"""
id: str
sql: str
@dataclass
class Contract(dbtClassMixin):
enforced: bool = False
alias_types: bool = True
checksum: Optional[str] = None
@dataclass
class Quoting(dbtClassMixin, Mergeable):
database: Optional[bool] = None
schema: Optional[bool] = None
identifier: Optional[bool] = None
column: Optional[bool] = None
@dataclass
class Time(dbtClassMixin, Mergeable):
count: Optional[int] = None
period: Optional[TimePeriod] = None
def exceeded(self, actual_age: float) -> bool:
if self.period is None or self.count is None:
return False
kwargs: Dict[str, int] = {self.period.plural(): self.count}
difference = timedelta(**kwargs).total_seconds()
return actual_age > difference
def __bool__(self):
return self.count is not None and self.period is not None
@dataclass
class FreshnessThreshold(dbtClassMixin, Mergeable):
warn_after: Optional[Time] = field(default_factory=Time)
error_after: Optional[Time] = field(default_factory=Time)
filter: Optional[str] = None
def status(self, age: float) -> "dbt.artifacts.schemas.results.FreshnessStatus": # type: ignore # noqa F821
from dbt.artifacts.schemas.results import FreshnessStatus
if self.error_after and self.error_after.exceeded(age):
return FreshnessStatus.Error
elif self.warn_after and self.warn_after.exceeded(age):
return FreshnessStatus.Warn
else:
return FreshnessStatus.Pass
def __bool__(self):
return bool(self.warn_after) or bool(self.error_after)
@dataclass
class HasRelationMetadata(dbtClassMixin):
database: Optional[str]
schema: str
# Can't set database to None like it ought to be
# because it messes up the subclasses and default parameters
# so hack it here
@classmethod
def __pre_deserialize__(cls, data):
data = super().__pre_deserialize__(data)
if "database" not in data:
data["database"] = None
return data
@property
def quoting_dict(self) -> Dict[str, bool]:
if hasattr(self, "quoting"):
return self.quoting.to_dict(omit_none=True)
else:
return {}
@dataclass
class DeferRelation(HasRelationMetadata):
alias: str
relation_name: Optional[str]
# The rest of these fields match RelationConfig protocol exactly
resource_type: NodeType
name: str
description: str
compiled_code: Optional[str]
meta: Dict[str, Any]
tags: List[str]
config: Optional[NodeConfig]
@property
def identifier(self):
return self.alias
@dataclass
class ParsedResourceMandatory(GraphResource, HasRelationMetadata):
alias: str
checksum: FileHash
config: NodeConfig = field(default_factory=NodeConfig)
@property
def identifier(self):
return self.alias
@dataclass
class ParsedResource(ParsedResourceMandatory):
tags: List[str] = field(default_factory=list)
description: str = field(default="")
columns: Dict[str, ColumnInfo] = field(default_factory=dict)
meta: Dict[str, Any] = field(default_factory=dict)
group: Optional[str] = None
docs: Docs = field(default_factory=Docs)
patch_path: Optional[str] = None
build_path: Optional[str] = None
unrendered_config: Dict[str, Any] = field(default_factory=dict)
created_at: float = field(default_factory=lambda: time.time())
config_call_dict: Dict[str, Any] = field(default_factory=dict)
unrendered_config_call_dict: Dict[str, Any] = field(default_factory=dict)
relation_name: Optional[str] = None
raw_code: str = ""
def __post_serialize__(self, dct: Dict, context: Optional[Dict] = None):
dct = super().__post_serialize__(dct, context)
if context and context.get("artifact") and "config_call_dict" in dct:
del dct["config_call_dict"]
if context and context.get("artifact") and "unrendered_config_call_dict" in dct:
del dct["unrendered_config_call_dict"]
return dct
@dataclass
class CompiledResource(ParsedResource):
"""Contains attributes necessary for SQL files and nodes with refs, sources, etc,
so all ManifestNodes except SeedNode."""
language: str = "sql"
refs: List[RefArgs] = field(default_factory=list)
sources: List[List[str]] = field(default_factory=list)
metrics: List[List[str]] = field(default_factory=list)
depends_on: DependsOn = field(default_factory=DependsOn)
compiled_path: Optional[str] = None
compiled: bool = False
compiled_code: Optional[str] = None
extra_ctes_injected: bool = False
extra_ctes: List[InjectedCTE] = field(default_factory=list)
_pre_injected_sql: Optional[str] = None
contract: Contract = field(default_factory=Contract)
def __post_serialize__(self, dct: Dict, context: Optional[Dict] = None):
dct = super().__post_serialize__(dct, context)
if "_pre_injected_sql" in dct:
del dct["_pre_injected_sql"]
# Remove compiled attributes
if "compiled" in dct and dct["compiled"] is False:
del dct["compiled"]
del dct["extra_ctes_injected"]
del dct["extra_ctes"]
# "omit_none" means these might not be in the dictionary
if "compiled_code" in dct:
del dct["compiled_code"]
return dct


@@ -1,265 +0,0 @@
import re
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
from mashumaro.jsonschema.annotations import Pattern
from typing_extensions import Annotated
from dbt import hooks
from dbt.artifacts.resources.base import Docs
from dbt.artifacts.resources.types import ModelHookType
from dbt.artifacts.utils.validation import validate_color
from dbt_common.contracts.config.base import BaseConfig, CompareBehavior, MergeBehavior
from dbt_common.contracts.config.materialization import OnConfigurationChangeOption
from dbt_common.contracts.config.metadata import Metadata, ShowBehavior
from dbt_common.dataclass_schema import ValidationError, dbtClassMixin
def list_str() -> List[str]:
return []
class Severity(str):
pass
def metas(*metas: Metadata) -> Dict[str, Any]:
existing: Dict[str, Any] = {}
for m in metas:
existing = m.meta(existing)
return existing
@dataclass
class ContractConfig(dbtClassMixin):
enforced: bool = False
alias_types: bool = True
@dataclass
class Hook(dbtClassMixin):
sql: str
transaction: bool = True
index: Optional[int] = None
@dataclass
class NodeAndTestConfig(BaseConfig):
enabled: bool = True
# these fields are included in serialized output, but are not part of
# config comparison (they are part of database_representation)
alias: Optional[str] = field(
default=None,
metadata=CompareBehavior.Exclude.meta(),
)
schema: Optional[str] = field(
default=None,
metadata=CompareBehavior.Exclude.meta(),
)
database: Optional[str] = field(
default=None,
metadata=CompareBehavior.Exclude.meta(),
)
tags: Union[List[str], str] = field(
default_factory=list_str,
metadata=metas(ShowBehavior.Hide, MergeBehavior.Append, CompareBehavior.Exclude),
)
meta: Dict[str, Any] = field(
default_factory=dict,
metadata=MergeBehavior.Update.meta(),
)
group: Optional[str] = field(
default=None,
metadata=CompareBehavior.Exclude.meta(),
)
@dataclass
class NodeConfig(NodeAndTestConfig):
# Note: if any new fields are added with MergeBehavior, also update the
# 'mergebehavior' dictionary
materialized: str = "view"
incremental_strategy: Optional[str] = None
batch_size: Any = None
lookback: Any = 1
begin: Any = None
persist_docs: Dict[str, Any] = field(default_factory=dict)
post_hook: List[Hook] = field(
default_factory=list,
metadata={"merge": MergeBehavior.Append, "alias": "post-hook"},
)
pre_hook: List[Hook] = field(
default_factory=list,
metadata={"merge": MergeBehavior.Append, "alias": "pre-hook"},
)
quoting: Dict[str, Any] = field(
default_factory=dict,
metadata=MergeBehavior.Update.meta(),
)
# This is actually only used by seeds. Should it be available to others?
# That would be a breaking change!
column_types: Dict[str, Any] = field(
default_factory=dict,
metadata=MergeBehavior.Update.meta(),
)
full_refresh: Optional[bool] = None
# 'unique_key' doesn't use 'Optional' because typing.get_type_hints was
# sometimes getting the Union order wrong, causing serialization failures.
unique_key: Union[str, List[str], None] = None
on_schema_change: Optional[str] = "ignore"
on_configuration_change: OnConfigurationChangeOption = field(
default_factory=OnConfigurationChangeOption.default
)
grants: Dict[str, Any] = field(
default_factory=dict, metadata=MergeBehavior.DictKeyAppend.meta()
)
packages: List[str] = field(
default_factory=list,
metadata=MergeBehavior.Append.meta(),
)
docs: Docs = field(
default_factory=Docs,
metadata=MergeBehavior.Update.meta(),
)
contract: ContractConfig = field(
default_factory=ContractConfig,
metadata=MergeBehavior.Update.meta(),
)
event_time: Any = None
concurrent_batches: Any = None
def __post_init__(self):
# we validate that node_color has a suitable value to prevent dbt-docs from crashing
if self.docs.node_color:
node_color = self.docs.node_color
if not validate_color(node_color):
raise ValidationError(
f"Invalid color name for docs.node_color: {node_color}. "
"It is neither a valid HTML color name nor a valid HEX code."
)
if (
self.contract.enforced
and self.materialized == "incremental"
and self.on_schema_change not in ("append_new_columns", "fail")
):
raise ValidationError(
f"Invalid value for on_schema_change: {self.on_schema_change}. Models "
"materialized as incremental with contracts enabled must set "
"on_schema_change to 'append_new_columns' or 'fail'"
)
@classmethod
def __pre_deserialize__(cls, data):
data = super().__pre_deserialize__(data)
for key in ModelHookType:
if key in data:
data[key] = [hooks.get_hook_dict(h) for h in data[key]]
return data
SEVERITY_PATTERN = r"^([Ww][Aa][Rr][Nn]|[Ee][Rr][Rr][Oo][Rr])$"
@dataclass
class TestConfig(NodeAndTestConfig):
__test__ = False
# this is repeated because of a different default
schema: Optional[str] = field(
default="dbt_test__audit",
metadata=CompareBehavior.Exclude.meta(),
)
materialized: str = "test"
# Annotated is used by mashumaro for jsonschema generation
severity: Annotated[Severity, Pattern(SEVERITY_PATTERN)] = Severity("ERROR")
store_failures: Optional[bool] = None
store_failures_as: Optional[str] = None
where: Optional[str] = None
limit: Optional[int] = None
fail_calc: str = "count(*)"
warn_if: str = "!= 0"
error_if: str = "!= 0"
def __post_init__(self):
"""
The presence of a setting for `store_failures_as` overrides any existing setting for `store_failures`,
regardless of level of granularity. If `store_failures_as` is not set, then `store_failures` takes effect.
At the time of implementation, `store_failures = True` would always create a table; the user could not
configure this. Hence, if `store_failures = True` and `store_failures_as` is not specified, then it
should be set to "table" to mimic the existing functionality.
A side effect of this overriding functionality is that `store_failures_as="view"` at the project
level cannot be turned off at the model level without setting both `store_failures_as` and
`store_failures`. The former would cascade down and override `store_failures=False`. The proposal
is to include "ephemeral" as a value for `store_failures_as`, which effectively sets
`store_failures=False`.
The exception handling for this is tricky. If we raise an exception here, the entire run fails at
parse time. We would rather well-formed models run successfully, leaving only exceptions to be rerun
if necessary. Hence, the exception needs to be raised in the test materialization. In order to do so,
we need to make sure that we go down the `store_failures = True` route with the invalid setting for
        `store_failures_as`. This is why the `.get()` below defaults to `True`, instead of the plain
        dictionary lookup used in the `if` block. Refer to the test materialization for the
        exception that is raised as a result of an invalid value.
The intention of this block is to behave as if `store_failures_as` is the only setting,
but still allow for backwards compatibility for `store_failures`.
See https://github.com/dbt-labs/dbt-core/issues/6914 for more information.
"""
# if `store_failures_as` is not set, it gets set by `store_failures`
# the settings below mimic existing behavior prior to `store_failures_as`
get_store_failures_as_map = {
True: "table",
False: "ephemeral",
None: None,
}
# if `store_failures_as` is set, it dictates what `store_failures` gets set to
# the settings below overrides whatever `store_failures` is set to by the user
get_store_failures_map = {
"ephemeral": False,
"table": True,
"view": True,
}
if self.store_failures_as is None:
self.store_failures_as = get_store_failures_as_map[self.store_failures]
else:
self.store_failures = get_store_failures_map.get(self.store_failures_as, True)
@classmethod
def same_contents(cls, unrendered: Dict[str, Any], other: Dict[str, Any]) -> bool:
"""This is like __eq__, except it explicitly checks certain fields."""
modifiers = [
"severity",
"where",
"limit",
"fail_calc",
"warn_if",
"error_if",
"store_failures",
"store_failures_as",
]
seen = set()
for _, target_name in cls._get_fields():
key = target_name
seen.add(key)
if key in modifiers:
if not cls.compare_key(unrendered, other, key):
return False
return True
@classmethod
def validate(cls, data):
if data.get("severity") and not re.match(SEVERITY_PATTERN, data.get("severity")):
raise ValidationError(
f"Severity must be either 'warn' or 'error'. Got '{data.get('severity')}'"
)
super().validate(data)
if data.get("materialized") and data.get("materialized") != "test":
raise ValidationError("A test must have a materialized value of 'test'")


@@ -1,11 +0,0 @@
from dataclasses import dataclass
from typing import Literal
from dbt.artifacts.resources.base import BaseResource
from dbt.artifacts.resources.types import NodeType
@dataclass
class Documentation(BaseResource):
resource_type: Literal[NodeType.Documentation]
block_contents: str


@@ -1,49 +0,0 @@
import time
from dataclasses import dataclass, field
from typing import Any, Dict, List, Literal, Optional
from dbt.artifacts.resources.base import GraphResource
from dbt.artifacts.resources.types import NodeType
from dbt.artifacts.resources.v1.components import DependsOn, RefArgs
from dbt.artifacts.resources.v1.owner import Owner
from dbt_common.contracts.config.base import BaseConfig
from dbt_common.dataclass_schema import StrEnum
class ExposureType(StrEnum):
Dashboard = "dashboard"
Notebook = "notebook"
Analysis = "analysis"
ML = "ml"
Application = "application"
class MaturityType(StrEnum):
Low = "low"
Medium = "medium"
High = "high"
@dataclass
class ExposureConfig(BaseConfig):
enabled: bool = True
@dataclass
class Exposure(GraphResource):
type: ExposureType
owner: Owner
resource_type: Literal[NodeType.Exposure]
description: str = ""
label: Optional[str] = None
maturity: Optional[MaturityType] = None
meta: Dict[str, Any] = field(default_factory=dict)
tags: List[str] = field(default_factory=list)
config: ExposureConfig = field(default_factory=ExposureConfig)
unrendered_config: Dict[str, Any] = field(default_factory=dict)
url: Optional[str] = None
depends_on: DependsOn = field(default_factory=DependsOn)
refs: List[RefArgs] = field(default_factory=list)
sources: List[List[str]] = field(default_factory=list)
metrics: List[List[str]] = field(default_factory=list)
created_at: float = field(default_factory=lambda: time.time())


@@ -1,31 +0,0 @@
from dataclasses import dataclass, field
from typing import Any, Dict, Literal, Optional
from dbt.artifacts.resources.types import NodeType
from dbt.artifacts.resources.v1.components import CompiledResource
from dbt.artifacts.resources.v1.config import TestConfig
from dbt_common.dataclass_schema import dbtClassMixin
@dataclass
class TestMetadata(dbtClassMixin):
__test__ = False
name: str = "test" # dummy default to allow default in GenericTestNode. Should always be set.
# kwargs are the args that are left in the test builder after
# removing configs. They are set from the test builder when
# the test node is created.
kwargs: Dict[str, Any] = field(default_factory=dict)
namespace: Optional[str] = None
@dataclass
class GenericTest(CompiledResource):
resource_type: Literal[NodeType.Test]
column_name: Optional[str] = None
file_key_name: Optional[str] = None
# Was not able to make mypy happy and keep the code working. We need to
# refactor the various configs.
config: TestConfig = field(default_factory=TestConfig) # type: ignore
attached_node: Optional[str] = None
test_metadata: TestMetadata = field(default_factory=TestMetadata)


@@ -1,13 +0,0 @@
from dataclasses import dataclass
from typing import Literal
from dbt.artifacts.resources.base import BaseResource
from dbt.artifacts.resources.types import NodeType
from dbt.artifacts.resources.v1.owner import Owner
@dataclass
class Group(BaseResource):
name: str
owner: Owner
resource_type: Literal[NodeType.Group]


@@ -1,11 +0,0 @@
from dataclasses import dataclass
from typing import Literal, Optional
from dbt.artifacts.resources.types import NodeType
from dbt.artifacts.resources.v1.components import CompiledResource
@dataclass
class HookNode(CompiledResource):
resource_type: Literal[NodeType.Operation]
index: Optional[int] = None


@@ -1,29 +0,0 @@
import time
from dataclasses import dataclass, field
from typing import Any, Dict, List, Literal, Optional
from dbt.artifacts.resources.base import BaseResource, Docs
from dbt.artifacts.resources.types import ModelLanguage, NodeType
from dbt.artifacts.resources.v1.components import MacroDependsOn
from dbt_common.dataclass_schema import dbtClassMixin
@dataclass
class MacroArgument(dbtClassMixin):
name: str
type: Optional[str] = None
description: str = ""
@dataclass
class Macro(BaseResource):
macro_sql: str
resource_type: Literal[NodeType.Macro]
depends_on: MacroDependsOn = field(default_factory=MacroDependsOn)
description: str = ""
meta: Dict[str, Any] = field(default_factory=dict)
docs: Docs = field(default_factory=Docs)
patch_path: Optional[str] = None
arguments: List[MacroArgument] = field(default_factory=list)
created_at: float = field(default_factory=lambda: time.time())
supported_languages: Optional[List[ModelLanguage]] = None

Some files were not shown because too many files have changed in this diff.