Compare commits


7 Commits

| Author | SHA1 | Message | Date |
| --- | --- | --- | --- |
| Nathaniel May | 3144df1fa6 | point to rust module | 2021-02-09 15:05:25 -05:00 |
| Nathaniel May | 992dc5ce5c | use relative import in tracking init | 2021-02-09 14:00:09 -05:00 |
| Nathaniel May | 243c2cb0ed | builds with pip. library functions are not actually included though. | 2021-02-09 10:49:25 -05:00 |
| Nathaniel May | c888fe52d6 | expose functions in a python module | 2021-02-08 12:15:08 -05:00 |
| Nathaniel May | 32ff2fbfd4 | name project | 2021-02-08 12:14:52 -05:00 |
| Nathaniel May | 7599b9bca1 | add special linker rules for mac | 2021-02-08 12:14:27 -05:00 |
| Nathaniel May | 0b1d93a18b | expose tracking string literals in pyo3 library | 2021-02-05 11:39:21 -05:00 |
4592 changed files with 9696 additions and 72361 deletions


@@ -1,27 +1,23 @@
[bumpversion]
current_version = 0.21.0b1
current_version = 0.19.0
parse = (?P<major>\d+)
\.(?P<minor>\d+)
\.(?P<patch>\d+)
((?P<prekind>a|b|rc)
(?P<pre>\d+) # pre-release version num
)?
((?P<prerelease>[a-z]+)(?P<num>\d+))?
serialize =
{major}.{minor}.{patch}{prekind}{pre}
{major}.{minor}.{patch}{prerelease}{num}
{major}.{minor}.{patch}
commit = False
tag = False
[bumpversion:part:prekind]
[bumpversion:part:prerelease]
first_value = a
optional_value = final
values =
a
b
rc
final
[bumpversion:part:pre]
[bumpversion:part:num]
first_value = 1
[bumpversion:file:setup.py]
@@ -30,8 +26,6 @@ first_value = 1
[bumpversion:file:core/dbt/version.py]
[bumpversion:file:core/scripts/create_adapter_plugins.py]
[bumpversion:file:plugins/postgres/setup.py]
[bumpversion:file:plugins/redshift/setup.py]
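The hunk above shows two variants of the version parse pattern (one using `prekind`/`pre` groups, the other `prerelease`/`num`). As an illustrative check only, and not part of the bumpversion tooling itself, the sketch below joins the `prekind`/`pre` variant into a single Python regex and applies it to the two `current_version` values that appear in the hunk.

```python
import re

# Illustrative only: the prekind/pre variant of the parse pattern above,
# joined into a single regex for demonstration purposes.
VERSION_RE = re.compile(
    r"(?P<major>\d+)"
    r"\.(?P<minor>\d+)"
    r"\.(?P<patch>\d+)"
    r"((?P<prekind>a|b|rc)(?P<pre>\d+))?"  # optional pre-release suffix
)

for version in ("0.21.0b1", "0.19.0"):
    match = VERSION_RE.fullmatch(version)
    print(version, match.groupdict() if match else "no match")
# 0.21.0b1 {'major': '0', 'minor': '21', 'patch': '0', 'prekind': 'b', 'pre': '1'}
# 0.19.0   {'major': '0', 'minor': '19', 'patch': '0', 'prekind': None, 'pre': None}
```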

.circleci/config.yml (new file, 218 lines)

@@ -0,0 +1,218 @@
version: 2.1
jobs:
unit:
docker: &test_only
- image: fishtownanalytics/test-container:9
environment:
DBT_INVOCATION_ENV: circle
steps:
- checkout
- run: tox -e flake8,mypy,unit-py36,unit-py38
build-wheels:
docker: *test_only
steps:
- checkout
- run:
name: Build wheels
command: |
python3.8 -m venv "${PYTHON_ENV}"
export PYTHON_BIN="${PYTHON_ENV}/bin/python"
$PYTHON_BIN -m pip install -U pip setuptools
$PYTHON_BIN -m pip install -r requirements.txt
$PYTHON_BIN -m pip install -r dev_requirements.txt
/bin/bash ./scripts/build-wheels.sh
$PYTHON_BIN ./scripts/collect-dbt-contexts.py > ./dist/context_metadata.json
$PYTHON_BIN ./scripts/collect-artifact-schema.py > ./dist/artifact_schemas.json
environment:
PYTHON_ENV: /home/tox/build_venv/
- store_artifacts:
path: ./dist
destination: dist
integration-postgres-py36:
docker: &test_and_postgres
- image: fishtownanalytics/test-container:9
environment:
DBT_INVOCATION_ENV: circle
- image: postgres
name: database
environment: &pgenv
POSTGRES_USER: "root"
POSTGRES_PASSWORD: "password"
POSTGRES_DB: "dbt"
steps:
- checkout
- run: &setupdb
name: Setup postgres
command: bash test/setup_db.sh
environment:
PGHOST: database
PGUSER: root
PGPASSWORD: password
PGDATABASE: postgres
- run:
name: Run tests
command: tox -e integration-postgres-py36
- store_artifacts:
path: ./logs
integration-snowflake-py36:
docker: *test_only
steps:
- checkout
- run:
name: Run tests
command: tox -e integration-snowflake-py36
no_output_timeout: 1h
- store_artifacts:
path: ./logs
integration-redshift-py36:
docker: *test_only
steps:
- checkout
- run:
name: Run tests
command: tox -e integration-redshift-py36
- store_artifacts:
path: ./logs
integration-bigquery-py36:
docker: *test_only
steps:
- checkout
- run:
name: Run tests
command: tox -e integration-bigquery-py36
- store_artifacts:
path: ./logs
integration-postgres-py38:
docker: *test_and_postgres
steps:
- checkout
- run: *setupdb
- run:
name: Run tests
command: tox -e integration-postgres-py38
- store_artifacts:
path: ./logs
integration-snowflake-py38:
docker: *test_only
steps:
- checkout
- run:
name: Run tests
command: tox -e integration-snowflake-py38
no_output_timeout: 1h
- store_artifacts:
path: ./logs
integration-redshift-py38:
docker: *test_only
steps:
- checkout
- run:
name: Run tests
command: tox -e integration-redshift-py38
- store_artifacts:
path: ./logs
integration-bigquery-py38:
docker: *test_only
steps:
- checkout
- run:
name: Run tests
command: tox -e integration-bigquery-py38
- store_artifacts:
path: ./logs
integration-postgres-py39:
docker: *test_and_postgres
steps:
- checkout
- run: *setupdb
- run:
name: Run tests
command: tox -e integration-postgres-py39
- store_artifacts:
path: ./logs
integration-snowflake-py39:
docker: *test_only
steps:
- checkout
- run:
name: Run tests
command: tox -e integration-snowflake-py39
no_output_timeout: 1h
- store_artifacts:
path: ./logs
integration-redshift-py39:
docker: *test_only
steps:
- checkout
- run:
name: Run tests
command: tox -e integration-redshift-py39
- store_artifacts:
path: ./logs
integration-bigquery-py39:
docker: *test_only
steps:
- checkout
- run:
name: Run tests
command: tox -e integration-bigquery-py39
- store_artifacts:
path: ./logs
workflows:
version: 2
test-everything:
jobs:
- unit
- integration-postgres-py36:
requires:
- unit
- integration-redshift-py36:
requires:
- integration-postgres-py36
- integration-bigquery-py36:
requires:
- integration-postgres-py36
- integration-snowflake-py36:
requires:
- integration-postgres-py36
- integration-postgres-py38:
requires:
- unit
- integration-redshift-py38:
requires:
- integration-postgres-py38
- integration-bigquery-py38:
requires:
- integration-postgres-py38
- integration-snowflake-py38:
requires:
- integration-postgres-py38
- integration-postgres-py39:
requires:
- unit
- integration-redshift-py39:
requires:
- integration-postgres-py39
- integration-bigquery-py39:
requires:
- integration-postgres-py39
# - integration-snowflake-py39:
# requires:
# - integration-postgres-py39
- build-wheels:
requires:
- unit
- integration-postgres-py36
- integration-redshift-py36
- integration-bigquery-py36
- integration-snowflake-py36
- integration-postgres-py38
- integration-redshift-py38
- integration-bigquery-py38
- integration-snowflake-py38
- integration-postgres-py39
- integration-redshift-py39
- integration-bigquery-py39
# - integration-snowflake-py39


@@ -1,27 +0,0 @@
---
name: Beta minor version release
about: Creates a tracking checklist of items for a Beta minor version release
title: "[Tracking] v#.##.#B# release "
labels: 'release'
assignees: ''
---
### Release Core
- [ ] [Engineering] Follow [dbt-release workflow](https://www.notion.so/dbtlabs/Releasing-b97c5ea9a02949e79e81db3566bbc8ef#03ff37da697d4d8ba63d24fae1bfa817)
- [ ] [Engineering] Verify new release branch is created in the repo
- [ ] [Product] Finalize migration guide (next.docs.getdbt.com)
### Release Cloud
- [ ] [Engineering] Create a platform issue to update dbt Cloud and verify it is completed. [Example issue](https://github.com/dbt-labs/dbt-cloud/issues/3481)
- [ ] [Engineering] Determine if schemas have changed. If so, generate new schemas and push to schemas.getdbt.com
### Announce
- [ ] [Product] Announce in dbt Slack
### Post-release
- [ ] [Engineering] [Bump plugin versions](https://www.notion.so/dbtlabs/Releasing-b97c5ea9a02949e79e81db3566bbc8ef#f01854e8da3641179fbcbe505bdf515c) (dbt-spark + dbt-presto), add compatibility as needed
- [ ] [Spark](https://github.com/dbt-labs/dbt-spark)
- [ ] [Presto](https://github.com/dbt-labs/dbt-presto)
- [ ] [Engineering] Create a platform issue to update dbt-spark versions to dbt Cloud. [Example issue](https://github.com/dbt-labs/dbt-cloud/issues/3481)
- [ ] [Engineering] Create an epic for the RC release


@@ -1,28 +0,0 @@
---
name: Final minor version release
about: Creates a tracking checklist of items for a final minor version release
title: "[Tracking] v#.##.# final release "
labels: 'release'
assignees: ''
---
### Release Core
- [ ] [Engineering] Verify all necessary changes exist on the release branch
- [ ] [Engineering] Follow [dbt-release workflow](https://www.notion.so/dbtlabs/Releasing-b97c5ea9a02949e79e81db3566bbc8ef#03ff37da697d4d8ba63d24fae1bfa817)
- [ ] [Product] Merge `next` into `current` for docs.getdbt.com
### Release Cloud
- [ ] [Engineering] Create a platform issue to update dbt Cloud and verify it is completed. [Example issue](https://github.com/dbt-labs/dbt-cloud/issues/3481)
- [ ] [Engineering] Determine if schemas have changed. If so, generate new schemas and push to schemas.getdbt.com
### Announce
- [ ] [Product] Update discourse
- [ ] [Product] Announce in dbt Slack
### Post-release
- [ ] [Engineering] [Bump plugin versions](https://www.notion.so/dbtlabs/Releasing-b97c5ea9a02949e79e81db3566bbc8ef#f01854e8da3641179fbcbe505bdf515c) (dbt-spark + dbt-presto), add compatibility as needed
- [ ] [Spark](https://github.com/dbt-labs/dbt-spark)
- [ ] [Presto](https://github.com/dbt-labs/dbt-presto)
- [ ] [Engineering] Create a platform issue to update dbt-spark versions to dbt Cloud. [Example issue](https://github.com/dbt-labs/dbt-cloud/issues/3481)
- [ ] [Product] Release new version of dbt-utils with new dbt version compatibility. If there are breaking changes requiring a minor version, plan upgrades of other packages that depend on dbt-utils.


@@ -1,29 +0,0 @@
---
name: RC minor version release
about: Creates a tracking checklist of items for an RC minor version release
title: "[Tracking] v#.##.#RC# release "
labels: 'release'
assignees: ''
---
### Release Core
- [ ] [Engineering] Verify all necessary changes exist on the release branch
- [ ] [Engineering] Follow [dbt-release workflow](https://www.notion.so/dbtlabs/Releasing-b97c5ea9a02949e79e81db3566bbc8ef#03ff37da697d4d8ba63d24fae1bfa817)
- [ ] [Product] Update migration guide (next.docs.getdbt.com)
### Release Cloud
- [ ] [Engineering] Create a platform issue to update dbt Cloud and verify it is completed. [Example issue](https://github.com/dbt-labs/dbt-cloud/issues/3481)
- [ ] [Engineering] Determine if schemas have changed. If so, generate new schemas and push to schemas.getdbt.com
### Announce
- [ ] [Product] Publish discourse
- [ ] [Product] Announce in dbt Slack
### Post-release
- [ ] [Engineering] [Bump plugin versions](https://www.notion.so/dbtlabs/Releasing-b97c5ea9a02949e79e81db3566bbc8ef#f01854e8da3641179fbcbe505bdf515c) (dbt-spark + dbt-presto), add compatibility as needed
- [ ] [Spark](https://github.com/dbt-labs/dbt-spark)
- [ ] [Presto](https://github.com/dbt-labs/dbt-presto)
- [ ] [Engineering] Create a platform issue to update dbt-spark versions to dbt Cloud. [Example issue](https://github.com/dbt-labs/dbt-cloud/issues/3481)
- [ ] [Product] Release new version of dbt-utils with new dbt version compatibility. If there are breaking changes requiring a minor version, plan upgrades of other packages that depend on dbt-utils.
- [ ] [Engineering] Create an epic for the final release


@@ -1,10 +0,0 @@
name: "Set up postgres (linux)"
description: "Set up postgres service on linux vm for dbt integration tests"
runs:
using: "composite"
steps:
- shell: bash
run: |
sudo systemctl start postgresql.service
pg_isready
sudo -u postgres bash ${{ github.action_path }}/setup_db.sh


@@ -1 +0,0 @@
../../../test/setup_db.sh


@@ -1,24 +0,0 @@
name: "Set up postgres (macos)"
description: "Set up postgres service on macos vm for dbt integration tests"
runs:
using: "composite"
steps:
- shell: bash
run: |
brew services start postgresql
echo "Check PostgreSQL service is running"
i=10
COMMAND='pg_isready'
while [ $i -gt -1 ]; do
if [ $i == 0 ]; then
echo "PostgreSQL service not ready, all attempts exhausted"
exit 1
fi
echo "Check PostgreSQL service status"
eval $COMMAND && break
echo "PostgreSQL service not ready, wait 10 more sec, attempts left: $i"
sleep 10
((i--))
done
createuser -s postgres
bash ${{ github.action_path }}/setup_db.sh


@@ -1 +0,0 @@
../../../test/setup_db.sh


@@ -1,12 +0,0 @@
name: "Set up postgres (windows)"
description: "Set up postgres service on windows vm for dbt integration tests"
runs:
using: "composite"
steps:
- shell: pwsh
run: |
$pgService = Get-Service -Name postgresql*
Set-Service -InputObject $pgService -Status running -StartupType automatic
Start-Process -FilePath "$env:PGBIN\pg_isready" -Wait -PassThru
$env:Path += ";$env:PGBIN"
bash ${{ github.action_path }}/setup_db.sh


@@ -1 +0,0 @@
../../../test/setup_db.sh


@@ -1,45 +0,0 @@
version: 2
updates:
# python dependencies
- package-ecosystem: "pip"
directory: "/"
schedule:
interval: "daily"
rebase-strategy: "disabled"
- package-ecosystem: "pip"
directory: "/core"
schedule:
interval: "daily"
rebase-strategy: "disabled"
- package-ecosystem: "pip"
directory: "/plugins/bigquery"
schedule:
interval: "daily"
rebase-strategy: "disabled"
- package-ecosystem: "pip"
directory: "/plugins/postgres"
schedule:
interval: "daily"
rebase-strategy: "disabled"
- package-ecosystem: "pip"
directory: "/plugins/redshift"
schedule:
interval: "daily"
rebase-strategy: "disabled"
- package-ecosystem: "pip"
directory: "/plugins/snowflake"
schedule:
interval: "daily"
rebase-strategy: "disabled"
# docker dependencies
- package-ecosystem: "docker"
directory: "/"
schedule:
interval: "weekly"
rebase-strategy: "disabled"
- package-ecosystem: "docker"
directory: "/docker"
schedule:
interval: "weekly"
rebase-strategy: "disabled"


@@ -9,13 +9,14 @@ resolves #
resolves #1234
-->
### Description
<!--- Describe the Pull Request here -->
### Checklist
- [ ] I have signed the [CLA](https://docs.getdbt.com/docs/contributor-license-agreements)
- [ ] I have run this code in development and it appears to resolve the stated issue
- [ ] This PR includes tests, or tests are not required/relevant for this PR
- [ ] I have updated the `CHANGELOG.md` and added information about my change to the "dbt next" section.
### Checklist
- [ ] I have signed the [CLA](https://docs.getdbt.com/docs/contributor-license-agreements)
- [ ] I have run this code in development and it appears to resolve the stated issue
- [ ] This PR includes tests, or tests are not required/relevant for this PR
- [ ] I have updated the `CHANGELOG.md` and added information about my change to the "dbt next" section.


@@ -1,95 +0,0 @@
module.exports = ({ context }) => {
const defaultPythonVersion = "3.8";
const supportedPythonVersions = ["3.6", "3.7", "3.8", "3.9"];
const supportedAdapters = ["snowflake", "postgres", "bigquery", "redshift"];
// if PR, generate matrix based on files changed and PR labels
if (context.eventName.includes("pull_request")) {
// `changes` is a list of adapter names that have related
// file changes in the PR
// ex: ['postgres', 'snowflake']
const changes = JSON.parse(process.env.CHANGES);
const labels = context.payload.pull_request.labels.map(({ name }) => name);
console.log("labels", labels);
console.log("changes", changes);
const testAllLabel = labels.includes("test all");
const include = [];
for (const adapter of supportedAdapters) {
if (
changes.includes(adapter) ||
testAllLabel ||
labels.includes(`test ${adapter}`)
) {
for (const pythonVersion of supportedPythonVersions) {
if (
pythonVersion === defaultPythonVersion ||
labels.includes(`test python${pythonVersion}`) ||
testAllLabel
) {
// always run tests on ubuntu by default
include.push({
os: "ubuntu-latest",
adapter,
"python-version": pythonVersion,
});
if (labels.includes("test windows") || testAllLabel) {
include.push({
os: "windows-latest",
adapter,
"python-version": pythonVersion,
});
}
if (labels.includes("test macos") || testAllLabel) {
include.push({
os: "macos-latest",
adapter,
"python-version": pythonVersion,
});
}
}
}
}
}
console.log("matrix", { include });
return {
include,
};
}
// if not PR, generate matrix of python version, adapter, and operating
// system to run integration tests on
const include = [];
// run for all adapters and python versions on ubuntu
for (const adapter of supportedAdapters) {
for (const pythonVersion of supportedPythonVersions) {
include.push({
os: 'ubuntu-latest',
adapter: adapter,
"python-version": pythonVersion,
});
}
}
// additionally include runs for all adapters, on macos and windows,
// but only for the default python version
for (const adapter of supportedAdapters) {
for (const operatingSystem of ["windows-latest", "macos-latest"]) {
include.push({
os: operatingSystem,
adapter: adapter,
"python-version": defaultPythonVersion,
});
}
}
console.log("matrix", { include });
return {
include,
};
};


@@ -1,266 +0,0 @@
# **what?**
# This workflow runs all integration tests for supported OS
# and python versions and core adapters. If triggered by PR,
# the workflow will only run tests for adapters related
# to code changes. Use the `test all` and `test ${adapter}`
# label to run all or additional tests. Use `ok to test`
# label to mark PRs from forked repositories that are safe
# to run integration tests for. Requires secrets to run
# against different warehouses.
# **why?**
# This checks the functionality of dbt from a user's perspective
# and attempts to catch functional regressions.
# **when?**
# This workflow will run on every push to a protected branch
# and when manually triggered. It will also run for all PRs, including
# PRs from forks. The workflow will be skipped until there is a label
# to mark the PR as safe to run.
name: Adapter Integration Tests
on:
# pushes to release branches
push:
branches:
- "main"
- "develop"
- "*.latest"
- "releases/*"
# all PRs, important to note that `pull_request_target` workflows
# will run in the context of the target branch of a PR
pull_request_target:
# manual trigger
workflow_dispatch:
# explicitly turn off permissions for `GITHUB_TOKEN`
permissions: read-all
# will cancel previous workflows triggered by the same event and for the same ref for PRs or same SHA otherwise
concurrency:
group: ${{ github.workflow }}-${{ github.event_name }}-${{ contains(github.event_name, 'pull_request') && github.event.pull_request.head.ref || github.sha }}
cancel-in-progress: true
# sets default shell to bash, for all operating systems
defaults:
run:
shell: bash
jobs:
# generate test metadata about what files changed and the testing matrix to use
test-metadata:
# run if not a PR from a forked repository or has a label to mark as safe to test
if: >-
github.event_name != 'pull_request_target' ||
github.event.pull_request.head.repo.full_name == github.repository ||
contains(github.event.pull_request.labels.*.name, 'ok to test')
runs-on: ubuntu-latest
outputs:
matrix: ${{ steps.generate-matrix.outputs.result }}
steps:
- name: Check out the repository (non-PR)
if: github.event_name != 'pull_request_target'
uses: actions/checkout@v2
with:
persist-credentials: false
- name: Check out the repository (PR)
if: github.event_name == 'pull_request_target'
uses: actions/checkout@v2
with:
persist-credentials: false
ref: ${{ github.event.pull_request.head.sha }}
- name: Check if relevant files changed
# https://github.com/marketplace/actions/paths-changes-filter
# For each filter, it sets output variable named by the filter to the text:
# 'true' - if any of changed files matches any of filter rules
# 'false' - if none of changed files matches any of filter rules
# also, returns:
# `changes` - JSON array with names of all filters matching any of the changed files
uses: dorny/paths-filter@v2
id: get-changes
with:
token: ${{ secrets.GITHUB_TOKEN }}
filters: |
postgres:
- 'core/**'
- 'plugins/postgres/**'
- 'dev-requirements.txt'
snowflake:
- 'core/**'
- 'plugins/snowflake/**'
bigquery:
- 'core/**'
- 'plugins/bigquery/**'
redshift:
- 'core/**'
- 'plugins/redshift/**'
- 'plugins/postgres/**'
- name: Generate integration test matrix
id: generate-matrix
uses: actions/github-script@v4
env:
CHANGES: ${{ steps.get-changes.outputs.changes }}
with:
script: |
const script = require('./.github/scripts/integration-test-matrix.js')
const matrix = script({ context })
console.log(matrix)
return matrix
test:
name: ${{ matrix.adapter }} / python ${{ matrix.python-version }} / ${{ matrix.os }}
# run if not a PR from a forked repository or has a label to mark as safe to test
# also checks that the matrix generated is not empty
if: >-
needs.test-metadata.outputs.matrix &&
fromJSON( needs.test-metadata.outputs.matrix ).include[0] &&
(
github.event_name != 'pull_request_target' ||
github.event.pull_request.head.repo.full_name == github.repository ||
contains(github.event.pull_request.labels.*.name, 'ok to test')
)
runs-on: ${{ matrix.os }}
needs: test-metadata
strategy:
fail-fast: false
matrix: ${{ fromJSON(needs.test-metadata.outputs.matrix) }}
env:
TOXENV: integration-${{ matrix.adapter }}
PYTEST_ADDOPTS: "-v --color=yes -n4 --csv integration_results.csv"
DBT_INVOCATION_ENV: github-actions
steps:
- name: Check out the repository
if: github.event_name != 'pull_request_target'
uses: actions/checkout@v2
with:
persist-credentials: false
# explicity checkout the branch for the PR,
# this is necessary for the `pull_request_target` event
- name: Check out the repository (PR)
if: github.event_name == 'pull_request_target'
uses: actions/checkout@v2
with:
persist-credentials: false
ref: ${{ github.event.pull_request.head.sha }}
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Set up postgres (linux)
if: |
matrix.adapter == 'postgres' &&
runner.os == 'Linux'
uses: ./.github/actions/setup-postgres-linux
- name: Set up postgres (macos)
if: |
matrix.adapter == 'postgres' &&
runner.os == 'macOS'
uses: ./.github/actions/setup-postgres-macos
- name: Set up postgres (windows)
if: |
matrix.adapter == 'postgres' &&
runner.os == 'Windows'
uses: ./.github/actions/setup-postgres-windows
- name: Install python dependencies
run: |
pip install --upgrade pip
pip install tox
pip --version
tox --version
- name: Run tox (postgres)
if: matrix.adapter == 'postgres'
run: tox
- name: Run tox (redshift)
if: matrix.adapter == 'redshift'
env:
REDSHIFT_TEST_DBNAME: ${{ secrets.REDSHIFT_TEST_DBNAME }}
REDSHIFT_TEST_PASS: ${{ secrets.REDSHIFT_TEST_PASS }}
REDSHIFT_TEST_USER: ${{ secrets.REDSHIFT_TEST_USER }}
REDSHIFT_TEST_PORT: ${{ secrets.REDSHIFT_TEST_PORT }}
REDSHIFT_TEST_HOST: ${{ secrets.REDSHIFT_TEST_HOST }}
run: tox
- name: Run tox (snowflake)
if: matrix.adapter == 'snowflake'
env:
SNOWFLAKE_TEST_ACCOUNT: ${{ secrets.SNOWFLAKE_TEST_ACCOUNT }}
SNOWFLAKE_TEST_PASSWORD: ${{ secrets.SNOWFLAKE_TEST_PASSWORD }}
SNOWFLAKE_TEST_USER: ${{ secrets.SNOWFLAKE_TEST_USER }}
SNOWFLAKE_TEST_WAREHOUSE: ${{ secrets.SNOWFLAKE_TEST_WAREHOUSE }}
SNOWFLAKE_TEST_OAUTH_REFRESH_TOKEN: ${{ secrets.SNOWFLAKE_TEST_OAUTH_REFRESH_TOKEN }}
SNOWFLAKE_TEST_OAUTH_CLIENT_ID: ${{ secrets.SNOWFLAKE_TEST_OAUTH_CLIENT_ID }}
SNOWFLAKE_TEST_OAUTH_CLIENT_SECRET: ${{ secrets.SNOWFLAKE_TEST_OAUTH_CLIENT_SECRET }}
SNOWFLAKE_TEST_ALT_DATABASE: ${{ secrets.SNOWFLAKE_TEST_ALT_DATABASE }}
SNOWFLAKE_TEST_ALT_WAREHOUSE: ${{ secrets.SNOWFLAKE_TEST_ALT_WAREHOUSE }}
SNOWFLAKE_TEST_DATABASE: ${{ secrets.SNOWFLAKE_TEST_DATABASE }}
SNOWFLAKE_TEST_QUOTED_DATABASE: ${{ secrets.SNOWFLAKE_TEST_QUOTED_DATABASE }}
SNOWFLAKE_TEST_ROLE: ${{ secrets.SNOWFLAKE_TEST_ROLE }}
run: tox
- name: Run tox (bigquery)
if: matrix.adapter == 'bigquery'
env:
BIGQUERY_TEST_SERVICE_ACCOUNT_JSON: ${{ secrets.BIGQUERY_TEST_SERVICE_ACCOUNT_JSON }}
BIGQUERY_TEST_ALT_DATABASE: ${{ secrets.BIGQUERY_TEST_ALT_DATABASE }}
run: tox
- uses: actions/upload-artifact@v2
if: always()
with:
name: logs
path: ./logs
- name: Get current date
if: always()
id: date
run: echo "::set-output name=date::$(date +'%Y-%m-%dT%H_%M_%S')" #no colons allowed for artifacts
- uses: actions/upload-artifact@v2
if: always()
with:
name: integration_results_${{ matrix.python-version }}_${{ matrix.os }}_${{ matrix.adapter }}-${{ steps.date.outputs.date }}.csv
path: integration_results.csv
require-label-comment:
runs-on: ubuntu-latest
needs: test
permissions:
pull-requests: write
steps:
- name: Needs permission PR comment
if: >-
needs.test.result == 'skipped' &&
github.event_name == 'pull_request_target' &&
github.event.pull_request.head.repo.full_name != github.repository
uses: unsplash/comment-on-pr@master
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
msg: |
"You do not have permissions to run integration tests, @dbt-labs/core "\
"needs to label this PR with `ok to test` in order to run integration tests!"
check_for_duplicate_msg: true


@@ -1,206 +0,0 @@
# **what?**
# Runs code quality checks, unit tests, and verifies python build on
# all code committed to the repository. This workflow should not
# require any secrets since it runs for PRs from forked repos.
# By default, secrets are not passed to workflows running from
# a forked repo.
# **why?**
# Ensure code for dbt meets a certain quality standard.
# **when?**
# This will run for all PRs, when code is pushed to a release
# branch, and when manually triggered.
name: Tests and Code Checks
on:
push:
branches:
- "main"
- "develop"
- "*.latest"
- "releases/*"
pull_request:
workflow_dispatch:
permissions: read-all
# will cancel previous workflows triggered by the same event and for the same ref for PRs or same SHA otherwise
concurrency:
group: ${{ github.workflow }}-${{ github.event_name }}-${{ contains(github.event_name, 'pull_request') && github.event.pull_request.head.ref || github.sha }}
cancel-in-progress: true
defaults:
run:
shell: bash
jobs:
code-quality:
name: ${{ matrix.toxenv }}
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
toxenv: [flake8, mypy]
env:
TOXENV: ${{ matrix.toxenv }}
PYTEST_ADDOPTS: "-v --color=yes"
steps:
- name: Check out the repository
uses: actions/checkout@v2
with:
persist-credentials: false
- name: Set up Python
uses: actions/setup-python@v2
- name: Install python dependencies
run: |
pip install --upgrade pip
pip install tox
pip --version
tox --version
- name: Run tox
run: tox
unit:
name: unit test / python ${{ matrix.python-version }}
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
python-version: [3.6, 3.7, 3.8] # TODO: support unit testing for python 3.9 (https://github.com/dbt-labs/dbt/issues/3689)
env:
TOXENV: "unit"
PYTEST_ADDOPTS: "-v --color=yes --csv unit_results.csv"
steps:
- name: Check out the repository
uses: actions/checkout@v2
with:
persist-credentials: false
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Install python dependencies
run: |
pip install --upgrade pip
pip install tox
pip --version
tox --version
- name: Run tox
run: tox
- name: Get current date
if: always()
id: date
run: echo "::set-output name=date::$(date +'%Y-%m-%dT%H_%M_%S')" #no colons allowed for artifacts
- uses: actions/upload-artifact@v2
if: always()
with:
name: unit_results_${{ matrix.python-version }}-${{ steps.date.outputs.date }}.csv
path: unit_results.csv
build:
name: build packages
runs-on: ubuntu-latest
steps:
- name: Check out the repository
uses: actions/checkout@v2
with:
persist-credentials: false
- name: Set up Python
uses: actions/setup-python@v2
with:
python-version: 3.8
- name: Install python dependencies
run: |
pip install --upgrade pip
pip install --upgrade setuptools wheel twine check-wheel-contents
pip --version
- name: Build distributions
run: ./scripts/build-dist.sh
- name: Show distributions
run: ls -lh dist/
- name: Check distribution descriptions
run: |
twine check dist/*
- name: Check wheel contents
run: |
check-wheel-contents dist/*.whl --ignore W007,W008
- uses: actions/upload-artifact@v2
with:
name: dist
path: dist/
test-build:
name: verify packages / python ${{ matrix.python-version }} / ${{ matrix.os }}
needs: build
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest, macos-latest, windows-latest]
python-version: [3.6, 3.7, 3.8, 3.9]
steps:
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Install python dependencies
run: |
pip install --upgrade pip
pip install --upgrade wheel
pip --version
- uses: actions/download-artifact@v2
with:
name: dist
path: dist/
- name: Show distributions
run: ls -lh dist/
- name: Install wheel distributions
run: |
find ./dist/*.whl -maxdepth 1 -type f | xargs pip install --force-reinstall --find-links=dist/
- name: Check wheel distributions
run: |
dbt --version
- name: Install source distributions
run: |
find ./dist/*.gz -maxdepth 1 -type f | xargs pip install --force-reinstall --find-links=dist/
- name: Check source distributions
run: |
dbt --version


@@ -1,174 +0,0 @@
name: Performance Regression Tests
# Schedule triggers
on:
# runs twice a day at 10:05am and 10:05pm
schedule:
- cron: "5 10,22 * * *"
# Allows you to run this workflow manually from the Actions tab
workflow_dispatch:
jobs:
# checks fmt of runner code
# purposefully not a dependency of any other job
# will block merging, but not prevent developing
fmt:
name: Cargo fmt
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
override: true
- run: rustup component add rustfmt
- uses: actions-rs/cargo@v1
with:
command: fmt
args: --manifest-path performance/runner/Cargo.toml --all -- --check
# runs any tests associated with the runner
# these tests make sure the runner logic is correct
test-runner:
name: Test Runner
runs-on: ubuntu-latest
env:
# turns warnings into errors
RUSTFLAGS: "-D warnings"
steps:
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
override: true
- uses: actions-rs/cargo@v1
with:
command: test
args: --manifest-path performance/runner/Cargo.toml
# build an optimized binary to be used as the runner in later steps
build-runner:
needs: [test-runner]
name: Build Runner
runs-on: ubuntu-latest
env:
RUSTFLAGS: "-D warnings"
steps:
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
override: true
- uses: actions-rs/cargo@v1
with:
command: build
args: --release --manifest-path performance/runner/Cargo.toml
- uses: actions/upload-artifact@v2
with:
name: runner
path: performance/runner/target/release/runner
# run the performance measurements on the current or default branch
measure-dev:
needs: [build-runner]
name: Measure Dev Branch
runs-on: ubuntu-latest
steps:
- name: checkout dev
uses: actions/checkout@v2
- name: Setup Python
uses: actions/setup-python@v2.2.2
with:
python-version: "3.8"
- name: install dbt
run: pip install -r dev-requirements.txt -r editable-requirements.txt
- name: install hyperfine
run: wget https://github.com/sharkdp/hyperfine/releases/download/v1.11.0/hyperfine_1.11.0_amd64.deb && sudo dpkg -i hyperfine_1.11.0_amd64.deb
- uses: actions/download-artifact@v2
with:
name: runner
- name: change permissions
run: chmod +x ./runner
- name: run
run: ./runner measure -b dev -p ${{ github.workspace }}/performance/projects/
- uses: actions/upload-artifact@v2
with:
name: dev-results
path: performance/results/
# run the performance measurements on the release branch which we use
# as a performance baseline. This part takes by far the longest, so
# we do everything we can first so the job fails fast.
# -----
# we need to checkout dbt twice in this job: once for the baseline dbt
# version, and once to get the latest regression testing projects,
# metrics, and runner code from the develop or current branch so that
# the calculations match for both versions of dbt we are comparing.
measure-baseline:
needs: [build-runner]
name: Measure Baseline Branch
runs-on: ubuntu-latest
steps:
- name: checkout latest
uses: actions/checkout@v2
with:
ref: "0.20.latest"
- name: Setup Python
uses: actions/setup-python@v2.2.2
with:
python-version: "3.8"
- name: move repo up a level
run: mkdir ${{ github.workspace }}/../baseline/ && cp -r ${{ github.workspace }} ${{ github.workspace }}/../baseline
- name: "[debug] ls new dbt location"
run: ls ${{ github.workspace }}/../baseline/dbt/
# installation creates egg-links so we have to preserve source
- name: install dbt from new location
run: cd ${{ github.workspace }}/../baseline/dbt/ && pip install -r dev-requirements.txt -r editable-requirements.txt
# checkout the current branch to get all the target projects
# this deletes the old checked out code which is why we had to copy before
- name: checkout dev
uses: actions/checkout@v2
- name: install hyperfine
run: wget https://github.com/sharkdp/hyperfine/releases/download/v1.11.0/hyperfine_1.11.0_amd64.deb && sudo dpkg -i hyperfine_1.11.0_amd64.deb
- uses: actions/download-artifact@v2
with:
name: runner
- name: change permissions
run: chmod +x ./runner
- name: run runner
run: ./runner measure -b baseline -p ${{ github.workspace }}/performance/projects/
- uses: actions/upload-artifact@v2
with:
name: baseline-results
path: performance/results/
# detect regressions on the output generated from measuring
# the two branches. Exits with non-zero code if a regression is detected.
calculate-regressions:
needs: [measure-dev, measure-baseline]
name: Compare Results
runs-on: ubuntu-latest
steps:
- uses: actions/download-artifact@v2
with:
name: dev-results
- uses: actions/download-artifact@v2
with:
name: baseline-results
- name: "[debug] ls result files"
run: ls
- uses: actions/download-artifact@v2
with:
name: runner
- name: change permissions
run: chmod +x ./runner
- name: run calculation
run: ./runner calculate -r ./
# always attempt to upload the results even if there were regressions found
- uses: actions/upload-artifact@v2
if: ${{ always() }}
with:
name: final-calculations
path: ./final_calculations.json

.gitignore (1 line changed)

@@ -85,7 +85,6 @@ target/
# pycharm
.idea/
venv/
# AWS credentials
.aws/


@@ -1,49 +0,0 @@
The core function of dbt is SQL compilation and execution. Users create projects of dbt resources (models, tests, seeds, snapshots, ...), defined in SQL and YAML files, and they invoke dbt to create, update, or query associated views and tables. Today, dbt makes heavy use of Jinja2 to enable the templating of SQL, and to construct a DAG (Directed Acyclic Graph) from all of the resources in a project. Users can also extend their projects by installing resources (including Jinja macros) from other projects, called "packages."
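As a toy illustration of those two ideas (this is not dbt code: the `ref` stand-in, schema name, and node names are invented, and it assumes the `jinja2` and `networkx` packages are installed), the sketch below renders a Jinja-templated SQL string and records the implied dependency edge in a directed graph.

```python
from jinja2 import Template
import networkx as nx

# Hypothetical model SQL; ref() below is a stand-in, not dbt's implementation.
model_sql = "select * from {{ ref('stg_orders') }} where amount > 0"

def ref(name: str) -> str:
    # Pretend every referenced model resolves to a table in an "analytics" schema.
    return f"analytics.{name}"

print(Template(model_sql).render(ref=ref))
# select * from analytics.stg_orders where amount > 0

# Record the dependency implied by ref() in a DAG of project resources.
dag = nx.DiGraph()
dag.add_edge("model.stg_orders", "model.orders")
print(list(nx.topological_sort(dag)))
# ['model.stg_orders', 'model.orders']
```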
## dbt-core
Most of the python code in the repository is within the `core/dbt` directory. Currently the main subdirectories are:
- [`adapters`](core/dbt/adapters): Define base classes for behavior that is likely to differ across databases
- [`clients`](core/dbt/clients): Interface with dependencies (agate, jinja) or across operating systems
- [`config`](core/dbt/config): Reconcile user-supplied configuration from connection profiles, project files, and Jinja macros
- [`context`](core/dbt/context): Build and expose dbt-specific Jinja functionality
- [`contracts`](core/dbt/contracts): Define Python objects (dataclasses) that dbt expects to create and validate
- [`deps`](core/dbt/deps): Package installation and dependency resolution
- [`graph`](core/dbt/graph): Produce a `networkx` DAG of project resources, and select those resources given user-supplied criteria
- [`include`](core/dbt/include): The dbt "global project," which defines default implementations of Jinja2 macros
- [`parser`](core/dbt/parser): Read project files, validate, construct python objects
- [`rpc`](core/dbt/rpc): Provide remote procedure call server for invoking dbt, following JSON-RPC 2.0 spec
- [`task`](core/dbt/task): Set forth the actions that dbt can perform when invoked
### Invoking dbt
There are two supported ways of invoking dbt: from the command line and using an RPC server.
The "tasks" map to top-level dbt commands. So `dbt run` => task.run.RunTask, etc. Some are more like abstract base classes (GraphRunnableTask, for example) but all the concrete types outside of task/rpc should map to tasks. Currently one executes at a time. The tasks kick off their “Runners” and those do execute in parallel. The parallelism is managed via a thread pool, in GraphRunnableTask.
### core/dbt/include/index.html
This is the docs website code. It comes from the dbt-docs repository, and is generated when a release is packaged.
## Adapters
dbt uses an adapter-plugin pattern to extend support to different databases, warehouses, query engines, etc. The four core adapters that are in the main repository, contained within the [`plugins`](plugins) subdirectory, are: Postgres, Redshift, Snowflake, and BigQuery. Other warehouses use adapter plugins defined in separate repositories (e.g. [dbt-spark](https://github.com/dbt-labs/dbt-spark), [dbt-presto](https://github.com/dbt-labs/dbt-presto)).
Each adapter is a mix of python, Jinja2, and SQL. The adapter code also makes heavy use of Jinja2 to wrap modular chunks of SQL functionality, define default implementations, and allow plugins to override it.
Each adapter plugin is a standalone python package that includes:
- `dbt/include/[name]`: A "sub-global" dbt project, of YAML and SQL files, that reimplements Jinja macros to use the adapter's supported SQL syntax
- `dbt/adapters/[name]`: Python modules that inherit, and optionally reimplement, the base adapter classes defined in dbt-core
- `setup.py`
The Postgres adapter code is the most central, and many of its implementations are used as the default defined in the dbt-core global project. The greater the distance of a data technology from Postgres, the more its adapter plugin may need to reimplement.
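As a rough sketch of that inheritance pattern (the base class here is invented for illustration and does not match dbt's real adapter interfaces), a plugin's Python side subclasses a shared base and overrides only the behavior that differs from the default:

```python
# Illustrative only: an invented base class standing in for the adapter base
# classes defined in dbt-core; real plugins subclass those instead.
class ExampleBaseAdapter:
    def quote(self, identifier: str) -> str:
        return f'"{identifier}"'  # default: double-quoted identifiers

    def date_function(self) -> str:
        raise NotImplementedError("each adapter supplies its own")

class ExampleBigQueryAdapter(ExampleBaseAdapter):
    def quote(self, identifier: str) -> str:
        return f"`{identifier}`"  # override: backtick-quoted identifiers

    def date_function(self) -> str:
        return "CURRENT_TIMESTAMP()"

adapter = ExampleBigQueryAdapter()
print(adapter.quote("my_table"), adapter.date_function())
# `my_table` CURRENT_TIMESTAMP()
```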
## Testing dbt
The [`test/`](test/) subdirectory includes unit and integration tests that run as continuous integration checks against open pull requests. Unit tests check mock inputs and outputs of specific python functions. Integration tests perform end-to-end dbt invocations against real adapters (Postgres, Redshift, Snowflake, BigQuery) and assert that the results match expectations. See [the contributing guide](CONTRIBUTING.md) for a step-by-step walkthrough of setting up a local development and testing environment.
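To make the unit/integration distinction concrete, here is a minimal, hypothetical pytest-style unit test; the function under test and its module are invented for the example and are not part of dbt's actual test suite.

```python
# Hypothetical example: a pure function and a unit test that checks its
# output for mocked-up inputs, in the style described above.
def render_alias(model_name: str, prefix: str = "stg") -> str:
    return f"{prefix}_{model_name}"

def test_render_alias_uses_prefix():
    assert render_alias("orders") == "stg_orders"
    assert render_alias("orders", prefix="int") == "int_orders"
```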
## Everything else
- [docker](docker/): All dbt versions are published as Docker images on DockerHub. This subfolder contains the `Dockerfile` (constant) and `requirements.txt` (one for each version).
- [etc](etc/): Images for README
- [scripts](scripts/): Helper scripts for testing, releasing, and producing JSON schemas. These are not included in distributions of dbt, nor are they rigorously tested—they're just handy tools for the dbt maintainers :)

File diff suppressed because it is too large.


@@ -1,117 +1,112 @@
# Contributing to `dbt`
# Contributing to dbt
1. [About this document](#about-this-document)
2. [Proposing a change](#proposing-a-change)
3. [Getting the code](#getting-the-code)
4. [Setting up an environment](#setting-up-an-environment)
5. [Running `dbt` in development](#running-dbt-in-development)
5. [Running dbt in development](#running-dbt-in-development)
6. [Testing](#testing)
7. [Submitting a Pull Request](#submitting-a-pull-request)
## About this document
This document is a guide intended for folks interested in contributing to `dbt`. Below, we document the process by which members of the community should create issues and submit pull requests (PRs) in this repository. It is not intended as a guide for using `dbt`, and it assumes a certain level of familiarity with Python concepts such as virtualenvs, `pip`, python modules, filesystems, and so on. This guide assumes you are using macOS or Linux and are comfortable with the command line.
This document is a guide intended for folks interested in contributing to dbt. Below, we document the process by which members of the community should create issues and submit pull requests (PRs) in this repository. It is not intended as a guide for using dbt, and it assumes a certain level of familiarity with Python concepts such as virtualenvs, `pip`, python modules, filesystems, and so on. This guide assumes you are using macOS or Linux and are comfortable with the command line.
If you're new to python development or contributing to open-source software, we encourage you to read this document from start to finish. If you get stuck, drop us a line in the `#dbt-core-development` channel on [slack](https://community.getdbt.com).
If you're new to python development or contributing to open-source software, we encourage you to read this document from start to finish. If you get stuck, drop us a line in the #development channel on [slack](community.getdbt.com).
### Signing the CLA
Please note that all contributors to `dbt` must sign the [Contributor License Agreement](https://docs.getdbt.com/docs/contributor-license-agreements) to have their Pull Request merged into the `dbt` codebase. If you are unable to sign the CLA, then the `dbt` maintainers will unfortunately be unable to merge your Pull Request. You are, however, welcome to open issues and comment on existing ones.
Please note that all contributors to dbt must sign the [Contributor License Agreement](https://docs.getdbt.com/docs/contributor-license-agreements) to have their Pull Request merged into the dbt codebase. If you are unable to sign the CLA, then the dbt maintainers will unfortunately be unable to merge your Pull Request. You are, however, welcome to open issues and comment on existing ones.
## Proposing a change
`dbt` is Apache 2.0-licensed open source software. `dbt` is what it is today because community members like you have opened issues, provided feedback, and contributed to the knowledge loop for the entire community. Whether you are a seasoned open source contributor or a first-time committer, we welcome and encourage you to contribute code, documentation, ideas, or problem statements to this project.
dbt is Apache 2.0-licensed open source software. dbt is what it is today because community members like you have opened issues, provided feedback, and contributed to the knowledge loop for the entire community. Whether you are a seasoned open source contributor or a first-time committer, we welcome and encourage you to contribute code, documentation, ideas, or problem statements to this project.
### Defining the problem
If you have an idea for a new feature or if you've discovered a bug in `dbt`, the first step is to open an issue. Please check the list of [open issues](https://github.com/dbt-labs/dbt/issues) before creating a new one. If you find a relevant issue, please add a comment to the open issue instead of creating a new one. There are hundreds of open issues in this repository and it can be hard to know where to look for a relevant open issue. **The `dbt` maintainers are always happy to point contributors in the right direction**, so please err on the side of documenting your idea in a new issue if you are unsure where a problem statement belongs.
If you have an idea for a new feature or if you've discovered a bug in dbt, the first step is to open an issue. Please check the list of [open issues](https://github.com/fishtown-analytics/dbt/issues) before creating a new one. If you find a relevant issue, please add a comment to the open issue instead of creating a new one. There are hundreds of open issues in this repository and it can be hard to know where to look for a relevant open issue. **The dbt maintainers are always happy to point contributors in the right direction**, so please err on the side of documenting your idea in a new issue if you are unsure where a problem statement belongs.
> **Note:** All community-contributed Pull Requests _must_ be associated with an open issue. If you submit a Pull Request that does not pertain to an open issue, you will be asked to create an issue describing the problem before the Pull Request can be reviewed.
**Note:** All community-contributed Pull Requests _must_ be associated with an open issue. If you submit a Pull Request that does not pertain to an open issue, you will be asked to create an issue describing the problem before the Pull Request can be reviewed.
### Discussing the idea
After you open an issue, a `dbt` maintainer will follow up by commenting on your issue (usually within 1-3 days) to explore your idea further and advise on how to implement the suggested changes. In many cases, community members will chime in with their own thoughts on the problem statement. If you as the issue creator are interested in submitting a Pull Request to address the issue, you should indicate this in the body of the issue. The `dbt` maintainers are _always_ happy to help contributors with the implementation of fixes and features, so please also indicate if there's anything you're unsure about or could use guidance around in the issue.
After you open an issue, a dbt maintainer will follow up by commenting on your issue (usually within 1-3 days) to explore your idea further and advise on how to implement the suggested changes. In many cases, community members will chime in with their own thoughts on the problem statement. If you as the issue creator are interested in submitting a Pull Request to address the issue, you should indicate this in the body of the issue. The dbt maintainers are _always_ happy to help contributors with the implementation of fixes and features, so please also indicate if there's anything you're unsure about or could use guidance around in the issue.
### Submitting a change
If an issue is appropriately well scoped and describes a beneficial change to the `dbt` codebase, then anyone may submit a Pull Request to implement the functionality described in the issue. See the sections below on how to do this.
If an issue is appropriately well scoped and describes a beneficial change to the dbt codebase, then anyone may submit a Pull Request to implement the functionality described in the issue. See the sections below on how to do this.
The `dbt` maintainers will add a `good first issue` label if an issue is suitable for a first-time contributor. This label often means that the required code change is small, limited to one database adapter, or a net-new addition that does not impact existing functionality. You can see the list of currently open issues on the [Contribute](https://github.com/dbt-labs/dbt/contribute) page.
The dbt maintainers will add a `good first issue` label if an issue is suitable for a first-time contributor. This label often means that the required code change is small, limited to one database adapter, or a net-new addition that does not impact existing functionality. You can see the list of currently open issues on the [Contribute](https://github.com/fishtown-analytics/dbt/contribute) page.
Here's a good workflow:
- Comment on the open issue, expressing your interest in contributing the required code change
- Outline your planned implementation. If you want help getting started, ask!
- Follow the steps outlined below to develop locally. Once you have opened a PR, one of the `dbt` maintainers will work with you to review your code.
- Add a test! Tests are crucial for both fixes and new features alike. We want to make sure that code works as intended, and that it avoids any bugs previously encountered. Currently, the best resource for understanding `dbt`'s [unit](test/unit) and [integration](test/integration) tests is the tests themselves. One of the maintainers can help by pointing out relevant examples.
- Follow the steps outlined below to develop locally. Once you have opened a PR, one of the dbt maintainers will work with you to review your code.
- Add a test! Tests are crucial for both fixes and new features alike. We want to make sure that code works as intended, and that it avoids any bugs previously encountered. Currently, the best resource for understanding dbt's [unit](test/unit) and [integration](test/integration) tests is the tests themselves. One of the maintainers can help by pointing out relevant examples.
In some cases, the right resolution to an open issue might be tangential to the `dbt` codebase. The right path forward might be a documentation update or a change that can be made in user-space. In other cases, the issue might describe functionality that the `dbt` maintainers are unwilling or unable to incorporate into the `dbt` codebase. When it is determined that an open issue describes functionality that will not translate to a code change in the `dbt` repository, the issue will be tagged with the `wontfix` label (see below) and closed.
In some cases, the right resolution to an open issue might be tangential to the dbt codebase. The right path forward might be a documentation update or a change that can be made in user-space. In other cases, the issue might describe functionality that the dbt maintainers are unwilling or unable to incorporate into the dbt codebase. When it is determined that an open issue describes functionality that will not translate to a code change in the dbt repository, the issue will be tagged with the `wontfix` label (see below) and closed.
### Using issue labels
The `dbt` maintainers use labels to categorize open issues. Some labels indicate the databases impacted by the issue, while others describe the domain in the `dbt` codebase germane to the discussion. While most of these labels are self-explanatory (eg. `snowflake` or `bigquery`), there are others that are worth describing.
The dbt maintainers use labels to categorize open issues. Some labels indicate the databases impacted by the issue, while others describe the domain in the dbt codebase germane to the discussion. While most of these labels are self-explanatory (eg. `snowflake` or `bigquery`), there are others that are worth describing.
| tag | description |
| --- | ----------- |
| [triage](https://github.com/dbt-labs/dbt/labels/triage) | This is a new issue which has not yet been reviewed by a `dbt` maintainer. This label is removed when a maintainer reviews and responds to the issue. |
| [bug](https://github.com/dbt-labs/dbt/labels/bug) | This issue represents a defect or regression in `dbt` |
| [enhancement](https://github.com/dbt-labs/dbt/labels/enhancement) | This issue represents net-new functionality in `dbt` |
| [good first issue](https://github.com/dbt-labs/dbt/labels/good%20first%20issue) | This issue does not require deep knowledge of the `dbt` codebase to implement. This issue is appropriate for a first-time contributor. |
| [help wanted](https://github.com/dbt-labs/dbt/labels/help%20wanted) / [discussion](https://github.com/dbt-labs/dbt/labels/discussion) | Conversation around this issue is ongoing, and there isn't yet a clear path forward. Input from community members is most welcome. |
| [duplicate](https://github.com/dbt-labs/dbt/issues/duplicate) | This issue is functionally identical to another open issue. The `dbt` maintainers will close this issue and encourage community members to focus conversation on the other one. |
| [snoozed](https://github.com/dbt-labs/dbt/labels/snoozed) | This issue describes a good idea, but one which will probably not be addressed in a six-month time horizon. The `dbt` maintainers will revisit these issues periodically and re-prioritize them accordingly. |
| [stale](https://github.com/dbt-labs/dbt/labels/stale) | This is an old issue which has not recently been updated. Stale issues will periodically be closed by `dbt` maintainers, but they can be re-opened if the discussion is restarted. |
| [wontfix](https://github.com/dbt-labs/dbt/labels/wontfix) | This issue does not require a code change in the `dbt` repository, or the maintainers are unwilling/unable to merge a Pull Request which implements the behavior described in the issue. |
| [triage](https://github.com/fishtown-analytics/dbt/labels/triage) | This is a new issue which has not yet been reviewed by a dbt maintainer. This label is removed when a maintainer reviews and responds to the issue. |
| [bug](https://github.com/fishtown-analytics/dbt/labels/bug) | This issue represents a defect or regression in dbt |
| [enhancement](https://github.com/fishtown-analytics/dbt/labels/enhancement) | This issue represents net-new functionality in dbt |
| [good first issue](https://github.com/fishtown-analytics/dbt/labels/good%20first%20issue) | This issue does not require deep knowledge of the dbt codebase to implement. This issue is appropriate for a first-time contributor. |
| [help wanted](https://github.com/fishtown-analytics/dbt/labels/help%20wanted) / [discussion](https://github.com/fishtown-analytics/dbt/labels/discussion) | Conversation around this issue is ongoing, and there isn't yet a clear path forward. Input from community members is most welcome. |
| [duplicate](https://github.com/fishtown-analytics/dbt/issues/duplicate) | This issue is functionally identical to another open issue. The dbt maintainers will close this issue and encourage community members to focus conversation on the other one. |
| [snoozed](https://github.com/fishtown-analytics/dbt/labels/snoozed) | This issue describes a good idea, but one which will probably not be addressed in a six-month time horizon. The dbt maintainers will revisit these issues periodically and re-prioritize them accordingly. |
| [stale](https://github.com/fishtown-analytics/dbt/labels/stale) | This is an old issue which has not recently been updated. Stale issues will periodically be closed by dbt maintainers, but they can be re-opened if the discussion is restarted. |
| [wontfix](https://github.com/fishtown-analytics/dbt/labels/wontfix) | This issue does not require a code change in the dbt repository, or the maintainers are unwilling/unable to merge a Pull Request which implements the behavior described in the issue. |
#### Branching Strategy
`dbt` has three types of branches:
- **Trunks** are where active development of the next release takes place. There is one trunk, named `develop` at the time of writing, and it will be the default branch of the repository.
- **Release Branches** track a specific, not yet complete release of `dbt`. Each minor version release has a corresponding release branch. For example, the `0.11.x` series of releases has a branch called `0.11.latest`. This allows us to release new patch versions under `0.11` without necessarily needing to pull them into the latest version of `dbt`.
- **Feature Branches** track individual features and fixes. On completion they should be merged into the trunk branch or a specific release branch.
## Getting the code
### Installing git
You will need `git` in order to download and modify the `dbt` source code. On macOS, the best way to download git is to just install [Xcode](https://developer.apple.com/support/xcode/).
You will need `git` in order to download and modify the dbt source code. On macOS, the best way to download git is to just install [Xcode](https://developer.apple.com/support/xcode/).
### External contributors
If you are not a member of the `dbt-labs` GitHub organization, you can contribute to `dbt` by forking the `dbt` repository. For a detailed overview on forking, check out the [GitHub docs on forking](https://help.github.com/en/articles/fork-a-repo). In short, you will need to:
If you are not a member of the `fishtown-analytics` GitHub organization, you can contribute to dbt by forking the dbt repository. For a detailed overview on forking, check out the [GitHub docs on forking](https://help.github.com/en/articles/fork-a-repo). In short, you will need to:
1. fork the `dbt` repository
1. fork the dbt repository
2. clone your fork locally
3. check out a new branch for your proposed changes
4. push changes to your fork
5. open a pull request against `dbt-labs/dbt` from your forked repository
5. open a pull request against `fishtown-analytics/dbt` from your forked repository
### Core contributors
If you are a member of the `dbt-labs` GitHub organization, you will have push access to the `dbt` repo. Rather than forking `dbt` to make your changes, just clone the repository, check out a new branch, and push directly to that branch.
If you are a member of the `fishtown-analytics` GitHub organization, you will have push access to the dbt repo. Rather than
forking dbt to make your changes, just clone the repository, check out a new branch, and push directly to that branch.
## Setting up an environment
There are some tools that will be helpful to you in developing locally. While this is the list relevant for `dbt` development, many of these tools are used commonly across open-source python projects.
There are some tools that will be helpful to you in developing locally. While this is the list relevant for dbt development, many of these tools are used commonly across open-source python projects.
### Tools
A short list of tools used in `dbt` testing that will be helpful to your understanding:
A short list of tools used in dbt testing that will be helpful to your understanding:
- [`tox`](https://tox.readthedocs.io/en/latest/) to manage virtualenvs across python versions. We currently target the latest patch releases for Python 3.6, Python 3.7, Python 3.8, and Python 3.9
- [`pytest`](https://docs.pytest.org/en/latest/) to discover/run tests
- [`make`](https://users.cs.duke.edu/~ola/courses/programming/Makefiles/Makefiles.html) - but don't worry too much, nobody _really_ understands how make works and our Makefile is super simple
- [`flake8`](https://flake8.pycqa.org/en/latest/) for code linting
- [`mypy`](https://mypy.readthedocs.io/en/stable/) for static type checking
- [virtualenv](https://virtualenv.pypa.io/en/stable/) to manage dependencies
- [tox](https://tox.readthedocs.io/en/latest/) to manage virtualenvs across python versions
- [pytest](https://docs.pytest.org/en/latest/) to discover/run tests
- [make](https://users.cs.duke.edu/~ola/courses/programming/Makefiles/Makefiles.html) - but don't worry too much, nobody _really_ understands how make works and our Makefile is super simple
- [flake8](https://gitlab.com/pycqa/flake8) for code linting
- [CircleCI](https://circleci.com/product/) and [Azure Pipelines](https://azure.microsoft.com/en-us/services/devops/pipelines/)
A deep understanding of these tools is not required to effectively contribute to `dbt`, but we recommend checking out the attached documentation if you're interested in learning more about them.
A deep understanding of these tools is not required to effectively contribute to dbt, but we recommend checking out the attached documentation if you're interested in learning more about them.
#### virtual environments
We strongly recommend using virtual environments when developing code in `dbt`. We recommend creating this virtualenv
in the root of the `dbt` repository. To create a new virtualenv, run:
```sh
We strongly recommend using virtual environments when developing code in dbt. We recommend creating this virtualenv
in the root of the dbt repository. To create a new virtualenv, run:
```
python3 -m venv env
source env/bin/activate
```
@@ -120,32 +115,30 @@ This will create and activate a new Python virtual environment.
#### docker and docker-compose
Docker and docker-compose are both used in testing. Specific instructions for your OS can be found [here](https://docs.docker.com/get-docker/).
Docker and docker-compose are both used in testing. For macOS, the easiest thing to do is to [download docker for mac](https://store.docker.com/editions/community/docker-ce-desktop-mac). You'll need to make an account. On Linux, you can use one of the packages [here](https://docs.docker.com/install/#server). We recommend installing from docker.com instead of from your package manager. On Linux you also have to install docker-compose separately, following [these instructions](https://docs.docker.com/compose/install/#install-compose).
#### postgres (optional)
For testing, and later in the examples in this document, you may want to have `psql` available so you can poke around in the database and see what happened. We recommend that you use [homebrew](https://brew.sh/) for that on macOS, and your package manager on Linux. You can install any version of the postgres client that you'd like. On macOS, with homebrew setup, you can run:
```sh
```
brew install postgresql
```
## Running `dbt` in development
## Running dbt in development
### Installation
First make sure that you set up your `virtualenv` as described in [Setting up an environment](#setting-up-an-environment). Next, install `dbt` (and its dependencies) with:
First make sure that you set up your `virtualenv` as described in section _Setting up an environment_. Next, install dbt (and its dependencies) with:
```sh
make dev
# or
pip install -r dev-requirements.txt -r editable-requirements.txt
```
pip install -r editable_requirements.txt
```
When `dbt` is installed this way, any changes you make to the `dbt` source code will be reflected immediately in your next `dbt` run.
When dbt is installed from source in this way, any changes you make to the dbt source code will be reflected immediately in your next `dbt` run.
### Running `dbt`
### Running dbt
With your virtualenv activated, the `dbt` script should point back to the source code you've cloned on your machine. You can verify this by running `which dbt`. This command should show you a path to an executable in your virtualenv.
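For example, with the virtualenv from the previous section activated, the check might look like this (the exact path will differ depending on where you cloned the repository):
```sh
$ which dbt
/path/to/your/dbt-clone/env/bin/dbt
```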
@@ -153,79 +146,77 @@ Configure your [profile](https://docs.getdbt.com/docs/configure-your-profile) as
## Testing
Getting the `dbt` integration tests set up in your local environment will be very helpful as you start to make changes to your local version of `dbt`. The section that follows outlines some helpful tips for setting up the test environment.
Getting the dbt integration tests set up in your local environment will be very helpful as you start to make changes to your local version of dbt. The section that follows outlines some helpful tips for setting up the test environment.
Since `dbt` works with a number of different databases, you will need to supply credentials for one or more of these databases in your test environment. Most organizations don't have access to each of a BigQuery, Redshift, Snowflake, and Postgres database, so it's likely that you will be unable to run every integration test locally. Fortunately, dbt Labs provides a CI environment with access to sandboxed Redshift, Snowflake, BigQuery, and Postgres databases. See the section on [_Submitting a Pull Request_](#submitting-a-pull-request) below for more information on this CI setup.
### Running tests via Docker
### Initial setup
dbt's unit and integration tests run in Docker. Because dbt works with a number of different databases, you will need to supply credentials for one or more of these databases in your test environment. Most organizations don't have access to each of a BigQuery, Redshift, Snowflake, and Postgres database, so it's likely that you will be unable to run every integration test locally. Fortunately, Fishtown Analytics provides a CI environment with access to sandboxed Redshift, Snowflake, BigQuery, and Postgres databases. See the section on [_Submitting a Pull Request_](#submitting-a-pull-request) below for more information on this CI setup.
We recommend starting with `dbt`'s Postgres tests. These tests cover most of the functionality in `dbt`, are the fastest to run, and are the easiest to set up. To run the Postgres integration tests, you'll have to do one extra step of setting up the test database:
```sh
make setup-db
### Specifying your test credentials
dbt uses test credentials specified in a `test.env` file in the root of the repository. This `test.env` file is git-ignored, but please be _extra_ careful to never check in credentials or other sensitive information when developing against dbt. To create your `test.env` file, copy the provided sample file, then supply your relevant credentials:
```
cp test.env.sample test.env
atom test.env # supply your credentials
```
We recommend starting with dbt's Postgres tests. These tests cover most of the functionality in dbt, are the fastest to run, and are the easiest to set up. dbt's test suite runs Postgres in a Docker container, so no setup should be required to run these tests.
If you additionally want to test Snowflake, BigQuery, or Redshift locally, you'll need to get credentials and add them to the `test.env` file. In general, it's most important to have successful unit and Postgres tests. Once you open a PR, dbt will automatically run integration tests for the other three core database adapters. Of course, if you are a BigQuery user, contributing a BigQuery-only feature, it's important to run BigQuery tests as well.
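As a rough illustration of what goes into `test.env`, the entries are plain environment-variable assignments; the names below are the ones the CI configuration later in this change passes to the test runs, and the authoritative list lives in `test.env.sample`:
```sh
# test.env is git-ignored -- never commit real credentials
SNOWFLAKE_TEST_ACCOUNT=...
SNOWFLAKE_TEST_USER=...
SNOWFLAKE_TEST_PASSWORD=...
REDSHIFT_TEST_HOST=...
BIGQUERY_SERVICE_ACCOUNT_JSON=...
```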
### Test commands
dbt's unit tests and Python linter can be run with:
```
make test-unit
```
To run the Postgres + Python 3.6 integration tests, you'll have to do one extra step of setting up the test database:
```
or, alternatively:
```sh
docker-compose up -d database
PGHOST=localhost PGUSER=root PGPASSWORD=password PGDATABASE=postgres bash test/setup_db.sh
```
`dbt` uses test credentials specified in a `test.env` file in the root of the repository for non-Postgres databases. This `test.env` file is git-ignored, but please be _extra_ careful to never check in credentials or other sensitive information when developing against `dbt`. To create your `test.env` file, copy the provided sample file, then supply your relevant credentials. This step is only required to use non-Postgres databases.
To run a quick test for Python3 integration tests on Postgres, you can run:
```
cp test.env.sample test.env
$EDITOR test.env
make test-quick
```
> In general, it's most important to have successful unit and Postgres tests. Once you open a PR, `dbt` will automatically run integration tests for the other three core database adapters. Of course, if you are a BigQuery user, contributing a BigQuery-only feature, it's important to run BigQuery tests as well.
### Test commands
There are a few methods for running tests locally.
#### Makefile
There are multiple targets in the Makefile to run common test suites and code
checks, most notably:
```sh
# Runs unit tests with py38 and code checks in parallel.
make test
# Runs postgres integration tests with py38 in "fail fast" mode.
make integration
To run tests for a specific database, invoke `tox` directly with the required flags:
```
> These make targets assume you have a recent version of [`tox`](https://tox.readthedocs.io/en/latest/) installed locally,
> unless you choose a Docker container to run tests. Run `make help` for more info.
# Run Postgres py36 tests
docker-compose run test tox -e integration-postgres-py36 -- -x
Check out the other targets in the Makefile to see other commonly used test
suites.
# Run Snowflake py36 tests
docker-compose run test tox -e integration-snowflake-py36 -- -x
#### `tox`
# Run BigQuery py36 tests
docker-compose run test tox -e integration-bigquery-py36 -- -x
[`tox`](https://tox.readthedocs.io/en/latest/) takes care of managing virtualenvs and installing dependencies in order to run
tests. You can also run tests in parallel, for example, you can run unit tests
for Python 3.6, Python 3.7, Python 3.8, `flake8` checks, and `mypy` checks in
parallel with `tox -p`. Also, you can run unit tests for specific python versions
with `tox -e py36`. The configuration for these tests is located in `tox.ini`.
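For instance, the invocations mentioned above look like this when run from the repository root:
```sh
# run the unit test and code check environments in parallel
tox -p

# run only the Python 3.6 unit tests
tox -e py36
```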
#### `pytest`
Finally, you can also run a specific test or group of tests using [`pytest`](https://docs.pytest.org/en/latest/) directly. With a virtualenv
active and dev dependencies installed you can do things like:
```sh
# run specific postgres integration tests
python -m pytest -m profile_postgres test/integration/001_simple_copy_test
# run all unit tests in a file
python -m pytest test/unit/test_graph.py
# run a specific unit test
python -m pytest test/unit/test_graph.py::GraphTest::test__dependency_list
# Run Redshift py36 tests
docker-compose run test tox -e integration-redshift-py36 -- -x
```
> [Here](https://docs.pytest.org/en/reorganize-docs/new-docs/user/commandlineuseful.html)
> is a list of useful command-line options for `pytest` to use while developing.
To run a specific test by itself:
```
docker-compose run test tox -e explicit-py36 -- -s -x -m profile_{adapter} {path_to_test_file_or_folder}
```
E.g.
```
docker-compose run test tox -e explicit-py36 -- -s -x -m profile_snowflake test/integration/001_simple_copy_test
```
See the `Makefile` contents for some other examples of ways to run `tox`.
## Submitting a Pull Request
dbt Labs provides a sandboxed Redshift, Snowflake, and BigQuery database for use in a CI environment. When pull requests are submitted to the `dbt-labs/dbt` repo, GitHub will trigger automated tests in CircleCI and Azure Pipelines.
Fishtown Analytics provides a sandboxed Redshift, Snowflake, and BigQuery database for use in a CI environment. When pull requests are submitted to the `fishtown-analytics/dbt` repo, GitHub will trigger automated tests in CircleCI and Azure Pipelines.
A `dbt` maintainer will review your PR. They may suggest code revision for style or clarity, or request that you add unit or integration test(s). These are good things! We believe that, with a little bit of help, anyone can contribute high-quality code.
A dbt maintainer will review your PR. They may suggest code revision for style or clarity, or request that you add unit or integration test(s). These are good things! We believe that, with a little bit of help, anyone can contribute high-quality code.
Once all tests are passing and your PR has been approved, a `dbt` maintainer will merge your changes into the active development branch. And that's it! Happy developing :tada:
Once all tests are passing and your PR has been approved, a dbt maintainer will merge your changes into the active development branch. And that's it! Happy developing :tada:


@@ -1,11 +1,8 @@
FROM ubuntu:20.04
FROM ubuntu:18.04
ENV DEBIAN_FRONTEND noninteractive
RUN apt-get update \
&& apt-get install -y --no-install-recommends \
software-properties-common \
&& add-apt-repository ppa:git-core/ppa -y \
&& apt-get dist-upgrade -y \
&& apt-get install -y --no-install-recommends \
netcat \
@@ -49,7 +46,9 @@ RUN curl -LO https://github.com/jwilder/dockerize/releases/download/$DOCKERIZE_V
&& tar -C /usr/local/bin -xzvf dockerize-linux-amd64-$DOCKERIZE_VERSION.tar.gz \
&& rm dockerize-linux-amd64-$DOCKERIZE_VERSION.tar.gz
RUN pip3 install -U tox wheel six setuptools
RUN pip3 install -U "tox==3.14.4" wheel "six>=1.14.0,<1.15.0" "virtualenv==20.0.3" setuptools
# tox fails if the 'python' interpreter (python2) doesn't have `tox` installed
RUN pip install -U "tox==3.14.4" "six>=1.14.0,<1.15.0" "virtualenv==20.0.3" setuptools
# These args are passed in via docker-compose, which reads then from the .env file.
# On Linux, run `make .env` to create the .env file for the current user.


@@ -186,7 +186,7 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2021 dbt Labs, Inc.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

Makefile

@@ -1,81 +1,29 @@
.DEFAULT_GOAL:=help
.PHONY: install test test-unit test-integration
# Optional flag to run target in a docker container.
# (example `make test USE_DOCKER=true`)
ifeq ($(USE_DOCKER),true)
DOCKER_CMD := docker-compose run --rm test
endif
changed_tests := `git status --porcelain | grep '^\(M\| M\|A\| A\)' | awk '{ print $$2 }' | grep '\/test_[a-zA-Z_\-\.]\+.py'`
.PHONY: dev
dev: ## Installs dbt-* packages in develop mode along with development dependencies.
pip install -r dev-requirements.txt -r editable-requirements.txt
install:
pip install -e .
.PHONY: mypy
mypy: .env ## Runs mypy for static type checking.
$(DOCKER_CMD) tox -e mypy
test: .env
@echo "Full test run starting..."
@time docker-compose run --rm test tox
.PHONY: flake8
flake8: .env ## Runs flake8 to enforce style guide.
$(DOCKER_CMD) tox -e flake8
test-unit: .env
@echo "Unit test run starting..."
@time docker-compose run --rm test tox -e unit-py36,flake8
.PHONY: lint
lint: .env ## Runs all code checks in parallel.
$(DOCKER_CMD) tox -p -e flake8,mypy
test-integration: .env
@echo "Integration test run starting..."
@time docker-compose run --rm test tox -e integration-postgres-py36,integration-redshift-py36,integration-snowflake-py36,integration-bigquery-py36
.PHONY: unit
unit: .env ## Runs unit tests with py38.
$(DOCKER_CMD) tox -e py38
.PHONY: test
test: .env ## Runs unit tests with py38 and code checks in parallel.
$(DOCKER_CMD) tox -p -e py38,flake8,mypy
.PHONY: integration
integration: .env integration-postgres ## Alias for integration-postgres.
.PHONY: integration-fail-fast
integration-fail-fast: .env integration-postgres-fail-fast ## Alias for integration-postgres-fail-fast.
.PHONY: integration-postgres
integration-postgres: .env ## Runs postgres integration tests with py38.
$(DOCKER_CMD) tox -e py38-postgres -- -nauto
.PHONY: integration-postgres-fail-fast
integration-postgres-fail-fast: .env ## Runs postgres integration tests with py38 in "fail fast" mode.
$(DOCKER_CMD) tox -e py38-postgres -- -x -nauto
.PHONY: integration-redshift
integration-redshift: .env ## Runs redshift integration tests with py38.
$(DOCKER_CMD) tox -e py38-redshift -- -nauto
.PHONY: integration-redshift-fail-fast
integration-redshift-fail-fast: .env ## Runs redshift integration tests with py38 in "fail fast" mode.
$(DOCKER_CMD) tox -e py38-redshift -- -x -nauto
.PHONY: integration-snowflake
integration-snowflake: .env ## Runs snowflake integration tests with py38.
$(DOCKER_CMD) tox -e py38-snowflake -- -nauto
.PHONY: integration-snowflake-fail-fast
integration-snowflake-fail-fast: .env ## Runs snowflake integration tests with py38 in "fail fast" mode.
$(DOCKER_CMD) tox -e py38-snowflake -- -x -nauto
.PHONY: integration-bigquery
integration-bigquery: .env ## Runs bigquery integration tests with py38.
$(DOCKER_CMD) tox -e py38-bigquery -- -nauto
.PHONY: integration-bigquery-fail-fast
integration-bigquery-fail-fast: .env ## Runs bigquery integration tests with py38 in "fail fast" mode.
$(DOCKER_CMD) tox -e py38-bigquery -- -x -nauto
.PHONY: setup-db
setup-db: ## Setup Postgres database with docker-compose for system testing.
docker-compose up -d database
PGHOST=localhost PGUSER=root PGPASSWORD=password PGDATABASE=postgres bash test/setup_db.sh
test-quick: .env
@echo "Integration test run starting..."
@time docker-compose run --rm test tox -e integration-postgres-py36 -- -x
# This rule creates a file named .env that is used by docker-compose for passing
# the USER_ID and GROUP_ID arguments to the Docker image.
.env: ## Setup step for using docker-compose with make target.
.env:
@touch .env
ifneq ($(OS),Windows_NT)
ifneq ($(shell uname -s), Darwin)
@@ -83,9 +31,9 @@ ifneq ($(shell uname -s), Darwin)
@echo GROUP_ID=$(shell id -g) >> .env
endif
endif
@time docker-compose build
.PHONY: clean
clean: ## Resets development environment.
clean:
rm -f .coverage
rm -rf .eggs/
rm -f .env
@@ -99,14 +47,3 @@ clean: ## Resets development environment.
rm -rf target/
find . -type f -name '*.pyc' -delete
find . -type d -name '__pycache__' -depth -delete
.PHONY: help
help: ## Show this help message.
@echo 'usage: make [target] [USE_DOCKER=true]'
@echo
@echo 'targets:'
@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
@echo
@echo 'options:'
@echo 'use USE_DOCKER=true to run target in a docker container'


@@ -1,18 +1,28 @@
<p align="center">
<img src="https://raw.githubusercontent.com/dbt-labs/dbt/ec7dee39f793aa4f7dd3dae37282cc87664813e4/etc/dbt-logo-full.svg" alt="dbt logo" width="500"/>
<img src="/etc/dbt-logo-full.svg" alt="dbt logo" width="500"/>
</p>
<p align="center">
<a href="https://github.com/dbt-labs/dbt/actions/workflows/main.yml">
<img src="https://github.com/dbt-labs/dbt/actions/workflows/main.yml/badge.svg?event=push" alt="Unit Tests Badge"/>
<a href="https://codeclimate.com/github/fishtown-analytics/dbt">
<img src="https://codeclimate.com/github/fishtown-analytics/dbt/badges/gpa.svg" alt="Code Climate"/>
</a>
<a href="https://github.com/dbt-labs/dbt/actions/workflows/integration.yml">
<img src="https://github.com/dbt-labs/dbt/actions/workflows/integration.yml/badge.svg?event=push" alt="Integration Tests Badge"/>
<a href="https://circleci.com/gh/fishtown-analytics/dbt/tree/master">
<img src="https://circleci.com/gh/fishtown-analytics/dbt/tree/master.svg?style=svg" alt="CircleCI" />
</a>
<a href="https://ci.appveyor.com/project/DrewBanin/dbt/branch/development">
<img src="https://ci.appveyor.com/api/projects/status/v01rwd3q91jnwp9m/branch/development?svg=true" alt="AppVeyor" />
</a>
<a href="https://community.getdbt.com">
<img src="https://community.getdbt.com/badge.svg" alt="Slack" />
</a>
</p>
**[dbt](https://www.getdbt.com/)** enables data analysts and engineers to transform their data using the same practices that software engineers use to build applications.
**[dbt](https://www.getdbt.com/)** (data build tool) enables data analysts and engineers to transform their data using the same practices that software engineers use to build applications.
![architecture](https://raw.githubusercontent.com/dbt-labs/dbt/6c6649f9129d5d108aa3b0526f634cd8f3a9d1ed/etc/dbt-arch.png)
dbt is the T in ELT. Organize, cleanse, denormalize, filter, rename, and pre-aggregate the raw data in your warehouse so that it's ready for analysis.
![dbt architecture](https://github.com/fishtown-analytics/dbt/blob/master/etc/dbt-arch.png?raw=true)
dbt can be used to [aggregate pageviews into sessions](https://github.com/fishtown-analytics/snowplow), calculate [ad spend ROI](https://github.com/fishtown-analytics/facebook-ads), or report on [email campaign performance](https://github.com/fishtown-analytics/mailchimp).
## Understanding dbt
@@ -20,22 +30,28 @@ Analysts using dbt can transform their data by simply writing select statements,
These select statements, or "models", form a dbt project. Models frequently build on top of one another; dbt makes it easy to [manage relationships](https://docs.getdbt.com/docs/ref) between models, and [visualize these relationships](https://docs.getdbt.com/docs/documentation), as well as assure the quality of your transformations through [testing](https://docs.getdbt.com/docs/testing).
![dbt dag](https://raw.githubusercontent.com/dbt-labs/dbt/6c6649f9129d5d108aa3b0526f634cd8f3a9d1ed/etc/dbt-dag.png)
![dbt dag](https://github.com/fishtown-analytics/dbt/blob/master/etc/dbt-dag.png?raw=true)
## Getting started
- [Install dbt](https://docs.getdbt.com/docs/installation)
- Read the [introduction](https://docs.getdbt.com/docs/introduction/) and [viewpoint](https://docs.getdbt.com/docs/about/viewpoint/)
- [Install dbt](https://docs.getdbt.com/docs/installation)
- Read the [documentation](https://docs.getdbt.com/).
- Productionize your dbt project with [dbt Cloud](https://www.getdbt.com)
## Join the dbt Community
## Find out more
- Be part of the conversation in the [dbt Community Slack](http://community.getdbt.com/)
- Read more on the [dbt Community Discourse](https://discourse.getdbt.com)
- Check out the [Introduction to dbt](https://docs.getdbt.com/docs/introduction/).
- Read the [dbt Viewpoint](https://docs.getdbt.com/docs/about/viewpoint/).
## Join thousands of analysts in the dbt community
- Join the [chat](http://community.getdbt.com/) on Slack.
- Find community posts on [dbt Discourse](https://discourse.getdbt.com).
## Reporting bugs and contributing code
- Want to report a bug or request a feature? Let us know on [Slack](http://community.getdbt.com/), or open [an issue](https://github.com/dbt-labs/dbt/issues/new)
- Want to help us build dbt? Check out the [Contributing Guide](https://github.com/dbt-labs/dbt/blob/HEAD/CONTRIBUTING.md)
- Want to report a bug or request a feature? Let us know on [Slack](http://community.getdbt.com/), or open [an issue](https://github.com/fishtown-analytics/dbt/issues/new).
- Want to help us build dbt? Check out the [Contributing Getting Started Guide](/CONTRIBUTING.md)
## Code of Conduct

RELEASE.md Normal file

@@ -0,0 +1,92 @@
### Release Procedure :shipit:
#### Branching Strategy
dbt has three types of branches:
- **Trunks** track the latest release of a minor version of dbt. Historically, we used the `master` branch as the trunk. Each minor version release has a corresponding trunk. For example, the `0.11.x` series of releases has a branch called `0.11.latest`. This allows us to release new patch versions under `0.11` without necessarily needing to pull them into the latest version of dbt.
- **Release Branches** track a specific, not yet complete release of dbt. These releases are codenamed since we don't always know what their semantic version will be. Example: `dev/lucretia-mott` became `0.11.1`.
- **Feature Branches** track individual features and fixes. On completion they should be merged into a release branch.
#### Git & PyPI
1. Update CHANGELOG.md with the most recent changes
2. If this is a release candidate, you want to create it off of your release branch. If it's an actual release, you must first merge to a master branch. Open a Pull Request in GitHub to merge it into the appropriate trunk (`X.X.latest`).
3. Bump the version using `bumpversion`:
- Dry run first by running `bumpversion --new-version <desired-version> <part>` and checking the diff. If it looks correct, clean up the changes and move on:
- Alpha releases: `bumpversion --commit --no-tag --new-version 0.10.2a1 num`
- Patch releases: `bumpversion --commit --no-tag --new-version 0.10.2 patch`
- Minor releases: `bumpversion --commit --no-tag --new-version 0.11.0 minor`
- Major releases: `bumpversion --commit --no-tag --new-version 1.0.0 major`
4. (If this is not a release candidate) Merge to `x.x.latest` and (optionally) `master`.
5. Update the default branch to the next dev release branch.
6. Build source distributions for all packages by running `./scripts/build-sdists.sh`. Note that this will clean out your `dist/` folder, so if you have important stuff in there, don't run it!!!
7. Deploy to pypi
- `twine upload dist/*`
8. Deploy to homebrew (see below)
9. Deploy to conda-forge (see below)
10. Git release notes (points to changelog)
11. Post to slack (point to changelog)
After releasing a new version, it's important to merge the changes back into the other outstanding release branches. This avoids merge conflicts moving forward.
In some cases, where the branches have diverged wildly, it's ok to skip this step. But this means that the changes you just released won't be included in future releases.
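As a sketch of that merge-back, assuming the release went out from `0.11.latest` and `develop` is the other outstanding branch (substitute your actual branch names):
```bash
git checkout develop
git pull origin develop
git merge 0.11.latest
git push origin develop
```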
#### Homebrew Release Process
1. Clone the `homebrew-dbt` repository:
```
git clone git@github.com:fishtown-analytics/homebrew-dbt.git
```
2. For ALL releases (prereleases and version releases), copy the relevant formula. To copy from the latest version release of dbt, do:
```bash
cp Formula/dbt.rb Formula/dbt@{NEW-VERSION}.rb
```
To copy from a different version, simply copy the corresponding file.
3. Open the file, and edit the following:
- the name of the ruby class: this is important, homebrew won't function properly if the class name is wrong. Check historical versions to figure out the right name.
- under the `bottle` section, remove all of the hashes (lines starting with `sha256`)
4. Create a **Python 3.7** virtualenv, activate it, and then install two packages: `homebrew-pypi-poet`, and the version of dbt you are preparing. I use:
```
pyenv virtualenv 3.7.0 homebrew-dbt-{VERSION}
pyenv activate homebrew-dbt-{VERSION}
pip install dbt=={VERSION} homebrew-pypi-poet
```
homebrew-pypi-poet is a program that generates a valid homebrew formula for an installed pip package. You want to use it to generate a diff against the existing formula. Then you want to apply the diff for the dependency packages only -- e.g. it will tell you that `google-api-core` has been updated and that you need to use the latest version.
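As a rough sketch of that workflow (the exact `poet` invocation is an assumption; check `poet --help` in your virtualenv), you might generate a candidate formula and diff it against the one you copied in step 2:
```bash
# generate a formula for the installed dbt package, then diff it to find
# dependency resources that need updating in the real formula
poet -f dbt > /tmp/dbt-poet.rb
diff Formula/dbt@{NEW-VERSION}.rb /tmp/dbt-poet.rb
```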
5. Reinstall, test, and audit dbt. If the test or audit fails, fix the formula with step 1.
```bash
brew uninstall --force Formula/{YOUR-FILE}.rb
brew install Formula/{YOUR-FILE}.rb
brew test dbt
brew audit --strict dbt
```
6. Ask Connor to bottle the change (only his laptop can do it!)
#### Conda Forge Release Process
1. Clone the fork of `conda-forge/dbt-feedstock` [here](https://github.com/fishtown-analytics/dbt-feedstock)
```bash
git clone git@github.com:fishtown-analytics/dbt-feedstock.git
```
2. Update the version and sha256 in `recipe/meta.yml`. To calculate the sha256, run:
```bash
wget https://github.com/fishtown-analytics/dbt/archive/v{version}.tar.gz
openssl sha256 v{version}.tar.gz
```
3. Push the changes and create a PR against `conda-forge/dbt-feedstock`
4. Confirm that all automated conda-forge tests are passing

azure-pipelines.yml Normal file

@@ -0,0 +1,154 @@
# Python package
# Create and test a Python package on multiple Python versions.
# Add steps that analyze code, save the dist with the build record, publish to a PyPI-compatible index, and more:
# https://docs.microsoft.com/azure/devops/pipelines/languages/python
trigger:
branches:
include:
- master
- dev/*
- pr/*
jobs:
- job: UnitTest
pool:
vmImage: 'vs2017-win2016'
steps:
- task: UsePythonVersion@0
inputs:
versionSpec: '3.7'
architecture: 'x64'
- script: python -m pip install --upgrade pip && pip install tox
displayName: 'Install dependencies'
- script: python -m tox -e pywin-unit
displayName: Run unit tests
- job: PostgresIntegrationTest
pool:
vmImage: 'vs2017-win2016'
dependsOn: UnitTest
steps:
- pwsh: |
$serviceName = Get-Service -Name postgresql*
Set-Service -InputObject $serviceName -StartupType Automatic
Start-Service -InputObject $serviceName
& $env:PGBIN\createdb.exe -U postgres dbt
& $env:PGBIN\psql.exe -U postgres -c "CREATE ROLE root WITH PASSWORD 'password';"
& $env:PGBIN\psql.exe -U postgres -c "ALTER ROLE root WITH LOGIN;"
& $env:PGBIN\psql.exe -U postgres -c "GRANT CREATE, CONNECT ON DATABASE dbt TO root WITH GRANT OPTION;"
& $env:PGBIN\psql.exe -U postgres -c "CREATE ROLE noaccess WITH PASSWORD 'password' NOSUPERUSER;"
& $env:PGBIN\psql.exe -U postgres -c "ALTER ROLE noaccess WITH LOGIN;"
& $env:PGBIN\psql.exe -U postgres -c "GRANT CONNECT ON DATABASE dbt TO noaccess;"
displayName: Install postgresql and set up database
- task: UsePythonVersion@0
inputs:
versionSpec: '3.7'
architecture: 'x64'
- script: python -m pip install --upgrade pip && pip install tox
displayName: 'Install dependencies'
- script: python -m tox -e pywin-postgres
displayName: Run integration tests
# These three are all similar except secure environment variables, which MUST be passed along to their tasks,
# but there's probably a better way to do this!
- job: SnowflakeIntegrationTest
pool:
vmImage: 'vs2017-win2016'
dependsOn: PostgresIntegrationTest
condition: succeeded()
steps:
- task: UsePythonVersion@0
inputs:
versionSpec: '3.7'
architecture: 'x64'
- script: python -m pip install --upgrade pip && pip install tox
displayName: 'Install dependencies'
- script: python -m tox -e pywin-snowflake
env:
SNOWFLAKE_TEST_ACCOUNT: $(SNOWFLAKE_TEST_ACCOUNT)
SNOWFLAKE_TEST_PASSWORD: $(SNOWFLAKE_TEST_PASSWORD)
SNOWFLAKE_TEST_USER: $(SNOWFLAKE_TEST_USER)
SNOWFLAKE_TEST_WAREHOUSE: $(SNOWFLAKE_TEST_WAREHOUSE)
SNOWFLAKE_TEST_OAUTH_REFRESH_TOKEN: $(SNOWFLAKE_TEST_OAUTH_REFRESH_TOKEN)
SNOWFLAKE_TEST_OAUTH_CLIENT_ID: $(SNOWFLAKE_TEST_OAUTH_CLIENT_ID)
SNOWFLAKE_TEST_OAUTH_CLIENT_SECRET: $(SNOWFLAKE_TEST_OAUTH_CLIENT_SECRET)
displayName: Run integration tests
- job: BigQueryIntegrationTest
pool:
vmImage: 'vs2017-win2016'
dependsOn: PostgresIntegrationTest
condition: succeeded()
steps:
- task: UsePythonVersion@0
inputs:
versionSpec: '3.7'
architecture: 'x64'
- script: python -m pip install --upgrade pip && pip install tox
displayName: 'Install dependencies'
- script: python -m tox -e pywin-bigquery
env:
BIGQUERY_SERVICE_ACCOUNT_JSON: $(BIGQUERY_SERVICE_ACCOUNT_JSON)
displayName: Run integration tests
- job: RedshiftIntegrationTest
pool:
vmImage: 'vs2017-win2016'
dependsOn: PostgresIntegrationTest
condition: succeeded()
steps:
- task: UsePythonVersion@0
inputs:
versionSpec: '3.7'
architecture: 'x64'
- script: python -m pip install --upgrade pip && pip install tox
displayName: 'Install dependencies'
- script: python -m tox -e pywin-redshift
env:
REDSHIFT_TEST_DBNAME: $(REDSHIFT_TEST_DBNAME)
REDSHIFT_TEST_PASS: $(REDSHIFT_TEST_PASS)
REDSHIFT_TEST_USER: $(REDSHIFT_TEST_USER)
REDSHIFT_TEST_PORT: $(REDSHIFT_TEST_PORT)
REDSHIFT_TEST_HOST: $(REDSHIFT_TEST_HOST)
displayName: Run integration tests
- job: BuildWheel
pool:
vmImage: 'vs2017-win2016'
dependsOn:
- UnitTest
- PostgresIntegrationTest
- RedshiftIntegrationTest
- SnowflakeIntegrationTest
- BigQueryIntegrationTest
condition: succeeded()
steps:
- task: UsePythonVersion@0
inputs:
versionSpec: '3.7'
architecture: 'x64'
- script: python -m pip install --upgrade pip setuptools && python -m pip install -r requirements.txt && python -m pip install -r dev_requirements.txt
displayName: Install dependencies
- task: ShellScript@2
inputs:
scriptPath: scripts/build-wheels.sh
- task: CopyFiles@2
inputs:
contents: 'dist\?(*.whl|*.tar.gz)'
TargetFolder: '$(Build.ArtifactStagingDirectory)'
- task: PublishBuildArtifacts@1
inputs:
pathtoPublish: '$(Build.ArtifactStagingDirectory)'
artifactName: dists

converter.py Executable file

@@ -0,0 +1,73 @@
#!/usr/bin/env python
import json
import yaml
import sys
import argparse
from datetime import datetime, timezone
import dbt.clients.registry as registry
def yaml_type(fname):
with open(fname) as f:
return yaml.load(f)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--project", type=yaml_type, default="dbt_project.yml")
parser.add_argument("--namespace", required=True)
return parser.parse_args()
def get_full_name(args):
return "{}/{}".format(args.namespace, args.project["name"])
def init_project_in_packages(args, packages):
full_name = get_full_name(args)
if full_name not in packages:
packages[full_name] = {
"name": args.project["name"],
"namespace": args.namespace,
"latest": args.project["version"],
"assets": {},
"versions": {},
}
return packages[full_name]
def add_version_to_package(args, project_json):
project_json["versions"][args.project["version"]] = {
"id": "{}/{}".format(get_full_name(args), args.project["version"]),
"name": args.project["name"],
"version": args.project["version"],
"description": "",
"published_at": datetime.now(timezone.utc).astimezone().isoformat(),
"packages": args.project.get("packages") or [],
"works_with": [],
"_source": {
"type": "github",
"url": "",
"readme": "",
},
"downloads": {
"tarball": "",
"format": "tgz",
"sha1": "",
},
}
def main():
args = parse_args()
packages = registry.packages()
project_json = init_project_in_packages(args, packages)
if args.project["version"] in project_json["versions"]:
raise Exception("Version {} already in packages JSON"
.format(args.project["version"]),
file=sys.stderr)
add_version_to_package(args, project_json)
print(json.dumps(packages, indent=2))
if __name__ == "__main__":
main()


@@ -1 +1 @@
recursive-include dbt/include *.py *.sql *.yml *.html *.md .gitkeep .gitignore
recursive-include dbt/include *.py *.sql *.yml *.html *.md


@@ -1,12 +1,14 @@
from dataclasses import dataclass
import re
from typing import Dict, ClassVar, Any, Optional
from hologram import JsonSchemaMixin
from dbt.exceptions import RuntimeException
from typing import Dict, ClassVar, Any, Optional
@dataclass
class Column:
class Column(JsonSchemaMixin):
TYPE_LABELS: ClassVar[Dict[str, str]] = {
'STRING': 'TEXT',
'TIMESTAMP': 'TIMESTAMP',


@@ -28,9 +28,10 @@ from dbt.clients.jinja import MacroGenerator
from dbt.contracts.graph.compiled import (
CompileResultNode, CompiledSeedNode
)
from dbt.contracts.graph.manifest import Manifest, MacroManifest
from dbt.contracts.graph.manifest import Manifest
from dbt.contracts.graph.parsed import ParsedSeedNode
from dbt.exceptions import warn_or_error
from dbt.node_types import NodeType
from dbt.logger import GLOBAL_LOGGER as logger
from dbt.utils import filter_null_values, executor
@@ -159,7 +160,7 @@ class BaseAdapter(metaclass=AdapterMeta):
self.config = config
self.cache = RelationsCache()
self.connections = self.ConnectionManager(config)
self._macro_manifest_lazy: Optional[MacroManifest] = None
self._macro_manifest_lazy: Optional[Manifest] = None
###
# Methods that pass through to the connection manager
@@ -258,22 +259,22 @@ class BaseAdapter(metaclass=AdapterMeta):
return cls.ConnectionManager.TYPE
@property
def _macro_manifest(self) -> MacroManifest:
def _macro_manifest(self) -> Manifest:
if self._macro_manifest_lazy is None:
return self.load_macro_manifest()
return self._macro_manifest_lazy
def check_macro_manifest(self) -> Optional[MacroManifest]:
def check_macro_manifest(self) -> Optional[Manifest]:
"""Return the internal manifest (used for executing macros) if it's
been initialized, otherwise return None.
"""
return self._macro_manifest_lazy
def load_macro_manifest(self) -> MacroManifest:
def load_macro_manifest(self) -> Manifest:
if self._macro_manifest_lazy is None:
# avoid a circular import
from dbt.parser.manifest import ManifestLoader
manifest = ManifestLoader.load_macros(
from dbt.parser.manifest import load_macro_manifest
manifest = load_macro_manifest(
self.config, self.connections.set_query_header
)
self._macro_manifest_lazy = manifest
@@ -309,7 +310,8 @@ class BaseAdapter(metaclass=AdapterMeta):
self.Relation.create_from(self.config, node).without_identifier()
for node in manifest.nodes.values()
if (
node.is_relational and not node.is_ephemeral_model
node.resource_type in NodeType.executable() and
not node.is_ephemeral_model
)
}
@@ -511,7 +513,7 @@ class BaseAdapter(metaclass=AdapterMeta):
def get_columns_in_relation(
self, relation: BaseRelation
) -> List[BaseColumn]:
"""Get a list of the columns in the given Relation. """
"""Get a list of the columns in the given Relation."""
raise NotImplementedException(
'`get_columns_in_relation` is not implemented for this adapter!'
)


@@ -21,8 +21,8 @@ Self = TypeVar('Self', bound='BaseRelation')
@dataclass(frozen=True, eq=False, repr=False)
class BaseRelation(FakeAPIObject, Hashable):
type: Optional[RelationType]
path: Path
type: Optional[RelationType] = None
quote_character: str = '"'
include_policy: Policy = Policy()
quote_policy: Policy = Policy()
@@ -45,7 +45,7 @@ class BaseRelation(FakeAPIObject, Hashable):
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.to_dict(omit_none=True) == other.to_dict(omit_none=True)
return self.to_dict() == other.to_dict()
@classmethod
def get_default_quote_policy(cls) -> Policy:
@@ -185,10 +185,10 @@ class BaseRelation(FakeAPIObject, Hashable):
def create_from_source(
cls: Type[Self], source: ParsedSourceDefinition, **kwargs: Any
) -> Self:
source_quoting = source.quoting.to_dict(omit_none=True)
source_quoting = source.quoting.to_dict()
source_quoting.pop('column', None)
quote_policy = deep_merge(
cls.get_default_quote_policy().to_dict(omit_none=True),
cls.get_default_quote_policy().to_dict(),
source_quoting,
kwargs.get('quote_policy', {}),
)
@@ -433,14 +433,13 @@ class SchemaSearchMap(Dict[InformationSchema, Set[Optional[str]]]):
for schema in schemas:
yield information_schema_name, schema
def flatten(self, allow_multiple_databases: bool = False):
def flatten(self):
new = self.__class__()
# make sure we don't have multiple databases if allow_multiple_databases is set to False
if not allow_multiple_databases:
seen = {r.database.lower() for r in self if r.database}
if len(seen) > 1:
dbt.exceptions.raise_compiler_error(str(seen))
# make sure we don't have duplicates
seen = {r.database.lower() for r in self if r.database}
if len(seen) > 1:
dbt.exceptions.raise_compiler_error(str(seen))
for information_schema_name, schema in self.search():
path = {


@@ -99,14 +99,7 @@ class SQLConnectionManager(BaseConnectionManager):
column_names: Iterable[str],
rows: Iterable[Any]
) -> List[Dict[str, Any]]:
unique_col_names = dict()
for idx in range(len(column_names)):
col_name = column_names[idx]
if col_name in unique_col_names:
unique_col_names[col_name] += 1
column_names[idx] = f'{col_name}_{unique_col_names[col_name]}'
else:
unique_col_names[column_names[idx]] = 1
return [dict(zip(column_names, row)) for row in rows]
@classmethod


@@ -35,11 +35,7 @@ class ISODateTime(agate.data_types.DateTime):
)
def build_type_tester(
text_columns: Iterable[str],
string_null_values: Optional[Iterable[str]] = ('null', '')
) -> agate.TypeTester:
def build_type_tester(text_columns: Iterable[str]) -> agate.TypeTester:
types = [
agate.data_types.Number(null_values=('null', '')),
agate.data_types.Date(null_values=('null', ''),
@@ -50,10 +46,10 @@ def build_type_tester(
agate.data_types.Boolean(true_values=('true',),
false_values=('false',),
null_values=('null', '')),
agate.data_types.Text(null_values=string_null_values)
agate.data_types.Text(null_values=('null', ''))
]
force = {
k: agate.data_types.Text(null_values=string_null_values)
k: agate.data_types.Text(null_values=('null', ''))
for k in text_columns
}
return agate.TypeTester(force=force, types=types)
@@ -70,13 +66,7 @@ def table_from_rows(
if text_only_columns is None:
column_types = DEFAULT_TYPE_TESTER
else:
# If text_only_columns are present, prevent coercing empty string or
# literal 'null' strings to a None representation.
column_types = build_type_tester(
text_only_columns,
string_null_values=()
)
column_types = build_type_tester(text_only_columns)
return agate.Table(rows, column_names, column_types=column_types)
@@ -96,34 +86,19 @@ def table_from_data(data, column_names: Iterable[str]) -> agate.Table:
def table_from_data_flat(data, column_names: Iterable[str]) -> agate.Table:
"""
Convert a list of dictionaries into an Agate table. This method does not
coerce string values into more specific types (eg. '005' will not be
coerced to '5'). Additionally, this method does not coerce values to
None (eg. '' or 'null' will retain their string literal representations).
"""
"Convert list of dictionaries into an Agate table"
rows = []
text_only_columns = set()
for _row in data:
row = []
for col_name in column_names:
value = _row[col_name]
for value in list(_row.values()):
if isinstance(value, (dict, list, tuple)):
# Represent container types as json strings
value = json.dumps(value, cls=dbt.utils.JSONEncoder)
text_only_columns.add(col_name)
elif isinstance(value, str):
text_only_columns.add(col_name)
row.append(value)
row.append(json.dumps(value, cls=dbt.utils.JSONEncoder))
else:
row.append(value)
rows.append(row)
return table_from_rows(
rows=rows,
column_names=column_names,
text_only_columns=text_only_columns
)
return table_from_rows(rows=rows, column_names=column_names)
def empty_table():


@@ -4,42 +4,20 @@ import os.path
from dbt.clients.system import run_cmd, rmdir
from dbt.logger import GLOBAL_LOGGER as logger
import dbt.exceptions
from packaging import version
def _is_commit(revision: str) -> bool:
# match SHA-1 git commit
return bool(re.match(r"\b[0-9a-f]{40}\b", revision))
def clone(repo, cwd, dirname=None, remove_git_dir=False, revision=None, subdirectory=None):
has_revision = revision is not None
is_commit = _is_commit(revision or "")
def clone(repo, cwd, dirname=None, remove_git_dir=False, branch=None):
clone_cmd = ['git', 'clone', '--depth', '1']
if subdirectory:
logger.debug(' Subdirectory specified: {}, using sparse checkout.'.format(subdirectory))
out, _ = run_cmd(cwd, ['git', '--version'], env={'LC_ALL': 'C'})
git_version = version.parse(re.search(r"\d+\.\d+\.\d+", out.decode("utf-8")).group(0))
if not git_version >= version.parse("2.25.0"):
# 2.25.0 introduces --sparse
raise RuntimeError(
"Please update your git version to pull a dbt package "
"from a subdirectory: your version is {}, >= 2.25.0 needed".format(git_version)
)
clone_cmd.extend(['--filter=blob:none', '--sparse'])
if has_revision and not is_commit:
clone_cmd.extend(['--branch', revision])
if branch is not None:
clone_cmd.extend(['--branch', branch])
clone_cmd.append(repo)
if dirname is not None:
clone_cmd.append(dirname)
result = run_cmd(cwd, clone_cmd, env={'LC_ALL': 'C'})
if subdirectory:
run_cmd(os.path.join(cwd, dirname or ''), ['git', 'sparse-checkout', 'set', subdirectory])
result = run_cmd(cwd, clone_cmd, env={'LC_ALL': 'C'})
if remove_git_dir:
rmdir(os.path.join(dirname, '.git'))
@@ -53,38 +31,33 @@ def list_tags(cwd):
return tags
def _checkout(cwd, repo, revision):
logger.debug(' Checking out revision {}.'.format(revision))
def _checkout(cwd, repo, branch):
logger.debug(' Checking out branch {}.'.format(branch))
fetch_cmd = ["git", "fetch", "origin", "--depth", "1"]
run_cmd(cwd, ['git', 'remote', 'set-branches', 'origin', branch])
run_cmd(cwd, ['git', 'fetch', '--tags', '--depth', '1', 'origin', branch])
if _is_commit(revision):
run_cmd(cwd, fetch_cmd + [revision])
else:
run_cmd(cwd, ['git', 'remote', 'set-branches', 'origin', revision])
run_cmd(cwd, fetch_cmd + ["--tags", revision])
tags = list_tags(cwd)
if _is_commit(revision):
spec = revision
# Prefer tags to branches if one exists
elif revision in list_tags(cwd):
spec = 'tags/{}'.format(revision)
if branch in tags:
spec = 'tags/{}'.format(branch)
else:
spec = 'origin/{}'.format(revision)
spec = 'origin/{}'.format(branch)
out, err = run_cmd(cwd, ['git', 'reset', '--hard', spec],
env={'LC_ALL': 'C'})
return out, err
def checkout(cwd, repo, revision=None):
if revision is None:
revision = 'HEAD'
def checkout(cwd, repo, branch=None):
if branch is None:
branch = 'master'
try:
return _checkout(cwd, repo, revision)
return _checkout(cwd, repo, branch)
except dbt.exceptions.CommandResultError as exc:
stderr = exc.stderr.decode('utf-8').strip()
dbt.exceptions.bad_package_spec(repo, revision, stderr)
dbt.exceptions.bad_package_spec(repo, branch, stderr)
def get_current_sha(cwd):
@@ -98,16 +71,11 @@ def remove_remote(cwd):
def clone_and_checkout(repo, cwd, dirname=None, remove_git_dir=False,
revision=None, subdirectory=None):
branch=None):
exists = None
try:
_, err = clone(
repo,
cwd,
dirname=dirname,
remove_git_dir=remove_git_dir,
subdirectory=subdirectory,
)
_, err = clone(repo, cwd, dirname=dirname,
remove_git_dir=remove_git_dir)
except dbt.exceptions.CommandResultError as exc:
err = exc.stderr.decode('utf-8')
exists = re.match("fatal: destination path '(.+)' already exists", err)
@@ -129,7 +97,7 @@ def clone_and_checkout(repo, cwd, dirname=None, remove_git_dir=False,
logger.debug('Pulling new dependency {}.', directory)
full_path = os.path.join(cwd, directory)
start_sha = get_current_sha(full_path)
checkout(full_path, repo, revision)
checkout(full_path, repo, branch)
end_sha = get_current_sha(full_path)
if exists:
if start_sha == end_sha:
@@ -139,4 +107,4 @@ def clone_and_checkout(repo, cwd, dirname=None, remove_git_dir=False,
start_sha[:7], end_sha[:7])
else:
logger.debug(' Checked out at {}.', end_sha[:7])
return os.path.join(directory, subdirectory or '')
return directory


@@ -21,7 +21,7 @@ import jinja2.sandbox
from dbt.utils import (
get_dbt_macro_name, get_docs_macro_name, get_materialization_macro_name,
get_test_macro_name, deep_map
deep_map
)
from dbt.clients._jinja_blocks import BlockIterator, BlockData, BlockTag
@@ -29,8 +29,7 @@ from dbt.contracts.graph.compiled import CompiledSchemaTestNode
from dbt.contracts.graph.parsed import ParsedSchemaTestNode
from dbt.exceptions import (
InternalException, raise_compiler_error, CompilationException,
invalid_materialization_argument, MacroReturn, JinjaRenderingException,
UndefinedMacroException
invalid_materialization_argument, MacroReturn, JinjaRenderingException
)
from dbt import flags
from dbt.logger import GLOBAL_LOGGER as logger # noqa
@@ -232,7 +231,6 @@ class BaseMacroGenerator:
template = self.get_template()
# make the module. previously we set both vars and local, but that's
# redundant: They both end up in the same place
# make_module is in jinja2.environment. It returns a TemplateModule
module = template.make_module(vars=self.context, shared=False)
macro = module.__dict__[get_dbt_macro_name(name)]
module.__dict__.update(self.context)
@@ -246,7 +244,6 @@ class BaseMacroGenerator:
raise_compiler_error(str(e))
def call_macro(self, *args, **kwargs):
# called from __call__ methods
if self.context is None:
raise InternalException(
'Context is still None in call_macro!'
@@ -309,10 +306,8 @@ class MacroGenerator(BaseMacroGenerator):
e.stack.append(self.macro)
raise e
# This adds the macro's unique id to the node's 'depends_on'
@contextmanager
def track_call(self):
# This is only called from __call__
if self.stack is None or self.node is None:
yield
else:
@@ -327,7 +322,6 @@ class MacroGenerator(BaseMacroGenerator):
finally:
self.stack.pop(unique_id)
# this makes MacroGenerator objects callable like functions
def __call__(self, *args, **kwargs):
with self.track_call():
return self.call_macro(*args, **kwargs)
@@ -409,20 +403,6 @@ class DocumentationExtension(jinja2.ext.Extension):
return node
class TestExtension(jinja2.ext.Extension):
tags = ['test']
def parse(self, parser):
node = jinja2.nodes.Macro(lineno=next(parser.stream).lineno)
test_name = parser.parse_assign_target(name_only=True).name
parser.parse_signature(node)
node.name = get_test_macro_name(test_name)
node.body = parser.parse_statements(('name:endtest',),
drop_needle=True)
return node
def _is_dunder_name(name):
return name.startswith('__') and name.endswith('__')
@@ -494,7 +474,6 @@ def get_environment(
args['extensions'].append(MaterializationExtension)
args['extensions'].append(DocumentationExtension)
args['extensions'].append(TestExtension)
env_cls: Type[jinja2.Environment]
text_filter: Type
@@ -519,7 +498,7 @@ def catch_jinja(node=None) -> Iterator[None]:
e.translated = False
raise CompilationException(str(e), node) from e
except jinja2.exceptions.UndefinedError as e:
raise UndefinedMacroException(str(e), node) from e
raise CompilationException(str(e), node) from e
except CompilationException as exc:
exc.add_node(node)
raise


@@ -1,225 +0,0 @@
import jinja2
from dbt.clients.jinja import get_environment
from dbt.exceptions import raise_compiler_error
def statically_extract_macro_calls(string, ctx, db_wrapper=None):
# set 'capture_macros' to capture undefined
env = get_environment(None, capture_macros=True)
parsed = env.parse(string)
standard_calls = ['source', 'ref', 'config']
possible_macro_calls = []
for func_call in parsed.find_all(jinja2.nodes.Call):
func_name = None
if hasattr(func_call, 'node') and hasattr(func_call.node, 'name'):
func_name = func_call.node.name
else:
# func_call for dbt_utils.current_timestamp macro
# Call(
# node=Getattr(
# node=Name(
# name='dbt_utils',
# ctx='load'
# ),
# attr='current_timestamp',
# ctx='load
# ),
# args=[],
# kwargs=[],
# dyn_args=None,
# dyn_kwargs=None
# )
if (hasattr(func_call, 'node') and
hasattr(func_call.node, 'node') and
type(func_call.node.node).__name__ == 'Name' and
hasattr(func_call.node, 'attr')):
package_name = func_call.node.node.name
macro_name = func_call.node.attr
if package_name == 'adapter':
if macro_name == 'dispatch':
ad_macro_calls = statically_parse_adapter_dispatch(
func_call, ctx, db_wrapper)
possible_macro_calls.extend(ad_macro_calls)
else:
# This skips calls such as adapter.parse_index
continue
else:
func_name = f'{package_name}.{macro_name}'
else:
continue
if not func_name:
continue
if func_name in standard_calls:
continue
elif ctx.get(func_name):
continue
else:
if func_name not in possible_macro_calls:
possible_macro_calls.append(func_name)
return possible_macro_calls
# Call(
# node=Getattr(
# node=Name(
# name='adapter',
# ctx='load'
# ),
# attr='dispatch',
# ctx='load'
# ),
# args=[
# Const(value='test_pkg_and_dispatch')
# ],
# kwargs=[
# Keyword(
# key='packages',
# value=Call(node=Getattr(node=Name(name='local_utils', ctx='load'),
# attr='_get_utils_namespaces', ctx='load'), args=[], kwargs=[],
# dyn_args=None, dyn_kwargs=None)
# )
# ],
# dyn_args=None,
# dyn_kwargs=None
# )
def statically_parse_adapter_dispatch(func_call, ctx, db_wrapper):
possible_macro_calls = []
# This captures an adapter.dispatch('<macro_name>') call.
func_name = None
# macro_name positional argument
if len(func_call.args) > 0:
func_name = func_call.args[0].value
if func_name:
possible_macro_calls.append(func_name)
# packages positional argument
packages = None
macro_namespace = None
packages_arg = None
packages_arg_type = None
if len(func_call.args) > 1:
packages_arg = func_call.args[1]
# This can be a List or a Call
packages_arg_type = type(func_call.args[1]).__name__
# keyword arguments
if func_call.kwargs:
for kwarg in func_call.kwargs:
if kwarg.key == 'packages':
# The packages keyword will be deprecated and
# eventually removed
packages_arg = kwarg.value
# This can be a List or a Call
packages_arg_type = type(kwarg.value).__name__
elif kwarg.key == 'macro_name':
# This will remain to enable static resolution
if type(kwarg.value).__name__ == 'Const':
func_name = kwarg.value.value
possible_macro_calls.append(func_name)
else:
raise_compiler_error(f"The macro_name parameter ({kwarg.value.value}) "
"to adapter.dispatch was not a string")
elif kwarg.key == 'macro_namespace':
# This will remain to enable static resolution
kwarg_type = type(kwarg.value).__name__
if kwarg_type == 'Const':
macro_namespace = kwarg.value.value
else:
raise_compiler_error("The macro_namespace parameter to adapter.dispatch "
f"is a {kwarg_type}, not a string")
# positional arguments
if packages_arg:
if packages_arg_type == 'List':
# This will remain to enable static resolution
packages = []
for item in packages_arg.items:
packages.append(item.value)
elif packages_arg_type == 'Const':
# This will remain to enable static resolution
macro_namespace = packages_arg.value
elif packages_arg_type == 'Call':
# This is deprecated and should be removed eventually.
# It is here to support (hackily) common ways of providing
# a packages list to adapter.dispatch
if (hasattr(packages_arg, 'node') and
hasattr(packages_arg.node, 'node') and
hasattr(packages_arg.node.node, 'name') and
hasattr(packages_arg.node, 'attr')):
package_name = packages_arg.node.node.name
macro_name = packages_arg.node.attr
if (macro_name.startswith('_get') and 'namespaces' in macro_name):
# noqa: https://github.com/dbt-labs/dbt-utils/blob/9e9407b/macros/cross_db_utils/_get_utils_namespaces.sql
var_name = f'{package_name}_dispatch_list'
# hard code compatibility for fivetran_utils, just a teensy bit different
# noqa: https://github.com/fivetran/dbt_fivetran_utils/blob/0978ba2/macros/_get_utils_namespaces.sql
if package_name == 'fivetran_utils':
default_packages = ['dbt_utils', 'fivetran_utils']
else:
default_packages = [package_name]
namespace_names = get_dispatch_list(ctx, var_name, default_packages)
packages = []
if namespace_names:
packages.extend(namespace_names)
else:
msg = (
f"As of v0.19.2, custom macros, such as '{macro_name}', are no longer "
"supported in the 'packages' argument of 'adapter.dispatch()'.\n"
f"See https://docs.getdbt.com/reference/dbt-jinja-functions/dispatch "
"for details."
).strip()
raise_compiler_error(msg)
elif packages_arg_type == 'Add':
# This logic is for when there is a variable and an addition of a list,
# like: packages = (var('local_utils_dispatch_list', []) + ['local_utils2'])
# This is deprecated and should be removed eventually.
namespace_var = None
default_namespaces = []
# This might be a single call or it might be the 'left' piece in an addition
for var_call in packages_arg.find_all(jinja2.nodes.Call):
if (hasattr(var_call, 'node') and
var_call.node.name == 'var' and
hasattr(var_call, 'args')):
namespace_var = var_call.args[0].value
if hasattr(packages_arg, 'right'): # we have a default list of namespaces
for item in packages_arg.right.items:
default_namespaces.append(item.value)
if namespace_var:
namespace_names = get_dispatch_list(ctx, namespace_var, default_namespaces)
packages = []
if namespace_names:
packages.extend(namespace_names)
if db_wrapper:
macro = db_wrapper.dispatch(
func_name,
packages=packages,
macro_namespace=macro_namespace
).macro
func_name = f'{macro.package_name}.{macro.name}'
possible_macro_calls.append(func_name)
else: # this is only for test/unit/test_macro_calls.py
if macro_namespace:
packages = [macro_namespace]
if packages is None:
packages = []
for package_name in packages:
possible_macro_calls.append(f'{package_name}.{func_name}')
return possible_macro_calls
def get_dispatch_list(ctx, var_name, default_packages):
namespace_list = None
try:
# match the logic currently used in package _get_namespaces() macro
namespace_list = ctx['var'](var_name) + default_packages
except Exception:
pass
namespace_list = namespace_list if namespace_list else default_packages
return namespace_list


@@ -1,9 +1,10 @@
import functools
from functools import wraps
import requests
from dbt.utils import memoized, _connection_exception_retry as connection_exception_retry
from dbt.exceptions import RegistryException
from dbt.utils import memoized
from dbt.logger import GLOBAL_LOGGER as logger
from dbt import deprecations
import os
import time
if os.getenv('DBT_PACKAGE_HUB_URL'):
DEFAULT_REGISTRY_BASE_URL = os.getenv('DBT_PACKAGE_HUB_URL')
@@ -18,15 +19,31 @@ def _get_url(url, registry_base_url=None):
return '{}{}'.format(registry_base_url, url)
def _get_with_retries(path, registry_base_url=None):
get_fn = functools.partial(_get, path, registry_base_url)
return connection_exception_retry(get_fn, 5)
def _wrap_exceptions(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
max_attempts = 5
attempt = 0
while True:
attempt += 1
try:
return fn(*args, **kwargs)
except requests.exceptions.ConnectionError as exc:
if attempt < max_attempts:
time.sleep(1)
continue
raise RegistryException(
'Unable to connect to registry hub'
) from exc
return wrapper
@_wrap_exceptions
def _get(path, registry_base_url=None):
url = _get_url(path, registry_base_url)
logger.debug('Making package registry request: GET {}'.format(url))
resp = requests.get(url, timeout=30)
resp = requests.get(url)
logger.debug('Response from registry: GET {} {}'.format(url,
resp.status_code))
resp.raise_for_status()
@@ -34,44 +51,22 @@ def _get(path, registry_base_url=None):
def index(registry_base_url=None):
return _get_with_retries('api/v1/index.json', registry_base_url)
return _get('api/v1/index.json', registry_base_url)
index_cached = memoized(index)
def packages(registry_base_url=None):
return _get_with_retries('api/v1/packages.json', registry_base_url)
return _get('api/v1/packages.json', registry_base_url)
def package(name, registry_base_url=None):
response = _get_with_retries('api/v1/{}.json'.format(name), registry_base_url)
# Either redirectnamespace or redirectname in the JSON response indicates a redirect
# redirectnamespace redirects based on package ownership
# redirectname redirects based on package name
# Both can be present at the same time, or neither. Falls back gracefully to the old name
if ('redirectnamespace' in response) or ('redirectname' in response):
if ('redirectnamespace' in response) and response['redirectnamespace'] is not None:
use_namespace = response['redirectnamespace']
else:
use_namespace = response['namespace']
if ('redirectname' in response) and response['redirectname'] is not None:
use_name = response['redirectname']
else:
use_name = response['name']
new_nwo = use_namespace + "/" + use_name
deprecations.warn('package-redirect', old_name=name, new_name=new_nwo)
return response
return _get('api/v1/{}.json'.format(name), registry_base_url)
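The redirect handling above prefers redirectnamespace/redirectname over the original namespace/name whenever they are present and non-null. A small sketch of that selection on a hand-written response dict (the field values here are invented):

def resolve_redirect_sketch(response):
    # 'or' covers both a missing key and an explicit null, matching the logic above
    use_namespace = response.get('redirectnamespace') or response['namespace']
    use_name = response.get('redirectname') or response['name']
    return use_namespace + '/' + use_name

resp = {'namespace': 'fishtown-analytics', 'name': 'dbt_utils',
        'redirectnamespace': 'dbt-labs', 'redirectname': None}
print(resolve_redirect_sketch(resp))  # -> 'dbt-labs/dbt_utils'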
def package_version(name, version, registry_base_url=None):
return _get_with_retries('api/v1/{}/{}.json'.format(name, version), registry_base_url)
return _get('api/v1/{}/{}.json'.format(name, version), registry_base_url)
def get_available_versions(name):

View File

@@ -1,5 +1,4 @@
import errno
import functools
import fnmatch
import json
import os
@@ -16,8 +15,9 @@ from typing import (
)
import dbt.exceptions
import dbt.utils
from dbt.logger import GLOBAL_LOGGER as logger
from dbt.utils import _connection_exception_retry as connection_exception_retry
if sys.platform == 'win32':
from ctypes import WinDLL, c_bool
@@ -416,9 +416,6 @@ def run_cmd(
full_env.update(env)
try:
exe_pth = shutil.which(cmd[0])
if exe_pth:
cmd = [os.path.abspath(exe_pth)] + list(cmd[1:])
proc = subprocess.Popen(
cmd,
cwd=cwd,
@@ -441,16 +438,7 @@ def run_cmd(
return out, err
def download_with_retries(
url: str, path: str, timeout: Optional[Union[float, tuple]] = None
) -> None:
download_fn = functools.partial(download, url, path, timeout)
connection_exception_retry(download_fn, 5)
def download(
url: str, path: str, timeout: Optional[Union[float, tuple]] = None
) -> None:
def download(url: str, path: str, timeout: Union[float, tuple] = None) -> None:
path = convert_path(path)
connection_timeout = timeout or float(os.getenv('DBT_HTTP_TIMEOUT', 10))
response = requests.get(url, timeout=connection_timeout)
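A runnable sketch of the timeout/retry shape of download_with_retries above; the network call is stubbed out so only the partial application and the env-var timeout fallback are shown (the URL and path are placeholders):

import functools
import os

def connection_retry_sketch(fn, attempts=5):
    # stand-in for dbt.utils._connection_exception_retry
    for i in range(attempts):
        try:
            return fn()
        except ConnectionError:
            if i == attempts - 1:
                raise

def download_sketch(url, path, timeout=None):
    connection_timeout = timeout or float(os.getenv('DBT_HTTP_TIMEOUT', 10))
    print(f'GET {url} (timeout={connection_timeout}s) -> {path}')

def download_with_retries_sketch(url, path, timeout=None):
    connection_retry_sketch(functools.partial(download_sketch, url, path, timeout), 5)

download_with_retries_sketch('https://example.com/package.tar.gz', '/tmp/package.tar.gz')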

View File

@@ -1,19 +1,16 @@
from typing import Any
import dbt.exceptions
from typing import Any, Dict, Optional
import yaml
import yaml.scanner
# the C version is faster, but it doesn't always exist
YamlLoader: Any
try:
from yaml import (
CLoader as Loader,
CSafeLoader as SafeLoader,
CDumper as Dumper
)
from yaml import CSafeLoader as YamlLoader
except ImportError:
from yaml import ( # type: ignore # noqa: F401
Loader, SafeLoader, Dumper
)
from yaml import SafeLoader as YamlLoader
YAML_ERROR_MESSAGE = """
@@ -56,8 +53,8 @@ def contextualized_yaml_error(raw_contents, error):
raw_error=error)
def safe_load(contents) -> Optional[Dict[str, Any]]:
return yaml.load(contents, Loader=SafeLoader)
def safe_load(contents):
return yaml.load(contents, Loader=YamlLoader)
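Both sides of this hunk fall back from the C-accelerated loader to the pure-Python one when libyaml is not available; a minimal standalone version of that fallback:

import yaml

try:
    from yaml import CSafeLoader as SafeLoaderImpl  # faster, only present if libyaml was built
except ImportError:
    from yaml import SafeLoader as SafeLoaderImpl

def safe_load_sketch(contents):
    return yaml.load(contents, Loader=SafeLoaderImpl)

print(safe_load_sketch('models:\n  - name: my_model\n'))
# -> {'models': [{'name': 'my_model'}]}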
def load_yaml_text(contents):

View File

@@ -12,8 +12,9 @@ from dbt.clients.system import make_directory
from dbt.context.providers import generate_runtime_model
from dbt.contracts.graph.manifest import Manifest
from dbt.contracts.graph.compiled import (
COMPILED_TYPES,
CompiledDataTestNode,
CompiledSchemaTestNode,
COMPILED_TYPES,
GraphMemberNode,
InjectedCTE,
ManifestNode,
@@ -29,7 +30,6 @@ from dbt.graph import Graph
from dbt.logger import GLOBAL_LOGGER as logger
from dbt.node_types import NodeType
from dbt.utils import pluralize
import dbt.tracking
graph_file_name = 'graph.gpickle'
@@ -58,11 +58,6 @@ def print_compile_stats(stats):
results = {k: 0 for k in names.keys()}
results.update(stats)
# create tracking event for resource_counts
if dbt.tracking.active_user is not None:
resource_counts = {k.pluralize(): v for k, v in results.items()}
dbt.tracking.track_resource_counts(resource_counts)
stat_line = ", ".join([
pluralize(ct, names.get(t)) for t, ct in results.items()
if t in names
@@ -143,7 +138,7 @@ class Linker:
"""
out_graph = self.graph.copy()
for node_id in self.graph.nodes():
data = manifest.expect(node_id).to_dict(omit_none=True)
data = manifest.expect(node_id).to_dict()
out_graph.add_node(node_id, **data)
nx.write_gpickle(out_graph, outfile)
@@ -182,7 +177,8 @@ class Compiler:
def _get_relation_name(self, node: ParsedNode):
relation_name = None
if node.is_relational and not node.is_ephemeral_model:
if (node.resource_type in NodeType.refable() and
not node.is_ephemeral_model):
adapter = get_adapter(self.config)
relation_cls = adapter.Relation
relation_name = str(relation_cls.create_from(self.config, node))
@@ -249,19 +245,22 @@ class Compiler:
return str(parsed)
def _get_dbt_test_name(self) -> str:
return 'dbt__cte__internal_test'
# This method is called by the 'compile_node' method. Starting
# from the node that it is passed in, it will recursively call
# itself using the 'extra_ctes'. The 'ephemeral' models do
# not produce SQL that is executed directly, instead they
# are rolled up into the models that refer to them by
# inserting CTEs into the SQL.
def _recursively_prepend_ctes(
self,
model: NonSourceCompiledNode,
manifest: Manifest,
extra_context: Optional[Dict[str, Any]],
) -> Tuple[NonSourceCompiledNode, List[InjectedCTE]]:
"""This method is called by the 'compile_node' method. Starting
from the node that it is passed in, it will recursively call
itself using the 'extra_ctes'. The 'ephemeral' models do
not produce SQL that is executed directly, instead they
are rolled up into the models that refer to them by
inserting CTEs into the SQL.
"""
if model.compiled_sql is None:
raise RuntimeException(
'Cannot inject ctes into an unparsed node', model
@@ -279,67 +278,101 @@ class Compiler:
# gathered and then "injected" into the model.
prepended_ctes: List[InjectedCTE] = []
dbt_test_name = self._get_dbt_test_name()
# extra_ctes are added to the model by
# RuntimeRefResolver.create_relation, which adds an
# extra_cte for every model relation which is an
# ephemeral model.
for cte in model.extra_ctes:
if cte.id not in manifest.nodes:
raise InternalException(
f'During compilation, found a cte reference that '
f'could not be resolved: {cte.id}'
)
cte_model = manifest.nodes[cte.id]
if not cte_model.is_ephemeral_model:
raise InternalException(f'{cte.id} is not ephemeral')
# This model has already been compiled, so it's been
# through here before
if getattr(cte_model, 'compiled', False):
assert isinstance(cte_model, tuple(COMPILED_TYPES.values()))
cte_model = cast(NonSourceCompiledNode, cte_model)
new_prepended_ctes = cte_model.extra_ctes
# if the cte_model isn't compiled, i.e. first time here
if cte.id == dbt_test_name:
sql = cte.sql
else:
# This is an ephemeral parsed model that we can compile.
# Compile and update the node
cte_model = self._compile_node(
cte_model, manifest, extra_context)
# recursively call this method
cte_model, new_prepended_ctes = \
self._recursively_prepend_ctes(
cte_model, manifest, extra_context
if cte.id not in manifest.nodes:
raise InternalException(
f'During compilation, found a cte reference that '
f'could not be resolved: {cte.id}'
)
# Save compiled SQL file and sync manifest
self._write_node(cte_model)
manifest.sync_update_node(cte_model)
cte_model = manifest.nodes[cte.id]
_extend_prepended_ctes(prepended_ctes, new_prepended_ctes)
if not cte_model.is_ephemeral_model:
raise InternalException(f'{cte.id} is not ephemeral')
new_cte_name = self.add_ephemeral_prefix(cte_model.name)
rendered_sql = (
cte_model._pre_injected_sql or cte_model.compiled_sql
)
sql = f' {new_cte_name} as (\n{rendered_sql}\n)'
# This model has already been compiled, so it's been
# through here before
if getattr(cte_model, 'compiled', False):
assert isinstance(cte_model,
tuple(COMPILED_TYPES.values()))
cte_model = cast(NonSourceCompiledNode, cte_model)
new_prepended_ctes = cte_model.extra_ctes
# if the cte_model isn't compiled, i.e. first time here
else:
# This is an ephemeral parsed model that we can compile.
# Compile and update the node
cte_model = self._compile_node(
cte_model, manifest, extra_context)
# recursively call this method
cte_model, new_prepended_ctes = \
self._recursively_prepend_ctes(
cte_model, manifest, extra_context
)
# Save compiled SQL file and sync manifest
self._write_node(cte_model)
manifest.sync_update_node(cte_model)
_extend_prepended_ctes(prepended_ctes, new_prepended_ctes)
new_cte_name = self.add_ephemeral_prefix(cte_model.name)
sql = f' {new_cte_name} as (\n{cte_model.compiled_sql}\n)'
_add_prepended_cte(prepended_ctes, InjectedCTE(id=cte.id, sql=sql))
injected_sql = self._inject_ctes_into_sql(
model.compiled_sql,
prepended_ctes,
)
model._pre_injected_sql = model.compiled_sql
model.compiled_sql = injected_sql
# We don't save injected_sql into compiled sql for ephemeral models
# because it will cause problems with processing of subsequent models.
# Ephemeral models do not produce executable SQL of their own.
if not model.is_ephemeral_model:
injected_sql = self._inject_ctes_into_sql(
model.compiled_sql,
prepended_ctes,
)
model.compiled_sql = injected_sql
model.extra_ctes_injected = True
model.extra_ctes = prepended_ctes
model.validate(model.to_dict(omit_none=True))
model.validate(model.to_dict())
manifest.update_node(model)
return model, prepended_ctes
def _add_ctes(
self,
compiled_node: NonSourceCompiledNode,
manifest: Manifest,
extra_context: Dict[str, Any],
) -> NonSourceCompiledNode:
"""Wrap the data test SQL in a CTE."""
# for data tests, we need to insert a special CTE at the end of the
# list containing the test query, and then have the "real" query be a
# select count(*) from that model.
# the benefit of doing it this way is that _add_ctes() can be
# rewritten for different adapters to handle databases that don't
# support CTEs, or at least don't have full support.
if isinstance(compiled_node, CompiledDataTestNode):
# the last prepend (so last in order) should be the data test body.
# then we can add our select count(*) from _that_ cte as the "real"
# compiled_sql, and do the regular prepend logic from CTEs.
name = self._get_dbt_test_name()
cte = InjectedCTE(
id=name,
sql=f' {name} as (\n{compiled_node.compiled_sql}\n)'
)
compiled_node.extra_ctes.append(cte)
compiled_node.compiled_sql = f'\nselect count(*) from {name}'
return compiled_node
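_add_ctes above turns a data test into a named CTE plus a count(*) over it. A sketch of the resulting SQL shape (the table and test body are invented, and the real code routes the CTE through _recursively_prepend_ctes rather than concatenating directly):

def wrap_data_test_sketch(test_sql, cte_name='dbt__cte__internal_test'):
    cte = f' {cte_name} as (\n{test_sql}\n)'
    body = f'\nselect count(*) from {cte_name}'
    # concatenation only, to show the final shape of the compiled test
    return f'with{cte}{body}'

print(wrap_data_test_sketch('select * from orders where amount < 0'))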
# creates a compiled_node from the ManifestNode passed in,
# creates a "context" dictionary for jinja rendering,
# and then renders the "compiled_sql" using the node, the
@@ -355,7 +388,7 @@ class Compiler:
logger.debug("Compiling {}".format(node.unique_id))
data = node.to_dict(omit_none=True)
data = node.to_dict()
data.update({
'compiled': False,
'compiled_sql': None,
@@ -378,6 +411,12 @@ class Compiler:
compiled_node.compiled = True
# add ctes for specific test nodes, and also for
# possible future use in adapters
compiled_node = self._add_ctes(
compiled_node, manifest, extra_context
)
return compiled_node
def write_graph_file(self, linker: Linker, manifest: Manifest):
@@ -441,13 +480,18 @@ class Compiler:
logger.debug(f'Writing injected SQL for node "{node.unique_id}"')
if node.compiled_sql:
node.compiled_path = node.write_node(
node.build_path = node.write_node(
self.config.target_path,
'compiled',
node.compiled_sql
)
return node
# This is the main entry point into this code. It's called by
# CompileRunner.compile, GenericRPCRunner.compile, and
# RunTask.get_hook_sql. It calls '_compile_node' to convert
# the node into a compiled node, and then calls the
# recursive method to "prepend" the ctes.
def compile_node(
self,
node: ManifestNode,
@@ -455,12 +499,6 @@ class Compiler:
extra_context: Optional[Dict[str, Any]] = None,
write: bool = True,
) -> NonSourceCompiledNode:
"""This is the main entry point into this code. It's called by
CompileRunner.compile, GenericRPCRunner.compile, and
RunTask.get_hook_sql. It calls '_compile_node' to convert
the node into a compiled node, and then calls the
recursive method to "prepend" the ctes.
"""
node = self._compile_node(node, manifest, extra_context)
node, _ = self._recursively_prepend_ctes(

View File

@@ -2,7 +2,7 @@ from dataclasses import dataclass
from typing import Any, Dict, Optional, Tuple
import os
from dbt.dataclass_schema import ValidationError
from hologram import ValidationError
from dbt.clients.system import load_file_contents
from dbt.clients.yaml_helper import load_yaml_text
@@ -75,7 +75,6 @@ def read_user_config(directory: str) -> UserConfig:
if profile:
user_cfg = coerce_dict_str(profile.get('config', {}))
if user_cfg is not None:
UserConfig.validate(user_cfg)
return UserConfig.from_dict(user_cfg)
except (RuntimeException, ValidationError):
pass
@@ -84,8 +83,7 @@ def read_user_config(directory: str) -> UserConfig:
# The Profile class is included in RuntimeConfig, so any attribute
# additions must also be set where the RuntimeConfig class is created
# `init=False` is a workaround for https://bugs.python.org/issue45081
@dataclass(init=False)
@dataclass
class Profile(HasCredentials):
profile_name: str
target_name: str
@@ -93,23 +91,6 @@ class Profile(HasCredentials):
threads: int
credentials: Credentials
def __init__(
self,
profile_name: str,
target_name: str,
config: UserConfig,
threads: int,
credentials: Credentials
):
"""Explicitly defining `__init__` to work around bug in Python 3.9.7
https://bugs.python.org/issue45081
"""
self.profile_name = profile_name
self.target_name = target_name
self.config = config
self.threads = threads
self.credentials = credentials
def to_profile_info(
self, serialize_credentials: bool = False
) -> Dict[str, Any]:
@@ -129,8 +110,8 @@ class Profile(HasCredentials):
'credentials': self.credentials,
}
if serialize_credentials:
result['config'] = self.config.to_dict(omit_none=True)
result['credentials'] = self.credentials.to_dict(omit_none=True)
result['config'] = self.config.to_dict()
result['credentials'] = self.credentials.to_dict()
return result
def to_target_dict(self) -> Dict[str, Any]:
@@ -143,7 +124,7 @@ class Profile(HasCredentials):
'name': self.target_name,
'target_name': self.target_name,
'profile_name': self.profile_name,
'config': self.config.to_dict(omit_none=True),
'config': self.config.to_dict(),
})
return target
@@ -156,10 +137,10 @@ class Profile(HasCredentials):
def validate(self):
try:
if self.credentials:
dct = self.credentials.to_dict(omit_none=True)
self.credentials.validate(dct)
dct = self.to_profile_info(serialize_credentials=True)
ProfileConfig.validate(dct)
self.credentials.to_dict(validate=True)
ProfileConfig.from_dict(
self.to_profile_info(serialize_credentials=True)
)
except ValidationError as exc:
raise DbtProfileError(validator_error_message(exc)) from exc
@@ -179,9 +160,7 @@ class Profile(HasCredentials):
typename = profile.pop('type')
try:
cls = load_plugin(typename)
data = cls.translate_aliases(profile)
cls.validate(data)
credentials = cls.from_dict(data)
credentials = cls.from_dict(profile)
except (RuntimeException, ValidationError) as e:
msg = str(e) if isinstance(e, RuntimeException) else e.message
raise DbtProfileError(
@@ -254,7 +233,6 @@ class Profile(HasCredentials):
"""
if user_cfg is None:
user_cfg = {}
UserConfig.validate(user_cfg)
config = UserConfig.from_dict(user_cfg)
profile = cls(

View File

@@ -26,12 +26,15 @@ from dbt.version import get_installed_version
from dbt.utils import MultiDict
from dbt.node_types import NodeType
from dbt.config.selectors import SelectorDict
from dbt.contracts.project import (
Project as ProjectContract,
SemverString,
)
from dbt.contracts.project import PackageConfig
from dbt.dataclass_schema import ValidationError
from hologram import ValidationError
from .renderer import DbtProjectYamlRenderer
from .selectors import (
selector_config_from_data,
@@ -98,7 +101,6 @@ def package_config_from_data(packages_data: Dict[str, Any]):
packages_data = {'packages': []}
try:
PackageConfig.validate(packages_data)
packages = PackageConfig.from_dict(packages_data)
except ValidationError as e:
raise DbtProjectError(
@@ -304,10 +306,7 @@ class PartialProject(RenderComponents):
)
try:
ProjectContract.validate(rendered.project_dict)
cfg = ProjectContract.from_dict(
rendered.project_dict
)
cfg = ProjectContract.from_dict(rendered.project_dict)
except ValidationError as e:
raise DbtProjectError(validator_error_message(e)) from e
# name/version are required in the Project definition, so we can assume
@@ -347,22 +346,18 @@ class PartialProject(RenderComponents):
# break many things
quoting: Dict[str, Any] = {}
if cfg.quoting is not None:
quoting = cfg.quoting.to_dict(omit_none=True)
quoting = cfg.quoting.to_dict()
dispatch: List[Dict[str, Any]]
models: Dict[str, Any]
seeds: Dict[str, Any]
snapshots: Dict[str, Any]
sources: Dict[str, Any]
tests: Dict[str, Any]
vars_value: VarProvider
dispatch = cfg.dispatch
models = cfg.models
seeds = cfg.seeds
snapshots = cfg.snapshots
sources = cfg.sources
tests = cfg.tests
if cfg.vars is None:
vars_dict: Dict[str, Any] = {}
else:
@@ -404,7 +399,6 @@ class PartialProject(RenderComponents):
models=models,
on_run_start=on_run_start,
on_run_end=on_run_end,
dispatch=dispatch,
seeds=seeds,
snapshots=snapshots,
dbt_version=dbt_version,
@@ -413,7 +407,6 @@ class PartialProject(RenderComponents):
selectors=selectors,
query_comment=query_comment,
sources=sources,
tests=tests,
vars=vars_value,
config_version=cfg.config_version,
unrendered=unrendered,
@@ -516,11 +509,9 @@ class Project:
models: Dict[str, Any]
on_run_start: List[str]
on_run_end: List[str]
dispatch: List[Dict[str, Any]]
seeds: Dict[str, Any]
snapshots: Dict[str, Any]
sources: Dict[str, Any]
tests: Dict[str, Any]
vars: VarProvider
dbt_version: List[VersionSpecifier]
packages: Dict[str, Any]
@@ -576,11 +567,9 @@ class Project:
'models': self.models,
'on-run-start': self.on_run_start,
'on-run-end': self.on_run_end,
'dispatch': self.dispatch,
'seeds': self.seeds,
'snapshots': self.snapshots,
'sources': self.sources,
'tests': self.tests,
'vars': self.vars.to_dict(),
'require-dbt-version': [
v.to_version_string() for v in self.dbt_version
@@ -588,17 +577,16 @@ class Project:
'config-version': self.config_version,
})
if self.query_comment:
result['query-comment'] = \
self.query_comment.to_dict(omit_none=True)
result['query-comment'] = self.query_comment.to_dict()
if with_packages:
result.update(self.packages.to_dict(omit_none=True))
result.update(self.packages.to_dict())
return result
def validate(self):
try:
ProjectContract.validate(self.to_project_config())
ProjectContract.from_dict(self.to_project_config())
except ValidationError as e:
raise DbtProjectError(validator_error_message(e)) from e
@@ -652,9 +640,3 @@ class Project:
f'{list(self.selectors)}'
)
return self.selectors[name]
def get_macro_search_order(self, macro_namespace: str):
for dispatch_entry in self.dispatch:
if dispatch_entry['macro_namespace'] == macro_namespace:
return dispatch_entry['search_order']
return None

View File

@@ -145,9 +145,9 @@ class DbtProjectYamlRenderer(BaseRenderer):
if first == 'vars':
return False
if first in {'seeds', 'models', 'snapshots', 'tests'}:
if first in {'seeds', 'models', 'snapshots', 'seeds'}:
keypath_parts = {
(k.lstrip('+ ') if isinstance(k, str) else k)
(k.lstrip('+') if isinstance(k, str) else k)
for k in keypath
}
# model-level hooks

View File

@@ -33,7 +33,7 @@ from dbt.exceptions import (
raise_compiler_error
)
from dbt.dataclass_schema import ValidationError
from hologram import ValidationError
def _project_quoting_dict(
@@ -78,7 +78,7 @@ class RuntimeConfig(Project, Profile, AdapterRequiredConfig):
get_relation_class_by_name(profile.credentials.type)
.get_default_quote_policy()
.replace_dict(_project_quoting_dict(project, profile))
).to_dict(omit_none=True)
).to_dict()
cli_vars: Dict[str, Any] = parse_cli_vars(getattr(args, 'vars', '{}'))
@@ -102,7 +102,6 @@ class RuntimeConfig(Project, Profile, AdapterRequiredConfig):
models=project.models,
on_run_start=project.on_run_start,
on_run_end=project.on_run_end,
dispatch=project.dispatch,
seeds=project.seeds,
snapshots=project.snapshots,
dbt_version=project.dbt_version,
@@ -111,7 +110,6 @@ class RuntimeConfig(Project, Profile, AdapterRequiredConfig):
selectors=project.selectors,
query_comment=project.query_comment,
sources=project.sources,
tests=project.tests,
vars=project.vars,
config_version=project.config_version,
unrendered=project.unrendered,
@@ -176,7 +174,7 @@ class RuntimeConfig(Project, Profile, AdapterRequiredConfig):
:raises DbtProjectError: If the configuration fails validation.
"""
try:
Configuration.validate(self.serialize())
Configuration.from_dict(self.serialize())
except ValidationError as e:
raise DbtProjectError(validator_error_message(e)) from e
@@ -274,7 +272,7 @@ class RuntimeConfig(Project, Profile, AdapterRequiredConfig):
return frozenset(paths)
def get_resource_config_paths(self) -> Dict[str, PathSet]:
"""Return a dictionary with resource type keys whose values are
"""Return a dictionary with 'seeds' and 'models' keys whose values are
lists of lists of strings, where each inner list of strings represents
a configured path in the resource.
"""
@@ -283,7 +281,6 @@ class RuntimeConfig(Project, Profile, AdapterRequiredConfig):
'seeds': self._get_config_paths(self.seeds),
'snapshots': self._get_config_paths(self.snapshots),
'sources': self._get_config_paths(self.sources),
'tests': self._get_config_paths(self.tests),
}
def get_unused_resource_config_paths(
@@ -329,17 +326,6 @@ class RuntimeConfig(Project, Profile, AdapterRequiredConfig):
if self.dependencies is None:
all_projects = {self.project_name: self}
internal_packages = get_include_paths(self.credentials.type)
# raise exception if fewer installed packages than in packages.yml
count_packages_specified = len(self.packages.packages) # type: ignore
count_packages_installed = len(tuple(self._get_project_directories()))
if count_packages_specified > count_packages_installed:
raise_compiler_error(
f'dbt found {count_packages_specified} package(s) '
f'specified in packages.yml, but only '
f'{count_packages_installed} package(s) installed '
f'in {self.modules_path}. Run "dbt deps" to '
f'install package dependencies.'
)
project_paths = itertools.chain(
internal_packages,
self._get_project_directories()
@@ -391,10 +377,6 @@ class UnsetCredentials(Credentials):
def type(self):
return None
@property
def unique_field(self):
return None
def connection_info(self, *args, **kwargs):
return {}
@@ -409,7 +391,7 @@ class UnsetConfig(UserConfig):
f"'UnsetConfig' object has no attribute {name}"
)
def __post_serialize__(self, dct):
def to_dict(self):
return {}
@@ -498,7 +480,6 @@ class UnsetProfileConfig(RuntimeConfig):
models=project.models,
on_run_start=project.on_run_start,
on_run_end=project.on_run_end,
dispatch=project.dispatch,
seeds=project.seeds,
snapshots=project.snapshots,
dbt_version=project.dbt_version,
@@ -507,7 +488,6 @@ class UnsetProfileConfig(RuntimeConfig):
selectors=project.selectors,
query_comment=project.query_comment,
sources=project.sources,
tests=project.tests,
vars=project.vars,
config_version=project.config_version,
unrendered=project.unrendered,

View File

@@ -1,9 +1,8 @@
from pathlib import Path
from typing import Dict, Any
from dbt.clients.yaml_helper import ( # noqa: F401
yaml, Loader, Dumper, load_yaml_text
)
from dbt.dataclass_schema import ValidationError
import yaml
from hologram import ValidationError
from .renderer import SelectorRenderer
@@ -12,6 +11,7 @@ from dbt.clients.system import (
path_exists,
resolve_path_from_base,
)
from dbt.clients.yaml_helper import load_yaml_text
from dbt.contracts.selection import SelectorFile
from dbt.exceptions import DbtSelectorsError, RuntimeException
from dbt.graph import parse_from_selectors_definition, SelectionSpec
@@ -30,11 +30,9 @@ Validator Error:
class SelectorConfig(Dict[str, SelectionSpec]):
@classmethod
def selectors_from_dict(cls, data: Dict[str, Any]) -> 'SelectorConfig':
def from_dict(cls, data: Dict[str, Any]) -> 'SelectorConfig':
try:
SelectorFile.validate(data)
selector_file = SelectorFile.from_dict(data)
selectors = parse_from_selectors_definition(selector_file)
except ValidationError as exc:
@@ -68,7 +66,7 @@ class SelectorConfig(Dict[str, SelectionSpec]):
f'Could not render selector data: {exc}',
result_type='invalid_selector',
) from exc
return cls.selectors_from_dict(rendered)
return cls.from_dict(rendered)
@classmethod
def from_path(
@@ -109,7 +107,7 @@ def selector_config_from_data(
selectors_data = {'selectors': []}
try:
selectors = SelectorConfig.selectors_from_dict(selectors_data)
selectors = SelectorConfig.from_dict(selectors_data)
except ValidationError as e:
raise DbtSelectorsError(
MALFORMED_SELECTOR_ERROR.format(error=str(e.message)),

View File

@@ -7,14 +7,13 @@ from typing import (
from dbt import flags
from dbt import tracking
from dbt.clients.jinja import undefined_error, get_rendered
from dbt.clients.yaml_helper import ( # noqa: F401
yaml, safe_load, SafeLoader, Loader, Dumper
)
from dbt.clients import yaml_helper
from dbt.contracts.graph.compiled import CompiledResource
from dbt.exceptions import raise_compiler_error, MacroReturn
from dbt.logger import GLOBAL_LOGGER as logger
from dbt.version import __version__ as dbt_version
import yaml
# These modules are added to the context. Consider alternative
# approaches which will extend well to potentially many modules
import pytz
@@ -173,7 +172,6 @@ class BaseContext(metaclass=ContextMeta):
builtins[key] = value
return builtins
# no dbtClassMixin so this is not an actual override
def to_dict(self):
self._ctx['context'] = self._ctx
builtins = self.generate_builtins()
@@ -396,7 +394,7 @@ class BaseContext(metaclass=ContextMeta):
-- ["good"]
"""
try:
return safe_load(value)
return yaml_helper.safe_load(value)
except (AttributeError, ValueError, yaml.YAMLError):
return default
@@ -538,5 +536,4 @@ class BaseContext(metaclass=ContextMeta):
def generate_base_context(cli_vars: Dict[str, Any]) -> Dict[str, Any]:
ctx = BaseContext(cli_vars)
# This is not a Mashumaro to_dict call
return ctx.to_dict()

View File

@@ -75,26 +75,8 @@ class SchemaYamlContext(ConfiguredContext):
)
class MacroResolvingContext(ConfiguredContext):
def __init__(self, config):
super().__init__(config)
@contextproperty
def var(self) -> ConfiguredVar:
return ConfiguredVar(
self._ctx, self.config, self.config.project_name
)
def generate_schema_yml(
config: AdapterRequiredConfig, project_name: str
) -> Dict[str, Any]:
ctx = SchemaYamlContext(config, project_name)
return ctx.to_dict()
def generate_macro_context(
config: AdapterRequiredConfig,
) -> Dict[str, Any]:
ctx = MacroResolvingContext(config)
return ctx.to_dict()

View File

@@ -41,8 +41,6 @@ class UnrenderedConfig(ConfigSource):
model_configs = unrendered.get('snapshots')
elif resource_type == NodeType.Source:
model_configs = unrendered.get('sources')
elif resource_type == NodeType.Test:
model_configs = unrendered.get('tests')
else:
model_configs = unrendered.get('models')
@@ -63,8 +61,6 @@ class RenderedConfig(ConfigSource):
model_configs = self.project.snapshots
elif resource_type == NodeType.Source:
model_configs = self.project.sources
elif resource_type == NodeType.Test:
model_configs = self.project.tests
else:
model_configs = self.project.models
return model_configs
@@ -97,7 +93,7 @@ class BaseContextConfigGenerator(Generic[T]):
result = {}
for key, value in level_config.items():
if key.startswith('+'):
result[key[1:].strip()] = deepcopy(value)
result[key[1:]] = deepcopy(value)
elif not isinstance(value, dict):
result[key] = deepcopy(value)
@@ -120,12 +116,11 @@ class BaseContextConfigGenerator(Generic[T]):
def calculate_node_config(
self,
config_call_dict: Dict[str, Any],
config_calls: List[Dict[str, Any]],
fqn: List[str],
resource_type: NodeType,
project_name: str,
base: bool,
patch_config_dict: Dict[str, Any] = None
) -> BaseConfig:
own_config = self.get_node_project(project_name)
@@ -135,15 +130,8 @@ class BaseContextConfigGenerator(Generic[T]):
for fqn_config in project_configs:
result = self._update_from_config(result, fqn_config)
# When schema files patch config, it has lower precedence than
# config in the models (config_call_dict), so we add the patch_config_dict
# before the config_call_dict
if patch_config_dict:
result = self._update_from_config(result, patch_config_dict)
# config_calls are created in the 'experimental' model parser and
# the ParseConfigObject (via add_config_call)
result = self._update_from_config(result, config_call_dict)
for config_call in config_calls:
result = self._update_from_config(result, config_call)
if own_config.project_name != self._active_project.project_name:
for fqn_config in self._active_project_configs(fqn, resource_type):
@@ -155,12 +143,11 @@ class BaseContextConfigGenerator(Generic[T]):
@abstractmethod
def calculate_node_config_dict(
self,
config_call_dict: Dict[str, Any],
config_calls: List[Dict[str, Any]],
fqn: List[str],
resource_type: NodeType,
project_name: str,
base: bool,
patch_config_dict: Dict[str, Any],
) -> Dict[str, Any]:
...
@@ -178,7 +165,7 @@ class ContextConfigGenerator(BaseContextConfigGenerator[C]):
# Calculate the defaults. We don't want to validate the defaults,
# because it might be invalid in the case of required config members
# (such as on snapshots!)
result = config_cls.from_dict({})
result = config_cls.from_dict({}, validate=False)
return result
def _update_from_config(
@@ -195,23 +182,21 @@ class ContextConfigGenerator(BaseContextConfigGenerator[C]):
def calculate_node_config_dict(
self,
config_call_dict: Dict[str, Any],
config_calls: List[Dict[str, Any]],
fqn: List[str],
resource_type: NodeType,
project_name: str,
base: bool,
patch_config_dict: dict = None
) -> Dict[str, Any]:
config = self.calculate_node_config(
config_call_dict=config_call_dict,
config_calls=config_calls,
fqn=fqn,
resource_type=resource_type,
project_name=project_name,
base=base,
patch_config_dict=patch_config_dict
)
finalized = config.finalize_and_validate()
return finalized.to_dict(omit_none=True)
return finalized.to_dict()
class UnrenderedConfigGenerator(BaseContextConfigGenerator[Dict[str, Any]]):
@@ -220,20 +205,18 @@ class UnrenderedConfigGenerator(BaseContextConfigGenerator[Dict[str, Any]]):
def calculate_node_config_dict(
self,
config_call_dict: Dict[str, Any],
config_calls: List[Dict[str, Any]],
fqn: List[str],
resource_type: NodeType,
project_name: str,
base: bool,
patch_config_dict: dict = None
) -> Dict[str, Any]:
return self.calculate_node_config(
config_call_dict=config_call_dict,
config_calls=config_calls,
fqn=fqn,
resource_type=resource_type,
project_name=project_name,
base=base,
patch_config_dict=patch_config_dict
)
def initial_result(
@@ -264,39 +247,20 @@ class ContextConfig:
resource_type: NodeType,
project_name: str,
) -> None:
self._config_call_dict: Dict[str, Any] = {}
self._config_calls: List[Dict[str, Any]] = []
self._active_project = active_project
self._fqn = fqn
self._resource_type = resource_type
self._project_name = project_name
def add_config_call(self, opts: Dict[str, Any]) -> None:
dct = self._config_call_dict
self._add_config_call(dct, opts)
@classmethod
def _add_config_call(cls, config_call_dict, opts: Dict[str, Any]) -> None:
for k, v in opts.items():
# MergeBehavior for post-hook and pre-hook is to collect all
# values, instead of overwriting
if k in BaseConfig.mergebehavior['append']:
if not isinstance(v, list):
v = [v]
if k in BaseConfig.mergebehavior['update'] and not isinstance(v, dict):
raise InternalException(f'expected dict, got {v}')
if k in config_call_dict and isinstance(config_call_dict[k], list):
config_call_dict[k].extend(v)
elif k in config_call_dict and isinstance(config_call_dict[k], dict):
config_call_dict[k].update(v)
else:
config_call_dict[k] = v
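_add_config_call above accumulates repeated config() calls into one dict, appending hook-like keys and merging dict-like keys instead of overwriting. A simplified standalone version with the append keys hard-coded (the real sets come from BaseConfig.mergebehavior):

def add_config_call_sketch(acc, opts, append_keys=('pre-hook', 'post-hook')):
    for k, v in opts.items():
        if k in append_keys and not isinstance(v, list):
            v = [v]
        if k in acc and isinstance(acc[k], list):
            acc[k].extend(v)
        elif k in acc and isinstance(acc[k], dict):
            acc[k].update(v)
        else:
            acc[k] = v

acc = {}
add_config_call_sketch(acc, {'materialized': 'table', 'post-hook': 'analyze {{ this }}'})
add_config_call_sketch(acc, {'post-hook': 'grant select on {{ this }} to reporter'})
print(acc)
# {'materialized': 'table',
#  'post-hook': ['analyze {{ this }}', 'grant select on {{ this }} to reporter']}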
def update_in_model_config(self, opts: Dict[str, Any]) -> None:
self._config_calls.append(opts)
def build_config_dict(
self,
base: bool = False,
*,
rendered: bool = True,
patch_config_dict: dict = None
) -> Dict[str, Any]:
if rendered:
src = ContextConfigGenerator(self._active_project)
@@ -304,10 +268,9 @@ class ContextConfig:
src = UnrenderedConfigGenerator(self._active_project)
return src.calculate_node_config_dict(
config_call_dict=self._config_call_dict,
config_calls=self._config_calls,
fqn=self._fqn,
resource_type=self._resource_type,
project_name=self._project_name,
base=base,
patch_config_dict=patch_config_dict
)

View File

@@ -57,19 +57,14 @@ class DocsRuntimeContext(SchemaYamlContext):
else:
doc_invalid_args(self.node, args)
# ParsedDocumentation
target_doc = self.manifest.resolve_doc(
doc_name,
doc_package_name,
self._project_name,
self.node.package_name,
)
if target_doc:
file_id = target_doc.file_id
if file_id in self.manifest.files:
source_file = self.manifest.files[file_id]
source_file.add_node(self.node.unique_id)
else:
if target_doc is None:
doc_target_not_found(self.node, doc_name, doc_package_name)
return target_doc.block_contents
@@ -82,5 +77,4 @@ def generate_runtime_docs(
current_project: str,
) -> Dict[str, Any]:
ctx = DocsRuntimeContext(config, target, manifest, current_project)
# This is not a Mashumaro to_dict call
return ctx.to_dict()

View File

@@ -1,199 +0,0 @@
from typing import (
Dict, MutableMapping, Optional
)
from dbt.contracts.graph.parsed import ParsedMacro
from dbt.exceptions import raise_duplicate_macro_name, raise_compiler_error
from dbt.include.global_project import PROJECT_NAME as GLOBAL_PROJECT_NAME
from dbt.clients.jinja import MacroGenerator
MacroNamespace = Dict[str, ParsedMacro]
# This class builds the MacroResolver by adding macros
# to various categories for finding macros in the right order,
# so that higher precedence macros are found first.
# This functionality is also provided by the MacroNamespace,
# but the intention is to eventually replace that class.
# This enables us to get the macro unique_id without
# processing every macro in the project.
# Note: the root project macros override everything in the
# dbt internal projects. External projects (dependencies) will
# use their own macros first, then pull from the root project
# followed by dbt internal projects.
class MacroResolver:
def __init__(
self,
macros: MutableMapping[str, ParsedMacro],
root_project_name: str,
internal_package_names,
) -> None:
self.root_project_name = root_project_name
self.macros = macros
# internal packages comes from get_adapter_package_names
self.internal_package_names = internal_package_names
# To be filled in from macros.
self.internal_packages: Dict[str, MacroNamespace] = {}
self.packages: Dict[str, MacroNamespace] = {}
self.root_package_macros: MacroNamespace = {}
# add the macros to internal_packages, packages, and root packages
self.add_macros()
self._build_internal_packages_namespace()
self._build_macros_by_name()
def _build_internal_packages_namespace(self):
# Iterate in reverse order and overwrite: the packages that are first
# in the list are the ones we want to "win".
self.internal_packages_namespace: MacroNamespace = {}
for pkg in reversed(self.internal_package_names):
if pkg in self.internal_packages:
# Turn the internal packages into a flat namespace
self.internal_packages_namespace.update(
self.internal_packages[pkg])
# search order:
# local_namespace (package of particular node), not including
# the internal packages or the root package
# This means that within an extra package, it uses its own macros
# root package namespace
# non-internal packages (that aren't local or root)
# dbt internal packages
def _build_macros_by_name(self):
macros_by_name = {}
# all internal packages (already in the right order)
for macro in self.internal_packages_namespace.values():
macros_by_name[macro.name] = macro
# non-internal packages
for fnamespace in self.packages.values():
for macro in fnamespace.values():
macros_by_name[macro.name] = macro
# root package macros
for macro in self.root_package_macros.values():
macros_by_name[macro.name] = macro
self.macros_by_name = macros_by_name
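The build order above means later update() calls win, so root-package macros shadow other packages, which in turn shadow dbt's internal packages. A toy illustration with plain strings standing in for ParsedMacro objects (package and macro names invented):

internal_namespace = {'generate_schema_name': 'dbt.generate_schema_name'}
package_namespaces = {'dbt_utils': {'generate_schema_name': 'dbt_utils.generate_schema_name'}}
root_package_macros = {'generate_schema_name': 'my_project.generate_schema_name'}

macros_by_name = {}
macros_by_name.update(internal_namespace)
for namespace in package_namespaces.values():
    macros_by_name.update(namespace)
macros_by_name.update(root_package_macros)

print(macros_by_name['generate_schema_name'])  # -> 'my_project.generate_schema_name'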
def _add_macro_to(
self,
package_namespaces: Dict[str, MacroNamespace],
macro: ParsedMacro,
):
if macro.package_name in package_namespaces:
namespace = package_namespaces[macro.package_name]
else:
namespace = {}
package_namespaces[macro.package_name] = namespace
if macro.name in namespace:
raise_duplicate_macro_name(
macro, macro, macro.package_name
)
package_namespaces[macro.package_name][macro.name] = macro
def add_macro(self, macro: ParsedMacro):
macro_name: str = macro.name
# internal macros (from plugins) will be processed separately from
# project macros, so store them in a different place
if macro.package_name in self.internal_package_names:
self._add_macro_to(self.internal_packages, macro)
else:
# if it's not an internal package
self._add_macro_to(self.packages, macro)
# add to root_package_macros if it's in the root package
if macro.package_name == self.root_project_name:
self.root_package_macros[macro_name] = macro
def add_macros(self):
for macro in self.macros.values():
self.add_macro(macro)
def get_macro(self, local_package, macro_name):
local_package_macros = {}
if (local_package not in self.internal_package_names and
local_package in self.packages):
local_package_macros = self.packages[local_package]
# First: search the local packages for this macro
if macro_name in local_package_macros:
return local_package_macros[macro_name]
# Now look up in the standard search order
if macro_name in self.macros_by_name:
return self.macros_by_name[macro_name]
return None
def get_macro_id(self, local_package, macro_name):
macro = self.get_macro(local_package, macro_name)
if macro is None:
return None
else:
return macro.unique_id
# Currently this is just used by test processing in the schema
# parser (in connection with the MacroResolver). Future work
# will extend the use of these classes to other parsing areas.
# One of the features of this class compared to the MacroNamespace
# is that you can limit the number of macros provided to the
# context dictionary in the 'to_dict' manifest method.
class TestMacroNamespace:
def __init__(
self, macro_resolver, ctx, node, thread_ctx, depends_on_macros
):
self.macro_resolver = macro_resolver
self.ctx = ctx
self.node = node # can be none
self.thread_ctx = thread_ctx
self.local_namespace = {}
self.project_namespace = {}
if depends_on_macros:
dep_macros = []
self.recursively_get_depends_on_macros(depends_on_macros, dep_macros)
for macro_unique_id in dep_macros:
if macro_unique_id in self.macro_resolver.macros:
# Split up the macro unique_id to get the project_name
(_, project_name, macro_name) = macro_unique_id.split('.')
# Save the plain macro_name in the local_namespace
macro = self.macro_resolver.macros[macro_unique_id]
macro_gen = MacroGenerator(
macro, self.ctx, self.node, self.thread_ctx,
)
self.local_namespace[macro_name] = macro_gen
# We also need the two part macro name
if project_name not in self.project_namespace:
self.project_namespace[project_name] = {}
self.project_namespace[project_name][macro_name] = macro_gen
def recursively_get_depends_on_macros(self, depends_on_macros, dep_macros):
for macro_unique_id in depends_on_macros:
if macro_unique_id in dep_macros:
continue
dep_macros.append(macro_unique_id)
if macro_unique_id in self.macro_resolver.macros:
macro = self.macro_resolver.macros[macro_unique_id]
if macro.depends_on.macros:
self.recursively_get_depends_on_macros(macro.depends_on.macros, dep_macros)
def get_from_package(
self, package_name: Optional[str], name: str
) -> Optional[MacroGenerator]:
macro = None
if package_name is None:
macro = self.macro_resolver.macros_by_name.get(name)
elif package_name == GLOBAL_PROJECT_NAME:
macro = self.macro_resolver.internal_packages_namespace.get(name)
elif package_name in self.macro_resolver.packages:
macro = self.macro_resolver.packages[package_name].get(name)
else:
raise_compiler_error(
f"Could not find package '{package_name}'"
)
if not macro:
return None
macro_func = MacroGenerator(
macro, self.ctx, self.node, self.thread_ctx
)
return macro_func

View File

@@ -15,21 +15,13 @@ NamespaceMember = Union[FlatNamespace, MacroGenerator]
FullNamespace = Dict[str, NamespaceMember]
# The point of this class is to collect the various macros
# and provide the ability to flatten them into the ManifestContexts
# that are created for jinja, so that macro calls can be resolved.
# Creates special iterators and _keys methods to flatten the lists.
# When this class is created it has a static 'local_namespace' which
# depends on the package of the node, so it only works for one
# particular local package at a time for "flattening" into a context.
# 'get_by_package' should work for any macro.
class MacroNamespace(Mapping):
def __init__(
self,
global_namespace: FlatNamespace, # root package macros
local_namespace: FlatNamespace, # packages for *this* node
global_project_namespace: FlatNamespace, # internal packages
packages: Dict[str, FlatNamespace], # non-internal packages
global_namespace: FlatNamespace,
local_namespace: FlatNamespace,
global_project_namespace: FlatNamespace,
packages: Dict[str, FlatNamespace],
):
self.global_namespace: FlatNamespace = global_namespace
self.local_namespace: FlatNamespace = local_namespace
@@ -37,24 +29,20 @@ class MacroNamespace(Mapping):
self.global_project_namespace: FlatNamespace = global_project_namespace
def _search_order(self) -> Iterable[Union[FullNamespace, FlatNamespace]]:
yield self.local_namespace # local package
yield self.global_namespace # root package
yield self.packages # non-internal packages
yield self.local_namespace
yield self.global_namespace
yield self.packages
yield {
GLOBAL_PROJECT_NAME: self.global_project_namespace, # dbt
GLOBAL_PROJECT_NAME: self.global_project_namespace,
}
yield self.global_project_namespace # other internal project besides dbt
yield self.global_project_namespace
# provides special keys method for MacroNamespace iterator
# returns keys from local_namespace, global_namespace, packages,
# global_project_namespace
def _keys(self) -> Set[str]:
keys: Set[str] = set()
for search in self._search_order():
keys.update(search)
return keys
# special iterator using special keys
def __iter__(self) -> Iterator[str]:
for key in self._keys():
yield key
@@ -84,10 +72,6 @@ class MacroNamespace(Mapping):
)
# This class builds the MacroNamespace by adding macros to
# internal_packages or packages, and locals/globals.
# Call 'build_namespace' to return a MacroNamespace.
# This is used by ManifestContext (and subclasses)
class MacroNamespaceBuilder:
def __init__(
self,
@@ -99,17 +83,10 @@ class MacroNamespaceBuilder:
) -> None:
self.root_package = root_package
self.search_package = search_package
# internal packages comes from get_adapter_package_names
self.internal_package_names = set(internal_packages)
self.internal_package_names_order = internal_packages
# macro_func is added here if in root package, since
# the root package acts as a "global" namespace, overriding
# everything else except local external package macro calls
self.globals: FlatNamespace = {}
# macro_func is added here if it's the package for this node
self.locals: FlatNamespace = {}
# Create a dictionary of [package name][macro name] =
# MacroGenerator object which acts like a function
self.internal_packages: Dict[str, FlatNamespace] = {}
self.packages: Dict[str, FlatNamespace] = {}
self.thread_ctx = thread_ctx
@@ -117,28 +94,25 @@ class MacroNamespaceBuilder:
def _add_macro_to(
self,
hierarchy: Dict[str, FlatNamespace],
heirarchy: Dict[str, FlatNamespace],
macro: ParsedMacro,
macro_func: MacroGenerator,
):
if macro.package_name in hierarchy:
namespace = hierarchy[macro.package_name]
if macro.package_name in heirarchy:
namespace = heirarchy[macro.package_name]
else:
namespace = {}
hierarchy[macro.package_name] = namespace
heirarchy[macro.package_name] = namespace
if macro.name in namespace:
raise_duplicate_macro_name(
macro_func.macro, macro, macro.package_name
)
hierarchy[macro.package_name][macro.name] = macro_func
heirarchy[macro.package_name][macro.name] = macro_func
def add_macro(self, macro: ParsedMacro, ctx: Dict[str, Any]):
macro_name: str = macro.name
# MacroGenerator is in clients/jinja.py
# a MacroGenerator object is a callable object that will
# execute the MacroGenerator.__call__ function
macro_func: MacroGenerator = MacroGenerator(
macro, ctx, self.node, self.thread_ctx
)
@@ -148,12 +122,10 @@ class MacroNamespaceBuilder:
if macro.package_name in self.internal_package_names:
self._add_macro_to(self.internal_packages, macro, macro_func)
else:
# if it's not an internal package
self._add_macro_to(self.packages, macro, macro_func)
# add to locals if it's the package this node is in
if macro.package_name == self.search_package:
self.locals[macro_name] = macro_func
# add to globals if it's in the root package
elif macro.package_name == self.root_package:
self.globals[macro_name] = macro_func
@@ -171,12 +143,11 @@ class MacroNamespaceBuilder:
global_project_namespace: FlatNamespace = {}
for pkg in reversed(self.internal_package_names_order):
if pkg in self.internal_packages:
# add the macros pointed to by this package name
global_project_namespace.update(self.internal_packages[pkg])
return MacroNamespace(
global_namespace=self.globals, # root package macros
local_namespace=self.locals, # packages for *this* node
global_project_namespace=global_project_namespace, # internal packages
packages=self.packages, # non internal_packages
global_namespace=self.globals,
local_namespace=self.locals,
global_project_namespace=global_project_namespace,
packages=self.packages,
)

View File

@@ -3,7 +3,6 @@ from typing import List
from dbt.clients.jinja import MacroStack
from dbt.contracts.connection import AdapterRequiredConfig
from dbt.contracts.graph.manifest import Manifest
from dbt.context.macro_resolver import TestMacroNamespace
from .configured import ConfiguredContext
@@ -25,20 +24,12 @@ class ManifestContext(ConfiguredContext):
) -> None:
super().__init__(config)
self.manifest = manifest
# this is the package of the node for which this context was built
self.search_package = search_package
self.macro_stack = MacroStack()
# This namespace is used by the BaseDatabaseWrapper in jinja rendering.
# The namespace is passed to it when it's constructed. It expects
# to be able to do: namespace.get_from_package(..)
self.namespace = self._build_namespace()
def _build_namespace(self):
# this takes all the macros in the manifest and adds them
# to the MacroNamespaceBuilder stored in self.namespace
builder = self._get_namespace_builder()
return builder.build_namespace(
self.manifest.macros.values(), self._ctx
self.namespace = builder.build_namespace(
self.manifest.macros.values(),
self._ctx,
)
def _get_namespace_builder(self) -> MacroNamespaceBuilder:
@@ -55,16 +46,9 @@ class ManifestContext(ConfiguredContext):
None,
)
# This does not use the Mashumaro code
def to_dict(self):
dct = super().to_dict()
# This moves all of the macros in the 'namespace' into top level
# keys in the manifest dictionary
if isinstance(self.namespace, TestMacroNamespace):
dct.update(self.namespace.local_namespace)
dct.update(self.namespace.project_namespace)
else:
dct.update(self.namespace)
dct.update(self.namespace)
return dct

View File

@@ -8,22 +8,17 @@ from typing_extensions import Protocol
from dbt import deprecations
from dbt.adapters.base.column import Column
from dbt.adapters.factory import (
get_adapter, get_adapter_package_names, get_adapter_type_names
)
from dbt.adapters.factory import get_adapter, get_adapter_package_names
from dbt.clients import agate_helper
from dbt.clients.jinja import get_rendered, MacroGenerator, MacroStack
from dbt.clients.jinja import get_rendered, MacroGenerator
from dbt.config import RuntimeConfig, Project
from .base import contextmember, contextproperty, Var
from .configured import FQNLookup
from .context_config import ContextConfig
from dbt.context.macro_resolver import MacroResolver, TestMacroNamespace
from .macros import MacroNamespaceBuilder, MacroNamespace
from .manifest import ManifestContext
from dbt.contracts.graph.manifest import Manifest, Disabled
from dbt.contracts.connection import AdapterResponse
from dbt.contracts.graph.manifest import (
Manifest, Disabled
)
from dbt.contracts.graph.compiled import (
CompiledResource,
CompiledSeedNode,
@@ -109,18 +104,14 @@ class BaseDatabaseWrapper:
return self._adapter.commit_if_has_connection()
def _get_adapter_macro_prefixes(self) -> List[str]:
# order matters for dispatch:
# 1. current adapter
# 2. any parent adapters (dependencies)
# 3. 'default'
search_prefixes = get_adapter_type_names(self._adapter.type()) + ['default']
# a future version of this could have plugins automatically fall
# back to their dependencies' dependencies by using
# `get_adapter_type_names` instead of `[self.config.credentials.type]`
search_prefixes = [self._adapter.type(), 'default']
return search_prefixes
def dispatch(
self,
macro_name: str,
macro_namespace: Optional[str] = None,
packages: Optional[List[str]] = None,
self, macro_name: str, packages: Optional[List[str]] = None
) -> MacroGenerator:
search_packages: List[Optional[str]]
@@ -134,25 +125,15 @@ class BaseDatabaseWrapper:
)
raise CompilationException(msg)
if packages is not None:
deprecations.warn('dispatch-packages', macro_name=macro_name)
namespace = packages if packages else macro_namespace
if namespace is None:
if packages is None:
search_packages = [None]
elif isinstance(namespace, str):
search_packages = self._adapter.config.get_macro_search_order(namespace)
if not search_packages and namespace in self._adapter.config.dependencies:
search_packages = [namespace]
if not search_packages:
raise CompilationException(
f'In adapter.dispatch, got a string packages argument '
f'("{packages}"), but packages should be None or a list.'
)
elif isinstance(packages, str):
raise CompilationException(
f'In adapter.dispatch, got a string packages argument '
f'("{packages}"), but packages should be None or a list.'
)
else:
# Not a string and not None so must be a list
search_packages = namespace
search_packages = packages
attempts = []
@@ -160,7 +141,6 @@ class BaseDatabaseWrapper:
for prefix in self._get_adapter_macro_prefixes():
search_name = f'{prefix}__{macro_name}'
try:
# this uses the namespace from the context
macro = self._namespace.get_from_package(
package_name, search_name
)
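For illustration, a sketch of the candidate names dispatch() tries, assuming (as in the elided outer loop) that packages are iterated outermost and adapter prefixes innermost; the adapter and package names are examples only:

def dispatch_candidates_sketch(macro_name, adapter_type, search_packages):
    candidates = []
    for package_name in search_packages:
        for prefix in (adapter_type, 'default'):
            candidates.append((package_name, f'{prefix}__{macro_name}'))
    return candidates

print(dispatch_candidates_sketch('concat', 'snowflake', [None, 'dbt_utils']))
# [(None, 'snowflake__concat'), (None, 'default__concat'),
#  ('dbt_utils', 'snowflake__concat'), ('dbt_utils', 'default__concat')]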
@@ -279,7 +259,7 @@ class Config(Protocol):
...
# Implementation of "config(..)" calls in models
# `config` implementations
class ParseConfigObject(Config):
def __init__(self, model, context_config: Optional[ContextConfig]):
self.model = model
@@ -316,7 +296,7 @@ class ParseConfigObject(Config):
raise RuntimeException(
'At parse time, did not receive a context config'
)
self.context_config.add_config_call(opts)
self.context_config.update_in_model_config(opts)
return ''
def set(self, name, value):
@@ -658,13 +638,10 @@ class ProviderContext(ManifestContext):
self.context_config: Optional[ContextConfig] = context_config
self.provider: Provider = provider
self.adapter = get_adapter(self.config)
# The macro namespace is used in creating the DatabaseWrapper
self.db_wrapper = self.provider.DatabaseWrapper(
self.adapter, self.namespace
)
# This overrides the method in ManifestContext, and provides
# a model, which the ManifestContext builder does not
def _get_namespace_builder(self):
internal_packages = get_adapter_package_names(
self.config.credentials.type
@@ -1131,7 +1108,7 @@ class ProviderContext(ManifestContext):
@contextproperty('model')
def ctx_model(self) -> Dict[str, Any]:
return self.model.to_dict(omit_none=True)
return self.model.to_dict()
@contextproperty
def pre_hooks(self) -> Optional[List[Dict[str, Any]]]:
@@ -1195,13 +1172,14 @@ class ProviderContext(ManifestContext):
"""
deprecations.warn('adapter-macro', macro_name=name)
original_name = name
package_name = None
package_names: Optional[List[str]] = None
if '.' in name:
package_name, name = name.split('.', 1)
package_names = [package_name]
try:
macro = self.db_wrapper.dispatch(
macro_name=name, macro_namespace=package_name
macro_name=name, packages=package_names
)
except CompilationException as exc:
raise CompilationException(
@@ -1243,18 +1221,18 @@ class ModelContext(ProviderContext):
@contextproperty
def pre_hooks(self) -> List[Dict[str, Any]]:
if self.model.resource_type in [NodeType.Source, NodeType.Test]:
if isinstance(self.model, ParsedSourceDefinition):
return []
return [
h.to_dict(omit_none=True) for h in self.model.config.pre_hook
h.to_dict() for h in self.model.config.pre_hook
]
@contextproperty
def post_hooks(self) -> List[Dict[str, Any]]:
if self.model.resource_type in [NodeType.Source, NodeType.Test]:
if isinstance(self.model, ParsedSourceDefinition):
return []
return [
h.to_dict(omit_none=True) for h in self.model.config.post_hook
h.to_dict() for h in self.model.config.post_hook
]
@contextproperty
@@ -1311,21 +1289,27 @@ class ModelContext(ProviderContext):
return self.db_wrapper.Relation.create_from(self.config, self.model)
# This is called by '_context_for', used in 'render_with_context'
def generate_parser_model(
model: ManifestNode,
config: RuntimeConfig,
manifest: Manifest,
context_config: ContextConfig,
) -> Dict[str, Any]:
# The __init__ method of ModelContext also initializes
# a ManifestContext object which creates a MacroNamespaceBuilder
# which adds every macro in the Manifest.
ctx = ModelContext(
model, config, manifest, ParseProvider(), context_config
)
# The 'to_dict' method in ManifestContext moves all of the macro names
# in the macro 'namespace' up to top level keys
return ctx.to_dict()
def generate_parser_macro(
macro: ParsedMacro,
config: RuntimeConfig,
manifest: Manifest,
package_name: Optional[str],
) -> Dict[str, Any]:
ctx = MacroContext(
macro, config, manifest, ParseProvider(), package_name
)
return ctx.to_dict()
@@ -1403,68 +1387,3 @@ def generate_parse_exposure(
manifest,
)
}
# This class is currently used by the schema parser in order
# to limit the number of macros in the context by using
# the TestMacroNamespace
class TestContext(ProviderContext):
def __init__(
self,
model,
config: RuntimeConfig,
manifest: Manifest,
provider: Provider,
context_config: Optional[ContextConfig],
macro_resolver: MacroResolver,
) -> None:
# this must be before super init so that macro_resolver exists for
# build_namespace
self.macro_resolver = macro_resolver
self.thread_ctx = MacroStack()
super().__init__(model, config, manifest, provider, context_config)
self._build_test_namespace()
# We need to rebuild this because it's already been built by
# the ProviderContext with the wrong namespace.
self.db_wrapper = self.provider.DatabaseWrapper(
self.adapter, self.namespace
)
def _build_namespace(self):
return {}
# this overrides _build_namespace in ManifestContext which provides a
# complete namespace of all macros to only specify macros in the depends_on
# This only provides a namespace with macros in the test node
# 'depends_on.macros' by using the TestMacroNamespace
def _build_test_namespace(self):
depends_on_macros = []
if self.model.depends_on and self.model.depends_on.macros:
depends_on_macros = self.model.depends_on.macros
lookup_macros = depends_on_macros.copy()
for macro_unique_id in lookup_macros:
lookup_macro = self.macro_resolver.macros.get(macro_unique_id)
if lookup_macro:
depends_on_macros.extend(lookup_macro.depends_on.macros)
macro_namespace = TestMacroNamespace(
self.macro_resolver, self._ctx, self.model, self.thread_ctx,
depends_on_macros
)
self.namespace = macro_namespace
def generate_test_context(
model: ManifestNode,
config: RuntimeConfig,
manifest: Manifest,
context_config: ContextConfig,
macro_resolver: MacroResolver
) -> Dict[str, Any]:
ctx = TestContext(
model, config, manifest, ParseProvider(), context_config,
macro_resolver
)
# The 'to_dict' method in ManifestContext moves all of the macro names
# in the macro 'namespace' up to top level keys
return ctx.to_dict()

View File

@@ -1,31 +1,29 @@
import abc
import itertools
import hashlib
from dataclasses import dataclass, field
from typing import (
Any, ClassVar, Dict, Tuple, Iterable, Optional, List, Callable,
Any, ClassVar, Dict, Tuple, Iterable, Optional, NewType, List, Callable,
)
from typing_extensions import Protocol
from hologram import JsonSchemaMixin
from hologram.helpers import (
StrEnum, register_pattern, ExtensibleJsonSchemaMixin
)
from dbt.contracts.util import Replaceable
from dbt.exceptions import InternalException
from dbt.utils import translate_aliases
from dbt.logger import GLOBAL_LOGGER as logger
from typing_extensions import Protocol
from dbt.dataclass_schema import (
dbtClassMixin, StrEnum, ExtensibleDbtClassMixin, HyphenatedDbtClassMixin,
ValidatedStringMixin, register_pattern
)
from dbt.contracts.util import Replaceable
class Identifier(ValidatedStringMixin):
ValidationRegex = r'^[A-Za-z_][A-Za-z0-9_]+$'
# we need register_pattern for jsonschema validation
Identifier = NewType('Identifier', str)
register_pattern(Identifier, r'^[A-Za-z_][A-Za-z0-9_]+$')
@dataclass
class AdapterResponse(dbtClassMixin):
class AdapterResponse(JsonSchemaMixin):
_message: str
code: Optional[str] = None
rows_affected: Optional[int] = None
@@ -42,19 +40,20 @@ class ConnectionState(StrEnum):
@dataclass(init=False)
class Connection(ExtensibleDbtClassMixin, Replaceable):
class Connection(ExtensibleJsonSchemaMixin, Replaceable):
type: Identifier
name: Optional[str] = None
name: Optional[str]
state: ConnectionState = ConnectionState.INIT
transaction_open: bool = False
# prevent serialization
_handle: Optional[Any] = None
_credentials: Optional[Any] = None
_credentials: JsonSchemaMixin = field(init=False)
def __init__(
self,
type: Identifier,
name: Optional[str],
credentials: dbtClassMixin,
credentials: JsonSchemaMixin,
state: ConnectionState = ConnectionState.INIT,
transaction_open: bool = False,
handle: Optional[Any] = None,
@@ -114,7 +113,7 @@ class LazyHandle:
# will work.
@dataclass # type: ignore
class Credentials(
ExtensibleDbtClassMixin,
ExtensibleJsonSchemaMixin,
Replaceable,
metaclass=abc.ABCMeta
):
@@ -128,21 +127,12 @@ class Credentials(
'type not implemented for base credentials class'
)
@abc.abstractproperty
def unique_field(self) -> str:
raise NotImplementedError(
'unique_field not implemented for base credentials class'
)
def hashed_unique_field(self) -> str:
return hashlib.md5(self.unique_field.encode('utf-8')).hexdigest()
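# Illustrative sketch (not part of this changeset): what hashed_unique_field
# produces. 'unique_field' is adapter-specific; the value below is
# hypothetical.
import hashlib
digest = hashlib.md5('db.example.com'.encode('utf-8')).hexdigest()
# 'digest' is a stable 32-character hex string for that unique_field value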
def connection_info(
self, *, with_aliases: bool = False
) -> Iterable[Tuple[str, Any]]:
"""Return an ordered iterator of key/value pairs for pretty-printing.
"""
as_dict = self.to_dict(omit_none=False)
as_dict = self.to_dict(omit_none=False, with_aliases=with_aliases)
connection_keys = set(self._connection_keys())
aliases: List[str] = []
if with_aliases:
@@ -158,10 +148,9 @@ class Credentials(
raise NotImplementedError
@classmethod
def __pre_deserialize__(cls, data):
data = super().__pre_deserialize__(data)
def from_dict(cls, data):
data = cls.translate_aliases(data)
return data
return super().from_dict(data)
@classmethod
def translate_aliases(
@@ -169,26 +158,31 @@ class Credentials(
) -> Dict[str, Any]:
return translate_aliases(kwargs, cls._ALIASES, recurse)
def __post_serialize__(self, dct):
# no super() -- do we need it?
if self._ALIASES:
dct.update({
new_name: dct[canonical_name]
def to_dict(self, omit_none=True, validate=False, *, with_aliases=False):
serialized = super().to_dict(omit_none=omit_none, validate=validate)
if with_aliases:
serialized.update({
new_name: serialized[canonical_name]
for new_name, canonical_name in self._ALIASES.items()
if canonical_name in dct
if canonical_name in serialized
})
return dct
return serialized
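# Illustrative sketch (not part of this changeset) of the alias handling
# above, assuming _ALIASES maps alias -> canonical name, e.g.
# {'dbname': 'database'}. translate_aliases rewrites alias keys on input;
# __post_serialize__ / to_dict(with_aliases=True) re-adds them on output.
def apply_aliases_on_input(data, aliases):
    # simplified stand-in for translate_aliases (no conflict handling)
    return {aliases.get(key, key): value for key, value in data.items()}

def add_aliases_on_output(serialized, aliases):
    for alias, canonical in aliases.items():
        if canonical in serialized:
            serialized[alias] = serialized[canonical]
    return serialized

apply_aliases_on_input({'dbname': 'analytics'}, {'dbname': 'database'})
# -> {'database': 'analytics'}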
class UserConfigContract(Protocol):
send_anonymous_usage_stats: bool
use_colors: Optional[bool] = None
partial_parse: Optional[bool] = None
printer_width: Optional[int] = None
use_colors: Optional[bool]
partial_parse: Optional[bool]
printer_width: Optional[int]
def set_values(self, cookie_dir: str) -> None:
...
def to_dict(
self, omit_none: bool = True, validate: bool = False
) -> Dict[str, Any]:
...
class HasCredentials(Protocol):
credentials: Credentials
@@ -222,10 +216,9 @@ DEFAULT_QUERY_COMMENT = '''
@dataclass
class QueryComment(HyphenatedDbtClassMixin):
class QueryComment(JsonSchemaMixin):
comment: str = DEFAULT_QUERY_COMMENT
append: bool = False
job_label: bool = False
class AdapterRequiredConfig(HasCredentials, Protocol):

View File

@@ -1,45 +1,21 @@
import hashlib
import os
from dataclasses import dataclass, field
from mashumaro.types import SerializableType
from typing import List, Optional, Union, Dict, Any
from typing import List, Optional, Union
from dbt.dataclass_schema import dbtClassMixin, StrEnum
from hologram import JsonSchemaMixin
from .util import SourceKey
from dbt.exceptions import InternalException
from .util import MacroKey, SourceKey
MAXIMUM_SEED_SIZE = 1 * 1024 * 1024
MAXIMUM_SEED_SIZE_NAME = '1MB'
class ParseFileType(StrEnum):
Macro = 'macro'
Model = 'model'
Snapshot = 'snapshot'
Analysis = 'analysis'
Test = 'test'
Seed = 'seed'
Documentation = 'docs'
Schema = 'schema'
Hook = 'hook' # not a real filetype, from dbt_project.yml
parse_file_type_to_parser = {
ParseFileType.Macro: 'MacroParser',
ParseFileType.Model: 'ModelParser',
ParseFileType.Snapshot: 'SnapshotParser',
ParseFileType.Analysis: 'AnalysisParser',
ParseFileType.Test: 'DataTestParser',
ParseFileType.Seed: 'SeedParser',
ParseFileType.Documentation: 'DocumentationParser',
ParseFileType.Schema: 'SchemaParser',
ParseFileType.Hook: 'HookParser',
}
@dataclass
class FilePath(dbtClassMixin):
class FilePath(JsonSchemaMixin):
searched_path: str
relative_path: str
project_root: str
@@ -75,7 +51,7 @@ class FilePath(dbtClassMixin):
@dataclass
class FileHash(dbtClassMixin):
class FileHash(JsonSchemaMixin):
name: str # the hash type name
checksum: str # the hashlib.hash_type().hexdigest() of the file contents
@@ -115,7 +91,7 @@ class FileHash(dbtClassMixin):
@dataclass
class RemoteFile(dbtClassMixin):
class RemoteFile(JsonSchemaMixin):
@property
def searched_path(self) -> str:
return 'from remote system'
@@ -134,57 +110,48 @@ class RemoteFile(dbtClassMixin):
@dataclass
class BaseSourceFile(dbtClassMixin, SerializableType):
class SourceFile(JsonSchemaMixin):
"""Define a source file in dbt"""
path: Union[FilePath, RemoteFile] # the path information
checksum: FileHash
# Seems like knowing which project the file came from would be useful
project_name: Optional[str] = None
# Parse file type: i.e. which parser will process this file
parse_file_type: Optional[ParseFileType] = None
# we don't want to serialize this
contents: Optional[str] = None
_contents: Optional[str] = None
# the unique IDs contained in this file
nodes: List[str] = field(default_factory=list)
docs: List[str] = field(default_factory=list)
macros: List[str] = field(default_factory=list)
sources: List[str] = field(default_factory=list)
exposures: List[str] = field(default_factory=list)
# any node patches in this file. The entries are names, not unique ids!
patches: List[str] = field(default_factory=list)
# any macro patches in this file. The entries are package, name pairs.
macro_patches: List[MacroKey] = field(default_factory=list)
# any source patches in this file. The entries are package, name pairs
source_patches: List[SourceKey] = field(default_factory=list)
@property
def file_id(self):
def search_key(self) -> Optional[str]:
if isinstance(self.path, RemoteFile):
return None
if self.checksum.name == 'none':
return None
return f'{self.project_name}://{self.path.original_file_path}'
return self.path.search_key
def _serialize(self):
dct = self.to_dict()
return dct
@property
def contents(self) -> str:
if self._contents is None:
raise InternalException('SourceFile has no contents!')
return self._contents
@contents.setter
def contents(self, value):
self._contents = value
@classmethod
def _deserialize(cls, dct: Dict[str, int]):
if dct['parse_file_type'] == 'schema':
sf = SchemaSourceFile.from_dict(dct)
else:
sf = SourceFile.from_dict(dct)
return sf
def __post_serialize__(self, dct):
dct = super().__post_serialize__(dct)
# remove empty lists to save space
dct_keys = list(dct.keys())
for key in dct_keys:
if isinstance(dct[key], list) and not dct[key]:
del dct[key]
# remove contents. Schema files will still have 'dict_from_yaml'
# from the contents
if 'contents' in dct:
del dct['contents']
return dct
@dataclass
class SourceFile(BaseSourceFile):
nodes: List[str] = field(default_factory=list)
docs: List[str] = field(default_factory=list)
macros: List[str] = field(default_factory=list)
def empty(cls, path: FilePath) -> 'SourceFile':
self = cls(path=path, checksum=FileHash.empty())
self.contents = ''
return self
@classmethod
def big_seed(cls, path: FilePath) -> 'SourceFile':
@@ -193,106 +160,8 @@ class SourceFile(BaseSourceFile):
self.contents = ''
return self
def add_node(self, value):
if value not in self.nodes:
self.nodes.append(value)
# TODO: do this a different way. This remote file kludge isn't going
# to work long term
@classmethod
def remote(cls, contents: str, project_name: str) -> 'SourceFile':
self = cls(
path=RemoteFile(),
checksum=FileHash.from_contents(contents),
project_name=project_name,
contents=contents,
)
def remote(cls, contents: str) -> 'SourceFile':
self = cls(path=RemoteFile(), checksum=FileHash.empty())
self.contents = contents
return self
@dataclass
class SchemaSourceFile(BaseSourceFile):
dfy: Dict[str, Any] = field(default_factory=dict)
# these are in the manifest.nodes dictionary
tests: Dict[str, Any] = field(default_factory=dict)
sources: List[str] = field(default_factory=list)
exposures: List[str] = field(default_factory=list)
# node patches contain models, seeds, snapshots, analyses
ndp: List[str] = field(default_factory=list)
# any macro patches in this file by macro unique_id.
mcp: Dict[str, str] = field(default_factory=dict)
# any source patches in this file. The entries are package, name pairs
# Patches are only against external sources. Sources can be
# created too, but those are in 'sources'
sop: List[SourceKey] = field(default_factory=list)
pp_dict: Optional[Dict[str, Any]] = None
pp_test_index: Optional[Dict[str, Any]] = None
@property
def dict_from_yaml(self):
return self.dfy
@property
def node_patches(self):
return self.ndp
@property
def macro_patches(self):
return self.mcp
@property
def source_patches(self):
return self.sop
def __post_serialize__(self, dct):
dct = super().__post_serialize__(dct)
# Remove partial parsing specific data
for key in ('pp_files', 'pp_test_index', 'pp_dict'):
if key in dct:
del dct[key]
return dct
def append_patch(self, yaml_key, unique_id):
self.node_patches.append(unique_id)
def add_test(self, node_unique_id, test_from):
name = test_from['name']
key = test_from['key']
if key not in self.tests:
self.tests[key] = {}
if name not in self.tests[key]:
self.tests[key][name] = []
self.tests[key][name].append(node_unique_id)
def remove_tests(self, yaml_key, name):
if yaml_key in self.tests:
if name in self.tests[yaml_key]:
del self.tests[yaml_key][name]
def get_tests(self, yaml_key, name):
if yaml_key in self.tests:
if name in self.tests[yaml_key]:
return self.tests[yaml_key][name]
return []
def get_key_and_name_for_test(self, test_unique_id):
yaml_key = None
block_name = None
for key in self.tests.keys():
for name in self.tests[key]:
for unique_id in self.tests[key][name]:
if unique_id == test_unique_id:
yaml_key = key
block_name = name
break
return (yaml_key, block_name)
def get_all_test_ids(self):
test_ids = []
for key in self.tests.keys():
for name in self.tests[key]:
test_ids.extend(self.tests[key][name])
return test_ids
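# Illustrative sketch (not part of this changeset): the shape of the 'tests'
# dict used by add_test/get_tests/remove_tests above is
# {yaml_key: {block_name: [test unique_ids]}}. The ids below are hypothetical.
example_tests = {
    'models': {
        'my_model': [
            'test.my_project.not_null_my_model_id',
            'test.my_project.unique_my_model_id',
        ],
    },
}
# get_tests('models', 'my_model') returns the list of unique_ids, and
# get_key_and_name_for_test(...) performs the reverse lookup.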
AnySourceFile = Union[SchemaSourceFile, SourceFile]

View File

@@ -19,19 +19,19 @@ from dbt.contracts.graph.parsed import (
from dbt.node_types import NodeType
from dbt.contracts.util import Replaceable
from dbt.dataclass_schema import dbtClassMixin
from hologram import JsonSchemaMixin
from dataclasses import dataclass, field
from typing import Optional, List, Union, Dict, Type
@dataclass
class InjectedCTE(dbtClassMixin, Replaceable):
class InjectedCTE(JsonSchemaMixin, Replaceable):
id: str
sql: str
@dataclass
class CompiledNodeMixin(dbtClassMixin):
class CompiledNodeMixin(JsonSchemaMixin):
# this is a special mixin class to provide a required argument. If a node
# is missing a `compiled` flag entirely, it must not be a CompiledNode.
compiled: bool
@@ -43,7 +43,6 @@ class CompiledNode(ParsedNode, CompiledNodeMixin):
extra_ctes_injected: bool = False
extra_ctes: List[InjectedCTE] = field(default_factory=list)
relation_name: Optional[str] = None
_pre_injected_sql: Optional[str] = None
def set_cte(self, cte_id: str, sql: str):
"""This is the equivalent of what self.extra_ctes[cte_id] = sql would
@@ -56,12 +55,6 @@ class CompiledNode(ParsedNode, CompiledNodeMixin):
else:
self.extra_ctes.append(InjectedCTE(id=cte_id, sql=sql))
def __post_serialize__(self, dct):
dct = super().__post_serialize__(dct)
if '_pre_injected_sql' in dct:
del dct['_pre_injected_sql']
return dct
@dataclass
class CompiledAnalysisNode(CompiledNode):
@@ -109,9 +102,7 @@ class CompiledSnapshotNode(CompiledNode):
@dataclass
class CompiledDataTestNode(CompiledNode):
resource_type: NodeType = field(metadata={'restrict': [NodeType.Test]})
# Was not able to make mypy happy and keep the code working. We need to
# refactor the various configs.
config: TestConfig = field(default_factory=TestConfig) # type:ignore
config: TestConfig = field(default_factory=TestConfig)
@dataclass
@@ -119,9 +110,16 @@ class CompiledSchemaTestNode(CompiledNode, HasTestMetadata):
# keep this in sync with ParsedSchemaTestNode!
resource_type: NodeType = field(metadata={'restrict': [NodeType.Test]})
column_name: Optional[str] = None
# Was not able to make mypy happy and keep the code working. We need to
# refactor the various configs.
config: TestConfig = field(default_factory=TestConfig) # type:ignore
config: TestConfig = field(default_factory=TestConfig)
def same_config(self, other) -> bool:
return (
self.unrendered_config.get('severity') ==
other.unrendered_config.get('severity')
)
def same_column_name(self, other) -> bool:
return self.column_name == other.column_name
def same_contents(self, other) -> bool:
if other is None:
@@ -180,7 +178,8 @@ def parsed_instance_for(compiled: CompiledNode) -> ParsedResource:
raise ValueError('invalid resource_type: {}'
.format(compiled.resource_type))
return cls.from_dict(compiled.to_dict(omit_none=True))
# validate=False to allow extra keys from compiling
return cls.from_dict(compiled.to_dict(), validate=False)
NonSourceCompiledNode = Union[

File diff suppressed because it is too large

View File

@@ -2,13 +2,21 @@ from dataclasses import field, Field, dataclass
from enum import Enum
from itertools import chain
from typing import (
Any, List, Optional, Dict, Union, Type, TypeVar, Callable
)
from dbt.dataclass_schema import (
dbtClassMixin, ValidationError, register_pattern,
Any, List, Optional, Dict, MutableMapping, Union, Type, NewType, Tuple,
TypeVar, Callable, cast, Hashable
)
# TODO: patch+upgrade hologram to avoid this jsonschema import
import jsonschema # type: ignore
# This is protected, but we really do want to reuse this logic, and the cache!
# It would be nice to move the custom error picking stuff into hologram!
from hologram import _validate_schema
from hologram import JsonSchemaMixin, ValidationError
from hologram.helpers import StrEnum, register_pattern
from dbt.contracts.graph.unparsed import AdditionalPropertiesAllowed
from dbt.exceptions import InternalException, CompilationException
from dbt.exceptions import CompilationException, InternalException
from dbt.contracts.util import Replaceable, list_str
from dbt import hooks
from dbt.node_types import NodeType
@@ -162,15 +170,22 @@ def insensitive_patterns(*patterns: str):
return '^({})$'.format('|'.join(lowercased))
class Severity(str):
pass
Severity = NewType('Severity', str)
register_pattern(Severity, insensitive_patterns('warn', 'error'))
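# Illustrative sketch (not part of this changeset): the Severity pattern
# registered above accepts 'warn' or 'error' in any letter casing. A rough
# (not byte-for-byte identical) equivalent:
import re
severity_re = re.compile(r'^(warn|error)$', re.IGNORECASE)
assert severity_re.match('WARN')
assert severity_re.match('error')
assert not severity_re.match('fatal')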
class SnapshotStrategy(StrEnum):
Timestamp = 'timestamp'
Check = 'check'
class All(StrEnum):
All = 'all'
@dataclass
class Hook(dbtClassMixin, Replaceable):
class Hook(JsonSchemaMixin, Replaceable):
sql: str
transaction: bool = True
index: Optional[int] = None
@@ -181,23 +196,19 @@ T = TypeVar('T', bound='BaseConfig')
@dataclass
class BaseConfig(
AdditionalPropertiesAllowed, Replaceable
AdditionalPropertiesAllowed, Replaceable, MutableMapping[str, Any]
):
# enable syntax like: config['key']
# Implement MutableMapping so this config will behave as some macros expect
# during parsing (notably, syntax like `{{ node.config['schema'] }}`)
def __getitem__(self, key):
return self.get(key)
# like doing 'get' on a dictionary
def get(self, key, default=None):
"""Handle parse-time use of `config` as a dictionary, making the extra
values available during parsing.
"""
if hasattr(self, key):
return getattr(self, key)
elif key in self._extra:
return self._extra[key]
else:
return default
return self._extra[key]
# enable syntax like: config['key'] = value
def __setitem__(self, key, value):
if hasattr(self, key):
setattr(self, key, value)
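# Illustrative usage sketch (not part of this changeset), assuming the
# parse-time dict-style access described above. NodeConfig is constructed
# with defaults; 'custom_flag' is a hypothetical extra key.
cfg = NodeConfig(materialized='table')
cfg['materialized']            # -> 'table' (a declared dataclass field)
cfg['custom_flag'] = True      # unknown keys are stored in cfg._extra
cfg.get('missing', 'default')  # -> 'default' when neither field nor extra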
@@ -267,15 +278,8 @@ class BaseConfig(
return False
return True
# This is used in 'add_config_call' to create the combined config_call_dict.
# 'meta' moved here from node
mergebehavior = {
"append": ['pre-hook', 'pre_hook', 'post-hook', 'post_hook', 'tags'],
"update": ['quoting', 'column_types', 'meta'],
}
@classmethod
def _merge_dicts(
def _extract_dict(
cls, src: Dict[str, Any], data: Dict[str, Any]
) -> Dict[str, Any]:
"""Find all the items in data that match a target_field on this class,
@@ -309,6 +313,29 @@ class BaseConfig(
)
return result
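# Illustrative sketch (not part of this changeset) of the merge behaviors
# named in 'mergebehavior' above and applied when configs are combined:
# 'append' fields concatenate, 'update' fields are dict-merged, and anything
# else is clobbered by the update.
existing = {'pre-hook': ['a'], 'quoting': {'schema': True}, 'materialized': 'view'}
update = {'pre-hook': ['b'], 'quoting': {'identifier': False}, 'materialized': 'table'}
merged = {
    'pre-hook': existing['pre-hook'] + update['pre-hook'],     # append
    'quoting': {**existing['quoting'], **update['quoting']},   # update
    'materialized': update['materialized'],                    # clobber
}
# merged == {'pre-hook': ['a', 'b'],
#            'quoting': {'schema': True, 'identifier': False},
#            'materialized': 'table'}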
def to_dict(
self,
omit_none: bool = True,
validate: bool = False,
*,
omit_hidden: bool = True,
) -> Dict[str, Any]:
result = super().to_dict(omit_none=omit_none, validate=validate)
if omit_hidden and not omit_none:
for fld, target_field in self._get_fields():
if target_field not in result:
continue
# if the field is not None, preserve it regardless of the
# setting. This is in line with existing behavior, but isn't
# an endorsement of it!
if result[target_field] is not None:
continue
if not ShowBehavior.should_show(fld):
del result[target_field]
return result
def update_from(
self: T, data: Dict[str, Any], adapter_type: str, validate: bool = True
) -> T:
@@ -317,37 +344,35 @@ class BaseConfig(
"""
# sadly, this is a circular import
from dbt.adapters.factory import get_config_class_by_name
dct = self.to_dict(omit_none=False)
dct = self.to_dict(omit_none=False, validate=False, omit_hidden=False)
adapter_config_cls = get_config_class_by_name(adapter_type)
self_merged = self._merge_dicts(dct, data)
self_merged = self._extract_dict(dct, data)
dct.update(self_merged)
adapter_merged = adapter_config_cls._merge_dicts(dct, data)
adapter_merged = adapter_config_cls._extract_dict(dct, data)
dct.update(adapter_merged)
# any remaining fields must be "clobber"
dct.update(data)
# any validation failures must have come from the update
if validate:
self.validate(dct)
return self.from_dict(dct)
return self.from_dict(dct, validate=validate)
def finalize_and_validate(self: T) -> T:
dct = self.to_dict(omit_none=False)
self.validate(dct)
# from_dict will validate for us
dct = self.to_dict(omit_none=False, validate=False)
return self.from_dict(dct)
def replace(self, **kwargs):
dct = self.to_dict(omit_none=True)
dct = self.to_dict(validate=False)
mapping = self.field_mapping()
for key, value in kwargs.items():
new_key = mapping.get(key, key)
dct[new_key] = value
return self.from_dict(dct)
return self.from_dict(dct, validate=False)
@dataclass
@@ -356,8 +381,33 @@ class SourceConfig(BaseConfig):
@dataclass
class NodeAndTestConfig(BaseConfig):
class NodeConfig(BaseConfig):
enabled: bool = True
materialized: str = 'view'
persist_docs: Dict[str, Any] = field(default_factory=dict)
post_hook: List[Hook] = field(
default_factory=list,
metadata=MergeBehavior.Append.meta(),
)
pre_hook: List[Hook] = field(
default_factory=list,
metadata=MergeBehavior.Append.meta(),
)
# this only applies for config v1, so it doesn't participate in comparison
vars: Dict[str, Any] = field(
default_factory=dict,
metadata=metas(CompareBehavior.Exclude, MergeBehavior.Update),
)
quoting: Dict[str, Any] = field(
default_factory=dict,
metadata=MergeBehavior.Update.meta(),
)
# This is actually only used by seeds. Should it be available to others?
# That would be a breaking change!
column_types: Dict[str, Any] = field(
default_factory=dict,
metadata=MergeBehavior.Update.meta(),
)
# these fields are included in serialized output, but are not part of
# config comparison (they are part of database_representation)
alias: Optional[str] = field(
@@ -378,67 +428,15 @@ class NodeAndTestConfig(BaseConfig):
MergeBehavior.Append,
CompareBehavior.Exclude),
)
meta: Dict[str, Any] = field(
default_factory=dict,
metadata=MergeBehavior.Update.meta(),
)
@dataclass
class NodeConfig(NodeAndTestConfig):
# Note: if any new fields are added with MergeBehavior, also update the
# 'mergebehavior' dictionary
materialized: str = 'view'
persist_docs: Dict[str, Any] = field(default_factory=dict)
post_hook: List[Hook] = field(
default_factory=list,
metadata=MergeBehavior.Append.meta(),
)
pre_hook: List[Hook] = field(
default_factory=list,
metadata=MergeBehavior.Append.meta(),
)
quoting: Dict[str, Any] = field(
default_factory=dict,
metadata=MergeBehavior.Update.meta(),
)
# This is actually only used by seeds. Should it be available to others?
# That would be a breaking change!
column_types: Dict[str, Any] = field(
default_factory=dict,
metadata=MergeBehavior.Update.meta(),
)
full_refresh: Optional[bool] = None
on_schema_change: Optional[str] = 'ignore'
@classmethod
def __pre_deserialize__(cls, data):
data = super().__pre_deserialize__(data)
field_map = {'post-hook': 'post_hook', 'pre-hook': 'pre_hook'}
# create a new dict because otherwise it gets overwritten in
# tests
new_dict = {}
for key in data:
new_dict[key] = data[key]
data = new_dict
def from_dict(cls, data, validate=True):
for key in hooks.ModelHookType:
if key in data:
data[key] = [hooks.get_hook_dict(h) for h in data[key]]
for field_name in field_map:
if field_name in data:
new_name = field_map[field_name]
data[new_name] = data.pop(field_name)
return data
return super().from_dict(data, validate=validate)
def __post_serialize__(self, dct):
dct = super().__post_serialize__(dct)
field_map = {'post_hook': 'post-hook', 'pre_hook': 'pre-hook'}
for field_name in field_map:
if field_name in dct:
dct[field_map[field_name]] = dct.pop(field_name)
return dct
# this is still used by jsonschema validation
@classmethod
def field_mapping(cls):
return {'post_hook': 'post-hook', 'pre_hook': 'pre-hook'}
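# Illustrative sketch (not part of this changeset): the pre-/post-hook
# handling above, conceptually. Hyphenated YAML keys are mapped to the
# snake_case field names, and each hook entry is normalized to a dict the
# Hook dataclass can load (the exact output of hooks.get_hook_dict may
# differ).
raw = {'pre-hook': ['grant select on {{ this }} to role reporter']}
normalized = {
    'pre_hook': [
        {'sql': 'grant select on {{ this }} to role reporter', 'transaction': True}
    ]
}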
@@ -451,44 +449,63 @@ class SeedConfig(NodeConfig):
@dataclass
class TestConfig(NodeAndTestConfig):
# this is repeated because of a different default
schema: Optional[str] = field(
default='dbt_test__audit',
metadata=CompareBehavior.Exclude.meta(),
)
class TestConfig(NodeConfig):
materialized: str = 'test'
severity: Severity = Severity('ERROR')
store_failures: Optional[bool] = None
where: Optional[str] = None
limit: Optional[int] = None
fail_calc: str = 'count(*)'
warn_if: str = '!= 0'
error_if: str = '!= 0'
SnapshotVariants = Union[
'TimestampSnapshotConfig',
'CheckSnapshotConfig',
'GenericSnapshotConfig',
]
def _relevance_without_strategy(error: jsonschema.ValidationError):
# calculate the 'relevance' of an error the normal jsonschema way, except
# if the validator is in the 'strategy' field and it's conflicting with the
# 'enum'. This suppresses errors like "'timestamp' is not one of ['check']".
if 'strategy' in error.path and error.validator in {'enum', 'not'}:
length = 1
else:
length = -len(error.path)
validator = error.validator
return length, validator not in {'anyOf', 'oneOf'}
@dataclass
class SnapshotWrapper(JsonSchemaMixin):
"""This is a little wrapper to let us serialize/deserialize the
SnapshotVariants union.
"""
config: SnapshotVariants # mypy: ignore
@classmethod
def same_contents(
cls, unrendered: Dict[str, Any], other: Dict[str, Any]
) -> bool:
"""This is like __eq__, except it explicitly checks certain fields."""
modifiers = [
'severity',
'where',
'limit',
'fail_calc',
'warn_if',
'error_if',
'store_failures'
]
def validate(cls, data: Any):
config = data.get('config', {})
seen = set()
for _, target_name in cls._get_fields():
key = target_name
seen.add(key)
if key in modifiers:
if not cls.compare_key(unrendered, other, key):
return False
return True
if config.get('strategy') == 'check':
schema = _validate_schema(CheckSnapshotConfig)
to_validate = config
elif config.get('strategy') == 'timestamp':
schema = _validate_schema(TimestampSnapshotConfig)
to_validate = config
else:
h_cls = cast(Hashable, cls)
schema = _validate_schema(h_cls)
to_validate = data
validator = jsonschema.Draft7Validator(schema)
error = jsonschema.exceptions.best_match(
validator.iter_errors(to_validate),
key=_relevance_without_strategy,
)
if error is not None:
raise ValidationError.create_from(error) from error
@dataclass
@@ -496,49 +513,123 @@ class EmptySnapshotConfig(NodeConfig):
materialized: str = 'snapshot'
@dataclass
@dataclass(init=False)
class SnapshotConfig(EmptySnapshotConfig):
strategy: Optional[str] = None
unique_key: Optional[str] = None
target_schema: Optional[str] = None
unique_key: str = field(init=False, metadata=dict(init_required=True))
target_schema: str = field(init=False, metadata=dict(init_required=True))
target_database: Optional[str] = None
updated_at: Optional[str] = None
check_cols: Optional[Union[str, List[str]]] = None
def __init__(
self,
unique_key: str,
target_schema: str,
target_database: Optional[str] = None,
**kwargs
) -> None:
self.unique_key = unique_key
self.target_schema = target_schema
self.target_database = target_database
# kwargs['materialized'] = materialized
super().__init__(**kwargs)
# type hacks...
@classmethod
def _get_fields(cls) -> List[Tuple[Field, str]]: # type: ignore
fields: List[Tuple[Field, str]] = []
for old_field, name in super()._get_fields():
new_field = old_field
# tell hologram we're really an initvar
if old_field.metadata and old_field.metadata.get('init_required'):
new_field = field(init=True, metadata=old_field.metadata)
new_field.name = old_field.name
new_field.type = old_field.type
new_field._field_type = old_field._field_type # type: ignore
fields.append((new_field, name))
return fields
def finalize_and_validate(self: 'SnapshotConfig') -> SnapshotVariants:
data = self.to_dict()
return SnapshotWrapper.from_dict({'config': data}).config
@dataclass(init=False)
class GenericSnapshotConfig(SnapshotConfig):
strategy: str = field(init=False, metadata=dict(init_required=True))
def __init__(self, strategy: str, **kwargs) -> None:
self.strategy = strategy
super().__init__(**kwargs)
@classmethod
def validate(cls, data):
super().validate(data)
if not data.get('strategy') or not data.get('unique_key') or not \
data.get('target_schema'):
raise ValidationError(
"Snapshots must be configured with a 'strategy', 'unique_key', "
"and 'target_schema'.")
if data.get('strategy') == 'check':
if not data.get('check_cols'):
raise ValidationError(
"A snapshot configured with the check strategy must "
"specify a check_cols configuration.")
if (isinstance(data['check_cols'], str) and
data['check_cols'] != 'all'):
raise ValidationError(
f"Invalid value for 'check_cols': {data['check_cols']}. "
"Expected 'all' or a list of strings.")
def _collect_json_schema(
cls, definitions: Dict[str, Any]
) -> Dict[str, Any]:
# this is the method you want to override in hologram if you want
# to do clever things about the json schema and have classes that
# contain instances of your JsonSchemaMixin respect the change.
schema = super()._collect_json_schema(definitions)
elif data.get('strategy') == 'timestamp':
if not data.get('updated_at'):
raise ValidationError(
"A snapshot configured with the timestamp strategy "
"must specify an updated_at configuration.")
if data.get('check_cols'):
raise ValidationError(
"A 'timestamp' snapshot should not have 'check_cols'")
# If the strategy is not 'check' or 'timestamp' it's a custom strategy,
# formerly supported with GenericSnapshotConfig
# Instead of just the strategy we'd calculate normally, say
# "this strategy except none of our specialization strategies".
strategies = [schema['properties']['strategy']]
for specialization in (TimestampSnapshotConfig, CheckSnapshotConfig):
strategies.append(
{'not': specialization.json_schema()['properties']['strategy']}
)
def finalize_and_validate(self):
data = self.to_dict(omit_none=True)
self.validate(data)
return self.from_dict(data)
schema['properties']['strategy'] = {
'allOf': strategies
}
return schema
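# Illustrative sketch (not part of this changeset): config dicts that satisfy
# the SnapshotConfig.validate rules above. All values are hypothetical.
timestamp_snapshot = {
    'strategy': 'timestamp',
    'unique_key': 'id',
    'target_schema': 'snapshots',
    'updated_at': 'updated_at',         # required by the timestamp strategy
}
check_snapshot = {
    'strategy': 'check',
    'unique_key': 'id',
    'target_schema': 'snapshots',
    'check_cols': ['status', 'email'],  # or the string 'all'
}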
@dataclass(init=False)
class TimestampSnapshotConfig(SnapshotConfig):
strategy: str = field(
init=False,
metadata=dict(
restrict=[str(SnapshotStrategy.Timestamp)],
init_required=True,
),
)
updated_at: str = field(init=False, metadata=dict(init_required=True))
def __init__(
self, strategy: str, updated_at: str, **kwargs
) -> None:
self.strategy = strategy
self.updated_at = updated_at
super().__init__(**kwargs)
@dataclass(init=False)
class CheckSnapshotConfig(SnapshotConfig):
strategy: str = field(
init=False,
metadata=dict(
restrict=[str(SnapshotStrategy.Check)],
init_required=True,
),
)
# TODO: is there a way to get this to accept tuples of strings? Adding
# `Tuple[str, ...]` to the list of types results in this:
# ['email'] is valid under each of {'type': 'array', 'items':
# {'type': 'string'}}, {'type': 'array', 'items': {'type': 'string'}}
# but without it, parsing gets upset about values like `('email',)`
# maybe hologram itself should support this behavior? It's not like tuples
# are meaningful in json
check_cols: Union[All, List[str]] = field(
init=False,
metadata=dict(init_required=True),
)
def __init__(
self, strategy: str, check_cols: Union[All, List[str]],
**kwargs
) -> None:
self.strategy = strategy
self.check_cols = check_cols
super().__init__(**kwargs)
RESOURCE_TYPES: Dict[NodeType, Type[BaseConfig]] = {

View File

@@ -1,7 +1,5 @@
import os
import time
from dataclasses import dataclass, field
from mashumaro.types import SerializableType
from pathlib import Path
from typing import (
Optional,
@@ -15,9 +13,8 @@ from typing import (
TypeVar,
)
from dbt.dataclass_schema import (
dbtClassMixin, ExtensibleDbtClassMixin
)
from hologram import JsonSchemaMixin
from hologram.helpers import ExtensibleJsonSchemaMixin
from dbt.clients.system import write_file
from dbt.contracts.files import FileHash, MAXIMUM_SEED_SIZE_NAME
@@ -41,14 +38,20 @@ from .model_config import (
TestConfig,
SourceConfig,
EmptySnapshotConfig,
SnapshotConfig,
SnapshotVariants,
)
# import these 3 so the SnapshotVariants forward ref works.
from .model_config import ( # noqa
TimestampSnapshotConfig,
CheckSnapshotConfig,
GenericSnapshotConfig,
)
@dataclass
class ColumnInfo(
AdditionalPropertiesMixin,
ExtensibleDbtClassMixin,
ExtensibleJsonSchemaMixin,
Replaceable
):
name: str
@@ -61,7 +64,7 @@ class ColumnInfo(
@dataclass
class HasFqn(dbtClassMixin, Replaceable):
class HasFqn(JsonSchemaMixin, Replaceable):
fqn: List[str]
def same_fqn(self, other: 'HasFqn') -> bool:
@@ -69,12 +72,12 @@ class HasFqn(dbtClassMixin, Replaceable):
@dataclass
class HasUniqueID(dbtClassMixin, Replaceable):
class HasUniqueID(JsonSchemaMixin, Replaceable):
unique_id: str
@dataclass
class MacroDependsOn(dbtClassMixin, Replaceable):
class MacroDependsOn(JsonSchemaMixin, Replaceable):
macros: List[str] = field(default_factory=list)
# 'in' on lists is O(n) so this is O(n^2) for # of macros
@@ -93,22 +96,12 @@ class DependsOn(MacroDependsOn):
@dataclass
class HasRelationMetadata(dbtClassMixin, Replaceable):
class HasRelationMetadata(JsonSchemaMixin, Replaceable):
database: Optional[str]
schema: str
# 'database' ought to default to None, but that breaks the default-parameter
# ordering in the subclasses, so hack around it here
@classmethod
def __pre_deserialize__(cls, data):
data = super().__pre_deserialize__(data)
if 'database' not in data:
data['database'] = None
return data
class ParsedNodeMixins(dbtClassMixin):
class ParsedNodeMixins(JsonSchemaMixin):
resource_type: NodeType
depends_on: DependsOn
config: NodeConfig
@@ -117,21 +110,6 @@ class ParsedNodeMixins(dbtClassMixin):
def is_refable(self):
return self.resource_type in NodeType.refable()
@property
def should_store_failures(self):
return self.resource_type == NodeType.Test and (
self.config.store_failures if self.config.store_failures is not None
else flags.STORE_FAILURES
)
# will this node map to an object in the database?
@property
def is_relational(self):
return (
self.resource_type in NodeType.refable() or
self.should_store_failures
)
@property
def is_ephemeral(self):
return self.config.materialized == 'ephemeral'
@@ -148,25 +126,21 @@ class ParsedNodeMixins(dbtClassMixin):
"""Given a ParsedNodePatch, add the new information to the node."""
# explicitly pick out the parts to update so we don't inadvertently
# step on the model name or anything
# Note: config should already be updated
self.patch_path: Optional[str] = patch.file_id
# update created_at so process_docs will run in partial parsing
self.created_at = int(time.time())
self.patch_path: Optional[str] = patch.original_file_path
self.description = patch.description
self.columns = patch.columns
self.meta = patch.meta
self.docs = patch.docs
if flags.STRICT_MODE:
# It seems odd that an instance can be invalid
# Maybe there should be validation or restrictions
# elsewhere?
assert isinstance(self, dbtClassMixin)
dct = self.to_dict(omit_none=False)
self.validate(dct)
assert isinstance(self, JsonSchemaMixin)
self.to_dict(validate=True, omit_none=False)
def get_materialization(self):
return self.config.materialized
def local_vars(self):
return self.config.vars
@dataclass
class ParsedNodeMandatory(
@@ -196,12 +170,9 @@ class ParsedNodeDefaults(ParsedNodeMandatory):
meta: Dict[str, Any] = field(default_factory=dict)
docs: Docs = field(default_factory=Docs)
patch_path: Optional[str] = None
compiled_path: Optional[str] = None
build_path: Optional[str] = None
deferred: bool = False
unrendered_config: Dict[str, Any] = field(default_factory=dict)
created_at: int = field(default_factory=lambda: int(time.time()))
config_call_dict: Dict[str, Any] = field(default_factory=dict)
def write_node(self, target_path: str, subdirectory: str, payload: str):
if (os.path.basename(self.path) ==
@@ -223,55 +194,12 @@ T = TypeVar('T', bound='ParsedNode')
@dataclass
class ParsedNode(ParsedNodeDefaults, ParsedNodeMixins, SerializableType):
def _serialize(self):
return self.to_dict()
def __post_serialize__(self, dct):
if 'config_call_dict' in dct:
del dct['config_call_dict']
return dct
@classmethod
def _deserialize(cls, dct: Dict[str, int]):
# The serialized ParsedNodes do not differ from each other
# in fields that would allow 'from_dict' to distinguish
# between them.
resource_type = dct['resource_type']
if resource_type == 'model':
return ParsedModelNode.from_dict(dct)
elif resource_type == 'analysis':
return ParsedAnalysisNode.from_dict(dct)
elif resource_type == 'seed':
return ParsedSeedNode.from_dict(dct)
elif resource_type == 'rpc':
return ParsedRPCNode.from_dict(dct)
elif resource_type == 'test':
if 'test_metadata' in dct:
return ParsedSchemaTestNode.from_dict(dct)
else:
return ParsedDataTestNode.from_dict(dct)
elif resource_type == 'operation':
return ParsedHookNode.from_dict(dct)
elif resource_type == 'seed':
return ParsedSeedNode.from_dict(dct)
elif resource_type == 'snapshot':
return ParsedSnapshotNode.from_dict(dct)
else:
return cls.from_dict(dct)
class ParsedNode(ParsedNodeDefaults, ParsedNodeMixins):
def _persist_column_docs(self) -> bool:
if hasattr(self.config, 'persist_docs'):
assert isinstance(self.config, NodeConfig)
return bool(self.config.persist_docs.get('columns'))
return False
return bool(self.config.persist_docs.get('columns'))
def _persist_relation_docs(self) -> bool:
if hasattr(self.config, 'persist_docs'):
assert isinstance(self.config, NodeConfig)
return bool(self.config.persist_docs.get('relation'))
return False
return bool(self.config.persist_docs.get('relation'))
def same_body(self: T, other: T) -> bool:
return self.raw_sql == other.raw_sql
@@ -407,23 +335,21 @@ class ParsedSeedNode(ParsedNode):
@dataclass
class TestMetadata(dbtClassMixin, Replaceable):
class TestMetadata(JsonSchemaMixin, Replaceable):
namespace: Optional[str]
name: str
kwargs: Dict[str, Any] = field(default_factory=dict)
namespace: Optional[str] = None
kwargs: Dict[str, Any]
@dataclass
class HasTestMetadata(dbtClassMixin):
class HasTestMetadata(JsonSchemaMixin):
test_metadata: TestMetadata
@dataclass
class ParsedDataTestNode(ParsedNode):
resource_type: NodeType = field(metadata={'restrict': [NodeType.Test]})
# Was not able to make mypy happy and keep the code working. We need to
# refactor the various configs.
config: TestConfig = field(default_factory=TestConfig) # type: ignore
config: TestConfig = field(default_factory=TestConfig)
@dataclass
@@ -431,9 +357,16 @@ class ParsedSchemaTestNode(ParsedNode, HasTestMetadata):
# keep this in sync with CompiledSchemaTestNode!
resource_type: NodeType = field(metadata={'restrict': [NodeType.Test]})
column_name: Optional[str] = None
# Was not able to make mypy happy and keep the code working. We need to
# refactor the various configs.
config: TestConfig = field(default_factory=TestConfig) # type: ignore
config: TestConfig = field(default_factory=TestConfig)
def same_config(self, other) -> bool:
return (
self.unrendered_config.get('severity') ==
other.unrendered_config.get('severity')
)
def same_column_name(self, other) -> bool:
return self.column_name == other.column_name
def same_contents(self, other) -> bool:
if other is None:
@@ -461,7 +394,7 @@ class IntermediateSnapshotNode(ParsedNode):
@dataclass
class ParsedSnapshotNode(ParsedNode):
resource_type: NodeType = field(metadata={'restrict': [NodeType.Snapshot]})
config: SnapshotConfig
config: SnapshotVariants
@dataclass
@@ -470,7 +403,6 @@ class ParsedPatch(HasYamlMetadata, Replaceable):
description: str
meta: Dict[str, Any]
docs: Docs
config: Dict[str, Any]
# The parsed node update is only the 'patch', not the test. The test became a
@@ -500,20 +432,19 @@ class ParsedMacro(UnparsedBaseNode, HasUniqueID):
docs: Docs = field(default_factory=Docs)
patch_path: Optional[str] = None
arguments: List[MacroArgument] = field(default_factory=list)
created_at: int = field(default_factory=lambda: int(time.time()))
def local_vars(self):
return {}
def patch(self, patch: ParsedMacroPatch):
self.patch_path: Optional[str] = patch.file_id
self.patch_path: Optional[str] = patch.original_file_path
self.description = patch.description
self.created_at = int(time.time())
self.meta = patch.meta
self.docs = patch.docs
self.arguments = patch.arguments
if flags.STRICT_MODE:
# What does this actually validate?
assert isinstance(self, dbtClassMixin)
dct = self.to_dict(omit_none=False)
self.validate(dct)
assert isinstance(self, JsonSchemaMixin)
self.to_dict(validate=True, omit_none=False)
def same_contents(self, other: Optional['ParsedMacro']) -> bool:
if other is None:
@@ -604,8 +535,7 @@ class ParsedSourceDefinition(
UnparsedBaseNode,
HasUniqueID,
HasRelationMetadata,
HasFqn,
HasFqn
):
name: str
source_name: str
@@ -626,7 +556,6 @@ class ParsedSourceDefinition(
patch_path: Optional[Path] = None
unrendered_config: Dict[str, Any] = field(default_factory=dict)
relation_name: Optional[str] = None
created_at: int = field(default_factory=lambda: int(time.time()))
def same_database_representation(
self, other: 'ParsedSourceDefinition'
@@ -702,10 +631,6 @@ class ParsedSourceDefinition(
def depends_on_nodes(self):
return []
@property
def depends_on(self):
return DependsOn(macros=[], nodes=[])
@property
def refs(self):
return []
@@ -731,13 +656,10 @@ class ParsedExposure(UnparsedBaseNode, HasUniqueID, HasFqn):
resource_type: NodeType = NodeType.Exposure
description: str = ''
maturity: Optional[MaturityType] = None
meta: Dict[str, Any] = field(default_factory=dict)
tags: List[str] = field(default_factory=list)
url: Optional[str] = None
depends_on: DependsOn = field(default_factory=DependsOn)
refs: List[List[str]] = field(default_factory=list)
sources: List[List[str]] = field(default_factory=list)
created_at: int = field(default_factory=lambda: int(time.time()))
@property
def depends_on_nodes(self):
@@ -747,6 +669,11 @@ class ParsedExposure(UnparsedBaseNode, HasUniqueID, HasFqn):
def search_name(self):
return self.name
# no tags for now, but we could definitely add them
@property
def tags(self):
return []
def same_depends_on(self, old: 'ParsedExposure') -> bool:
return set(self.depends_on.nodes) == set(old.depends_on.nodes)
@@ -767,7 +694,6 @@ class ParsedExposure(UnparsedBaseNode, HasUniqueID, HasFqn):
def same_contents(self, old: Optional['ParsedExposure']) -> bool:
# existing when it didn't before is a change!
# metadata/tags changes are not "changes"
if old is None:
return True
@@ -783,18 +709,6 @@ class ParsedExposure(UnparsedBaseNode, HasUniqueID, HasFqn):
)
ManifestNodes = Union[
ParsedAnalysisNode,
ParsedDataTestNode,
ParsedHookNode,
ParsedModelNode,
ParsedRPCNode,
ParsedSchemaTestNode,
ParsedSeedNode,
ParsedSnapshotNode,
]
ParsedResource = Union[
ParsedDocumentation,
ParsedMacro,

View File

@@ -8,9 +8,8 @@ from dbt.contracts.util import (
import dbt.helper_types # noqa:F401
from dbt.exceptions import CompilationException
from dbt.dataclass_schema import (
dbtClassMixin, StrEnum, ExtensibleDbtClassMixin
)
from hologram import JsonSchemaMixin
from hologram.helpers import StrEnum, ExtensibleJsonSchemaMixin
from dataclasses import dataclass, field
from datetime import timedelta
@@ -19,16 +18,12 @@ from typing import Optional, List, Union, Dict, Any, Sequence
@dataclass
class UnparsedBaseNode(dbtClassMixin, Replaceable):
class UnparsedBaseNode(JsonSchemaMixin, Replaceable):
package_name: str
root_path: str
path: str
original_file_path: str
@property
def file_id(self):
return f'{self.package_name}://{self.original_file_path}'
@dataclass
class HasSQL:
@@ -71,12 +66,12 @@ class UnparsedRunHook(UnparsedNode):
@dataclass
class Docs(dbtClassMixin, Replaceable):
class Docs(JsonSchemaMixin, Replaceable):
show: bool = True
@dataclass
class HasDocs(AdditionalPropertiesMixin, ExtensibleDbtClassMixin,
class HasDocs(AdditionalPropertiesMixin, ExtensibleJsonSchemaMixin,
Replaceable):
name: str
description: str = ''
@@ -105,7 +100,7 @@ class UnparsedColumn(HasTests):
@dataclass
class HasColumnDocs(dbtClassMixin, Replaceable):
class HasColumnDocs(JsonSchemaMixin, Replaceable):
columns: Sequence[HasDocs] = field(default_factory=list)
@@ -115,40 +110,31 @@ class HasColumnTests(HasColumnDocs):
@dataclass
class HasYamlMetadata(dbtClassMixin):
class HasYamlMetadata(JsonSchemaMixin):
original_file_path: str
yaml_key: str
package_name: str
@property
def file_id(self):
return f'{self.package_name}://{self.original_file_path}'
@dataclass
class HasConfig():
config: Dict[str, Any] = field(default_factory=dict)
@dataclass
class UnparsedAnalysisUpdate(HasConfig, HasColumnDocs, HasDocs, HasYamlMetadata):
class UnparsedAnalysisUpdate(HasColumnDocs, HasDocs, HasYamlMetadata):
pass
@dataclass
class UnparsedNodeUpdate(HasConfig, HasColumnTests, HasTests, HasYamlMetadata):
class UnparsedNodeUpdate(HasColumnTests, HasTests, HasYamlMetadata):
quote_columns: Optional[bool] = None
@dataclass
class MacroArgument(dbtClassMixin):
class MacroArgument(JsonSchemaMixin):
name: str
type: Optional[str] = None
description: str = ''
@dataclass
class UnparsedMacroUpdate(HasConfig, HasDocs, HasYamlMetadata):
class UnparsedMacroUpdate(HasDocs, HasYamlMetadata):
arguments: List[MacroArgument] = field(default_factory=list)
@@ -162,7 +148,7 @@ class TimePeriod(StrEnum):
@dataclass
class Time(dbtClassMixin, Replaceable):
class Time(JsonSchemaMixin, Replaceable):
count: int
period: TimePeriod
@@ -173,7 +159,7 @@ class Time(dbtClassMixin, Replaceable):
@dataclass
class FreshnessThreshold(dbtClassMixin, Mergeable):
class FreshnessThreshold(JsonSchemaMixin, Mergeable):
warn_after: Optional[Time] = None
error_after: Optional[Time] = None
filter: Optional[str] = None
@@ -194,7 +180,7 @@ class FreshnessThreshold(dbtClassMixin, Mergeable):
@dataclass
class AdditionalPropertiesAllowed(
AdditionalPropertiesMixin,
ExtensibleDbtClassMixin
ExtensibleJsonSchemaMixin
):
_extra: Dict[str, Any] = field(default_factory=dict)
@@ -226,7 +212,7 @@ class ExternalTable(AdditionalPropertiesAllowed, Mergeable):
@dataclass
class Quoting(dbtClassMixin, Mergeable):
class Quoting(JsonSchemaMixin, Mergeable):
database: Optional[bool] = None
schema: Optional[bool] = None
identifier: Optional[bool] = None
@@ -244,15 +230,15 @@ class UnparsedSourceTableDefinition(HasColumnTests, HasTests):
external: Optional[ExternalTable] = None
tags: List[str] = field(default_factory=list)
def __post_serialize__(self, dct):
dct = super().__post_serialize__(dct)
if 'freshness' not in dct and self.freshness is None:
dct['freshness'] = None
return dct
def to_dict(self, omit_none=True, validate=False):
result = super().to_dict(omit_none=omit_none, validate=validate)
if omit_none and self.freshness is None:
result['freshness'] = None
return result
@dataclass
class UnparsedSourceDefinition(dbtClassMixin, Replaceable):
class UnparsedSourceDefinition(JsonSchemaMixin, Replaceable):
name: str
description: str = ''
meta: Dict[str, Any] = field(default_factory=dict)
@@ -266,21 +252,20 @@ class UnparsedSourceDefinition(dbtClassMixin, Replaceable):
loaded_at_field: Optional[str] = None
tables: List[UnparsedSourceTableDefinition] = field(default_factory=list)
tags: List[str] = field(default_factory=list)
config: Dict[str, Any] = field(default_factory=dict)
@property
def yaml_key(self) -> 'str':
return 'sources'
def __post_serialize__(self, dct):
dct = super().__post_serialize__(dct)
if 'freshness' not in dct and self.freshness is None:
dct['freshness'] = None
return dct
def to_dict(self, omit_none=True, validate=False):
result = super().to_dict(omit_none=omit_none, validate=validate)
if omit_none and self.freshness is None:
result['freshness'] = None
return result
@dataclass
class SourceTablePatch(dbtClassMixin):
class SourceTablePatch(JsonSchemaMixin):
name: str
description: Optional[str] = None
meta: Optional[Dict[str, Any]] = None
@@ -311,7 +296,7 @@ class SourceTablePatch(dbtClassMixin):
@dataclass
class SourcePatch(dbtClassMixin, Replaceable):
class SourcePatch(JsonSchemaMixin, Replaceable):
name: str = field(
metadata=dict(description='The name of the source to override'),
)
@@ -355,16 +340,12 @@ class SourcePatch(dbtClassMixin, Replaceable):
@dataclass
class UnparsedDocumentation(dbtClassMixin, Replaceable):
class UnparsedDocumentation(JsonSchemaMixin, Replaceable):
package_name: str
root_path: str
path: str
original_file_path: str
@property
def file_id(self):
return f'{self.package_name}://{self.original_file_path}'
@property
def resource_type(self):
return NodeType.Documentation
@@ -419,19 +400,17 @@ class MaturityType(StrEnum):
@dataclass
class ExposureOwner(dbtClassMixin, Replaceable):
class ExposureOwner(JsonSchemaMixin, Replaceable):
email: str
name: Optional[str] = None
@dataclass
class UnparsedExposure(dbtClassMixin, Replaceable):
class UnparsedExposure(JsonSchemaMixin, Replaceable):
name: str
type: ExposureType
owner: ExposureOwner
description: str = ''
maturity: Optional[MaturityType] = None
meta: Dict[str, Any] = field(default_factory=dict)
tags: List[str] = field(default_factory=list)
url: Optional[str] = None
depends_on: List[str] = field(default_factory=list)

View File

@@ -4,39 +4,25 @@ from dbt.helper_types import NoValue
from dbt.logger import GLOBAL_LOGGER as logger # noqa
from dbt import tracking
from dbt import ui
from dbt.dataclass_schema import (
dbtClassMixin, ValidationError,
HyphenatedDbtClassMixin,
ExtensibleDbtClassMixin,
register_pattern, ValidatedStringMixin
)
from hologram import JsonSchemaMixin, ValidationError
from hologram.helpers import HyphenatedJsonSchemaMixin, register_pattern, \
ExtensibleJsonSchemaMixin
from dataclasses import dataclass, field
from typing import Optional, List, Dict, Union, Any
from mashumaro.types import SerializableType
from typing import Optional, List, Dict, Union, Any, NewType
PIN_PACKAGE_URL = 'https://docs.getdbt.com/docs/package-management#section-specifying-package-versions' # noqa
DEFAULT_SEND_ANONYMOUS_USAGE_STATS = True
class Name(ValidatedStringMixin):
ValidationRegex = r'^[^\d\W]\w*$'
Name = NewType('Name', str)
register_pattern(Name, r'^[^\d\W]\w*$')
class SemverString(str, SerializableType):
def _serialize(self) -> str:
return self
@classmethod
def _deserialize(cls, value: str) -> 'SemverString':
return SemverString(value)
# this does not support full semver (it does not allow a trailing -fooXYZ) and
# is not restrictive enough for full semver (it allows '1.0'). It's more like
# 'semver lite'.
SemverString = NewType('SemverString', str)
register_pattern(
SemverString,
r'^(?:0|[1-9]\d*)\.(?:0|[1-9]\d*)(\.(?:0|[1-9]\d*))?$',
@@ -44,15 +30,15 @@ register_pattern(
@dataclass
class Quoting(dbtClassMixin, Mergeable):
schema: Optional[bool] = None
database: Optional[bool] = None
project: Optional[bool] = None
identifier: Optional[bool] = None
class Quoting(JsonSchemaMixin, Mergeable):
identifier: Optional[bool]
schema: Optional[bool]
database: Optional[bool]
project: Optional[bool]
@dataclass
class Package(Replaceable, HyphenatedDbtClassMixin):
class Package(Replaceable, HyphenatedJsonSchemaMixin):
pass
@@ -68,9 +54,8 @@ RawVersion = Union[str, float]
@dataclass
class GitPackage(Package):
git: str
revision: Optional[RawVersion] = None
revision: Optional[RawVersion]
warn_unpinned: Optional[bool] = None
subdirectory: Optional[str] = None
def get_revisions(self) -> List[str]:
if self.revision is None:
@@ -83,7 +68,6 @@ class GitPackage(Package):
class RegistryPackage(Package):
package: str
version: Union[RawVersion, List[RawVersion]]
install_prerelease: Optional[bool] = False
def get_versions(self) -> List[str]:
if isinstance(self.version, list):
@@ -96,7 +80,7 @@ PackageSpec = Union[LocalPackage, GitPackage, RegistryPackage]
@dataclass
class PackageConfig(dbtClassMixin, Replaceable):
class PackageConfig(JsonSchemaMixin, Replaceable):
packages: List[PackageSpec]
@@ -112,13 +96,13 @@ class ProjectPackageMetadata:
@dataclass
class Downloads(ExtensibleDbtClassMixin, Replaceable):
class Downloads(ExtensibleJsonSchemaMixin, Replaceable):
tarball: str
@dataclass
class RegistryPackageMetadata(
ExtensibleDbtClassMixin,
ExtensibleJsonSchemaMixin,
ProjectPackageMetadata,
):
downloads: Downloads
@@ -170,7 +154,7 @@ BANNED_PROJECT_NAMES = {
@dataclass
class Project(HyphenatedDbtClassMixin, Replaceable):
class Project(HyphenatedJsonSchemaMixin, Replaceable):
name: Name
version: Union[SemverString, float]
config_version: int
@@ -192,13 +176,11 @@ class Project(HyphenatedDbtClassMixin, Replaceable):
on_run_start: Optional[List[str]] = field(default_factory=list_str)
on_run_end: Optional[List[str]] = field(default_factory=list_str)
require_dbt_version: Optional[Union[List[str], str]] = None
dispatch: List[Dict[str, Any]] = field(default_factory=list)
models: Dict[str, Any] = field(default_factory=dict)
seeds: Dict[str, Any] = field(default_factory=dict)
snapshots: Dict[str, Any] = field(default_factory=dict)
analyses: Dict[str, Any] = field(default_factory=dict)
sources: Dict[str, Any] = field(default_factory=dict)
tests: Dict[str, Any] = field(default_factory=dict)
vars: Optional[Dict[str, Any]] = field(
default=None,
metadata=dict(
@@ -209,23 +191,18 @@ class Project(HyphenatedDbtClassMixin, Replaceable):
query_comment: Optional[Union[QueryComment, NoValue, str]] = NoValue()
@classmethod
def validate(cls, data):
super().validate(data)
if data['name'] in BANNED_PROJECT_NAMES:
def from_dict(cls, data, validate=True) -> 'Project':
result = super().from_dict(data, validate=validate)
if result.name in BANNED_PROJECT_NAMES:
raise ValidationError(
f"Invalid project name: {data['name']} is a reserved word"
f'Invalid project name: {result.name} is a reserved word'
)
# validate dispatch config
if 'dispatch' in data and data['dispatch']:
entries = data['dispatch']
for entry in entries:
if ('macro_namespace' not in entry or 'search_order' not in entry or
not isinstance(entry['search_order'], list)):
raise ValidationError(f"Invalid project dispatch config: {entry}")
return result
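# Illustrative sketch (not part of this changeset): a 'dispatch' entry that
# passes the validation above. The package names are hypothetical.
dispatch_entry = {
    'macro_namespace': 'dbt_utils',
    'search_order': ['my_project', 'dbt_utils'],
}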
@dataclass
class UserConfig(ExtensibleDbtClassMixin, Replaceable, UserConfigContract):
class UserConfig(ExtensibleJsonSchemaMixin, Replaceable, UserConfigContract):
send_anonymous_usage_stats: bool = DEFAULT_SEND_ANONYMOUS_USAGE_STATS
use_colors: Optional[bool] = None
partial_parse: Optional[bool] = None
@@ -245,7 +222,7 @@ class UserConfig(ExtensibleDbtClassMixin, Replaceable, UserConfigContract):
@dataclass
class ProfileConfig(HyphenatedDbtClassMixin, Replaceable):
class ProfileConfig(HyphenatedJsonSchemaMixin, Replaceable):
profile_name: str = field(metadata={'preserve_underscore': True})
target_name: str = field(metadata={'preserve_underscore': True})
config: UserConfig
@@ -256,10 +233,10 @@ class ProfileConfig(HyphenatedDbtClassMixin, Replaceable):
@dataclass
class ConfiguredQuoting(Quoting, Replaceable):
identifier: bool = True
schema: bool = True
database: Optional[bool] = None
project: Optional[bool] = None
identifier: bool
schema: bool
database: Optional[bool]
project: Optional[bool]
@dataclass
@@ -272,5 +249,5 @@ class Configuration(Project, ProfileConfig):
@dataclass
class ProjectList(dbtClassMixin):
class ProjectList(JsonSchemaMixin):
projects: Dict[str, Project]

View File

@@ -1,11 +1,12 @@
from collections.abc import Mapping
from dataclasses import dataclass, fields
from typing import (
Optional, Dict,
Optional, TypeVar, Generic, Dict,
)
from typing_extensions import Protocol
from dbt.dataclass_schema import dbtClassMixin, StrEnum
from hologram import JsonSchemaMixin
from hologram.helpers import StrEnum
from dbt import deprecations
from dbt.contracts.util import Replaceable
@@ -31,7 +32,7 @@ class HasQuoting(Protocol):
quoting: Dict[str, bool]
class FakeAPIObject(dbtClassMixin, Replaceable, Mapping):
class FakeAPIObject(JsonSchemaMixin, Replaceable, Mapping):
# override the mapping truthiness, len is always >1
def __bool__(self):
return True
@@ -52,18 +53,21 @@ class FakeAPIObject(dbtClassMixin, Replaceable, Mapping):
return len(fields(self.__class__))
def incorporate(self, **kwargs):
value = self.to_dict(omit_none=True)
value = self.to_dict()
value = deep_merge(value, kwargs)
return self.from_dict(value)
@dataclass
class Policy(FakeAPIObject):
database: bool = True
schema: bool = True
identifier: bool = True
T = TypeVar('T')
def get_part(self, key: ComponentName) -> bool:
@dataclass
class _ComponentObject(FakeAPIObject, Generic[T]):
database: T
schema: T
identifier: T
def get_part(self, key: ComponentName) -> T:
if key == ComponentName.Database:
return self.database
elif key == ComponentName.Schema:
@@ -76,18 +80,25 @@ class Policy(FakeAPIObject):
.format(key, list(ComponentName))
)
def replace_dict(self, dct: Dict[ComponentName, bool]):
kwargs: Dict[str, bool] = {}
def replace_dict(self, dct: Dict[ComponentName, T]):
kwargs: Dict[str, T] = {}
for k, v in dct.items():
kwargs[str(k)] = v
return self.replace(**kwargs)
@dataclass
class Path(FakeAPIObject):
database: Optional[str] = None
schema: Optional[str] = None
identifier: Optional[str] = None
class Policy(_ComponentObject[bool]):
database: bool = True
schema: bool = True
identifier: bool = True
@dataclass
class Path(_ComponentObject[Optional[str]]):
database: Optional[str]
schema: Optional[str]
identifier: Optional[str]
def __post_init__(self):
# handle pesky jinja2.Undefined sneaking in here and messing up render
@@ -109,22 +120,3 @@ class Path(FakeAPIObject):
if part is not None:
part = part.lower()
return part
def get_part(self, key: ComponentName) -> Optional[str]:
if key == ComponentName.Database:
return self.database
elif key == ComponentName.Schema:
return self.schema
elif key == ComponentName.Identifier:
return self.identifier
else:
raise ValueError(
'Got a key of {}, expected one of {}'
.format(key, list(ComponentName))
)
def replace_dict(self, dct: Dict[ComponentName, str]):
kwargs: Dict[str, str] = {}
for k, v in dct.items():
kwargs[str(k)] = v
return self.replace(**kwargs)
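# Illustrative usage sketch (not part of this changeset) of get_part and
# replace_dict on the component objects above.
policy = Policy(database=True, schema=False, identifier=True)
policy.get_part(ComponentName.Schema)                      # -> False
policy = policy.replace_dict({ComponentName.Schema: True})
policy.get_part(ComponentName.Schema)                      # -> True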

View File

@@ -17,21 +17,20 @@ from dbt.logger import (
GLOBAL_LOGGER as logger,
)
from dbt.utils import lowercase
from dbt.dataclass_schema import dbtClassMixin, StrEnum
from hologram.helpers import StrEnum
from hologram import JsonSchemaMixin
import agate
from dataclasses import dataclass, field
from datetime import datetime
from typing import (
Union, Dict, List, Optional, Any, NamedTuple, Sequence,
)
from typing import Union, Dict, List, Optional, Any, NamedTuple, Sequence
from dbt.clients.system import write_json
@dataclass
class TimingInfo(dbtClassMixin):
class TimingInfo(JsonSchemaMixin):
name: str
started_at: Optional[datetime] = None
completed_at: Optional[datetime] = None
@@ -78,7 +77,6 @@ class TestStatus(StrEnum):
Error = NodeStatus.Error
Fail = NodeStatus.Fail
Warn = NodeStatus.Warn
Skipped = NodeStatus.Skipped
class FreshnessStatus(StrEnum):
@@ -89,23 +87,13 @@ class FreshnessStatus(StrEnum):
@dataclass
class BaseResult(dbtClassMixin):
class BaseResult(JsonSchemaMixin):
status: Union[RunStatus, TestStatus, FreshnessStatus]
timing: List[TimingInfo]
thread_id: str
execution_time: float
message: Optional[Union[str, int]]
adapter_response: Dict[str, Any]
message: Optional[str]
failures: Optional[int]
@classmethod
def __pre_deserialize__(cls, data):
data = super().__pre_deserialize__(data)
if 'message' not in data:
data['message'] = None
if 'failures' not in data:
data['failures'] = None
return data
@dataclass
@@ -115,11 +103,7 @@ class NodeResult(BaseResult):
@dataclass
class RunResult(NodeResult):
agate_table: Optional[agate.Table] = field(
default=None, metadata={
'serialize': lambda x: None, 'deserialize': lambda x: None
}
)
agate_table: Optional[agate.Table] = None
@property
def skipped(self):
@@ -127,7 +111,7 @@ class RunResult(NodeResult):
@dataclass
class ExecutionResult(dbtClassMixin):
class ExecutionResult(JsonSchemaMixin):
results: Sequence[BaseResult]
elapsed_time: float
@@ -161,8 +145,7 @@ def process_run_result(result: RunResult) -> RunResultOutput:
thread_id=result.thread_id,
execution_time=result.execution_time,
message=result.message,
adapter_response=result.adapter_response,
failures=result.failures
adapter_response=result.adapter_response
)
@@ -185,7 +168,7 @@ class RunExecutionResult(
@dataclass
@schema_version('run-results', 2)
@schema_version('run-results', 1)
class RunResultsArtifact(ExecutionResult, ArtifactMixin):
results: Sequence[RunResultOutput]
args: Dict[str, Any] = field(default_factory=dict)
@@ -210,8 +193,8 @@ class RunResultsArtifact(ExecutionResult, ArtifactMixin):
args=args
)
def write(self, path: str):
write_json(path, self.to_dict(omit_none=False))
def write(self, path: str, omit_none=False):
write_json(path, self.to_dict(omit_none=omit_none))
@dataclass
@@ -270,14 +253,14 @@ class FreshnessErrorEnum(StrEnum):
@dataclass
class SourceFreshnessRuntimeError(dbtClassMixin):
class SourceFreshnessRuntimeError(JsonSchemaMixin):
unique_id: str
error: Optional[Union[str, int]]
status: FreshnessErrorEnum
@dataclass
class SourceFreshnessOutput(dbtClassMixin):
class SourceFreshnessOutput(JsonSchemaMixin):
unique_id: str
max_loaded_at: datetime
snapshotted_at: datetime
@@ -383,7 +366,6 @@ class FreshnessExecutionResultArtifact(
Primitive = Union[bool, str, float, None]
PrimitiveDict = Dict[str, Primitive]
CatalogKey = NamedTuple(
'CatalogKey',
@@ -392,40 +374,40 @@ CatalogKey = NamedTuple(
@dataclass
class StatsItem(dbtClassMixin):
class StatsItem(JsonSchemaMixin):
id: str
label: str
value: Primitive
description: Optional[str]
include: bool
description: Optional[str] = None
StatsDict = Dict[str, StatsItem]
@dataclass
class ColumnMetadata(dbtClassMixin):
class ColumnMetadata(JsonSchemaMixin):
type: str
comment: Optional[str]
index: int
name: str
comment: Optional[str] = None
ColumnMap = Dict[str, ColumnMetadata]
@dataclass
class TableMetadata(dbtClassMixin):
class TableMetadata(JsonSchemaMixin):
type: str
database: Optional[str]
schema: str
name: str
database: Optional[str] = None
comment: Optional[str] = None
owner: Optional[str] = None
comment: Optional[str]
owner: Optional[str]
@dataclass
class CatalogTable(dbtClassMixin, Replaceable):
class CatalogTable(JsonSchemaMixin, Replaceable):
metadata: TableMetadata
columns: ColumnMap
stats: StatsDict
@@ -448,18 +430,12 @@ class CatalogMetadata(BaseArtifactMetadata):
@dataclass
class CatalogResults(dbtClassMixin):
class CatalogResults(JsonSchemaMixin):
nodes: Dict[str, CatalogTable]
sources: Dict[str, CatalogTable]
errors: Optional[List[str]] = None
errors: Optional[List[str]]
_compile_results: Optional[Any] = None
def __post_serialize__(self, dct):
dct = super().__post_serialize__(dct)
if '_compile_results' in dct:
del dct['_compile_results']
return dct
@dataclass
@schema_version('catalog', 1)

View File

@@ -5,7 +5,8 @@ from dataclasses import dataclass, field
from datetime import datetime, timedelta
from typing import Optional, Union, List, Any, Dict, Type, Sequence
from dbt.dataclass_schema import dbtClassMixin, StrEnum
from hologram import JsonSchemaMixin
from hologram.helpers import StrEnum
from dbt.contracts.graph.compiled import CompileResultNode
from dbt.contracts.graph.manifest import WritableManifest
@@ -33,53 +34,31 @@ TaskID = uuid.UUID
@dataclass
class RPCParameters(dbtClassMixin):
task_tags: TaskTags
class RPCParameters(JsonSchemaMixin):
timeout: Optional[float]
@classmethod
def __pre_deserialize__(cls, data, omit_none=True):
data = super().__pre_deserialize__(data)
if 'timeout' not in data:
data['timeout'] = None
if 'task_tags' not in data:
data['task_tags'] = None
return data
task_tags: TaskTags
@dataclass
class RPCExecParameters(RPCParameters):
name: str
sql: str
macros: Optional[str] = None
macros: Optional[str]
@dataclass
class RPCCompileParameters(RPCParameters):
threads: Optional[int] = None
models: Union[None, str, List[str]] = None
select: Union[None, str, List[str]] = None
exclude: Union[None, str, List[str]] = None
selector: Optional[str] = None
state: Optional[str] = None
@dataclass
class RPCListParameters(RPCParameters):
resource_types: Optional[List[str]] = None
models: Union[None, str, List[str]] = None
exclude: Union[None, str, List[str]] = None
select: Union[None, str, List[str]] = None
selector: Optional[str] = None
output: Optional[str] = 'json'
output_keys: Optional[List[str]] = None
@dataclass
class RPCRunParameters(RPCParameters):
threads: Optional[int] = None
models: Union[None, str, List[str]] = None
select: Union[None, str, List[str]] = None
exclude: Union[None, str, List[str]] = None
selector: Optional[str] = None
state: Optional[str] = None
@@ -119,17 +98,6 @@ class RPCDocsGenerateParameters(RPCParameters):
state: Optional[str] = None
@dataclass
class RPCBuildParameters(RPCParameters):
threads: Optional[int] = None
models: Union[None, str, List[str]] = None
select: Union[None, str, List[str]] = None
exclude: Union[None, str, List[str]] = None
selector: Optional[str] = None
state: Optional[str] = None
defer: Optional[bool] = None
@dataclass
class RPCCliParameters(RPCParameters):
cli: str
@@ -164,7 +132,7 @@ class StatusParameters(RPCParameters):
@dataclass
class GCSettings(dbtClassMixin):
class GCSettings(JsonSchemaMixin):
# start evicting the longest-ago-ended tasks here
maxsize: int
# start evicting all tasks before now - auto_reap_age when we have this
@@ -200,8 +168,6 @@ class RPCRunOperationParameters(RPCParameters):
class RPCSourceFreshnessParameters(RPCParameters):
threads: Optional[int] = None
select: Union[None, str, List[str]] = None
exclude: Union[None, str, List[str]] = None
selector: Optional[str] = None
@dataclass
@@ -216,13 +182,6 @@ class RemoteResult(VersionedSchema):
logs: List[LogMessage]
@dataclass
@schema_version('remote-list-results', 1)
class RemoteListResults(RemoteResult):
output: List[Any]
generated_at: datetime = field(default_factory=datetime.utcnow)
@dataclass
@schema_version('remote-deps-result', 1)
class RemoteDepsResult(RemoteResult):
@@ -295,7 +254,7 @@ class RemoteExecutionResult(ExecutionResult, RemoteResult):
@dataclass
class ResultTable(dbtClassMixin):
class ResultTable(JsonSchemaMixin):
column_names: List[str]
rows: List[Any]
@@ -452,31 +411,21 @@ class TaskHandlerState(StrEnum):
@dataclass
class TaskTiming(dbtClassMixin):
class TaskTiming(JsonSchemaMixin):
state: TaskHandlerState
start: Optional[datetime]
end: Optional[datetime]
elapsed: Optional[float]
# These ought to be defaults but superclass order doesn't
# allow that to work
@classmethod
def __pre_deserialize__(cls, data):
data = super().__pre_deserialize__(data)
for field_name in ('start', 'end', 'elapsed'):
if field_name not in data:
data[field_name] = None
return data
@dataclass
class TaskRow(TaskTiming):
task_id: TaskID
request_id: Union[str, int]
request_source: str
method: str
request_id: Union[str, int]
tags: TaskTags = None
timeout: Optional[float] = None
timeout: Optional[float]
tags: TaskTags
@dataclass
@@ -502,7 +451,7 @@ class KillResult(RemoteResult):
@dataclass
@schema_version('remote-manifest-result', 1)
class GetManifestResult(RemoteResult):
manifest: Optional[WritableManifest] = None
manifest: Optional[WritableManifest]
# this is kind of carefully structured: BlocksManifestTasks is implied by
@@ -526,16 +475,6 @@ class PollResult(RemoteResult, TaskTiming):
end: Optional[datetime]
elapsed: Optional[float]
# These ought to be defaults but superclass order doesn't
# allow that to work
@classmethod
def __pre_deserialize__(cls, data):
data = super().__pre_deserialize__(data)
for field_name in ('start', 'end', 'elapsed'):
if field_name not in data:
data[field_name] = None
return data
@dataclass
@schema_version('poll-remote-deps-result', 1)

View File

@@ -1,18 +1,18 @@
from dataclasses import dataclass
from dbt.dataclass_schema import dbtClassMixin
from hologram import JsonSchemaMixin
from typing import List, Dict, Any, Union
@dataclass
class SelectorDefinition(dbtClassMixin):
class SelectorDefinition(JsonSchemaMixin):
name: str
definition: Union[str, Dict[str, Any]]
description: str = ''
@dataclass
class SelectorFile(dbtClassMixin):
class SelectorFile(JsonSchemaMixin):
selectors: List[SelectorDefinition]
version: int = 2

View File

@@ -7,13 +7,15 @@ from typing import (
from dbt.clients.system import write_json, read_json
from dbt.exceptions import (
IncompatibleSchemaException,
InternalException,
RuntimeException,
)
from dbt.version import __version__
from dbt.tracking import get_invocation_id
from dbt.dataclass_schema import dbtClassMixin
from hologram import JsonSchemaMixin
MacroKey = Tuple[str, str]
SourceKey = Tuple[str, str]
@@ -55,10 +57,8 @@ class Mergeable(Replaceable):
class Writable:
def write(self, path: str):
write_json(
path, self.to_dict(omit_none=False) # type: ignore
)
def write(self, path: str, omit_none: bool = False):
write_json(path, self.to_dict(omit_none=omit_none)) # type: ignore
class AdditionalPropertiesMixin:
@@ -69,41 +69,22 @@ class AdditionalPropertiesMixin:
"""
ADDITIONAL_PROPERTIES = True
# This takes attributes in the dictionary that are
# not in the class definitions and puts them in an
# _extra dict in the class
@classmethod
def __pre_deserialize__(cls, data):
# dir() did not work because fields with
# metadata settings are not found
# The original version of this would create the
# object first and then update extra with the
# extra keys, but that won't work here, so
# we're copying the dict so we don't insert the
# _extra in the original data. This also requires
# that Mashumaro actually build the '_extra' field
cls_keys = cls._get_field_names()
new_dict = {}
def from_dict(cls, data, validate=True):
self = super().from_dict(data=data, validate=validate)
keys = self.to_dict(validate=False, omit_none=False)
for key, value in data.items():
if key not in cls_keys and key != '_extra':
if '_extra' not in new_dict:
new_dict['_extra'] = {}
new_dict['_extra'][key] = value
else:
new_dict[key] = value
data = new_dict
data = super().__pre_deserialize__(data)
return data
if key not in keys:
self.extra[key] = value
return self
def __post_serialize__(self, dct):
data = super().__post_serialize__(dct)
def to_dict(self, omit_none=True, validate=False):
data = super().to_dict(omit_none=omit_none, validate=validate)
data.update(self.extra)
if '_extra' in data:
del data['_extra']
return data
def replace(self, **kwargs):
dct = self.to_dict(omit_none=False)
dct = self.to_dict(omit_none=False, validate=False)
dct.update(kwargs)
return self.from_dict(dct)
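A rough sketch of the extra-keys behaviour described in the comments above: keys that are not declared fields survive a from_dict/to_dict round trip by being parked in an extra mapping. This is a toy stand-in, not dbt's AdditionalPropertiesMixin; the field and key names are invented.
from dataclasses import dataclass, field, fields
from typing import Any, Dict
@dataclass
class ToyAdditionalProps:
    name: str
    # holds any keys that are not declared dataclass fields
    _extra: Dict[str, Any] = field(default_factory=dict)
    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'ToyAdditionalProps':
        known = {f.name for f in fields(cls)} - {'_extra'}
        kwargs = {k: v for k, v in data.items() if k in known}
        extra = {k: v for k, v in data.items() if k not in known}
        return cls(_extra=extra, **kwargs)
    def to_dict(self) -> Dict[str, Any]:
        out = {f.name: getattr(self, f.name) for f in fields(self) if f.name != '_extra'}
        out.update(self._extra)  # merge the preserved unknown keys back in
        return out
obj = ToyAdditionalProps.from_dict({'name': 'my_model', 'meta_owner': 'analytics'})
assert obj.to_dict() == {'name': 'my_model', 'meta_owner': 'analytics'}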
@@ -125,8 +106,7 @@ class Readable:
return cls.from_dict(data) # type: ignore
BASE_SCHEMAS_URL = 'https://schemas.getdbt.com/'
SCHEMA_PATH = 'dbt/{name}/v{version}.json'
BASE_SCHEMAS_URL = 'https://schemas.getdbt.com/dbt/{name}/v{version}.json'
@dataclasses.dataclass
@@ -134,15 +114,11 @@ class SchemaVersion:
name: str
version: int
@property
def path(self) -> str:
return SCHEMA_PATH.format(
name=self.name,
version=self.version
)
def __str__(self) -> str:
return BASE_SCHEMAS_URL + self.path
return BASE_SCHEMAS_URL.format(
name=self.name,
version=self.version,
)
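For orientation: both sides of this hunk should render the same URL for a given artifact. Using run-results at schema version 2 (the version referenced elsewhere in this diff) as the example:
BASE_SCHEMAS_URL = 'https://schemas.getdbt.com/'
SCHEMA_PATH = 'dbt/{name}/v{version}.json'
# split-constant form (new) and single-template form (old) agree
assert BASE_SCHEMAS_URL + SCHEMA_PATH.format(name='run-results', version=2) == \
    'https://schemas.getdbt.com/dbt/run-results/v2.json'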
SCHEMA_VERSION_KEY = 'dbt_schema_version'
@@ -159,7 +135,7 @@ def get_metadata_env() -> Dict[str, str]:
@dataclasses.dataclass
class BaseArtifactMetadata(dbtClassMixin):
class BaseArtifactMetadata(JsonSchemaMixin):
dbt_schema_version: str
dbt_version: str = __version__
generated_at: datetime = dataclasses.field(
@@ -182,7 +158,7 @@ def schema_version(name: str, version: int):
@dataclasses.dataclass
class VersionedSchema(dbtClassMixin):
class VersionedSchema(JsonSchemaMixin):
dbt_schema_version: ClassVar[SchemaVersion]
@classmethod
@@ -204,9 +180,18 @@ class ArtifactMixin(VersionedSchema, Writable, Readable):
metadata: BaseArtifactMetadata
@classmethod
def validate(cls, data):
super().validate(data)
def from_dict(
cls: Type[T], data: Dict[str, Any], validate: bool = True
) -> T:
if cls.dbt_schema_version is None:
raise InternalException(
'Cannot call from_dict with no schema version!'
)
if validate:
expected = str(cls.dbt_schema_version)
found = data.get('metadata', {}).get(SCHEMA_VERSION_KEY)
if found != expected:
raise IncompatibleSchemaException(expected, found)
return super().from_dict(data=data, validate=validate)

View File

@@ -1,169 +0,0 @@
from typing import (
Type, ClassVar, cast,
)
import re
from dataclasses import fields
from enum import Enum
from datetime import datetime
from dateutil.parser import parse
from hologram import JsonSchemaMixin, FieldEncoder, ValidationError
# type: ignore
from mashumaro import DataClassDictMixin
from mashumaro.config import (
TO_DICT_ADD_OMIT_NONE_FLAG, BaseConfig as MashBaseConfig
)
from mashumaro.types import SerializableType, SerializationStrategy
class DateTimeSerialization(SerializationStrategy):
def serialize(self, value):
out = value.isoformat()
# Assume UTC if timezone is missing
if value.tzinfo is None:
out = out + "Z"
return out
def deserialize(self, value):
return (
value if isinstance(value, datetime) else parse(cast(str, value))
)
# This class pulls in both JsonSchemaMixin from Hologram and
# DataClassDictMixin from our fork of Mashumaro. The 'to_dict'
# and 'from_dict' methods come from Mashumaro. Building
# jsonschemas for every class and the 'validate' method
# come from Hologram.
class dbtClassMixin(DataClassDictMixin, JsonSchemaMixin):
"""Mixin which adds methods to generate a JSON schema and
convert to and from JSON encodable dicts with validation
against the schema
"""
class Config(MashBaseConfig):
code_generation_options = [
TO_DICT_ADD_OMIT_NONE_FLAG,
]
serialization_strategy = {
datetime: DateTimeSerialization(),
}
_hyphenated: ClassVar[bool] = False
ADDITIONAL_PROPERTIES: ClassVar[bool] = False
# This is called by the mashumaro to_dict in order to handle
# nested classes.
# Munges the dict that's returned.
def __post_serialize__(self, dct):
if self._hyphenated:
new_dict = {}
for key in dct:
if '_' in key:
new_key = key.replace('_', '-')
new_dict[new_key] = dct[key]
else:
new_dict[key] = dct[key]
dct = new_dict
return dct
# This is called by the mashumaro _from_dict method, before
# performing the conversion to a dict
@classmethod
def __pre_deserialize__(cls, data):
# `data` might not be a dict, e.g. for `query_comment`, which accepts
# a dict or a string; only snake-case for dict values.
if cls._hyphenated and isinstance(data, dict):
new_dict = {}
for key in data:
if '-' in key:
new_key = key.replace('-', '_')
new_dict[new_key] = data[key]
else:
new_dict[key] = data[key]
data = new_dict
return data
# This is used in the hologram._encode_field method, which calls
# a 'to_dict' method which does not have the same parameters in
# hologram and in mashumaro.
def _local_to_dict(self, **kwargs):
args = {}
if 'omit_none' in kwargs:
args['omit_none'] = kwargs['omit_none']
return self.to_dict(**args)
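A hedged usage sketch for the mixin above, assuming dbt's dataclass_schema module (with mashumaro and hologram installed) is importable; the Event dataclass and its field values are invented.
from dataclasses import dataclass
from datetime import datetime
from typing import Optional
from dbt.dataclass_schema import dbtClassMixin  # the mixin defined above
@dataclass
class Event(dbtClassMixin):
    name: str
    started_at: datetime
    finished_at: Optional[datetime] = None
evt = Event(name='parse', started_at=datetime(2021, 2, 9, 15, 5, 25))
# to_dict comes from mashumaro; omit_none drops finished_at, and the
# DateTimeSerialization strategy emits ISO 8601 with a trailing "Z" for
# naive timestamps, roughly:
#   {'name': 'parse', 'started_at': '2021-02-09T15:05:25Z'}
print(evt.to_dict(omit_none=True))
# validation against the generated jsonschema comes from the hologram side
Event.validate(evt.to_dict(omit_none=True))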
class ValidatedStringMixin(str, SerializableType):
ValidationRegex = ''
@classmethod
def _deserialize(cls, value: str) -> 'ValidatedStringMixin':
cls.validate(value)
return ValidatedStringMixin(value)
def _serialize(self) -> str:
return str(self)
@classmethod
def validate(cls, value):
res = re.match(cls.ValidationRegex, value)
if res is None:
raise ValidationError(f"Invalid value: {value}") # TODO
# These classes must be in this order or it doesn't work
class StrEnum(str, SerializableType, Enum):
def __str__(self):
return self.value
# https://docs.python.org/3.6/library/enum.html#using-automatic-values
def _generate_next_value_(name, *_):
return name
def _serialize(self) -> str:
return self.value
@classmethod
def _deserialize(cls, value: str):
return cls(value)
class HyphenatedDbtClassMixin(dbtClassMixin):
# used by from_dict/to_dict
_hyphenated: ClassVar[bool] = True
# used by jsonschema validation, _get_fields
@classmethod
def field_mapping(cls):
result = {}
for field in fields(cls):
skip = field.metadata.get("preserve_underscore")
if skip:
continue
if "_" in field.name:
result[field.name] = field.name.replace("_", "-")
return result
class ExtensibleDbtClassMixin(dbtClassMixin):
ADDITIONAL_PROPERTIES = True
# This is used by Hologram in jsonschema validation
def register_pattern(base_type: Type, pattern: str) -> None:
"""base_type should be a typing.NewType that should always have the given
regex pattern. That means that its underlying type ('__supertype__') had
better be a str!
"""
class PatternEncoder(FieldEncoder):
@property
def json_schema(self):
return {"type": "string", "pattern": pattern}
dbtClassMixin.register_field_encoders({base_type: PatternEncoder()})
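A brief usage sketch for register_pattern, following its docstring; the NewType name and regex below are invented for illustration and assume the register_pattern function defined just above is in scope.
from typing import NewType
# a str-backed NewType that must look like a SQL identifier (hypothetical)
Identifier = NewType('Identifier', str)
register_pattern(Identifier, r'^[A-Za-z_][A-Za-z0-9_]*$')
# hologram will now emit {"type": "string", "pattern": ...} for Identifier
# fields, and validation will reject values that do not match the regex.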

View File

@@ -43,20 +43,6 @@ class DBTDeprecation:
active_deprecations.add(self.name)
class DispatchPackagesDeprecation(DBTDeprecation):
_name = 'dispatch-packages'
_description = '''\
The "packages" argument of adapter.dispatch() has been deprecated.
Use the "macro_namespace" argument instead.
Raised during dispatch for: {macro_name}
For more information, see:
https://docs.getdbt.com/reference/dbt-jinja-functions/dispatch
'''
class MaterializationReturnDeprecation(DBTDeprecation):
_name = 'materialization-return'
@@ -131,14 +117,6 @@ class AdapterMacroDeprecation(DBTDeprecation):
'''
class PackageRedirectDeprecation(DBTDeprecation):
_name = 'package-redirect'
_description = '''\
The `{old_name}` package is deprecated in favor of `{new_name}`. Please update
your `packages.yml` configuration to use `{new_name}` instead.
'''
_adapter_renamed_description = """\
The adapter function `adapter.{old_name}` is deprecated and will be removed in
a future release of dbt. Please use `adapter.{new_name}` instead.
@@ -177,14 +155,12 @@ def warn(name, *args, **kwargs):
active_deprecations: Set[str] = set()
deprecations_list: List[DBTDeprecation] = [
DispatchPackagesDeprecation(),
MaterializationReturnDeprecation(),
NotADictionaryDeprecation(),
ColumnQuotingDeprecation(),
ModelsKeyNonModelDeprecation(),
ExecuteMacrosReleaseDeprecation(),
AdapterMacroDeprecation(),
PackageRedirectDeprecation()
]
deprecations: Dict[str, DBTDeprecation] = {

View File

@@ -93,9 +93,6 @@ class PinnedPackage(BasePackage):
dest_dirname = self.get_project_name(project, renderer)
return os.path.join(project.modules_path, dest_dirname)
def get_subdirectory(self):
return None
SomePinned = TypeVar('SomePinned', bound=PinnedPackage)
SomeUnpinned = TypeVar('SomeUnpinned', bound='UnpinnedPackage')

View File

@@ -1,6 +1,6 @@
import os
import hashlib
from typing import List, Optional
from typing import List
from dbt.clients import git, system
from dbt.config import Project
@@ -37,37 +37,18 @@ class GitPackageMixin:
class GitPinnedPackage(GitPackageMixin, PinnedPackage):
def __init__(
self,
git: str,
revision: str,
warn_unpinned: bool = True,
subdirectory: Optional[str] = None,
self, git: str, revision: str, warn_unpinned: bool = True
) -> None:
super().__init__(git)
self.revision = revision
self.warn_unpinned = warn_unpinned
self.subdirectory = subdirectory
self._checkout_name = md5sum(self.git)
def get_version(self):
return self.revision
def get_subdirectory(self):
return self.subdirectory
def nice_version_name(self):
if self.revision == 'HEAD':
return 'HEAD (default revision)'
else:
return 'revision {}'.format(self.revision)
def unpinned_msg(self):
if self.revision == 'HEAD':
return 'not pinned, using HEAD (default branch)'
elif self.revision in ('main', 'master'):
return f'pinned to the "{self.revision}" branch'
else:
return None
return 'revision {}'.format(self.revision)
def _checkout(self):
"""Performs a shallow clone of the repository into the downloads
@@ -76,8 +57,8 @@ class GitPinnedPackage(GitPackageMixin, PinnedPackage):
the path to the checked out directory."""
try:
dir_ = git.clone_and_checkout(
self.git, get_downloads_path(), revision=self.revision,
dirname=self._checkout_name, subdirectory=self.subdirectory
self.git, get_downloads_path(), branch=self.revision,
dirname=self._checkout_name
)
except ExecutableError as exc:
if exc.cmd and exc.cmd[0] == 'git':
@@ -91,12 +72,11 @@ class GitPinnedPackage(GitPackageMixin, PinnedPackage):
def _fetch_metadata(self, project, renderer) -> ProjectPackageMetadata:
path = self._checkout()
if self.unpinned_msg() and self.warn_unpinned:
if self.revision == 'master' and self.warn_unpinned:
warn_or_error(
'The git package "{}" \n\tis {}.\n\tThis can introduce '
'The git package "{}" is not pinned.\n\tThis can introduce '
'breaking changes into your project without warning!\n\nSee {}'
.format(self.git, self.unpinned_msg(), PIN_PACKAGE_URL),
.format(self.git, PIN_PACKAGE_URL),
log_fmt=ui.yellow('WARNING: {}')
)
loaded = Project.from_project_root(path, renderer)
@@ -115,16 +95,11 @@ class GitPinnedPackage(GitPackageMixin, PinnedPackage):
class GitUnpinnedPackage(GitPackageMixin, UnpinnedPackage[GitPinnedPackage]):
def __init__(
self,
git: str,
revisions: List[str],
warn_unpinned: bool = True,
subdirectory: Optional[str] = None,
self, git: str, revisions: List[str], warn_unpinned: bool = True
) -> None:
super().__init__(git)
self.revisions = revisions
self.warn_unpinned = warn_unpinned
self.subdirectory = subdirectory
@classmethod
def from_contract(
@@ -135,7 +110,7 @@ class GitUnpinnedPackage(GitPackageMixin, UnpinnedPackage[GitPinnedPackage]):
# we want to map None -> True
warn_unpinned = contract.warn_unpinned is not False
return cls(git=contract.git, revisions=revisions,
warn_unpinned=warn_unpinned, subdirectory=contract.subdirectory)
warn_unpinned=warn_unpinned)
def all_names(self) -> List[str]:
if self.git.endswith('.git'):
@@ -153,13 +128,12 @@ class GitUnpinnedPackage(GitPackageMixin, UnpinnedPackage[GitPinnedPackage]):
git=self.git,
revisions=self.revisions + other.revisions,
warn_unpinned=warn_unpinned,
subdirectory=self.subdirectory,
)
def resolved(self) -> GitPinnedPackage:
requested = set(self.revisions)
if len(requested) == 0:
requested = {'HEAD'}
requested = {'master'}
elif len(requested) > 1:
raise_dependency_error(
'git dependencies should contain exactly one version. '
@@ -167,5 +141,5 @@ class GitUnpinnedPackage(GitPackageMixin, UnpinnedPackage[GitPinnedPackage]):
return GitPinnedPackage(
git=self.git, revision=requested.pop(),
warn_unpinned=self.warn_unpinned, subdirectory=self.subdirectory
warn_unpinned=self.warn_unpinned
)

View File

@@ -30,13 +30,9 @@ class RegistryPackageMixin:
class RegistryPinnedPackage(RegistryPackageMixin, PinnedPackage):
def __init__(self,
package: str,
version: str,
version_latest: str) -> None:
def __init__(self, package: str, version: str) -> None:
super().__init__(package)
self.version = version
self.version_latest = version_latest
@property
def name(self):
@@ -48,9 +44,6 @@ class RegistryPinnedPackage(RegistryPackageMixin, PinnedPackage):
def get_version(self):
return self.version
def get_version_latest(self):
return self.version_latest
def nice_version_name(self):
return 'version {}'.format(self.version)
@@ -68,7 +61,7 @@ class RegistryPinnedPackage(RegistryPackageMixin, PinnedPackage):
system.make_directory(os.path.dirname(tar_path))
download_url = metadata.downloads.tarball
system.download_with_retries(download_url, tar_path)
system.download(download_url, tar_path)
deps_path = project.modules_path
package_name = self.get_project_name(project, renderer)
system.untar_package(tar_path, deps_path, package_name)
@@ -78,14 +71,10 @@ class RegistryUnpinnedPackage(
RegistryPackageMixin, UnpinnedPackage[RegistryPinnedPackage]
):
def __init__(
self,
package: str,
versions: List[semver.VersionSpecifier],
install_prerelease: bool
self, package: str, versions: List[semver.VersionSpecifier]
) -> None:
super().__init__(package)
self.versions = versions
self.install_prerelease = install_prerelease
def _check_in_index(self):
index = registry.index_cached()
@@ -102,18 +91,13 @@ class RegistryUnpinnedPackage(
semver.VersionSpecifier.from_version_string(v)
for v in raw_version
]
return cls(
package=contract.package,
versions=versions,
install_prerelease=contract.install_prerelease
)
return cls(package=contract.package, versions=versions)
def incorporate(
self, other: 'RegistryUnpinnedPackage'
) -> 'RegistryUnpinnedPackage':
return RegistryUnpinnedPackage(
package=self.package,
install_prerelease=self.install_prerelease,
versions=self.versions + other.versions,
)
@@ -127,18 +111,12 @@ class RegistryUnpinnedPackage(
raise DependencyException(new_msg) from e
available = registry.get_available_versions(self.package)
installable = semver.filter_installable(
available,
self.install_prerelease
)
available_latest = installable[-1]
# for now, pick a version and then recurse. later on,
# we'll probably want to traverse multiple options
# so we can match packages. not going to make a difference
# right now.
target = semver.resolve_to_specific_version(range_, installable)
target = semver.resolve_to_specific_version(range_, available)
if not target:
package_version_not_found(self.package, range_, installable)
return RegistryPinnedPackage(package=self.package, version=target,
version_latest=available_latest)
package_version_not_found(self.package, range_, available)
return RegistryPinnedPackage(package=self.package, version=target)
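To make the resolution step concrete, a toy illustration of the flow above: filter out prereleases unless requested, remember the latest installable version, then resolve the requested range to a specific version. It does not use dbt.semver; the version strings are invented.
available = ['0.18.2', '0.19.0b1', '0.19.0']      # invented registry versions
install_prerelease = False
def is_prerelease(version: str) -> bool:
    return any(tag in version for tag in ('a', 'b', 'rc'))
installable = [v for v in available if install_prerelease or not is_prerelease(v)]
available_latest = installable[-1]                 # latest installable release
in_range = [v for v in installable if v.startswith('0.19.')]  # stand-in for the semver range
target = in_range[-1] if in_range else None        # None would trigger package_version_not_found
assert (target, available_latest) == ('0.19.0', '0.19.0')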

View File

@@ -7,14 +7,14 @@ from dbt.node_types import NodeType
from dbt import flags
from dbt.ui import line_wrap_message
import dbt.dataclass_schema
import hologram
def validator_error_message(exc):
"""Given a dbt.dataclass_schema.ValidationError (which is basically a
"""Given a hologram.ValidationError (which is basically a
jsonschema.ValidationError), return the relevant parts as a string
"""
if not isinstance(exc, dbt.dataclass_schema.ValidationError):
if not isinstance(exc, hologram.ValidationError):
return str(exc)
path = "[%s]" % "][".join(map(repr, exc.relative_path))
return 'at path {}: {}'.format(path, exc.message)
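A worked example of the formatting above, with invented error contents mimicking a jsonschema ValidationError:
relative_path = ['models', 0, 'tests']
message = "'name' is a required property"
path = "[%s]" % "][".join(map(repr, relative_path))
assert 'at path {}: {}'.format(path, message) == \
    "at path ['models'][0]['tests']: 'name' is a required property"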
@@ -289,15 +289,6 @@ class JinjaRenderingException(CompilationException):
pass
class UndefinedMacroException(CompilationException):
def __str__(self, prefix='! ') -> str:
msg = super().__str__(prefix)
return f'{msg}. This can happen when calling a macro that does ' \
'not exist. Check for typos and/or install package dependencies ' \
'with "dbt deps".'
class UnknownAsyncIDException(Exception):
CODE = 10012
MESSAGE = 'RPC server got an unknown async ID'
@@ -710,11 +701,11 @@ def system_error(operation_name):
raise_compiler_error(
"dbt encountered an error when attempting to {}. "
"If this error persists, please create an issue at: \n\n"
"https://github.com/dbt-labs/dbt"
"https://github.com/fishtown-analytics/dbt"
.format(operation_name))
class ConnectionException(Exception):
class RegistryException(Exception):
pass
@@ -854,11 +845,11 @@ def _fix_dupe_msg(path_1: str, path_2: str, name: str, type_name: str) -> str:
)
def raise_duplicate_patch_name(patch_1, existing_patch_path):
def raise_duplicate_patch_name(patch_1, patch_2):
name = patch_1.name
fix = _fix_dupe_msg(
patch_1.original_file_path,
existing_patch_path,
patch_2.original_file_path,
name,
'resource',
)
@@ -869,12 +860,12 @@ def raise_duplicate_patch_name(patch_1, existing_patch_path):
)
def raise_duplicate_macro_patch_name(patch_1, existing_patch_path):
def raise_duplicate_macro_patch_name(patch_1, patch_2):
package_name = patch_1.package_name
name = patch_1.name
fix = _fix_dupe_msg(
patch_1.original_file_path,
existing_patch_path,
patch_2.original_file_path,
name,
'macros'
)

View File

@@ -13,11 +13,9 @@ FULL_REFRESH = None
USE_CACHE = None
WARN_ERROR = None
TEST_NEW_PARSER = None
USE_EXPERIMENTAL_PARSER = None
WRITE_JSON = None
PARTIAL_PARSE = None
USE_COLORS = None
STORE_FAILURES = None
def env_set_truthy(key: str) -> Optional[str]:
@@ -55,26 +53,22 @@ MP_CONTEXT = _get_context()
def reset():
global STRICT_MODE, FULL_REFRESH, USE_CACHE, WARN_ERROR, TEST_NEW_PARSER, \
USE_EXPERIMENTAL_PARSER, WRITE_JSON, PARTIAL_PARSE, MP_CONTEXT, USE_COLORS, \
STORE_FAILURES
WRITE_JSON, PARTIAL_PARSE, MP_CONTEXT, USE_COLORS
STRICT_MODE = False
FULL_REFRESH = False
USE_CACHE = True
WARN_ERROR = False
TEST_NEW_PARSER = False
USE_EXPERIMENTAL_PARSER = False
WRITE_JSON = True
PARTIAL_PARSE = False
MP_CONTEXT = _get_context()
USE_COLORS = True
STORE_FAILURES = False
def set_from_args(args):
global STRICT_MODE, FULL_REFRESH, USE_CACHE, WARN_ERROR, TEST_NEW_PARSER, \
USE_EXPERIMENTAL_PARSER, WRITE_JSON, PARTIAL_PARSE, MP_CONTEXT, USE_COLORS, \
STORE_FAILURES
WRITE_JSON, PARTIAL_PARSE, MP_CONTEXT, USE_COLORS
USE_CACHE = getattr(args, 'use_cache', USE_CACHE)
@@ -86,7 +80,6 @@ def set_from_args(args):
)
TEST_NEW_PARSER = getattr(args, 'test_new_parser', TEST_NEW_PARSER)
USE_EXPERIMENTAL_PARSER = getattr(args, 'use_experimental_parser', USE_EXPERIMENTAL_PARSER)
WRITE_JSON = getattr(args, 'write_json', WRITE_JSON)
PARTIAL_PARSE = getattr(args, 'partial_parse', None)
MP_CONTEXT = _get_context()
@@ -98,8 +91,6 @@ def set_from_args(args):
if use_colors_override is not None:
USE_COLORS = use_colors_override
STORE_FAILURES = getattr(args, 'store_failures', STORE_FAILURES)
# initialize everything to the defaults on module load
reset()

View File

@@ -1,6 +1,6 @@
# special support for CLI argument parsing.
import itertools
from dbt.clients.yaml_helper import yaml, Loader, Dumper # noqa: F401
import yaml
from typing import (
Dict, List, Optional, Tuple, Any, Union
@@ -26,7 +26,7 @@ SCHEMA_TEST_SELECTOR: str = 'test_type:schema'
def parse_union(
components: List[str], expect_exists: bool, greedy: bool = False
components: List[str], expect_exists: bool
) -> SelectionUnion:
# turn ['a b', 'c'] -> ['a', 'b', 'c']
raw_specs = itertools.chain.from_iterable(
@@ -37,7 +37,7 @@ def parse_union(
# ['a', 'b', 'c,d'] -> union('a', 'b', intersection('c', 'd'))
for raw_spec in raw_specs:
intersection_components: List[SelectionSpec] = [
SelectionCriteria.from_single_spec(part, greedy=greedy)
SelectionCriteria.from_single_spec(part)
for part in raw_spec.split(INTERSECTION_DELIMITER)
]
union_components.append(SelectionIntersection(
@@ -45,6 +45,7 @@ def parse_union(
expect_exists=expect_exists,
raw=raw_spec,
))
return SelectionUnion(
components=union_components,
expect_exists=False,
@@ -53,21 +54,21 @@ def parse_union(
def parse_union_from_default(
raw: Optional[List[str]], default: List[str], greedy: bool = False
raw: Optional[List[str]], default: List[str]
) -> SelectionUnion:
components: List[str]
expect_exists: bool
if raw is None:
return parse_union(components=default, expect_exists=False, greedy=greedy)
return parse_union(components=default, expect_exists=False)
else:
return parse_union(components=raw, expect_exists=True, greedy=greedy)
return parse_union(components=raw, expect_exists=True)
def parse_difference(
include: Optional[List[str]], exclude: Optional[List[str]]
) -> SelectionDifference:
included = parse_union_from_default(include, DEFAULT_INCLUDES)
excluded = parse_union_from_default(exclude, DEFAULT_EXCLUDES, greedy=True)
excluded = parse_union_from_default(exclude, DEFAULT_EXCLUDES)
return SelectionDifference(components=[included, excluded])
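A small walkthrough of the flattening described in the comments of parse_union, assuming the intersection delimiter is a comma (as the comment's example suggests); the selector strings are toy values.
import itertools
# ['a b', 'c,d'] -> ['a', 'b', 'c,d'], mirroring the chain/split above
components = ['a b', 'c,d']
raw_specs = list(itertools.chain.from_iterable(r.split(' ') for r in components))
assert raw_specs == ['a', 'b', 'c,d']
# each raw spec becomes an intersection of its comma-separated parts, so the
# overall shape is union('a', 'b', intersection('c', 'd'))
groups = [spec.split(',') for spec in raw_specs]
assert groups == [['a'], ['b'], ['c', 'd']]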
@@ -235,7 +236,7 @@ def parse_dict_definition(definition: Dict[str, Any]) -> SelectionSpec:
)
# if key isn't a valid method name, this will raise
base = SelectionCriteria.selection_criteria_from_dict(definition, dct)
base = SelectionCriteria.from_dict(definition, dct)
if diff_arg is None:
return base
else:

View File

@@ -1,8 +1,10 @@
import networkx as nx # type: ignore
import threading
from queue import PriorityQueue
from typing import Dict, Set, List, Generator, Optional
from typing import (
Dict, Set, Optional
)
import networkx as nx # type: ignore
from .graph import UniqueId
from dbt.contracts.graph.parsed import ParsedSourceDefinition, ParsedExposure
@@ -19,8 +21,9 @@ class GraphQueue:
that separate threads do not call `.empty()` or `__len__()` and `.get()` at
the same time, as there is an unlocked race!
"""
def __init__(self, graph: nx.DiGraph, manifest: Manifest, selected: Set[UniqueId]):
def __init__(
self, graph: nx.DiGraph, manifest: Manifest, selected: Set[UniqueId]
):
self.graph = graph
self.manifest = manifest
self._selected = selected
@@ -34,7 +37,7 @@ class GraphQueue:
# this lock controls most things
self.lock = threading.Lock()
# store the 'score' of each node as a number. Lower is higher priority.
self._scores = self._get_scores(self.graph)
self._scores = self._calculate_scores()
# populate the initial queue
self._find_new_additions()
# awaits after task end
@@ -53,59 +56,30 @@ class GraphQueue:
return False
return True
@staticmethod
def _grouped_topological_sort(
graph: nx.DiGraph,
) -> Generator[List[str], None, None]:
"""Topological sort of given graph that groups ties.
def _calculate_scores(self) -> Dict[UniqueId, int]:
"""Calculate the 'value' of each node in the graph based on how many
blocking descendants it has. We use this score for the internal
priority queue's ordering, so the quality of this metric is important.
Adapted from `nx.topological_sort`, this function returns a topo sort of a graph however
instead of arbitrarily ordering ties in the sort order, ties are grouped together in
lists.
The score is stored as a negative number because the internal
PriorityQueue picks lowest values first.
Args:
graph: The graph to be sorted.
We could do this in one pass over the graph instead of len(self.graph)
passes but this is easy. For large graphs this may hurt performance.
Returns:
A generator that yields lists of nodes, one list per graph depth level.
This operates on the graph, so it would require a lock if called from
outside __init__.
:return Dict[str, int]: The score dict, mapping unique IDs to integer
scores. Lower scores are higher priority.
"""
indegree_map = {v: d for v, d in graph.in_degree() if d > 0}
zero_indegree = [v for v, d in graph.in_degree() if d == 0]
while zero_indegree:
yield zero_indegree
new_zero_indegree = []
for v in zero_indegree:
for _, child in graph.edges(v):
indegree_map[child] -= 1
if not indegree_map[child]:
new_zero_indegree.append(child)
zero_indegree = new_zero_indegree
def _get_scores(self, graph: nx.DiGraph) -> Dict[str, int]:
"""Scoring nodes for processing order.
Scores are calculated by the graph depth level. Lowest score (0) should be processed first.
Args:
graph: The graph to be scored.
Returns:
A dictionary consisting of `node name`:`score` pairs.
"""
# split graph by connected subgraphs
subgraphs = (
graph.subgraph(x) for x in nx.connected_components(nx.Graph(graph))
)
# score all nodes in all subgraphs
scores = {}
for subgraph in subgraphs:
grouped_nodes = self._grouped_topological_sort(subgraph)
for level, group in enumerate(grouped_nodes):
for node in group:
scores[node] = level
for node in self.graph.nodes():
score = -1 * len([
d for d in nx.descendants(self.graph, node)
if self._include_in_cost(d)
])
scores[node] = score
return scores
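A worked example contrasting the two scoring schemes in this hunk, on an invented graph with two roots feeding a shared leaf (a -> b -> c, x -> c); requires networkx.
import networkx as nx
graph = nx.DiGraph([('a', 'b'), ('b', 'c'), ('x', 'c')])
# descendant-count scoring (one side of the hunk): more blocking descendants
# => more negative score => popped earlier by the PriorityQueue
desc_scores = {n: -len(nx.descendants(graph, n)) for n in graph.nodes()}
assert desc_scores == {'a': -2, 'b': -1, 'x': -1, 'c': 0}
# level-based scoring (the other side): peel off zero-indegree nodes in waves,
# so nodes at the same depth tie with the same score
indegree = {v: d for v, d in graph.in_degree() if d > 0}
frontier = [v for v, d in graph.in_degree() if d == 0]
levels, level = {}, 0
while frontier:
    nxt = []
    for v in frontier:
        levels[v] = level
        for _, child in graph.edges(v):
            indegree[child] -= 1
            if not indegree[child]:
                nxt.append(child)
    frontier, level = nxt, level + 1
assert levels == {'a': 0, 'x': 0, 'b': 1, 'c': 2}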
def get(
@@ -159,6 +133,8 @@ class GraphQueue:
def _find_new_additions(self) -> None:
"""Find any nodes in the graph that need to be added to the internal
queue and add them.
Callers must hold the lock.
"""
for node, in_degree in self.graph.in_degree():
if not self._already_known(node) and in_degree == 0:

View File

@@ -1,5 +1,5 @@
from typing import Set, List, Optional, Tuple
from typing import Set, List, Optional
from .graph import Graph, UniqueId
from .queue import GraphQueue
@@ -25,23 +25,11 @@ def get_package_names(nodes):
def alert_non_existence(raw_spec, nodes):
if len(nodes) == 0:
warn_or_error(
f"The selection criterion '{str(raw_spec)}' does not match"
f" any nodes"
f"The selector '{str(raw_spec)}' does not match any nodes and will"
f" be ignored"
)
def can_select_indirectly(node):
"""If a node is not selected itself, but its parent(s) are, it may qualify
for indirect selection.
Today, only Test nodes can be indirectly selected. In the future,
other node types or invocation flags might qualify.
"""
if node.resource_type == NodeType.Test:
return True
else:
return False
class NodeSelector(MethodManager):
"""The node selector is aware of the graph and manifest,
"""
@@ -73,8 +61,8 @@ class NodeSelector(MethodManager):
def get_nodes_from_criteria(
self,
spec: SelectionCriteria
) -> Tuple[Set[UniqueId], Set[UniqueId]]:
spec: SelectionCriteria,
) -> Set[UniqueId]:
"""Get all nodes specified by the single selection criteria.
- collect the directly included nodes
@@ -91,14 +79,11 @@ class NodeSelector(MethodManager):
f"The '{spec.method}' selector specified in {spec.raw} is "
f"invalid. Must be one of [{valid_selectors}]"
)
return set(), set()
return set()
neighbors = self.collect_specified_neighbors(spec, collected)
direct_nodes, indirect_nodes = self.expand_selection(
selected=(collected | neighbors),
greedy=spec.greedy
)
return direct_nodes, indirect_nodes
extras = self.collect_specified_neighbors(spec, collected)
result = self.expand_selection(collected | extras)
return result
def collect_specified_neighbors(
self, spec: SelectionCriteria, selected: Set[UniqueId]
@@ -121,46 +106,24 @@ class NodeSelector(MethodManager):
additional.update(self.graph.select_children(selected, depth))
return additional
def select_nodes_recursively(self, spec: SelectionSpec) -> Tuple[Set[UniqueId], Set[UniqueId]]:
"""If the spec is a composite spec (a union, difference, or intersection),
def select_nodes(self, spec: SelectionSpec) -> Set[UniqueId]:
"""Select the nodes in the graph according to the spec.
If the spec is a composite spec (a union, difference, or intersection),
recurse into its selections and combine them. If the spec is a concrete
selection criteria, resolve that using the given graph.
"""
if isinstance(spec, SelectionCriteria):
direct_nodes, indirect_nodes = self.get_nodes_from_criteria(spec)
result = self.get_nodes_from_criteria(spec)
else:
bundles = [
self.select_nodes_recursively(component)
node_selections = [
self.select_nodes(component)
for component in spec
]
direct_sets = []
indirect_sets = []
for direct, indirect in bundles:
direct_sets.append(direct)
indirect_sets.append(direct | indirect)
initial_direct = spec.combined(direct_sets)
indirect_nodes = spec.combined(indirect_sets)
direct_nodes = self.incorporate_indirect_nodes(initial_direct, indirect_nodes)
result = spec.combined(node_selections)
if spec.expect_exists:
alert_non_existence(spec.raw, direct_nodes)
return direct_nodes, indirect_nodes
def select_nodes(self, spec: SelectionSpec) -> Set[UniqueId]:
"""Select the nodes in the graph according to the spec.
This is the main point of entry for turning a spec into a set of nodes:
- Recurse through spec, select by criteria, combine by set operation
- Return final (unfiltered) selection set
"""
direct_nodes, indirect_nodes = self.select_nodes_recursively(spec)
return direct_nodes
alert_non_existence(spec.raw, result)
return result
def _is_graph_member(self, unique_id: UniqueId) -> bool:
if unique_id in self.manifest.sources:
@@ -199,55 +162,12 @@ class NodeSelector(MethodManager):
unique_id for unique_id in selected if self._is_match(unique_id)
}
def expand_selection(
self, selected: Set[UniqueId], greedy: bool = False
) -> Tuple[Set[UniqueId], Set[UniqueId]]:
# Test selection can expand to include an implicitly/indirectly selected test.
# In this way, `dbt test -m model_a` also includes tests that directly depend on `model_a`.
# Expansion has two modes, GREEDY and NOT GREEDY.
#
# GREEDY mode: If ANY parent is selected, select the test. We use this for EXCLUSION.
#
# NOT GREEDY mode:
# - If ALL parents are selected, select the test.
# - If ANY parent is missing, return it separately. We'll keep it around
# for later and see if its other parents show up.
# We use this for INCLUSION.
direct_nodes = set(selected)
indirect_nodes = set()
for unique_id in self.graph.select_successors(selected):
if unique_id in self.manifest.nodes:
node = self.manifest.nodes[unique_id]
if can_select_indirectly(node):
# should we add it in directly?
if greedy or set(node.depends_on.nodes) <= set(selected):
direct_nodes.add(unique_id)
# if not:
else:
indirect_nodes.add(unique_id)
return direct_nodes, indirect_nodes
def incorporate_indirect_nodes(
self, direct_nodes: Set[UniqueId], indirect_nodes: Set[UniqueId] = set()
) -> Set[UniqueId]:
# Check tests previously selected indirectly to see if ALL their
# parents are now present.
selected = set(direct_nodes)
for unique_id in indirect_nodes:
if unique_id in self.manifest.nodes:
node = self.manifest.nodes[unique_id]
if set(node.depends_on.nodes) <= set(selected):
selected.add(unique_id)
def expand_selection(self, selected: Set[UniqueId]) -> Set[UniqueId]:
"""Perform selector-specific expansion."""
return selected
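To make the GREEDY / NOT GREEDY distinction concrete, a toy example with invented node names: a test depends on both model_a and model_b, and only model_a is selected. Not-greedy expansion parks the test as indirect (it may be promoted later if model_b shows up); greedy expansion, used for exclusion, pulls the test in as soon as any parent matches.
test_parents = {'model_a', 'model_b'}
def expand(selected, greedy):
    direct, indirect = set(selected), set()
    if greedy or test_parents <= selected:   # ALL parents required when not greedy
        direct.add('test_relationships')
    else:
        indirect.add('test_relationships')   # parked for possible later promotion
    return direct, indirect
assert expand({'model_a'}, greedy=False) == ({'model_a'}, {'test_relationships'})
assert expand({'model_a'}, greedy=True) == ({'model_a', 'test_relationships'}, set())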
def get_selected(self, spec: SelectionSpec) -> Set[UniqueId]:
"""get_selected runs through the node selection process:
"""get_selected runs trhough the node selection process:
- node selection. Based on the include/exclude sets, the set
of matched unique IDs is returned

View File

@@ -3,7 +3,7 @@ from itertools import chain
from pathlib import Path
from typing import Set, List, Dict, Iterator, Tuple, Any, Union, Type, Optional
from dbt.dataclass_schema import StrEnum
from hologram.helpers import StrEnum
from .graph import UniqueId
@@ -22,11 +22,13 @@ from dbt.contracts.graph.parsed import (
ParsedSourceDefinition,
)
from dbt.contracts.state import PreviousState
from dbt.logger import GLOBAL_LOGGER as logger
from dbt.exceptions import (
InternalException,
RuntimeException,
)
from dbt.node_types import NodeType
from dbt.ui import warning_tag
SELECTOR_GLOB = '*'
@@ -47,23 +49,25 @@ class MethodName(StrEnum):
Exposure = 'exposure'
def is_selected_node(fqn: List[str], node_selector: str):
def is_selected_node(real_node, node_selector):
for i, selector_part in enumerate(node_selector):
# If qualified_name exactly matches model name (fqn's leaf), return True
if fqn[-1] == node_selector:
return True
# Flatten node parts. Dots in model names act as namespace separators
flat_fqn = [item for segment in fqn for item in segment.split('.')]
# The selector cannot have more parts than the flattened fqn
if len(flat_fqn) < len(node_selector.split('.')):
return False
is_last = (i == len(node_selector) - 1)
for i, selector_part in enumerate(node_selector.split('.')):
# if we hit a GLOB, then this node is selected
if selector_part == SELECTOR_GLOB:
return True
elif flat_fqn[i] == selector_part:
# match package.node_name or package.dir.node_name
elif is_last and selector_part == real_node[-1]:
return True
elif len(real_node) <= i:
return False
elif real_node[i] == selector_part:
continue
else:
return False
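A quick check of the fqn-matching rules above, with invented model names: an exact match on the leaf name wins, dots inside fqn segments act as namespace separators, a * selector part matches anything deeper, and a selector with more parts than the flattened fqn never matches.
SELECTOR_GLOB = '*'
def is_selected(fqn, node_selector):
    if fqn[-1] == node_selector:              # exact match on the leaf name
        return True
    flat_fqn = [item for segment in fqn for item in segment.split('.')]
    parts = node_selector.split('.')
    if len(flat_fqn) < len(parts):            # selector deeper than the fqn
        return False
    for i, selector_part in enumerate(parts):
        if selector_part == SELECTOR_GLOB:
            return True
        if flat_fqn[i] != selector_part:
            return False
    return True
fqn = ['my_project', 'staging', 'stg_orders']  # invented fqn
assert is_selected(fqn, 'stg_orders')
assert is_selected(fqn, 'my_project.staging.*')
assert not is_selected(fqn, 'marts.stg_orders')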
@@ -150,20 +154,31 @@ class SelectorMethod(metaclass=abc.ABCMeta):
class QualifiedNameSelectorMethod(SelectorMethod):
def node_is_match(self, qualified_name: str, fqn: List[str]) -> bool:
"""Determine if a qualified name matches an fqn for all package
def node_is_match(
self,
qualified_name: List[str],
package_names: Set[str],
fqn: List[str],
) -> bool:
"""Determine if a qualfied name matches an fqn, given the set of package
names in the graph.
:param str qualified_name: The qualified name to match the nodes with
:param List[str] qualified_name: The components of the selector or node
name, split on '.'.
:param Set[str] package_names: The set of package names in the graph.
:param List[str] fqn: The node's fully qualified name in the graph.
"""
unscoped_fqn = fqn[1:]
if len(qualified_name) == 1 and fqn[-1] == qualified_name[0]:
return True
if is_selected_node(fqn, qualified_name):
return True
# Match nodes across different packages
elif is_selected_node(unscoped_fqn, qualified_name):
return True
if qualified_name[0] in package_names:
if is_selected_node(fqn, qualified_name):
return True
for package_name in package_names:
local_qualified_node_name = [package_name] + qualified_name
if is_selected_node(fqn, local_qualified_node_name):
return True
return False
@@ -174,9 +189,15 @@ class QualifiedNameSelectorMethod(SelectorMethod):
:param str selector: The selector or node name
"""
qualified_name = selector.split(".")
parsed_nodes = list(self.parsed_nodes(included_nodes))
package_names = {n.package_name for _, n in parsed_nodes}
for node, real_node in parsed_nodes:
if self.node_is_match(selector, real_node.fqn):
if self.node_is_match(
qualified_name,
package_names,
real_node.fqn,
):
yield node
@@ -379,7 +400,7 @@ class TestTypeSelectorMethod(SelectorMethod):
class StateSelectorMethod(SelectorMethod):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.modified_macros: Optional[List[str]] = None
self.macros_were_modified: Optional[List[str]] = None
def _macros_modified(self) -> List[str]:
# we checked in the caller!
@@ -392,74 +413,44 @@ class StateSelectorMethod(SelectorMethod):
modified = []
for uid, macro in new_macros.items():
name = f'{macro.package_name}.{macro.name}'
if uid in old_macros:
old_macro = old_macros[uid]
if macro.macro_sql != old_macro.macro_sql:
modified.append(uid)
modified.append(f'{name} changed')
else:
modified.append(uid)
modified.append(f'{name} added')
for uid, macro in old_macros.items():
if uid not in new_macros:
modified.append(uid)
modified.append(f'{macro.package_name}.{macro.name} removed')
return modified
return modified[:3]
def recursively_check_macros_modified(self, node):
# check if there are any changes in macros the first time
if self.modified_macros is None:
self.modified_macros = self._macros_modified()
# loop through all macros that this node depends on
for macro_uid in node.depends_on.macros:
# is this macro one of the modified macros?
if macro_uid in self.modified_macros:
return True
# if not, and this macro depends on other macros, keep looping
macro = self.manifest.macros[macro_uid]
if len(macro.depends_on.macros) > 0:
return self.recursively_check_macros_modified(macro)
else:
return False
return False
def check_modified(self, old: Optional[SelectorTarget], new: SelectorTarget) -> bool:
different_contents = not new.same_contents(old) # type: ignore
upstream_macro_change = self.recursively_check_macros_modified(new)
return different_contents or upstream_macro_change
def check_modified_body(self, old: Optional[SelectorTarget], new: SelectorTarget) -> bool:
if hasattr(new, "same_body"):
return not new.same_body(old) # type: ignore
else:
return False
def check_modified_configs(self, old: Optional[SelectorTarget], new: SelectorTarget) -> bool:
if hasattr(new, "same_config"):
return not new.same_config(old) # type: ignore
else:
return False
def check_modified_persisted_descriptions(
self, old: Optional[SelectorTarget], new: SelectorTarget
def check_modified(
self,
old: Optional[SelectorTarget],
new: SelectorTarget,
) -> bool:
if hasattr(new, "same_persisted_description"):
return not new.same_persisted_description(old) # type: ignore
else:
return False
# check if there are any changes in macros, if so, log a warning the
# first time
if self.macros_were_modified is None:
self.macros_were_modified = self._macros_modified()
if self.macros_were_modified:
log_str = ', '.join(self.macros_were_modified)
logger.warning(warning_tag(
f'During a state comparison, dbt detected a change in '
f'macros. This will not be marked as a modification. Some '
f'macros: {log_str}'
))
def check_modified_relation(
self, old: Optional[SelectorTarget], new: SelectorTarget
return not new.same_contents(old) # type: ignore
def check_new(
self,
old: Optional[SelectorTarget],
new: SelectorTarget,
) -> bool:
if hasattr(new, "same_database_representation"):
return not new.same_database_representation(old) # type: ignore
else:
return False
def check_modified_macros(self, _, new: SelectorTarget) -> bool:
return self.recursively_check_macros_modified(new)
def check_new(self, old: Optional[SelectorTarget], new: SelectorTarget) -> bool:
return old is None
def search(
@@ -471,15 +462,8 @@ class StateSelectorMethod(SelectorMethod):
)
state_checks = {
# it's new if there is no old version
'new': lambda old, _: old is None,
# use methods defined above to compare properties of old + new
'modified': self.check_modified,
'modified.body': self.check_modified_body,
'modified.configs': self.check_modified_configs,
'modified.persisted_descriptions': self.check_modified_persisted_descriptions,
'modified.relation': self.check_modified_relation,
'modified.macros': self.check_modified_macros,
'new': self.check_new,
}
if selector in state_checks:
checker = state_checks[selector]

View File

@@ -66,7 +66,6 @@ class SelectionCriteria:
parents_depth: Optional[int]
children: bool
children_depth: Optional[int]
greedy: bool = False
def __post_init__(self):
if self.children and self.childrens_parents:
@@ -103,9 +102,7 @@ class SelectionCriteria:
return method_name, method_arguments
@classmethod
def selection_criteria_from_dict(
cls, raw: Any, dct: Dict[str, Any], greedy: bool = False
) -> 'SelectionCriteria':
def from_dict(cls, raw: Any, dct: Dict[str, Any]) -> 'SelectionCriteria':
if 'value' not in dct:
raise RuntimeException(
f'Invalid node spec "{raw}" - no search value!'
@@ -124,11 +121,10 @@ class SelectionCriteria:
parents_depth=parents_depth,
children=bool(dct.get('children')),
children_depth=children_depth,
greedy=greedy
)
@classmethod
def dict_from_single_spec(cls, raw: str, greedy: bool = False):
def dict_from_single_spec(cls, raw: str):
result = RAW_SELECTOR_PATTERN.match(raw)
if result is None:
return {'error': 'Invalid selector spec'}
@@ -148,13 +144,13 @@ class SelectionCriteria:
return dct
@classmethod
def from_single_spec(cls, raw: str, greedy: bool = False) -> 'SelectionCriteria':
def from_single_spec(cls, raw: str) -> 'SelectionCriteria':
result = RAW_SELECTOR_PATTERN.match(raw)
if result is None:
# bad spec!
raise RuntimeException(f'Invalid selector spec "{raw}"')
return cls.selection_criteria_from_dict(raw, result.groupdict(), greedy=greedy)
return cls.from_dict(raw, result.groupdict())
class BaseSelectionGroup(Iterable[SelectionSpec], metaclass=ABCMeta):

View File

@@ -2,27 +2,14 @@
from dataclasses import dataclass
from datetime import timedelta
from pathlib import Path
from typing import Tuple, AbstractSet, Union
from typing import NewType, Tuple, AbstractSet
from dbt.dataclass_schema import (
dbtClassMixin, ValidationError, StrEnum,
from hologram import (
FieldEncoder, JsonSchemaMixin, JsonDict, ValidationError
)
from hologram import FieldEncoder, JsonDict
from mashumaro.types import SerializableType
from hologram.helpers import StrEnum
class Port(int, SerializableType):
@classmethod
def _deserialize(cls, value: Union[int, str]) -> 'Port':
try:
value = int(value)
except ValueError:
raise ValidationError(f'Cannot encode {value} into port number')
return Port(value)
def _serialize(self) -> int:
return self
Port = NewType('Port', int)
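For the Port wrapper above, a tiny stand-alone sketch of the intended coercion (accept ints or numeric strings, reject junk); the real _deserialize raises dbt's ValidationError rather than ValueError.
def to_port(value):
    # mirrors Port._deserialize: coerce to int or fail loudly
    try:
        return int(value)
    except ValueError:
        raise ValueError(f'Cannot encode {value} into port number')
assert to_port('5432') == 5432
assert to_port(5439) == 5439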
class PortEncoder(FieldEncoder):
@@ -79,12 +66,12 @@ class NVEnum(StrEnum):
@dataclass
class NoValue(dbtClassMixin):
class NoValue(JsonSchemaMixin):
"""Sometimes, you want a way to say none that isn't None"""
novalue: NVEnum = NVEnum.novalue
dbtClassMixin.register_field_encoders({
JsonSchemaMixin.register_field_encoders({
Port: PortEncoder(),
timedelta: TimeDeltaFieldEncoder(),
Path: PathEncoder(),

View File

@@ -1,4 +1,4 @@
from dbt.dataclass_schema import StrEnum
from hologram.helpers import StrEnum
import json
from typing import Union, Dict, Any

View File

@@ -1,5 +1,5 @@
{% macro get_columns_in_query(select_sql) -%}
{{ return(adapter.dispatch('get_columns_in_query', 'dbt')(select_sql)) }}
{{ return(adapter.dispatch('get_columns_in_query')(select_sql)) }}
{% endmacro %}
{% macro default__get_columns_in_query(select_sql) %}
@@ -15,7 +15,7 @@
{% endmacro %}
{% macro create_schema(relation) -%}
{{ adapter.dispatch('create_schema', 'dbt')(relation) }}
{{ adapter.dispatch('create_schema')(relation) }}
{% endmacro %}
{% macro default__create_schema(relation) -%}
@@ -25,7 +25,7 @@
{% endmacro %}
{% macro drop_schema(relation) -%}
{{ adapter.dispatch('drop_schema', 'dbt')(relation) }}
{{ adapter.dispatch('drop_schema')(relation) }}
{% endmacro %}
{% macro default__drop_schema(relation) -%}
@@ -35,7 +35,7 @@
{% endmacro %}
{% macro create_table_as(temporary, relation, sql) -%}
{{ adapter.dispatch('create_table_as', 'dbt')(temporary, relation, sql) }}
{{ adapter.dispatch('create_table_as')(temporary, relation, sql) }}
{%- endmacro %}
{% macro default__create_table_as(temporary, relation, sql) -%}
@@ -51,31 +51,8 @@
{% endmacro %}
{% macro get_create_index_sql(relation, index_dict) -%}
{{ return(adapter.dispatch('get_create_index_sql', 'dbt')(relation, index_dict)) }}
{% endmacro %}
{% macro default__get_create_index_sql(relation, index_dict) -%}
{% do return(None) %}
{% endmacro %}
{% macro create_indexes(relation) -%}
{{ adapter.dispatch('create_indexes', 'dbt')(relation) }}
{%- endmacro %}
{% macro default__create_indexes(relation) -%}
{%- set _indexes = config.get('indexes', default=[]) -%}
{% for _index_dict in _indexes %}
{% set create_index_sql = get_create_index_sql(relation, _index_dict) %}
{% if create_index_sql %}
{% do run_query(create_index_sql) %}
{% endif %}
{% endfor %}
{% endmacro %}
{% macro create_view_as(relation, sql) -%}
{{ adapter.dispatch('create_view_as', 'dbt')(relation, sql) }}
{{ adapter.dispatch('create_view_as')(relation, sql) }}
{%- endmacro %}
{% macro default__create_view_as(relation, sql) -%}
@@ -89,7 +66,7 @@
{% macro get_catalog(information_schema, schemas) -%}
{{ return(adapter.dispatch('get_catalog', 'dbt')(information_schema, schemas)) }}
{{ return(adapter.dispatch('get_catalog')(information_schema, schemas)) }}
{%- endmacro %}
{% macro default__get_catalog(information_schema, schemas) -%}
@@ -104,7 +81,7 @@
{% macro get_columns_in_relation(relation) -%}
{{ return(adapter.dispatch('get_columns_in_relation', 'dbt')(relation)) }}
{{ return(adapter.dispatch('get_columns_in_relation')(relation)) }}
{% endmacro %}
{% macro sql_convert_columns_in_relation(table) -%}
@@ -121,13 +98,13 @@
{% endmacro %}
{% macro alter_column_type(relation, column_name, new_column_type) -%}
{{ return(adapter.dispatch('alter_column_type', 'dbt')(relation, column_name, new_column_type)) }}
{{ return(adapter.dispatch('alter_column_type')(relation, column_name, new_column_type)) }}
{% endmacro %}
{% macro alter_column_comment(relation, column_dict) -%}
{{ return(adapter.dispatch('alter_column_comment', 'dbt')(relation, column_dict)) }}
{{ return(adapter.dispatch('alter_column_comment')(relation, column_dict)) }}
{% endmacro %}
{% macro default__alter_column_comment(relation, column_dict) -%}
@@ -136,7 +113,7 @@
{% endmacro %}
{% macro alter_relation_comment(relation, relation_comment) -%}
{{ return(adapter.dispatch('alter_relation_comment', 'dbt')(relation, relation_comment)) }}
{{ return(adapter.dispatch('alter_relation_comment')(relation, relation_comment)) }}
{% endmacro %}
{% macro default__alter_relation_comment(relation, relation_comment) -%}
@@ -145,7 +122,7 @@
{% endmacro %}
{% macro persist_docs(relation, model, for_relation=true, for_columns=true) -%}
{{ return(adapter.dispatch('persist_docs', 'dbt')(relation, model, for_relation, for_columns)) }}
{{ return(adapter.dispatch('persist_docs')(relation, model, for_relation, for_columns)) }}
{% endmacro %}
{% macro default__persist_docs(relation, model, for_relation, for_columns) -%}
@@ -180,7 +157,7 @@
{% macro drop_relation(relation) -%}
{{ return(adapter.dispatch('drop_relation', 'dbt')(relation)) }}
{{ return(adapter.dispatch('drop_relation')(relation)) }}
{% endmacro %}
@@ -191,7 +168,7 @@
{% endmacro %}
{% macro truncate_relation(relation) -%}
{{ return(adapter.dispatch('truncate_relation', 'dbt')(relation)) }}
{{ return(adapter.dispatch('truncate_relation')(relation)) }}
{% endmacro %}
@@ -202,7 +179,7 @@
{% endmacro %}
{% macro rename_relation(from_relation, to_relation) -%}
{{ return(adapter.dispatch('rename_relation', 'dbt')(from_relation, to_relation)) }}
{{ return(adapter.dispatch('rename_relation')(from_relation, to_relation)) }}
{% endmacro %}
{% macro default__rename_relation(from_relation, to_relation) -%}
@@ -214,7 +191,7 @@
{% macro information_schema_name(database) %}
{{ return(adapter.dispatch('information_schema_name', 'dbt')(database)) }}
{{ return(adapter.dispatch('information_schema_name')(database)) }}
{% endmacro %}
{% macro default__information_schema_name(database) -%}
@@ -227,7 +204,7 @@
{% macro list_schemas(database) -%}
{{ return(adapter.dispatch('list_schemas', 'dbt')(database)) }}
{{ return(adapter.dispatch('list_schemas')(database)) }}
{% endmacro %}
{% macro default__list_schemas(database) -%}
@@ -241,7 +218,7 @@
{% macro check_schema_exists(information_schema, schema) -%}
{{ return(adapter.dispatch('check_schema_exists', 'dbt')(information_schema, schema)) }}
{{ return(adapter.dispatch('check_schema_exists')(information_schema, schema)) }}
{% endmacro %}
{% macro default__check_schema_exists(information_schema, schema) -%}
@@ -256,7 +233,7 @@
{% macro list_relations_without_caching(schema_relation) %}
{{ return(adapter.dispatch('list_relations_without_caching', 'dbt')(schema_relation)) }}
{{ return(adapter.dispatch('list_relations_without_caching')(schema_relation)) }}
{% endmacro %}
@@ -267,7 +244,7 @@
{% macro current_timestamp() -%}
{{ adapter.dispatch('current_timestamp', 'dbt')() }}
{{ adapter.dispatch('current_timestamp')() }}
{%- endmacro %}
@@ -278,7 +255,7 @@
{% macro collect_freshness(source, loaded_at_field, filter) %}
{{ return(adapter.dispatch('collect_freshness', 'dbt')(source, loaded_at_field, filter))}}
{{ return(adapter.dispatch('collect_freshness')(source, loaded_at_field, filter))}}
{% endmacro %}
@@ -296,7 +273,7 @@
{% endmacro %}
{% macro make_temp_relation(base_relation, suffix='__dbt_tmp') %}
{{ return(adapter.dispatch('make_temp_relation', 'dbt')(base_relation, suffix))}}
{{ return(adapter.dispatch('make_temp_relation')(base_relation, suffix))}}
{% endmacro %}
{% macro default__make_temp_relation(base_relation, suffix) %}
@@ -311,34 +288,3 @@
{{ config.set('sql_header', caller()) }}
{%- endmacro %}
{% macro alter_relation_add_remove_columns(relation, add_columns = none, remove_columns = none) -%}
{{ return(adapter.dispatch('alter_relation_add_remove_columns', 'dbt')(relation, add_columns, remove_columns)) }}
{% endmacro %}
{% macro default__alter_relation_add_remove_columns(relation, add_columns, remove_columns) %}
{% if add_columns is none %}
{% set add_columns = [] %}
{% endif %}
{% if remove_columns is none %}
{% set remove_columns = [] %}
{% endif %}
{% set sql -%}
alter {{ relation.type }} {{ relation }}
{% for column in add_columns %}
add column {{ column.name }} {{ column.data_type }}{{ ',' if not loop.last }}
{% endfor %}{{ ',' if remove_columns | length > 0 }}
{% for column in remove_columns %}
drop column {{ column.name }}{{ ',' if not loop.last }}
{% endfor %}
{%- endset -%}
{% do run_query(sql) %}
{% endmacro %}
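For context, the macro above renders a single ALTER statement. As a rough illustration (the relation and column names below are hypothetical, not taken from this diff), a call with one added and one dropped column would produce SQL shaped like:

    alter table "analytics"."dim_customers"
        add column order_count bigint,
        drop column legacy_flag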

View File

@@ -13,10 +13,6 @@
#}
{% macro generate_alias_name(custom_alias_name=none, node=none) -%}
{% do return(adapter.dispatch('generate_alias_name', 'dbt')(custom_alias_name, node)) %}
{%- endmacro %}
{% macro default__generate_alias_name(custom_alias_name=none, node=none) -%}
{%- if custom_alias_name is none -%}

View File

@@ -14,7 +14,7 @@
#}
{% macro generate_database_name(custom_database_name=none, node=none) -%}
{% do return(adapter.dispatch('generate_database_name', 'dbt')(custom_database_name, node)) %}
{% do return(adapter.dispatch('generate_database_name')(custom_database_name, node)) %}
{%- endmacro %}
{% macro default__generate_database_name(custom_database_name=none, node=none) -%}

View File

@@ -15,10 +15,6 @@
#}
{% macro generate_schema_name(custom_schema_name, node) -%}
{{ return(adapter.dispatch('generate_schema_name', 'dbt')(custom_schema_name, node)) }}
{% endmacro %}
{% macro default__generate_schema_name(custom_schema_name, node) -%}
{%- set default_schema = target.schema -%}
{%- if custom_schema_name is none -%}

View File

@@ -1,15 +0,0 @@
{% macro get_where_subquery(relation) -%}
{% do return(adapter.dispatch('get_where_subquery')(relation)) %}
{%- endmacro %}
{% macro default__get_where_subquery(relation) -%}
{% set where = config.get('where', '') %}
{% if where %}
{%- set filtered -%}
(select * from {{ relation }} where {{ where }}) dbt_subquery
{%- endset -%}
{% do return(filtered) %}
{%- else -%}
{% do return(relation) %}
{%- endif -%}
{%- endmacro %}
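As a sketch of what this buys you: when a generic test is configured with a `where` clause, the test's `{{ model }}` reference is swapped for a filtered subquery. Assuming a hypothetical `orders` relation and the config `where: "order_date >= '2020-01-01'"`, the rendered replacement would look roughly like:

    (select * from "analytics"."orders" where order_date >= '2020-01-01') dbt_subquery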

View File

@@ -1,24 +1,23 @@
{% macro get_merge_sql(target, source, unique_key, dest_columns, predicates=none) -%}
{{ adapter.dispatch('get_merge_sql', 'dbt')(target, source, unique_key, dest_columns, predicates) }}
{{ adapter.dispatch('get_merge_sql')(target, source, unique_key, dest_columns, predicates) }}
{%- endmacro %}
{% macro get_delete_insert_merge_sql(target, source, unique_key, dest_columns) -%}
{{ adapter.dispatch('get_delete_insert_merge_sql', 'dbt')(target, source, unique_key, dest_columns) }}
{{ adapter.dispatch('get_delete_insert_merge_sql')(target, source, unique_key, dest_columns) }}
{%- endmacro %}
{% macro get_insert_overwrite_merge_sql(target, source, dest_columns, predicates, include_sql_header=false) -%}
{{ adapter.dispatch('get_insert_overwrite_merge_sql', 'dbt')(target, source, dest_columns, predicates, include_sql_header) }}
{{ adapter.dispatch('get_insert_overwrite_merge_sql')(target, source, dest_columns, predicates, include_sql_header) }}
{%- endmacro %}
{% macro default__get_merge_sql(target, source, unique_key, dest_columns, predicates) -%}
{%- set predicates = [] if predicates is none else [] + predicates -%}
{%- set dest_cols_csv = get_quoted_csv(dest_columns | map(attribute="name")) -%}
{%- set update_columns = config.get('merge_update_columns', default = dest_columns | map(attribute="quoted") | list) -%}
{%- set sql_header = config.get('sql_header', none) -%}
{% if unique_key %}
@@ -38,8 +37,8 @@
{% if unique_key %}
when matched then update set
{% for column_name in update_columns -%}
{{ column_name }} = DBT_INTERNAL_SOURCE.{{ column_name }}
{% for column in dest_columns -%}
{{ adapter.quote(column.name) }} = DBT_INTERNAL_SOURCE.{{ adapter.quote(column.name) }}
{%- if not loop.last %}, {%- endif %}
{%- endfor %}
{% endif %}
@@ -79,7 +78,7 @@
(
select {{ dest_cols_csv }}
from {{ source }}
)
);
{%- endmacro %}
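To make the merge shape concrete, here is roughly what the default merge macro renders for a hypothetical `fct_orders` model with `unique_key='order_id'` (relation and column names are illustrative, and the column list is abbreviated):

    merge into "analytics"."fct_orders" as DBT_INTERNAL_DEST
        using "fct_orders__dbt_tmp" as DBT_INTERNAL_SOURCE
        on DBT_INTERNAL_SOURCE.order_id = DBT_INTERNAL_DEST.order_id

        when matched then update set
            "order_id" = DBT_INTERNAL_SOURCE."order_id",
            "status" = DBT_INTERNAL_SOURCE."status"

        when not matched then insert
            ("order_id", "status")
        values
            ("order_id", "status")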

View File

@@ -72,12 +72,3 @@
{% endif %}
{% do return(config_full_refresh) %}
{% endmacro %}
{% macro should_store_failures() %}
{% set config_store_failures = config.get('store_failures') %}
{% if config_store_failures is none %}
{% set config_store_failures = flags.STORE_FAILURES %}
{% endif %}
{% do return(config_store_failures) %}
{% endmacro %}

View File

@@ -1,6 +1,5 @@
{% macro incremental_upsert(tmp_relation, target_relation, unique_key=none, statement_name="main") %}
{%- set dest_columns = adapter.get_columns_in_relation(target_relation) -%}
{%- set dest_cols_csv = dest_columns | map(attribute='quoted') | join(', ') -%}

View File

@@ -5,26 +5,7 @@
{% set target_relation = this.incorporate(type='table') %}
{% set existing_relation = load_relation(this) %}
{% set tmp_relation = make_temp_relation(target_relation) %}
{%- set full_refresh_mode = (should_full_refresh()) -%}
{% set on_schema_change = incremental_validate_on_schema_change(config.get('on_schema_change'), default='ignore') %}
{% set tmp_identifier = model['name'] + '__dbt_tmp' %}
{% set backup_identifier = model['name'] + "__dbt_backup" %}
-- the intermediate_ and backup_ relations should not already exist in the database; get_relation
-- will return None in that case. Otherwise, we get a relation that we can drop
-- later, before we try to use this name for the current operation. This has to happen before
-- BEGIN, in a separate transaction
{% set preexisting_intermediate_relation = adapter.get_relation(identifier=tmp_identifier,
schema=schema,
database=database) %}
{% set preexisting_backup_relation = adapter.get_relation(identifier=backup_identifier,
schema=schema,
database=database) %}
{{ drop_relation_if_exists(preexisting_intermediate_relation) }}
{{ drop_relation_if_exists(preexisting_backup_relation) }}
{% set tmp_relation = make_temp_relation(this) %}
{{ run_hooks(pre_hooks, inside_transaction=False) }}
@@ -32,47 +13,32 @@
{{ run_hooks(pre_hooks, inside_transaction=True) }}
{% set to_drop = [] %}
{# -- first check whether we want to full refresh for source view or config reasons #}
{% set trigger_full_refresh = (full_refresh_mode or existing_relation.is_view) %}
{% if existing_relation is none %}
{% set build_sql = create_table_as(False, target_relation, sql) %}
{% elif trigger_full_refresh %}
{% elif existing_relation.is_view or should_full_refresh() %}
{#-- Make sure the backup doesn't exist so we don't encounter issues with the rename below #}
{% set tmp_identifier = model['name'] + '__dbt_tmp' %}
{% set backup_identifier = model['name'] + '__dbt_backup' %}
{% set intermediate_relation = existing_relation.incorporate(path={"identifier": tmp_identifier}) %}
{% set backup_identifier = existing_relation.identifier ~ "__dbt_backup" %}
{% set backup_relation = existing_relation.incorporate(path={"identifier": backup_identifier}) %}
{% do adapter.drop_relation(backup_relation) %}
{% set build_sql = create_table_as(False, intermediate_relation, sql) %}
{% set need_swap = true %}
{% do adapter.rename_relation(target_relation, backup_relation) %}
{% set build_sql = create_table_as(False, target_relation, sql) %}
{% do to_drop.append(backup_relation) %}
{% else %}
{% do run_query(create_table_as(True, tmp_relation, sql)) %}
{% do adapter.expand_target_column_types(
{% set tmp_relation = make_temp_relation(target_relation) %}
{% do run_query(create_table_as(True, tmp_relation, sql)) %}
{% do adapter.expand_target_column_types(
from_relation=tmp_relation,
to_relation=target_relation) %}
{% do process_schema_changes(on_schema_change, tmp_relation, existing_relation) %}
{% set build_sql = incremental_upsert(tmp_relation, target_relation, unique_key=unique_key) %}
{% set build_sql = incremental_upsert(tmp_relation, target_relation, unique_key=unique_key) %}
{% endif %}
{% call statement("main") %}
{{ build_sql }}
{% endcall %}
{% if need_swap %}
{% do adapter.rename_relation(target_relation, backup_relation) %}
{% do adapter.rename_relation(intermediate_relation, target_relation) %}
{% endif %}
{% do persist_docs(target_relation, model) %}
{% if existing_relation is none or existing_relation.is_view or should_full_refresh() %}
{% do create_indexes(target_relation) %}
{% endif %}
{{ run_hooks(post_hooks, inside_transaction=True) }}
-- `COMMIT` happens here
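For orientation, a minimal model that exercises this materialization might look like the following (model, ref, and column names are illustrative only):

    {{ config(materialized='incremental', unique_key='order_id') }}

    select * from {{ ref('stg_orders') }}
    {% if is_incremental() %}
      where updated_at > (select max(updated_at) from {{ this }})
    {% endif %}

On the first run (or a full refresh) the table is built with create_table_as; on subsequent runs the temp relation feeds incremental_upsert keyed on unique_key.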

View File

@@ -1,164 +0,0 @@
{% macro incremental_validate_on_schema_change(on_schema_change, default='ignore') %}
{% if on_schema_change not in ['sync_all_columns', 'append_new_columns', 'fail', 'ignore'] %}
{% set log_message = 'Invalid value for on_schema_change (%s) specified. Setting default value of %s.' % (on_schema_change, default) %}
{% do log(log_message) %}
{{ return(default) }}
{% else %}
{{ return(on_schema_change) }}
{% endif %}
{% endmacro %}
{% macro diff_columns(source_columns, target_columns) %}
{% set result = [] %}
{% set source_names = source_columns | map(attribute = 'column') | list %}
{% set target_names = target_columns | map(attribute = 'column') | list %}
{# --check whether the name attribute exists in the target - this does not perform a data type check #}
{% for sc in source_columns %}
{% if sc.name not in target_names %}
{{ result.append(sc) }}
{% endif %}
{% endfor %}
{{ return(result) }}
{% endmacro %}
{% macro diff_column_data_types(source_columns, target_columns) %}
{% set result = [] %}
{% for sc in source_columns %}
{% set tc = target_columns | selectattr("name", "equalto", sc.name) | list | first %}
{% if tc %}
{% if sc.data_type != tc.data_type %}
{{ result.append( { 'column_name': tc.name, 'new_type': sc.data_type } ) }}
{% endif %}
{% endif %}
{% endfor %}
{{ return(result) }}
{% endmacro %}
{% macro check_for_schema_changes(source_relation, target_relation) %}
{% set schema_changed = False %}
{%- set source_columns = adapter.get_columns_in_relation(source_relation) -%}
{%- set target_columns = adapter.get_columns_in_relation(target_relation) -%}
{%- set source_not_in_target = diff_columns(source_columns, target_columns) -%}
{%- set target_not_in_source = diff_columns(target_columns, source_columns) -%}
{% set new_target_types = diff_column_data_types(source_columns, target_columns) %}
{% if source_not_in_target != [] %}
{% set schema_changed = True %}
{% elif target_not_in_source != [] or new_target_types != [] %}
{% set schema_changed = True %}
{% elif new_target_types != [] %}
{% set schema_changed = True %}
{% endif %}
{% set changes_dict = {
'schema_changed': schema_changed,
'source_not_in_target': source_not_in_target,
'target_not_in_source': target_not_in_source,
'new_target_types': new_target_types
} %}
{% set msg %}
In {{ target_relation }}:
Schema changed: {{ schema_changed }}
Source columns not in target: {{ source_not_in_target }}
Target columns not in source: {{ target_not_in_source }}
New column types: {{ new_target_types }}
{% endset %}
{% do log(msg) %}
{{ return(changes_dict) }}
{% endmacro %}
{% macro sync_column_schemas(on_schema_change, target_relation, schema_changes_dict) %}
{%- set add_to_target_arr = schema_changes_dict['source_not_in_target'] -%}
{%- if on_schema_change == 'append_new_columns'-%}
{%- if add_to_target_arr | length > 0 -%}
{%- do alter_relation_add_remove_columns(target_relation, add_to_target_arr, none) -%}
{%- endif -%}
{% elif on_schema_change == 'sync_all_columns' %}
{%- set remove_from_target_arr = schema_changes_dict['target_not_in_source'] -%}
{%- set new_target_types = schema_changes_dict['new_target_types'] -%}
{% if add_to_target_arr | length > 0 or remove_from_target_arr | length > 0 %}
{%- do alter_relation_add_remove_columns(target_relation, add_to_target_arr, remove_from_target_arr) -%}
{% endif %}
{% if new_target_types != [] %}
{% for ntt in new_target_types %}
{% set column_name = ntt['column_name'] %}
{% set new_type = ntt['new_type'] %}
{% do alter_column_type(target_relation, column_name, new_type) %}
{% endfor %}
{% endif %}
{% endif %}
{% set schema_change_message %}
In {{ target_relation }}:
Schema change approach: {{ on_schema_change }}
Columns added: {{ add_to_target_arr }}
Columns removed: {{ remove_from_target_arr }}
Data types changed: {{ new_target_types }}
{% endset %}
{% do log(schema_change_message) %}
{% endmacro %}
{% macro process_schema_changes(on_schema_change, source_relation, target_relation) %}
{% if on_schema_change != 'ignore' %}
{% set schema_changes_dict = check_for_schema_changes(source_relation, target_relation) %}
{% if schema_changes_dict['schema_changed'] %}
{% if on_schema_change == 'fail' %}
{% set fail_msg %}
The source and target schemas on this incremental model are out of sync!
They can be reconciled in several ways:
      - Set the `on_schema_change` config to either append_new_columns or sync_all_columns, depending on your situation.
      - Re-run the incremental model with `full_refresh: True` to update the target schema.
      - Update the schema manually and re-run the process.
{% endset %}
{% do exceptions.raise_compiler_error(fail_msg) %}
{# -- unless we ignore, run the sync operation per the config #}
{% else %}
{% do sync_column_schemas(on_schema_change, target_relation, schema_changes_dict) %}
{% endif %}
{% endif %}
{% endif %}
{% endmacro %}
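The on_schema_change behavior above is opted into per model. A hedged example of the config (model name and key are hypothetical):

    {{ config(
        materialized='incremental',
        unique_key='id',
        on_schema_change='sync_all_columns'
    ) }}

With sync_all_columns, new source columns are added to the target, columns missing from the source are dropped, and changed data types are altered; fail raises the compiler error shown above instead.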

View File

@@ -1,6 +1,14 @@
{% macro create_csv_table(model, agate_table) -%}
{{ adapter.dispatch('create_csv_table', 'dbt')(model, agate_table) }}
{{ adapter.dispatch('create_csv_table')(model, agate_table) }}
{%- endmacro %}
{% macro reset_csv_table(model, full_refresh, old_relation, agate_table) -%}
{{ adapter.dispatch('reset_csv_table')(model, full_refresh, old_relation, agate_table) }}
{%- endmacro %}
{% macro load_csv_rows(model, agate_table) -%}
{{ adapter.dispatch('load_csv_rows')(model, agate_table) }}
{%- endmacro %}
{% macro default__create_csv_table(model, agate_table) %}
@@ -25,9 +33,6 @@
{{ return(sql) }}
{% endmacro %}
{% macro reset_csv_table(model, full_refresh, old_relation, agate_table) -%}
{{ adapter.dispatch('reset_csv_table', 'dbt')(model, full_refresh, old_relation, agate_table) }}
{%- endmacro %}
{% macro default__reset_csv_table(model, full_refresh, old_relation, agate_table) %}
{% set sql = "" %}
@@ -42,21 +47,6 @@
{{ return(sql) }}
{% endmacro %}
{% macro get_binding_char() -%}
{{ adapter.dispatch('get_binding_char', 'dbt')() }}
{%- endmacro %}
{% macro default__get_binding_char() %}
{{ return('%s') }}
{% endmacro %}
{% macro get_batch_size() -%}
{{ adapter.dispatch('get_batch_size', 'dbt')() }}
{%- endmacro %}
{% macro default__get_batch_size() %}
{{ return(10000) }}
{% endmacro %}
{% macro get_seed_column_quoted_csv(model, column_names) %}
{%- set quote_seed_column = model['config'].get('quote_columns', None) -%}
@@ -69,48 +59,48 @@
{{ return(dest_cols_csv) }}
{% endmacro %}
{% macro load_csv_rows(model, agate_table) -%}
{{ adapter.dispatch('load_csv_rows', 'dbt')(model, agate_table) }}
{%- endmacro %}
{% macro basic_load_csv_rows(model, batch_size, agate_table) %}
{% set cols_sql = get_seed_column_quoted_csv(model, agate_table.column_names) %}
{% set bindings = [] %}
{% set statements = [] %}
{% for chunk in agate_table.rows | batch(batch_size) %}
{% set bindings = [] %}
{% for row in chunk %}
{% do bindings.extend(row) %}
{% endfor %}
{% set sql %}
insert into {{ this.render() }} ({{ cols_sql }}) values
{% for row in chunk -%}
({%- for column in agate_table.column_names -%}
%s
{%- if not loop.last%},{%- endif %}
{%- endfor -%})
{%- if not loop.last%},{%- endif %}
{%- endfor %}
{% endset %}
{% do adapter.add_query(sql, bindings=bindings, abridge_sql_log=True) %}
{% if loop.index0 == 0 %}
{% do statements.append(sql) %}
{% endif %}
{% endfor %}
{# Return SQL so we can render it out into the compiled files #}
{{ return(statements[0]) }}
{% endmacro %}
{% macro default__load_csv_rows(model, agate_table) %}
{% set batch_size = get_batch_size() %}
{% set cols_sql = get_seed_column_quoted_csv(model, agate_table.column_names) %}
{% set bindings = [] %}
{% set statements = [] %}
{% for chunk in agate_table.rows | batch(batch_size) %}
{% set bindings = [] %}
{% for row in chunk %}
{% do bindings.extend(row) %}
{% endfor %}
{% set sql %}
insert into {{ this.render() }} ({{ cols_sql }}) values
{% for row in chunk -%}
({%- for column in agate_table.column_names -%}
{{ get_binding_char() }}
{%- if not loop.last%},{%- endif %}
{%- endfor -%})
{%- if not loop.last%},{%- endif %}
{%- endfor %}
{% endset %}
{% do adapter.add_query(sql, bindings=bindings, abridge_sql_log=True) %}
{% if loop.index0 == 0 %}
{% do statements.append(sql) %}
{% endif %}
{% endfor %}
{# Return SQL so we can render it out into the compiled files #}
{{ return(statements[0]) }}
{{ return(basic_load_csv_rows(model, 10000, agate_table) )}}
{% endmacro %}
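Either version of the row loader builds batched inserts with one bind parameter per value. For a hypothetical two-column seed, each batch's statement looks roughly like:

    insert into "analytics"."country_codes" ("country_name", "country_code") values
        (%s, %s),
        (%s, %s),
        (%s, %s)

with the actual row values passed separately as query bindings via adapter.add_query.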
{% materialization seed, default %}
{%- set identifier = model['alias'] -%}
@@ -152,10 +142,6 @@
{% set target_relation = this.incorporate(type='table') %}
{% do persist_docs(target_relation, model) %}
{% if full_refresh_mode or not exists_as_table %}
{% do create_indexes(target_relation) %}
{% endif %}
{{ run_hooks(post_hooks, inside_transaction=True) }}
-- `COMMIT` happens here

View File

@@ -2,7 +2,7 @@
Add new columns to the table if applicable
#}
{% macro create_columns(relation, columns) %}
{{ adapter.dispatch('create_columns', 'dbt')(relation, columns) }}
{{ adapter.dispatch('create_columns')(relation, columns) }}
{% endmacro %}
{% macro default__create_columns(relation, columns) %}
@@ -15,7 +15,7 @@
{% macro post_snapshot(staging_relation) %}
{{ adapter.dispatch('post_snapshot', 'dbt')(staging_relation) }}
{{ adapter.dispatch('post_snapshot')(staging_relation) }}
{% endmacro %}
{% macro default__post_snapshot(staging_relation) %}
@@ -263,10 +263,6 @@
{% do persist_docs(target_relation, model) %}
{% if not target_relation_exists %}
{% do create_indexes(target_relation) %}
{% endif %}
{{ run_hooks(post_hooks, inside_transaction=True) }}
{{ adapter.commit() }}

View File

@@ -1,6 +1,6 @@
{% macro snapshot_merge_sql(target, source, insert_cols) -%}
{{ adapter.dispatch('snapshot_merge_sql', 'dbt')(target, source, insert_cols) }}
{{ adapter.dispatch('snapshot_merge_sql')(target, source, insert_cols) }}
{%- endmacro %}
@@ -21,6 +21,7 @@
and DBT_INTERNAL_SOURCE.dbt_change_type = 'insert'
then insert ({{ insert_cols_csv }})
values ({{ insert_cols_csv }})
;
{% endmacro %}
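Rendered against a hypothetical snapshot (relation and column names illustrative; insert_cols_csv is whatever column list the snapshot materialization passes in), the default snapshot merge comes out roughly as:

    merge into "snapshots"."orders_snapshot" as DBT_INTERNAL_DEST
    using "orders_snapshot__dbt_tmp" as DBT_INTERNAL_SOURCE
    on DBT_INTERNAL_SOURCE.dbt_scd_id = DBT_INTERNAL_DEST.dbt_scd_id

    when matched and DBT_INTERNAL_DEST.dbt_valid_to is null
     and DBT_INTERNAL_SOURCE.dbt_change_type in ('update', 'delete')
        then update set dbt_valid_to = DBT_INTERNAL_SOURCE.dbt_valid_to

    when not matched and DBT_INTERNAL_SOURCE.dbt_change_type = 'insert'
        then insert ("id", "dbt_scd_id", "dbt_valid_from", "dbt_valid_to")
        values ("id", "dbt_scd_id", "dbt_valid_from", "dbt_valid_to")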

View File

@@ -36,7 +36,7 @@
Create SCD Hash SQL fields cross-db
#}
{% macro snapshot_hash_arguments(args) -%}
{{ adapter.dispatch('snapshot_hash_arguments', 'dbt')(args) }}
{{ adapter.dispatch('snapshot_hash_arguments')(args) }}
{%- endmacro %}
@@ -52,7 +52,7 @@
Get the current time cross-db
#}
{% macro snapshot_get_time() -%}
{{ adapter.dispatch('snapshot_get_time', 'dbt')() }}
{{ adapter.dispatch('snapshot_get_time')() }}
{%- endmacro %}
{% macro default__snapshot_get_time() -%}
@@ -75,7 +75,7 @@
table instead of assuming that the user-supplied {{ updated_at }}
will be present in the historical data.
See https://github.com/dbt-labs/dbt/issues/2350
See https://github.com/fishtown-analytics/dbt/issues/2350
*/ #}
{% set row_changed_expr -%}
({{ snapshotted_rel }}.dbt_valid_from < {{ current_rel }}.{{ updated_at }})
@@ -94,7 +94,7 @@
{% macro snapshot_string_as_time(timestamp) -%}
{{ adapter.dispatch('snapshot_string_as_time', 'dbt')(timestamp) }}
{{ adapter.dispatch('snapshot_string_as_time')(timestamp) }}
{%- endmacro %}
@@ -144,7 +144,7 @@
{% if now is none or now is undefined -%}
{%- do exceptions.raise_compiler_error('Could not get a snapshot start time from the database') -%}
{%- endif %}
{% set updated_at = config.get('updated_at', snapshot_string_as_time(now)) %}
{% set updated_at = snapshot_string_as_time(now) %}
{% set column_added = false %}

View File

@@ -12,12 +12,7 @@
schema=schema,
database=database,
type='table') -%}
-- the intermediate_relation should not already exist in the database; get_relation
-- will return None in that case. Otherwise, we get a relation that we can drop
-- later, before we try to use this name for the current operation
{%- set preexisting_intermediate_relation = adapter.get_relation(identifier=tmp_identifier,
schema=schema,
database=database) -%}
/*
See ../view/view.sql for more information about this relation.
*/
@@ -26,15 +21,14 @@
schema=schema,
database=database,
type=backup_relation_type) -%}
-- as above, the backup_relation should not already exist
{%- set preexisting_backup_relation = adapter.get_relation(identifier=backup_identifier,
schema=schema,
database=database) -%}
{%- set exists_as_table = (old_relation is not none and old_relation.is_table) -%}
{%- set exists_as_view = (old_relation is not none and old_relation.is_view) -%}
-- drop the temp relations if they exist already in the database
{{ drop_relation_if_exists(preexisting_intermediate_relation) }}
{{ drop_relation_if_exists(preexisting_backup_relation) }}
-- drop the temp relations if they exist for some reason
{{ adapter.drop_relation(intermediate_relation) }}
{{ adapter.drop_relation(backup_relation) }}
{{ run_hooks(pre_hooks, inside_transaction=False) }}
@@ -48,13 +42,11 @@
-- cleanup
{% if old_relation is not none %}
{{ adapter.rename_relation(old_relation, backup_relation) }}
{{ adapter.rename_relation(target_relation, backup_relation) }}
{% endif %}
{{ adapter.rename_relation(intermediate_relation, target_relation) }}
{% do create_indexes(target_relation) %}
{{ run_hooks(post_hooks, inside_transaction=True) }}
{% do persist_docs(target_relation, model) %}

View File

@@ -1,64 +0,0 @@
{% macro get_test_sql(main_sql, fail_calc, warn_if, error_if, limit) -%}
{{ adapter.dispatch('get_test_sql', 'dbt')(main_sql, fail_calc, warn_if, error_if, limit) }}
{%- endmacro %}
{% macro default__get_test_sql(main_sql, fail_calc, warn_if, error_if, limit) -%}
select
{{ fail_calc }} as failures,
{{ fail_calc }} {{ warn_if }} as should_warn,
{{ fail_calc }} {{ error_if }} as should_error
from (
{{ main_sql }}
{{ "limit " ~ limit if limit != none }}
) dbt_internal_test
{%- endmacro %}
{%- materialization test, default -%}
{% set relations = [] %}
{% if should_store_failures() %}
{% set identifier = model['alias'] %}
{% set old_relation = adapter.get_relation(database=database, schema=schema, identifier=identifier) %}
{% set target_relation = api.Relation.create(
identifier=identifier, schema=schema, database=database, type='table') -%} %}
{% if old_relation %}
{% do adapter.drop_relation(old_relation) %}
{% endif %}
{% call statement(auto_begin=True) %}
{{ create_table_as(False, target_relation, sql) }}
{% endcall %}
{% do relations.append(target_relation) %}
{% set main_sql %}
select *
from {{ target_relation }}
{% endset %}
{{ adapter.commit() }}
{% else %}
{% set main_sql = sql %}
{% endif %}
{% set limit = config.get('limit') %}
{% set fail_calc = config.get('fail_calc') %}
{% set warn_if = config.get('warn_if') %}
{% set error_if = config.get('error_if') %}
{% call statement('main', fetch_result=True) -%}
{{ get_test_sql(main_sql, fail_calc, warn_if, error_if, limit)}}
{%- endcall %}
{{ return({'relations': relations}) }}
{%- endmaterialization -%}
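Tests pick up the knobs referenced above (store_failures, limit, warn_if, error_if, fail_calc) from config. A hedged example of overriding them in a singular test's SQL file (values are illustrative):

    {{ config(
        store_failures = true,
        limit = 500,
        warn_if = '>10',
        error_if = '>100'
    ) }}

With store_failures enabled, the failing rows are written to a table first and the main statement then counts from that relation, as in the materialization above.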

View File

@@ -1,10 +1,9 @@
{% macro handle_existing_table(full_refresh, old_relation) %}
{{ adapter.dispatch('handle_existing_table', 'dbt')(full_refresh, old_relation) }}
{{ adapter.dispatch("handle_existing_table", packages=['dbt'])(full_refresh, old_relation) }}
{% endmacro %}
{% macro default__handle_existing_table(full_refresh, old_relation) %}
{{ log("Dropping relation " ~ old_relation ~ " because it is of type " ~ old_relation.type) }}
{{ adapter.drop_relation(old_relation) }}
{% endmacro %}
@@ -20,7 +19,7 @@
*/
#}
{% macro create_or_replace_view() %}
{% macro create_or_replace_view(run_outside_transaction_hooks=True) %}
{%- set identifier = model['alias'] -%}
{%- set old_relation = adapter.get_relation(database=database, schema=schema, identifier=identifier) -%}
@@ -31,7 +30,13 @@
identifier=identifier, schema=schema, database=database,
type='view') -%}
{{ run_hooks(pre_hooks) }}
{% if run_outside_transaction_hooks %}
-- no transactions on BigQuery
{{ run_hooks(pre_hooks, inside_transaction=False) }}
{% endif %}
-- `BEGIN` happens here on Snowflake
{{ run_hooks(pre_hooks, inside_transaction=True) }}
-- If there's a table with the same name and we weren't told to full refresh,
-- that's an error. If we were told to full refresh, drop it. This behavior differs
@@ -45,7 +50,14 @@
{{ create_view_as(target_relation, sql) }}
{%- endcall %}
{{ run_hooks(post_hooks) }}
{{ run_hooks(post_hooks, inside_transaction=True) }}
{{ adapter.commit() }}
{% if run_outside_transaction_hooks %}
-- No transactions on BigQuery
{{ run_hooks(post_hooks, inside_transaction=False) }}
{% endif %}
{{ return({'relations': [target_relation]}) }}
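The run_outside_transaction_hooks flag in the older signature exists so adapters without transactions can skip the outside-transaction hook phases. A sketch of how an adapter materialization might opt out (adapter name and wiring are assumptions, not taken from this diff):

    {% materialization view, adapter='bigquery' %}
        {{ return(create_or_replace_view(run_outside_transaction_hooks=False)) }}
    {% endmaterialization %}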

View File

@@ -9,12 +9,7 @@
type='view') -%}
{%- set intermediate_relation = api.Relation.create(identifier=tmp_identifier,
schema=schema, database=database, type='view') -%}
-- the intermediate_relation should not already exist in the database; get_relation
-- will return None in that case. Otherwise, we get a relation that we can drop
-- later, before we try to use this name for the current operation
{%- set preexisting_intermediate_relation = adapter.get_relation(identifier=tmp_identifier,
schema=schema,
database=database) -%}
/*
This relation (probably) doesn't exist yet. If it does exist, it's a leftover from
a previous run, and we're going to try to drop it immediately. At the end of this
@@ -32,16 +27,14 @@
{%- set backup_relation = api.Relation.create(identifier=backup_identifier,
schema=schema, database=database,
type=backup_relation_type) -%}
-- as above, the backup_relation should not already exist
{%- set preexisting_backup_relation = adapter.get_relation(identifier=backup_identifier,
schema=schema,
database=database) -%}
{%- set exists_as_view = (old_relation is not none and old_relation.is_view) -%}
{{ run_hooks(pre_hooks, inside_transaction=False) }}
-- drop the temp relations if they exist already in the database
{{ drop_relation_if_exists(preexisting_intermediate_relation) }}
{{ drop_relation_if_exists(preexisting_backup_relation) }}
-- drop the temp relations if they exist for some reason
{{ adapter.drop_relation(intermediate_relation) }}
{{ adapter.drop_relation(backup_relation) }}
-- `BEGIN` happens here:
{{ run_hooks(pre_hooks, inside_transaction=True) }}
@@ -54,7 +47,7 @@
-- cleanup
-- move the existing view out of the way
{% if old_relation is not none %}
{{ adapter.rename_relation(old_relation, backup_relation) }}
{{ adapter.rename_relation(target_relation, backup_relation) }}
{% endif %}
{{ adapter.rename_relation(intermediate_relation, target_relation) }}

View File

@@ -1,33 +1,43 @@
{% macro default__test_accepted_values(model, column_name, values, quote=True) %}
{% macro default__test_accepted_values(model, values) %}
{% set column_name = kwargs.get('column_name', kwargs.get('field')) %}
{% set quote_values = kwargs.get('quote', True) %}
with all_values as (
select
{{ column_name }} as value_field,
count(*) as n_records
select distinct
{{ column_name }} as value_field
from {{ model }}
group by 1
),
validation_errors as (
select
value_field
from all_values
where value_field not in (
{% for value in values -%}
{% if quote_values -%}
'{{ value }}'
{%- else -%}
{{ value }}
{%- endif -%}
{%- if not loop.last -%},{%- endif %}
{%- endfor %}
)
)
select *
from all_values
where value_field not in (
{% for value in values -%}
{% if quote -%}
'{{ value }}'
{%- else -%}
{{ value }}
{%- endif -%}
{%- if not loop.last -%},{%- endif %}
{%- endfor %}
)
select count(*) as validation_errors
from validation_errors
{% endmacro %}
{% test accepted_values(model, column_name, values, quote=True) %}
{% set macro = adapter.dispatch('test_accepted_values', 'dbt') %}
{{ macro(model, column_name, values, quote) }}
{% endtest %}
{% macro test_accepted_values(model, values) %}
{% set macro = adapter.dispatch('test_accepted_values') %}
{{ macro(model, values, **kwargs) }}
{% endmacro %}
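Assuming a hypothetical orders model with a status column limited to three values, the newer form of this test compiles to roughly:

    with all_values as (
        select
            status as value_field,
            count(*) as n_records
        from "analytics"."orders"
        group by 1
    ),
    validation_errors as (
        select value_field
        from all_values
        where value_field not in ('placed', 'shipped', 'returned')
    )
    select * from validation_errors

Any rows returned are the unexpected values, and the test materialization's fail_calc turns them into a failure count.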

Some files were not shown because too many files have changed in this diff.