mirror of
https://github.com/dbt-labs/dbt-core
synced 2025-12-18 23:41:28 +00:00
Compare commits
65 Commits
v0.21.0b2
...
regression
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
42d71f5a97 | ||
|
|
58dc3b1829 | ||
|
|
bb9a400d77 | ||
|
|
01366be246 | ||
|
|
b034e2bc66 | ||
|
|
3bc9f49f7a | ||
|
|
09a61177b4 | ||
|
|
e8d3efef9f | ||
|
|
f9a46c15b9 | ||
|
|
7dc491b7ba | ||
|
|
59d131d3ac | ||
|
|
6563d09ba7 | ||
|
|
05dea18b62 | ||
|
|
d7177c7d89 | ||
|
|
35f0fea804 | ||
|
|
8953c7c533 | ||
|
|
76c59a5545 | ||
|
|
5c0a31b829 | ||
|
|
243bc3d41d | ||
|
|
67b594a950 | ||
|
|
2493c21649 | ||
|
|
d3826e670f | ||
|
|
4b5b1696b7 | ||
|
|
abb59ef14f | ||
|
|
3b7c2816b9 | ||
|
|
484517416f | ||
|
|
39447055d3 | ||
|
|
95cca277c9 | ||
|
|
96083dcaf5 | ||
|
|
75b4cf691b | ||
|
|
7c9171b00b | ||
|
|
3effade266 | ||
|
|
44e7390526 | ||
|
|
c141798abc | ||
|
|
df7ec3fb37 | ||
|
|
90e5507d03 | ||
|
|
332d3494b3 | ||
|
|
6393f5a5d7 | ||
|
|
ce97a9ca7a | ||
|
|
9af071bfe4 | ||
|
|
45a41202f3 | ||
|
|
9768999ca1 | ||
|
|
fc0d11c0a5 | ||
|
|
e6344205bb | ||
|
|
9d7a6556ef | ||
|
|
15f4add0b8 | ||
|
|
464becacd0 | ||
|
|
51a76d0d63 | ||
|
|
052e54d43a | ||
|
|
9e796671dd | ||
|
|
a9a6254f52 | ||
|
|
8b3a09c7ae | ||
|
|
6aa4d812d4 | ||
|
|
07fa719fb0 | ||
|
|
650b34ae24 | ||
|
|
0a935855f3 | ||
|
|
d500aae4dc | ||
|
|
370d3e746d | ||
|
|
ab06149c81 | ||
|
|
e72895c7c9 | ||
|
|
fe4a67daa4 | ||
|
|
09ea989d81 | ||
|
|
7fa14b6948 | ||
|
|
d4974cd35c | ||
|
|
459178811b |
@@ -1,5 +1,5 @@
|
||||
[bumpversion]
|
||||
current_version = 0.21.0b1
|
||||
current_version = 0.21.0b2
|
||||
parse = (?P<major>\d+)
|
||||
\.(?P<minor>\d+)
|
||||
\.(?P<patch>\d+)
|
||||
|
||||
@@ -1,101 +0,0 @@
|
||||
version: 2.1
|
||||
jobs:
|
||||
build-wheels:
|
||||
docker: &test_only
|
||||
- image: fishtownanalytics/test-container:12
|
||||
environment:
|
||||
DBT_INVOCATION_ENV: circle
|
||||
DOCKER_TEST_DATABASE_HOST: "database"
|
||||
TOX_PARALLEL_NO_SPINNER: 1
|
||||
steps:
|
||||
- checkout
|
||||
- run:
|
||||
name: Build wheels
|
||||
command: |
|
||||
python3.8 -m venv "${PYTHON_ENV}"
|
||||
export PYTHON_BIN="${PYTHON_ENV}/bin/python"
|
||||
$PYTHON_BIN -m pip install -U pip setuptools
|
||||
$PYTHON_BIN -m pip install -r requirements.txt
|
||||
$PYTHON_BIN -m pip install -r dev-requirements.txt
|
||||
/bin/bash ./scripts/build-wheels.sh
|
||||
$PYTHON_BIN ./scripts/collect-dbt-contexts.py > ./dist/context_metadata.json
|
||||
$PYTHON_BIN ./scripts/collect-artifact-schema.py > ./dist/artifact_schemas.json
|
||||
environment:
|
||||
PYTHON_ENV: /home/tox/build_venv/
|
||||
- store_artifacts:
|
||||
path: ./dist
|
||||
destination: dist
|
||||
integration-postgres:
|
||||
docker:
|
||||
- image: fishtownanalytics/test-container:12
|
||||
environment:
|
||||
DBT_INVOCATION_ENV: circle
|
||||
DOCKER_TEST_DATABASE_HOST: "database"
|
||||
TOX_PARALLEL_NO_SPINNER: 1
|
||||
- image: postgres
|
||||
name: database
|
||||
environment:
|
||||
POSTGRES_USER: "root"
|
||||
POSTGRES_PASSWORD: "password"
|
||||
POSTGRES_DB: "dbt"
|
||||
steps:
|
||||
- checkout
|
||||
- run:
|
||||
name: Setup postgres
|
||||
command: bash test/setup_db.sh
|
||||
environment:
|
||||
PGHOST: database
|
||||
PGUSER: root
|
||||
PGPASSWORD: password
|
||||
PGDATABASE: postgres
|
||||
- run:
|
||||
name: Postgres integration tests
|
||||
command: tox -p -e py36-postgres,py38-postgres -- -v -n4
|
||||
no_output_timeout: 30m
|
||||
- store_artifacts:
|
||||
path: ./logs
|
||||
integration-snowflake:
|
||||
docker: *test_only
|
||||
steps:
|
||||
- checkout
|
||||
- run:
|
||||
name: Snowflake integration tests
|
||||
command: tox -p -e py36-snowflake,py38-snowflake -- -v -n4
|
||||
no_output_timeout: 30m
|
||||
- store_artifacts:
|
||||
path: ./logs
|
||||
integration-redshift:
|
||||
docker: *test_only
|
||||
steps:
|
||||
- checkout
|
||||
- run:
|
||||
name: Redshift integration tests
|
||||
command: tox -p -e py36-redshift,py38-redshift -- -v -n4
|
||||
no_output_timeout: 30m
|
||||
- store_artifacts:
|
||||
path: ./logs
|
||||
integration-bigquery:
|
||||
docker: *test_only
|
||||
steps:
|
||||
- checkout
|
||||
- run:
|
||||
name: Bigquery integration test
|
||||
command: tox -p -e py36-bigquery,py38-bigquery -- -v -n4
|
||||
no_output_timeout: 30m
|
||||
- store_artifacts:
|
||||
path: ./logs
|
||||
|
||||
workflows:
|
||||
version: 2
|
||||
test-everything:
|
||||
jobs:
|
||||
- integration-postgres
|
||||
- integration-redshift
|
||||
- integration-bigquery
|
||||
- integration-snowflake
|
||||
- build-wheels:
|
||||
requires:
|
||||
- integration-postgres
|
||||
- integration-redshift
|
||||
- integration-bigquery
|
||||
- integration-snowflake
|
||||
10
.github/actions/setup-postgres-linux/action.yml
vendored
Normal file
10
.github/actions/setup-postgres-linux/action.yml
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
name: "Set up postgres (linux)"
|
||||
description: "Set up postgres service on linux vm for dbt integration tests"
|
||||
runs:
|
||||
using: "composite"
|
||||
steps:
|
||||
- shell: bash
|
||||
run: |
|
||||
sudo systemctl start postgresql.service
|
||||
pg_isready
|
||||
sudo -u postgres bash ${{ github.action_path }}/setup_db.sh
|
||||
1
.github/actions/setup-postgres-linux/setup_db.sh
vendored
Symbolic link
1
.github/actions/setup-postgres-linux/setup_db.sh
vendored
Symbolic link
@@ -0,0 +1 @@
|
||||
../../../test/setup_db.sh
|
||||
24
.github/actions/setup-postgres-macos/action.yml
vendored
Normal file
24
.github/actions/setup-postgres-macos/action.yml
vendored
Normal file
@@ -0,0 +1,24 @@
|
||||
name: "Set up postgres (macos)"
|
||||
description: "Set up postgres service on macos vm for dbt integration tests"
|
||||
runs:
|
||||
using: "composite"
|
||||
steps:
|
||||
- shell: bash
|
||||
run: |
|
||||
brew services start postgresql
|
||||
echo "Check PostgreSQL service is running"
|
||||
i=10
|
||||
COMMAND='pg_isready'
|
||||
while [ $i -gt -1 ]; do
|
||||
if [ $i == 0 ]; then
|
||||
echo "PostgreSQL service not ready, all attempts exhausted"
|
||||
exit 1
|
||||
fi
|
||||
echo "Check PostgreSQL service status"
|
||||
eval $COMMAND && break
|
||||
echo "PostgreSQL service not ready, wait 10 more sec, attempts left: $i"
|
||||
sleep 10
|
||||
((i--))
|
||||
done
|
||||
createuser -s postgres
|
||||
bash ${{ github.action_path }}/setup_db.sh
|
||||
1
.github/actions/setup-postgres-macos/setup_db.sh
vendored
Symbolic link
1
.github/actions/setup-postgres-macos/setup_db.sh
vendored
Symbolic link
@@ -0,0 +1 @@
|
||||
../../../test/setup_db.sh
|
||||
12
.github/actions/setup-postgres-windows/action.yml
vendored
Normal file
12
.github/actions/setup-postgres-windows/action.yml
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
name: "Set up postgres (windows)"
|
||||
description: "Set up postgres service on windows vm for dbt integration tests"
|
||||
runs:
|
||||
using: "composite"
|
||||
steps:
|
||||
- shell: pwsh
|
||||
run: |
|
||||
$pgService = Get-Service -Name postgresql*
|
||||
Set-Service -InputObject $pgService -Status running -StartupType automatic
|
||||
Start-Process -FilePath "$env:PGBIN\pg_isready" -Wait -PassThru
|
||||
$env:Path += ";$env:PGBIN"
|
||||
bash ${{ github.action_path }}/setup_db.sh
|
||||
1
.github/actions/setup-postgres-windows/setup_db.sh
vendored
Symbolic link
1
.github/actions/setup-postgres-windows/setup_db.sh
vendored
Symbolic link
@@ -0,0 +1 @@
|
||||
../../../test/setup_db.sh
|
||||
11
.github/pull_request_template.md
vendored
11
.github/pull_request_template.md
vendored
@@ -9,14 +9,13 @@ resolves #
|
||||
resolves #1234
|
||||
-->
|
||||
|
||||
|
||||
### Description
|
||||
|
||||
<!--- Describe the Pull Request here -->
|
||||
|
||||
|
||||
### Checklist
|
||||
- [ ] I have signed the [CLA](https://docs.getdbt.com/docs/contributor-license-agreements)
|
||||
- [ ] I have run this code in development and it appears to resolve the stated issue
|
||||
- [ ] This PR includes tests, or tests are not required/relevant for this PR
|
||||
- [ ] I have updated the `CHANGELOG.md` and added information about my change to the "dbt next" section.
|
||||
|
||||
- [ ] I have signed the [CLA](https://docs.getdbt.com/docs/contributor-license-agreements)
|
||||
- [ ] I have run this code in development and it appears to resolve the stated issue
|
||||
- [ ] This PR includes tests, or tests are not required/relevant for this PR
|
||||
- [ ] I have updated the `CHANGELOG.md` and added information about my change to the "dbt next" section.
|
||||
|
||||
95
.github/scripts/integration-test-matrix.js
vendored
Normal file
95
.github/scripts/integration-test-matrix.js
vendored
Normal file
@@ -0,0 +1,95 @@
|
||||
module.exports = ({ context }) => {
|
||||
const defaultPythonVersion = "3.8";
|
||||
const supportedPythonVersions = ["3.6", "3.7", "3.8", "3.9"];
|
||||
const supportedAdapters = ["snowflake", "postgres", "bigquery", "redshift"];
|
||||
|
||||
// if PR, generate matrix based on files changed and PR labels
|
||||
if (context.eventName.includes("pull_request")) {
|
||||
// `changes` is a list of adapter names that have related
|
||||
// file changes in the PR
|
||||
// ex: ['postgres', 'snowflake']
|
||||
const changes = JSON.parse(process.env.CHANGES);
|
||||
const labels = context.payload.pull_request.labels.map(({ name }) => name);
|
||||
console.log("labels", labels);
|
||||
console.log("changes", changes);
|
||||
const testAllLabel = labels.includes("test all");
|
||||
const include = [];
|
||||
|
||||
for (const adapter of supportedAdapters) {
|
||||
if (
|
||||
changes.includes(adapter) ||
|
||||
testAllLabel ||
|
||||
labels.includes(`test ${adapter}`)
|
||||
) {
|
||||
for (const pythonVersion of supportedPythonVersions) {
|
||||
if (
|
||||
pythonVersion === defaultPythonVersion ||
|
||||
labels.includes(`test python${pythonVersion}`) ||
|
||||
testAllLabel
|
||||
) {
|
||||
// always run tests on ubuntu by default
|
||||
include.push({
|
||||
os: "ubuntu-latest",
|
||||
adapter,
|
||||
"python-version": pythonVersion,
|
||||
});
|
||||
|
||||
if (labels.includes("test windows") || testAllLabel) {
|
||||
include.push({
|
||||
os: "windows-latest",
|
||||
adapter,
|
||||
"python-version": pythonVersion,
|
||||
});
|
||||
}
|
||||
|
||||
if (labels.includes("test macos") || testAllLabel) {
|
||||
include.push({
|
||||
os: "macos-latest",
|
||||
adapter,
|
||||
"python-version": pythonVersion,
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
console.log("matrix", { include });
|
||||
|
||||
return {
|
||||
include,
|
||||
};
|
||||
}
|
||||
// if not PR, generate matrix of python version, adapter, and operating
|
||||
// system to run integration tests on
|
||||
|
||||
const include = [];
|
||||
// run for all adapters and python versions on ubuntu
|
||||
for (const adapter of supportedAdapters) {
|
||||
for (const pythonVersion of supportedPythonVersions) {
|
||||
include.push({
|
||||
os: 'ubuntu-latest',
|
||||
adapter: adapter,
|
||||
"python-version": pythonVersion,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// additionally include runs for all adapters, on macos and windows,
|
||||
// but only for the default python version
|
||||
for (const adapter of supportedAdapters) {
|
||||
for (const operatingSystem of ["windows-latest", "macos-latest"]) {
|
||||
include.push({
|
||||
os: operatingSystem,
|
||||
adapter: adapter,
|
||||
"python-version": defaultPythonVersion,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
console.log("matrix", { include });
|
||||
|
||||
return {
|
||||
include,
|
||||
};
|
||||
};
|
||||
266
.github/workflows/integration.yml
vendored
Normal file
266
.github/workflows/integration.yml
vendored
Normal file
@@ -0,0 +1,266 @@
|
||||
# **what?**
|
||||
# This workflow runs all integration tests for supported OS
|
||||
# and python versions and core adapters. If triggered by PR,
|
||||
# the workflow will only run tests for adapters related
|
||||
# to code changes. Use the `test all` and `test ${adapter}`
|
||||
# label to run all or additional tests. Use `ok to test`
|
||||
# label to mark PRs from forked repositories that are safe
|
||||
# to run integration tests for. Requires secrets to run
|
||||
# against different warehouses.
|
||||
|
||||
# **why?**
|
||||
# This checks the functionality of dbt from a user's perspective
|
||||
# and attempts to catch functional regressions.
|
||||
|
||||
# **when?**
|
||||
# This workflow will run on every push to a protected branch
|
||||
# and when manually triggered. It will also run for all PRs, including
|
||||
# PRs from forks. The workflow will be skipped until there is a label
|
||||
# to mark the PR as safe to run.
|
||||
|
||||
name: Adapter Integration Tests
|
||||
|
||||
on:
|
||||
# pushes to release branches
|
||||
push:
|
||||
branches:
|
||||
- "main"
|
||||
- "develop"
|
||||
- "*.latest"
|
||||
- "releases/*"
|
||||
# all PRs, important to note that `pull_request_target` workflows
|
||||
# will run in the context of the target branch of a PR
|
||||
pull_request_target:
|
||||
# manual tigger
|
||||
workflow_dispatch:
|
||||
|
||||
# explicitly turn off permissions for `GITHUB_TOKEN`
|
||||
permissions: read-all
|
||||
|
||||
# will cancel previous workflows triggered by the same event and for the same ref for PRs or same SHA otherwise
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event_name }}-${{ contains(github.event_name, 'pull_request') && github.event.pull_request.head.ref || github.sha }}
|
||||
cancel-in-progress: true
|
||||
|
||||
# sets default shell to bash, for all operating systems
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
|
||||
jobs:
|
||||
# generate test metadata about what files changed and the testing matrix to use
|
||||
test-metadata:
|
||||
# run if not a PR from a forked repository or has a label to mark as safe to test
|
||||
if: >-
|
||||
github.event_name != 'pull_request_target' ||
|
||||
github.event.pull_request.head.repo.full_name == github.repository ||
|
||||
contains(github.event.pull_request.labels.*.name, 'ok to test')
|
||||
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
outputs:
|
||||
matrix: ${{ steps.generate-matrix.outputs.result }}
|
||||
|
||||
steps:
|
||||
- name: Check out the repository (non-PR)
|
||||
if: github.event_name != 'pull_request_target'
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
- name: Check out the repository (PR)
|
||||
if: github.event_name == 'pull_request_target'
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
persist-credentials: false
|
||||
ref: ${{ github.event.pull_request.head.sha }}
|
||||
|
||||
- name: Check if relevant files changed
|
||||
# https://github.com/marketplace/actions/paths-changes-filter
|
||||
# For each filter, it sets output variable named by the filter to the text:
|
||||
# 'true' - if any of changed files matches any of filter rules
|
||||
# 'false' - if none of changed files matches any of filter rules
|
||||
# also, returns:
|
||||
# `changes` - JSON array with names of all filters matching any of the changed files
|
||||
uses: dorny/paths-filter@v2
|
||||
id: get-changes
|
||||
with:
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
filters: |
|
||||
postgres:
|
||||
- 'core/**'
|
||||
- 'plugins/postgres/**'
|
||||
- 'dev-requirements.txt'
|
||||
snowflake:
|
||||
- 'core/**'
|
||||
- 'plugins/snowflake/**'
|
||||
bigquery:
|
||||
- 'core/**'
|
||||
- 'plugins/bigquery/**'
|
||||
redshift:
|
||||
- 'core/**'
|
||||
- 'plugins/redshift/**'
|
||||
- 'plugins/postgres/**'
|
||||
|
||||
- name: Generate integration test matrix
|
||||
id: generate-matrix
|
||||
uses: actions/github-script@v4
|
||||
env:
|
||||
CHANGES: ${{ steps.get-changes.outputs.changes }}
|
||||
with:
|
||||
script: |
|
||||
const script = require('./.github/scripts/integration-test-matrix.js')
|
||||
const matrix = script({ context })
|
||||
console.log(matrix)
|
||||
return matrix
|
||||
|
||||
test:
|
||||
name: ${{ matrix.adapter }} / python ${{ matrix.python-version }} / ${{ matrix.os }}
|
||||
|
||||
# run if not a PR from a forked repository or has a label to mark as safe to test
|
||||
# also checks that the matrix generated is not empty
|
||||
if: >-
|
||||
needs.test-metadata.outputs.matrix &&
|
||||
fromJSON( needs.test-metadata.outputs.matrix ).include[0] &&
|
||||
(
|
||||
github.event_name != 'pull_request_target' ||
|
||||
github.event.pull_request.head.repo.full_name == github.repository ||
|
||||
contains(github.event.pull_request.labels.*.name, 'ok to test')
|
||||
)
|
||||
|
||||
runs-on: ${{ matrix.os }}
|
||||
|
||||
needs: test-metadata
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix: ${{ fromJSON(needs.test-metadata.outputs.matrix) }}
|
||||
|
||||
env:
|
||||
TOXENV: integration-${{ matrix.adapter }}
|
||||
PYTEST_ADDOPTS: "-v --color=yes -n4 --csv integration_results.csv"
|
||||
DBT_INVOCATION_ENV: github-actions
|
||||
|
||||
steps:
|
||||
- name: Check out the repository
|
||||
if: github.event_name != 'pull_request_target'
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
# explicity checkout the branch for the PR,
|
||||
# this is necessary for the `pull_request_target` event
|
||||
- name: Check out the repository (PR)
|
||||
if: github.event_name == 'pull_request_target'
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
persist-credentials: false
|
||||
ref: ${{ github.event.pull_request.head.sha }}
|
||||
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
|
||||
- name: Set up postgres (linux)
|
||||
if: |
|
||||
matrix.adapter == 'postgres' &&
|
||||
runner.os == 'Linux'
|
||||
uses: ./.github/actions/setup-postgres-linux
|
||||
|
||||
- name: Set up postgres (macos)
|
||||
if: |
|
||||
matrix.adapter == 'postgres' &&
|
||||
runner.os == 'macOS'
|
||||
uses: ./.github/actions/setup-postgres-macos
|
||||
|
||||
- name: Set up postgres (windows)
|
||||
if: |
|
||||
matrix.adapter == 'postgres' &&
|
||||
runner.os == 'Windows'
|
||||
uses: ./.github/actions/setup-postgres-windows
|
||||
|
||||
- name: Install python dependencies
|
||||
run: |
|
||||
pip install --upgrade pip
|
||||
pip install tox
|
||||
pip --version
|
||||
tox --version
|
||||
|
||||
- name: Run tox (postgres)
|
||||
if: matrix.adapter == 'postgres'
|
||||
run: tox
|
||||
|
||||
- name: Run tox (redshift)
|
||||
if: matrix.adapter == 'redshift'
|
||||
env:
|
||||
REDSHIFT_TEST_DBNAME: ${{ secrets.REDSHIFT_TEST_DBNAME }}
|
||||
REDSHIFT_TEST_PASS: ${{ secrets.REDSHIFT_TEST_PASS }}
|
||||
REDSHIFT_TEST_USER: ${{ secrets.REDSHIFT_TEST_USER }}
|
||||
REDSHIFT_TEST_PORT: ${{ secrets.REDSHIFT_TEST_PORT }}
|
||||
REDSHIFT_TEST_HOST: ${{ secrets.REDSHIFT_TEST_HOST }}
|
||||
run: tox
|
||||
|
||||
- name: Run tox (snowflake)
|
||||
if: matrix.adapter == 'snowflake'
|
||||
env:
|
||||
SNOWFLAKE_TEST_ACCOUNT: ${{ secrets.SNOWFLAKE_TEST_ACCOUNT }}
|
||||
SNOWFLAKE_TEST_PASSWORD: ${{ secrets.SNOWFLAKE_TEST_PASSWORD }}
|
||||
SNOWFLAKE_TEST_USER: ${{ secrets.SNOWFLAKE_TEST_USER }}
|
||||
SNOWFLAKE_TEST_WAREHOUSE: ${{ secrets.SNOWFLAKE_TEST_WAREHOUSE }}
|
||||
SNOWFLAKE_TEST_OAUTH_REFRESH_TOKEN: ${{ secrets.SNOWFLAKE_TEST_OAUTH_REFRESH_TOKEN }}
|
||||
SNOWFLAKE_TEST_OAUTH_CLIENT_ID: ${{ secrets.SNOWFLAKE_TEST_OAUTH_CLIENT_ID }}
|
||||
SNOWFLAKE_TEST_OAUTH_CLIENT_SECRET: ${{ secrets.SNOWFLAKE_TEST_OAUTH_CLIENT_SECRET }}
|
||||
SNOWFLAKE_TEST_ALT_DATABASE: ${{ secrets.SNOWFLAKE_TEST_ALT_DATABASE }}
|
||||
SNOWFLAKE_TEST_ALT_WAREHOUSE: ${{ secrets.SNOWFLAKE_TEST_ALT_WAREHOUSE }}
|
||||
SNOWFLAKE_TEST_DATABASE: ${{ secrets.SNOWFLAKE_TEST_DATABASE }}
|
||||
SNOWFLAKE_TEST_QUOTED_DATABASE: ${{ secrets.SNOWFLAKE_TEST_QUOTED_DATABASE }}
|
||||
SNOWFLAKE_TEST_ROLE: ${{ secrets.SNOWFLAKE_TEST_ROLE }}
|
||||
run: tox
|
||||
|
||||
- name: Run tox (bigquery)
|
||||
if: matrix.adapter == 'bigquery'
|
||||
env:
|
||||
BIGQUERY_TEST_SERVICE_ACCOUNT_JSON: ${{ secrets.BIGQUERY_TEST_SERVICE_ACCOUNT_JSON }}
|
||||
BIGQUERY_TEST_ALT_DATABASE: ${{ secrets.BIGQUERY_TEST_ALT_DATABASE }}
|
||||
run: tox
|
||||
|
||||
- uses: actions/upload-artifact@v2
|
||||
if: always()
|
||||
with:
|
||||
name: logs
|
||||
path: ./logs
|
||||
|
||||
- name: Get current date
|
||||
if: always()
|
||||
id: date
|
||||
run: echo "::set-output name=date::$(date +'%Y-%m-%dT%H_%M_%S')" #no colons allowed for artifacts
|
||||
|
||||
- uses: actions/upload-artifact@v2
|
||||
if: always()
|
||||
with:
|
||||
name: integration_results_${{ matrix.python-version }}_${{ matrix.os }}_${{ matrix.adapter }}-${{ steps.date.outputs.date }}.csv
|
||||
path: integration_results.csv
|
||||
|
||||
require-label-comment:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
needs: test
|
||||
|
||||
permissions:
|
||||
pull-requests: write
|
||||
|
||||
steps:
|
||||
- name: Needs permission PR comment
|
||||
if: >-
|
||||
needs.test.result == 'skipped' &&
|
||||
github.event_name == 'pull_request_target' &&
|
||||
github.event.pull_request.head.repo.full_name != github.repository
|
||||
uses: unsplash/comment-on-pr@master
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
msg: |
|
||||
"You do not have permissions to run integration tests, @dbt-labs/core "\
|
||||
"needs to label this PR with `ok to test` in order to run integration tests!"
|
||||
check_for_duplicate_msg: true
|
||||
206
.github/workflows/main.yml
vendored
Normal file
206
.github/workflows/main.yml
vendored
Normal file
@@ -0,0 +1,206 @@
|
||||
# **what?**
|
||||
# Runs code quality checks, unit tests, and verifies python build on
|
||||
# all code commited to the repository. This workflow should not
|
||||
# require any secrets since it runs for PRs from forked repos.
|
||||
# By default, secrets are not passed to workflows running from
|
||||
# a forked repo.
|
||||
|
||||
# **why?**
|
||||
# Ensure code for dbt meets a certain quality standard.
|
||||
|
||||
# **when?**
|
||||
# This will run for all PRs, when code is pushed to a release
|
||||
# branch, and when manually triggered.
|
||||
|
||||
name: Tests and Code Checks
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- "main"
|
||||
- "develop"
|
||||
- "*.latest"
|
||||
- "releases/*"
|
||||
pull_request:
|
||||
workflow_dispatch:
|
||||
|
||||
permissions: read-all
|
||||
|
||||
# will cancel previous workflows triggered by the same event and for the same ref for PRs or same SHA otherwise
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event_name }}-${{ contains(github.event_name, 'pull_request') && github.event.pull_request.head.ref || github.sha }}
|
||||
cancel-in-progress: true
|
||||
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
|
||||
jobs:
|
||||
code-quality:
|
||||
name: ${{ matrix.toxenv }}
|
||||
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
toxenv: [flake8, mypy]
|
||||
|
||||
env:
|
||||
TOXENV: ${{ matrix.toxenv }}
|
||||
PYTEST_ADDOPTS: "-v --color=yes"
|
||||
|
||||
steps:
|
||||
- name: Check out the repository
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v2
|
||||
|
||||
- name: Install python dependencies
|
||||
run: |
|
||||
pip install --upgrade pip
|
||||
pip install tox
|
||||
pip --version
|
||||
tox --version
|
||||
|
||||
- name: Run tox
|
||||
run: tox
|
||||
|
||||
unit:
|
||||
name: unit test / python ${{ matrix.python-version }}
|
||||
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
python-version: [3.6, 3.7, 3.8] # TODO: support unit testing for python 3.9 (https://github.com/dbt-labs/dbt/issues/3689)
|
||||
|
||||
env:
|
||||
TOXENV: "unit"
|
||||
PYTEST_ADDOPTS: "-v --color=yes --csv unit_results.csv"
|
||||
|
||||
steps:
|
||||
- name: Check out the repository
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
|
||||
- name: Install python dependencies
|
||||
run: |
|
||||
pip install --upgrade pip
|
||||
pip install tox
|
||||
pip --version
|
||||
tox --version
|
||||
|
||||
- name: Run tox
|
||||
run: tox
|
||||
|
||||
- name: Get current date
|
||||
if: always()
|
||||
id: date
|
||||
run: echo "::set-output name=date::$(date +'%Y-%m-%dT%H_%M_%S')" #no colons allowed for artifacts
|
||||
|
||||
- uses: actions/upload-artifact@v2
|
||||
if: always()
|
||||
with:
|
||||
name: unit_results_${{ matrix.python-version }}-${{ steps.date.outputs.date }}.csv
|
||||
path: unit_results.csv
|
||||
|
||||
build:
|
||||
name: build packages
|
||||
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Check out the repository
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: 3.8
|
||||
|
||||
- name: Install python dependencies
|
||||
run: |
|
||||
pip install --upgrade pip
|
||||
pip install --upgrade setuptools wheel twine check-wheel-contents
|
||||
pip --version
|
||||
|
||||
- name: Build distributions
|
||||
run: ./scripts/build-dist.sh
|
||||
|
||||
- name: Show distributions
|
||||
run: ls -lh dist/
|
||||
|
||||
- name: Check distribution descriptions
|
||||
run: |
|
||||
twine check dist/*
|
||||
|
||||
- name: Check wheel contents
|
||||
run: |
|
||||
check-wheel-contents dist/*.whl --ignore W007,W008
|
||||
|
||||
- uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: dist
|
||||
path: dist/
|
||||
|
||||
test-build:
|
||||
name: verify packages / python ${{ matrix.python-version }} / ${{ matrix.os }}
|
||||
|
||||
needs: build
|
||||
|
||||
runs-on: ${{ matrix.os }}
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
os: [ubuntu-latest, macos-latest, windows-latest]
|
||||
python-version: [3.6, 3.7, 3.8, 3.9]
|
||||
|
||||
steps:
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
|
||||
- name: Install python dependencies
|
||||
run: |
|
||||
pip install --upgrade pip
|
||||
pip install --upgrade wheel
|
||||
pip --version
|
||||
|
||||
- uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: dist
|
||||
path: dist/
|
||||
|
||||
- name: Show distributions
|
||||
run: ls -lh dist/
|
||||
|
||||
- name: Install wheel distributions
|
||||
run: |
|
||||
find ./dist/*.whl -maxdepth 1 -type f | xargs pip install --force-reinstall --find-links=dist/
|
||||
|
||||
- name: Check wheel distributions
|
||||
run: |
|
||||
dbt --version
|
||||
|
||||
- name: Install source distributions
|
||||
run: |
|
||||
find ./dist/*.gz -maxdepth 1 -type f | xargs pip install --force-reinstall --find-links=dist/
|
||||
|
||||
- name: Check source distributions
|
||||
run: |
|
||||
dbt --version
|
||||
18
.github/workflows/performance.yml
vendored
18
.github/workflows/performance.yml
vendored
@@ -1,15 +1,13 @@
|
||||
|
||||
name: Performance Regression Testing
|
||||
name: Performance Regression Tests
|
||||
# Schedule triggers
|
||||
on:
|
||||
# runs twice a day at 10:05am and 10:05pm
|
||||
schedule:
|
||||
- cron: '5 10,22 * * *'
|
||||
- cron: "5 10,22 * * *"
|
||||
# Allows you to run this workflow manually from the Actions tab
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
|
||||
# checks fmt of runner code
|
||||
# purposefully not a dependency of any other job
|
||||
# will block merging, but not prevent developing
|
||||
@@ -83,7 +81,7 @@ jobs:
|
||||
- name: Setup Python
|
||||
uses: actions/setup-python@v2.2.2
|
||||
with:
|
||||
python-version: '3.8'
|
||||
python-version: "3.8"
|
||||
- name: install dbt
|
||||
run: pip install -r dev-requirements.txt -r editable-requirements.txt
|
||||
- name: install hyperfine
|
||||
@@ -116,11 +114,11 @@ jobs:
|
||||
- name: checkout latest
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
ref: '0.20.latest'
|
||||
ref: "0.20.latest"
|
||||
- name: Setup Python
|
||||
uses: actions/setup-python@v2.2.2
|
||||
with:
|
||||
python-version: '3.8'
|
||||
python-version: "3.8"
|
||||
- name: move repo up a level
|
||||
run: mkdir ${{ github.workspace }}/../baseline/ && cp -r ${{ github.workspace }} ${{ github.workspace }}/../baseline
|
||||
- name: "[debug] ls new dbt location"
|
||||
@@ -166,11 +164,13 @@ jobs:
|
||||
name: runner
|
||||
- name: change permissions
|
||||
run: chmod +x ./runner
|
||||
- name: make results directory
|
||||
run: mkdir ./final-output/
|
||||
- name: run calculation
|
||||
run: ./runner calculate -r ./
|
||||
run: ./runner calculate -r ./ -o ./final-output/
|
||||
# always attempt to upload the results even if there were regressions found
|
||||
- uses: actions/upload-artifact@v2
|
||||
if: ${{ always() }}
|
||||
with:
|
||||
name: final-calculations
|
||||
path: ./final_calculations.json
|
||||
path: ./final-output/*
|
||||
|
||||
139
.github/workflows/tests.yml
vendored
139
.github/workflows/tests.yml
vendored
@@ -1,139 +0,0 @@
|
||||
# This is a workflow to run our integration tests for windows and mac
|
||||
|
||||
name: dbt Tests
|
||||
|
||||
# Triggers
|
||||
on:
|
||||
# Triggers the workflow on push or pull request events and also adds a manual trigger
|
||||
push:
|
||||
branches:
|
||||
- 'develop'
|
||||
- '*.latest'
|
||||
- 'releases/*'
|
||||
pull_request:
|
||||
branches:
|
||||
- 'develop'
|
||||
- '*.latest'
|
||||
- 'pr/*'
|
||||
- 'releases/*'
|
||||
# Allows you to run this workflow manually from the Actions tab
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
PostgresIntegrationTest:
|
||||
runs-on: 'windows-latest' #TODO: Add Mac support
|
||||
environment: 'Postgres'
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: 'Install postgresql and set up database'
|
||||
shell: pwsh
|
||||
run: |
|
||||
$serviceName = Get-Service -Name postgresql*
|
||||
Set-Service -InputObject $serviceName -StartupType Automatic
|
||||
Start-Service -InputObject $serviceName
|
||||
& $env:PGBIN\createdb.exe -U postgres dbt
|
||||
& $env:PGBIN\psql.exe -U postgres -c "CREATE ROLE root WITH PASSWORD '$env:ROOT_PASSWORD';"
|
||||
& $env:PGBIN\psql.exe -U postgres -c "ALTER ROLE root WITH LOGIN;"
|
||||
& $env:PGBIN\psql.exe -U postgres -c "GRANT CREATE, CONNECT ON DATABASE dbt TO root WITH GRANT OPTION;"
|
||||
& $env:PGBIN\psql.exe -U postgres -c "CREATE ROLE noaccess WITH PASSWORD '$env:NOACCESS_PASSWORD' NOSUPERUSER;"
|
||||
& $env:PGBIN\psql.exe -U postgres -c "ALTER ROLE noaccess WITH LOGIN;"
|
||||
& $env:PGBIN\psql.exe -U postgres -c "GRANT CONNECT ON DATABASE dbt TO noaccess;"
|
||||
env:
|
||||
ROOT_PASSWORD: ${{ secrets.ROOT_PASSWORD }}
|
||||
NOACCESS_PASSWORD: ${{ secrets.NOACCESS_PASSWORD }}
|
||||
|
||||
- name: Setup Python
|
||||
uses: actions/setup-python@v2.2.2
|
||||
with:
|
||||
python-version: '3.7'
|
||||
architecture: 'x64'
|
||||
|
||||
- name: 'Install dependencies'
|
||||
run: python -m pip install --upgrade pip && pip install tox
|
||||
|
||||
- name: 'Run integration tests'
|
||||
run: python -m tox -e py-postgres -- -v -n4
|
||||
|
||||
# These three are all similar except secure environment variables, which MUST be passed along to their tasks,
|
||||
# but there's probably a better way to do this!
|
||||
SnowflakeIntegrationTest:
|
||||
strategy:
|
||||
matrix:
|
||||
os: [windows-latest, macos-latest]
|
||||
runs-on: ${{ matrix.os }}
|
||||
environment: 'Snowflake'
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Setup Python
|
||||
uses: actions/setup-python@v2.2.2
|
||||
with:
|
||||
python-version: '3.7'
|
||||
architecture: 'x64'
|
||||
|
||||
- name: 'Install dependencies'
|
||||
run: python -m pip install --upgrade pip && pip install tox
|
||||
|
||||
- name: 'Run integration tests'
|
||||
run: python -m tox -e py-snowflake -- -v -n4
|
||||
env:
|
||||
SNOWFLAKE_TEST_ACCOUNT: ${{ secrets.SNOWFLAKE_TEST_ACCOUNT }}
|
||||
SNOWFLAKE_TEST_PASSWORD: ${{ secrets.SNOWFLAKE_TEST_PASSWORD }}
|
||||
SNOWFLAKE_TEST_USER: ${{ secrets.SNOWFLAKE_TEST_USER }}
|
||||
SNOWFLAKE_TEST_WAREHOUSE: ${{ secrets.SNOWFLAKE_TEST_WAREHOUSE }}
|
||||
SNOWFLAKE_TEST_OAUTH_REFRESH_TOKEN: ${{ secrets.SNOWFLAKE_TEST_OAUTH_REFRESH_TOKEN }}
|
||||
SNOWFLAKE_TEST_OAUTH_CLIENT_ID: ${{ secrets.SNOWFLAKE_TEST_OAUTH_CLIENT_ID }}
|
||||
SNOWFLAKE_TEST_OAUTH_CLIENT_SECRET: ${{ secrets.SNOWFLAKE_TEST_OAUTH_CLIENT_SECRET }}
|
||||
SNOWFLAKE_TEST_ALT_DATABASE: ${{ secrets.SNOWFLAKE_TEST_ALT_DATABASE }}
|
||||
SNOWFLAKE_TEST_ALT_WAREHOUSE: ${{ secrets.SNOWFLAKE_TEST_ALT_WAREHOUSE }}
|
||||
SNOWFLAKE_TEST_DATABASE: ${{ secrets.SNOWFLAKE_TEST_DATABASE }}
|
||||
SNOWFLAKE_TEST_QUOTED_DATABASE: ${{ secrets.SNOWFLAKE_TEST_QUOTED_DATABASE }}
|
||||
SNOWFLAKE_TEST_ROLE: ${{ secrets.SNOWFLAKE_TEST_ROLE }}
|
||||
|
||||
BigQueryIntegrationTest:
|
||||
strategy:
|
||||
matrix:
|
||||
os: [windows-latest, macos-latest]
|
||||
runs-on: ${{ matrix.os }}
|
||||
environment: 'Bigquery'
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Setup Python
|
||||
uses: actions/setup-python@v2.2.2
|
||||
with:
|
||||
python-version: '3.7'
|
||||
architecture: 'x64'
|
||||
|
||||
- name: 'Install dependencies'
|
||||
run: python -m pip install --upgrade pip && pip install tox
|
||||
|
||||
- name: 'Run integration tests'
|
||||
run: python -m tox -e py-bigquery -- -v -n4
|
||||
env:
|
||||
BIGQUERY_SERVICE_ACCOUNT_JSON: ${{ secrets.BIGQUERY_SERVICE_ACCOUNT_JSON }}
|
||||
BIGQUERY_TEST_ALT_DATABASE: ${{ secrets.BIGQUERY_TEST_ALT_DATABASE }}
|
||||
|
||||
RedshiftIntegrationTest:
|
||||
strategy:
|
||||
matrix:
|
||||
os: [windows-latest, macos-latest]
|
||||
runs-on: ${{ matrix.os }}
|
||||
environment: 'Redshift'
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Setup Python
|
||||
uses: actions/setup-python@v2.2.2
|
||||
with:
|
||||
python-version: '3.7'
|
||||
architecture: 'x64'
|
||||
|
||||
- name: 'Install dependencies'
|
||||
run: python -m pip install --upgrade pip && pip install tox
|
||||
|
||||
- name: 'Run integration tests'
|
||||
run: python -m tox -e py-redshift -- -v -n4
|
||||
env:
|
||||
REDSHIFT_TEST_DBNAME: ${{ secrets.REDSHIFT_TEST_DBNAME }}
|
||||
REDSHIFT_TEST_PASS: ${{ secrets.REDSHIFT_TEST_PASS }}
|
||||
REDSHIFT_TEST_USER: ${{ secrets.REDSHIFT_TEST_USER }}
|
||||
REDSHIFT_TEST_PORT: ${{ secrets.REDSHIFT_TEST_PORT }}
|
||||
REDSHIFT_TEST_HOST: ${{ secrets.REDSHIFT_TEST_HOST }}
|
||||
61
.github/workflows/unit_tests.yml
vendored
61
.github/workflows/unit_tests.yml
vendored
@@ -1,61 +0,0 @@
|
||||
# This is a workflow to run our linting and unit tests for windows, mac, and linux
|
||||
|
||||
name: Linting and Unit Tests
|
||||
|
||||
# Triggers
|
||||
on:
|
||||
# Trigger on commits to develop and releases branches
|
||||
push:
|
||||
branches:
|
||||
- 'develop'
|
||||
- '*.latest'
|
||||
- 'releases/*'
|
||||
pull_request: # Trigger for all PRs
|
||||
workflow_dispatch: # Allow manual triggers
|
||||
|
||||
jobs:
|
||||
Linting:
|
||||
runs-on: ubuntu-latest #no need to run on every OS
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Setup Python
|
||||
uses: actions/setup-python@v2.2.2
|
||||
with:
|
||||
python-version: '3.6'
|
||||
architecture: 'x64'
|
||||
|
||||
- name: 'Install dependencies'
|
||||
run: python -m pip install --upgrade pip && pip install tox
|
||||
|
||||
- name: 'Linting'
|
||||
run: tox -e mypy,flake8 -- -v
|
||||
|
||||
UnitTest:
|
||||
strategy:
|
||||
matrix:
|
||||
os: [windows-latest, ubuntu-latest, macos-latest]
|
||||
runs-on: ${{ matrix.os }}
|
||||
needs: Linting
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Setup Python 3.6
|
||||
uses: actions/setup-python@v2.2.2
|
||||
with:
|
||||
python-version: '3.6'
|
||||
architecture: 'x64'
|
||||
- name: Setup Python 3.7
|
||||
uses: actions/setup-python@v2.2.2
|
||||
with:
|
||||
python-version: '3.7'
|
||||
architecture: 'x64'
|
||||
- name: Setup Python 3.8
|
||||
uses: actions/setup-python@v2.2.2
|
||||
with:
|
||||
python-version: '3.8'
|
||||
architecture: 'x64'
|
||||
|
||||
- name: 'Install dependencies'
|
||||
run: python -m pip install --upgrade pip && pip install tox
|
||||
|
||||
- name: 'Run unit tests'
|
||||
run: tox -p -e py36,py37,py38
|
||||
@@ -26,7 +26,7 @@ This is the docs website code. It comes from the dbt-docs repository, and is gen
|
||||
|
||||
## Adapters
|
||||
|
||||
dbt uses an adapter-plugin pattern to extend support to different databases, warehouses, query engines, etc. The four core adapters that are in the main repository, contained within the [`plugins`](plugins) subdirectory, are: Postgres Redshift, Snowflake and BigQuery. Other warehouses use adapter plugins defined in separate repositories (e.g. [dbt-spark](https://github.com/fishtown-analytics/dbt-spark), [dbt-presto](https://github.com/fishtown-analytics/dbt-presto)).
|
||||
dbt uses an adapter-plugin pattern to extend support to different databases, warehouses, query engines, etc. The four core adapters that are in the main repository, contained within the [`plugins`](plugins) subdirectory, are: Postgres Redshift, Snowflake and BigQuery. Other warehouses use adapter plugins defined in separate repositories (e.g. [dbt-spark](https://github.com/dbt-labs/dbt-spark), [dbt-presto](https://github.com/dbt-labs/dbt-presto)).
|
||||
|
||||
Each adapter is a mix of python, Jinja2, and SQL. The adapter code also makes heavy use of Jinja2 to wrap modular chunks of SQL functionality, define default implementations, and allow plugins to override it.
|
||||
|
||||
|
||||
2217
CHANGELOG.md
2217
CHANGELOG.md
File diff suppressed because it is too large
Load Diff
@@ -68,7 +68,7 @@ The `dbt` maintainers use labels to categorize open issues. Some labels indicate
|
||||
|
||||
- **Trunks** are where active development of the next release takes place. There is one trunk named `develop` at the time of writing this, and will be the default branch of the repository.
|
||||
- **Release Branches** track a specific, not yet complete release of `dbt`. Each minor version release has a corresponding release branch. For example, the `0.11.x` series of releases has a branch called `0.11.latest`. This allows us to release new patch versions under `0.11` without necessarily needing to pull them into the latest version of `dbt`.
|
||||
- **Feature Branches** track individual features and fixes. On completion they should be merged into the trunk brnach or a specific release branch.
|
||||
- **Feature Branches** track individual features and fixes. On completion they should be merged into the trunk branch or a specific release branch.
|
||||
|
||||
## Getting the code
|
||||
|
||||
@@ -135,7 +135,7 @@ brew install postgresql
|
||||
|
||||
### Installation
|
||||
|
||||
First make sure that you set up your `virtualenv` as described in [Setting up an environment](#setting-up-an-environment). Next, install `dbt` (and its dependencies) with:
|
||||
First make sure that you set up your `virtualenv` as described in [Setting up an environment](#setting-up-an-environment). Also ensure you have the latest version of pip installed with `pip install --upgrade pip`. Next, install `dbt` (and its dependencies) with:
|
||||
|
||||
```sh
|
||||
make dev
|
||||
@@ -170,6 +170,8 @@ docker-compose up -d database
|
||||
PGHOST=localhost PGUSER=root PGPASSWORD=password PGDATABASE=postgres bash test/setup_db.sh
|
||||
```
|
||||
|
||||
Note that you may need to run the previous command twice as it does not currently wait for the database to be running before attempting to run commands against it. This will be fixed with [#3876](https://github.com/dbt-labs/dbt/issues/3876).
|
||||
|
||||
`dbt` uses test credentials specified in a `test.env` file in the root of the repository for non-Postgres databases. This `test.env` file is git-ignored, but please be _extra_ careful to never check in credentials or other sensitive information when developing against `dbt`. To create your `test.env` file, copy the provided sample file, then supply your relevant credentials. This step is only required to use non-Postgres databases.
|
||||
|
||||
```
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
FROM ubuntu:18.04
|
||||
FROM ubuntu:20.04
|
||||
|
||||
ENV DEBIAN_FRONTEND noninteractive
|
||||
|
||||
|
||||
13
README.md
13
README.md
@@ -2,20 +2,17 @@
|
||||
<img src="https://raw.githubusercontent.com/dbt-labs/dbt/ec7dee39f793aa4f7dd3dae37282cc87664813e4/etc/dbt-logo-full.svg" alt="dbt logo" width="500"/>
|
||||
</p>
|
||||
<p align="center">
|
||||
<a href="https://github.com/dbt-labs/dbt/actions/workflows/tests.yml?query=branch%3Adevelop">
|
||||
<img src="https://github.com/dbt-labs/dbt/actions/workflows/tests.yml/badge.svg" alt="GitHub Actions"/>
|
||||
<a href="https://github.com/dbt-labs/dbt/actions/workflows/main.yml">
|
||||
<img src="https://github.com/dbt-labs/dbt/actions/workflows/main.yml/badge.svg?event=push" alt="Unit Tests Badge"/>
|
||||
</a>
|
||||
<a href="https://circleci.com/gh/dbt-labs/dbt/tree/develop">
|
||||
<img src="https://circleci.com/gh/dbt-labs/dbt/tree/develop.svg?style=svg" alt="CircleCI" />
|
||||
</a>
|
||||
<a href="https://dev.azure.com/fishtown-analytics/dbt/_build?definitionId=1&_a=summary&repositoryFilter=1&branchFilter=789%2C789%2C789%2C789">
|
||||
<img src="https://dev.azure.com/fishtown-analytics/dbt/_apis/build/status/fishtown-analytics.dbt?branchName=develop" alt="Azure Pipelines" />
|
||||
<a href="https://github.com/dbt-labs/dbt/actions/workflows/integration.yml">
|
||||
<img src="https://github.com/dbt-labs/dbt/actions/workflows/integration.yml/badge.svg?event=push" alt="Integration Tests Badge"/>
|
||||
</a>
|
||||
</p>
|
||||
|
||||
**[dbt](https://www.getdbt.com/)** enables data analysts and engineers to transform their data using the same practices that software engineers use to build applications.
|
||||
|
||||

|
||||

|
||||
|
||||
## Understanding dbt
|
||||
|
||||
|
||||
@@ -1,154 +0,0 @@
|
||||
# Python package
|
||||
# Create and test a Python package on multiple Python versions.
|
||||
# Add steps that analyze code, save the dist with the build record, publish to a PyPI-compatible index, and more:
|
||||
# https://docs.microsoft.com/azure/devops/pipelines/languages/python
|
||||
|
||||
trigger:
|
||||
branches:
|
||||
include:
|
||||
- develop
|
||||
- '*.latest'
|
||||
- pr/*
|
||||
|
||||
jobs:
|
||||
- job: UnitTest
|
||||
pool:
|
||||
vmImage: 'vs2017-win2016'
|
||||
steps:
|
||||
- task: UsePythonVersion@0
|
||||
inputs:
|
||||
versionSpec: '3.7'
|
||||
architecture: 'x64'
|
||||
|
||||
- script: python -m pip install --upgrade pip && pip install tox
|
||||
displayName: 'Install dependencies'
|
||||
|
||||
- script: python -m tox -e py -- -v
|
||||
displayName: Run unit tests
|
||||
|
||||
- job: PostgresIntegrationTest
|
||||
pool:
|
||||
vmImage: 'vs2017-win2016'
|
||||
dependsOn: UnitTest
|
||||
|
||||
steps:
|
||||
- pwsh: |
|
||||
$serviceName = Get-Service -Name postgresql*
|
||||
Set-Service -InputObject $serviceName -StartupType Automatic
|
||||
Start-Service -InputObject $serviceName
|
||||
|
||||
& $env:PGBIN\createdb.exe -U postgres dbt
|
||||
& $env:PGBIN\psql.exe -U postgres -c "CREATE ROLE root WITH PASSWORD 'password';"
|
||||
& $env:PGBIN\psql.exe -U postgres -c "ALTER ROLE root WITH LOGIN;"
|
||||
& $env:PGBIN\psql.exe -U postgres -c "GRANT CREATE, CONNECT ON DATABASE dbt TO root WITH GRANT OPTION;"
|
||||
& $env:PGBIN\psql.exe -U postgres -c "CREATE ROLE noaccess WITH PASSWORD 'password' NOSUPERUSER;"
|
||||
& $env:PGBIN\psql.exe -U postgres -c "ALTER ROLE noaccess WITH LOGIN;"
|
||||
& $env:PGBIN\psql.exe -U postgres -c "GRANT CONNECT ON DATABASE dbt TO noaccess;"
|
||||
displayName: Install postgresql and set up database
|
||||
|
||||
- task: UsePythonVersion@0
|
||||
inputs:
|
||||
versionSpec: '3.7'
|
||||
architecture: 'x64'
|
||||
|
||||
- script: python -m pip install --upgrade pip && pip install tox
|
||||
displayName: 'Install dependencies'
|
||||
|
||||
- script: python -m tox -e py-postgres -- -v -n4
|
||||
displayName: Run integration tests
|
||||
|
||||
# These three are all similar except secure environment variables, which MUST be passed along to their tasks,
|
||||
# but there's probably a better way to do this!
|
||||
- job: SnowflakeIntegrationTest
|
||||
pool:
|
||||
vmImage: 'vs2017-win2016'
|
||||
dependsOn: UnitTest
|
||||
condition: succeeded()
|
||||
steps:
|
||||
- task: UsePythonVersion@0
|
||||
inputs:
|
||||
versionSpec: '3.7'
|
||||
architecture: 'x64'
|
||||
|
||||
- script: python -m pip install --upgrade pip && pip install tox
|
||||
displayName: 'Install dependencies'
|
||||
|
||||
- script: python -m tox -e py-snowflake -- -v -n4
|
||||
env:
|
||||
SNOWFLAKE_TEST_ACCOUNT: $(SNOWFLAKE_TEST_ACCOUNT)
|
||||
SNOWFLAKE_TEST_PASSWORD: $(SNOWFLAKE_TEST_PASSWORD)
|
||||
SNOWFLAKE_TEST_USER: $(SNOWFLAKE_TEST_USER)
|
||||
SNOWFLAKE_TEST_WAREHOUSE: $(SNOWFLAKE_TEST_WAREHOUSE)
|
||||
SNOWFLAKE_TEST_OAUTH_REFRESH_TOKEN: $(SNOWFLAKE_TEST_OAUTH_REFRESH_TOKEN)
|
||||
SNOWFLAKE_TEST_OAUTH_CLIENT_ID: $(SNOWFLAKE_TEST_OAUTH_CLIENT_ID)
|
||||
SNOWFLAKE_TEST_OAUTH_CLIENT_SECRET: $(SNOWFLAKE_TEST_OAUTH_CLIENT_SECRET)
|
||||
displayName: Run integration tests
|
||||
|
||||
- job: BigQueryIntegrationTest
|
||||
pool:
|
||||
vmImage: 'vs2017-win2016'
|
||||
dependsOn: UnitTest
|
||||
condition: succeeded()
|
||||
steps:
|
||||
- task: UsePythonVersion@0
|
||||
inputs:
|
||||
versionSpec: '3.7'
|
||||
architecture: 'x64'
|
||||
- script: python -m pip install --upgrade pip && pip install tox
|
||||
displayName: 'Install dependencies'
|
||||
- script: python -m tox -e py-bigquery -- -v -n4
|
||||
env:
|
||||
BIGQUERY_SERVICE_ACCOUNT_JSON: $(BIGQUERY_SERVICE_ACCOUNT_JSON)
|
||||
displayName: Run integration tests
|
||||
|
||||
- job: RedshiftIntegrationTest
|
||||
pool:
|
||||
vmImage: 'vs2017-win2016'
|
||||
dependsOn: UnitTest
|
||||
condition: succeeded()
|
||||
steps:
|
||||
- task: UsePythonVersion@0
|
||||
inputs:
|
||||
versionSpec: '3.7'
|
||||
architecture: 'x64'
|
||||
|
||||
- script: python -m pip install --upgrade pip && pip install tox
|
||||
displayName: 'Install dependencies'
|
||||
|
||||
- script: python -m tox -e py-redshift -- -v -n4
|
||||
env:
|
||||
REDSHIFT_TEST_DBNAME: $(REDSHIFT_TEST_DBNAME)
|
||||
REDSHIFT_TEST_PASS: $(REDSHIFT_TEST_PASS)
|
||||
REDSHIFT_TEST_USER: $(REDSHIFT_TEST_USER)
|
||||
REDSHIFT_TEST_PORT: $(REDSHIFT_TEST_PORT)
|
||||
REDSHIFT_TEST_HOST: $(REDSHIFT_TEST_HOST)
|
||||
displayName: Run integration tests
|
||||
|
||||
- job: BuildWheel
|
||||
pool:
|
||||
vmImage: 'vs2017-win2016'
|
||||
dependsOn:
|
||||
- UnitTest
|
||||
- PostgresIntegrationTest
|
||||
- RedshiftIntegrationTest
|
||||
- SnowflakeIntegrationTest
|
||||
- BigQueryIntegrationTest
|
||||
condition: succeeded()
|
||||
steps:
|
||||
- task: UsePythonVersion@0
|
||||
inputs:
|
||||
versionSpec: '3.7'
|
||||
architecture: 'x64'
|
||||
- script: python -m pip install --upgrade pip setuptools && python -m pip install -r requirements.txt && python -m pip install -r dev-requirements.txt
|
||||
displayName: Install dependencies
|
||||
- task: ShellScript@2
|
||||
inputs:
|
||||
scriptPath: scripts/build-wheels.sh
|
||||
- task: CopyFiles@2
|
||||
inputs:
|
||||
contents: 'dist\?(*.whl|*.tar.gz)'
|
||||
TargetFolder: '$(Build.ArtifactStagingDirectory)'
|
||||
- task: PublishBuildArtifacts@1
|
||||
inputs:
|
||||
pathtoPublish: '$(Build.ArtifactStagingDirectory)'
|
||||
artifactName: dists
|
||||
@@ -153,7 +153,7 @@ def statically_parse_adapter_dispatch(func_call, ctx, db_wrapper):
|
||||
package_name = packages_arg.node.node.name
|
||||
macro_name = packages_arg.node.attr
|
||||
if (macro_name.startswith('_get') and 'namespaces' in macro_name):
|
||||
# noqa: https://github.com/fishtown-analytics/dbt-utils/blob/9e9407b/macros/cross_db_utils/_get_utils_namespaces.sql
|
||||
# noqa: https://github.com/dbt-labs/dbt-utils/blob/9e9407b/macros/cross_db_utils/_get_utils_namespaces.sql
|
||||
var_name = f'{package_name}_dispatch_list'
|
||||
# hard code compatibility for fivetran_utils, just a teensy bit different
|
||||
# noqa: https://github.com/fivetran/dbt_fivetran_utils/blob/0978ba2/macros/_get_utils_namespaces.sql
|
||||
|
||||
@@ -1,10 +1,9 @@
|
||||
from functools import wraps
|
||||
import functools
|
||||
import requests
|
||||
from dbt.exceptions import RegistryException
|
||||
from dbt.utils import memoized
|
||||
from dbt.utils import memoized, _connection_exception_retry as connection_exception_retry
|
||||
from dbt.logger import GLOBAL_LOGGER as logger
|
||||
from dbt import deprecations
|
||||
import os
|
||||
import time
|
||||
|
||||
if os.getenv('DBT_PACKAGE_HUB_URL'):
|
||||
DEFAULT_REGISTRY_BASE_URL = os.getenv('DBT_PACKAGE_HUB_URL')
|
||||
@@ -19,26 +18,11 @@ def _get_url(url, registry_base_url=None):
|
||||
return '{}{}'.format(registry_base_url, url)
|
||||
|
||||
|
||||
def _wrap_exceptions(fn):
|
||||
@wraps(fn)
|
||||
def wrapper(*args, **kwargs):
|
||||
max_attempts = 5
|
||||
attempt = 0
|
||||
while True:
|
||||
attempt += 1
|
||||
try:
|
||||
return fn(*args, **kwargs)
|
||||
except (requests.exceptions.ConnectionError, requests.exceptions.Timeout) as exc:
|
||||
if attempt < max_attempts:
|
||||
time.sleep(1)
|
||||
continue
|
||||
raise RegistryException(
|
||||
'Unable to connect to registry hub'
|
||||
) from exc
|
||||
return wrapper
|
||||
def _get_with_retries(path, registry_base_url=None):
|
||||
get_fn = functools.partial(_get, path, registry_base_url)
|
||||
return connection_exception_retry(get_fn, 5)
|
||||
|
||||
|
||||
@_wrap_exceptions
|
||||
def _get(path, registry_base_url=None):
|
||||
url = _get_url(path, registry_base_url)
|
||||
logger.debug('Making package registry request: GET {}'.format(url))
|
||||
@@ -50,22 +34,44 @@ def _get(path, registry_base_url=None):
|
||||
|
||||
|
||||
def index(registry_base_url=None):
|
||||
return _get('api/v1/index.json', registry_base_url)
|
||||
return _get_with_retries('api/v1/index.json', registry_base_url)
|
||||
|
||||
|
||||
index_cached = memoized(index)
|
||||
|
||||
|
||||
def packages(registry_base_url=None):
|
||||
return _get('api/v1/packages.json', registry_base_url)
|
||||
return _get_with_retries('api/v1/packages.json', registry_base_url)
|
||||
|
||||
|
||||
def package(name, registry_base_url=None):
|
||||
return _get('api/v1/{}.json'.format(name), registry_base_url)
|
||||
response = _get_with_retries('api/v1/{}.json'.format(name), registry_base_url)
|
||||
|
||||
# Either redirectnamespace or redirectname in the JSON response indicate a redirect
|
||||
# redirectnamespace redirects based on package ownership
|
||||
# redirectname redirects based on package name
|
||||
# Both can be present at the same time, or neither. Fails gracefully to old name
|
||||
|
||||
if ('redirectnamespace' in response) or ('redirectname' in response):
|
||||
|
||||
if ('redirectnamespace' in response) and response['redirectnamespace'] is not None:
|
||||
use_namespace = response['redirectnamespace']
|
||||
else:
|
||||
use_namespace = response['namespace']
|
||||
|
||||
if ('redirectname' in response) and response['redirectname'] is not None:
|
||||
use_name = response['redirectname']
|
||||
else:
|
||||
use_name = response['name']
|
||||
|
||||
new_nwo = use_namespace + "/" + use_name
|
||||
deprecations.warn('package-redirect', old_name=name, new_name=new_nwo)
|
||||
|
||||
return response
|
||||
|
||||
|
||||
def package_version(name, version, registry_base_url=None):
|
||||
return _get('api/v1/{}/{}.json'.format(name, version), registry_base_url)
|
||||
return _get_with_retries('api/v1/{}/{}.json'.format(name, version), registry_base_url)
|
||||
|
||||
|
||||
def get_available_versions(name):
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import errno
|
||||
import functools
|
||||
import fnmatch
|
||||
import json
|
||||
import os
|
||||
@@ -15,9 +16,8 @@ from typing import (
|
||||
)
|
||||
|
||||
import dbt.exceptions
|
||||
import dbt.utils
|
||||
|
||||
from dbt.logger import GLOBAL_LOGGER as logger
|
||||
from dbt.utils import _connection_exception_retry as connection_exception_retry
|
||||
|
||||
if sys.platform == 'win32':
|
||||
from ctypes import WinDLL, c_bool
|
||||
@@ -30,7 +30,7 @@ def find_matching(
|
||||
root_path: str,
|
||||
relative_paths_to_search: List[str],
|
||||
file_pattern: str,
|
||||
) -> List[Dict[str, str]]:
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Given an absolute `root_path`, a list of relative paths to that
|
||||
absolute root path (`relative_paths_to_search`), and a `file_pattern`
|
||||
@@ -61,11 +61,19 @@ def find_matching(
|
||||
relative_path = os.path.relpath(
|
||||
absolute_path, absolute_path_to_search
|
||||
)
|
||||
modification_time = 0.0
|
||||
try:
|
||||
modification_time = os.path.getmtime(absolute_path)
|
||||
except OSError:
|
||||
logger.exception(
|
||||
f"Error retrieving modification time for file {absolute_path}"
|
||||
)
|
||||
if reobj.match(local_file):
|
||||
matching.append({
|
||||
'searched_path': relative_path_to_search,
|
||||
'absolute_path': absolute_path,
|
||||
'relative_path': relative_path,
|
||||
'modification_time': modification_time,
|
||||
})
|
||||
|
||||
return matching
|
||||
@@ -441,6 +449,13 @@ def run_cmd(
|
||||
return out, err
|
||||
|
||||
|
||||
def download_with_retries(
|
||||
url: str, path: str, timeout: Optional[Union[float, tuple]] = None
|
||||
) -> None:
|
||||
download_fn = functools.partial(download, url, path, timeout)
|
||||
connection_exception_retry(download_fn, 5)
|
||||
|
||||
|
||||
def download(
|
||||
url: str, path: str, timeout: Optional[Union[float, tuple]] = None
|
||||
) -> None:
|
||||
|
||||
@@ -10,7 +10,7 @@ from dbt.adapters.factory import get_adapter
|
||||
from dbt.clients import jinja
|
||||
from dbt.clients.system import make_directory
|
||||
from dbt.context.providers import generate_runtime_model
|
||||
from dbt.contracts.graph.manifest import Manifest
|
||||
from dbt.contracts.graph.manifest import Manifest, UniqueID
|
||||
from dbt.contracts.graph.compiled import (
|
||||
COMPILED_TYPES,
|
||||
CompiledSchemaTestNode,
|
||||
@@ -107,6 +107,18 @@ def _extend_prepended_ctes(prepended_ctes, new_prepended_ctes):
|
||||
_add_prepended_cte(prepended_ctes, new_cte)
|
||||
|
||||
|
||||
def _get_tests_for_node(manifest: Manifest, unique_id: UniqueID) -> List[UniqueID]:
|
||||
""" Get a list of tests that depend on the node with the
|
||||
provided unique id """
|
||||
|
||||
return [
|
||||
node.unique_id
|
||||
for _, node in manifest.nodes.items()
|
||||
if node.resource_type == NodeType.Test and
|
||||
unique_id in node.depends_on_nodes
|
||||
]
|
||||
|
||||
|
||||
class Linker:
|
||||
def __init__(self, data=None):
|
||||
if data is None:
|
||||
@@ -142,7 +154,7 @@ class Linker:
|
||||
include all nodes in their corresponding graph entries.
|
||||
"""
|
||||
out_graph = self.graph.copy()
|
||||
for node_id in self.graph.nodes():
|
||||
for node_id in self.graph:
|
||||
data = manifest.expect(node_id).to_dict(omit_none=True)
|
||||
out_graph.add_node(node_id, **data)
|
||||
nx.write_gpickle(out_graph, outfile)
|
||||
@@ -412,13 +424,80 @@ class Compiler:
|
||||
self.link_node(linker, node, manifest)
|
||||
for exposure in manifest.exposures.values():
|
||||
self.link_node(linker, exposure, manifest)
|
||||
# linker.add_node(exposure.unique_id)
|
||||
|
||||
cycle = linker.find_cycles()
|
||||
|
||||
if cycle:
|
||||
raise RuntimeError("Found a cycle: {}".format(cycle))
|
||||
|
||||
self.resolve_graph(linker, manifest)
|
||||
|
||||
def resolve_graph(self, linker: Linker, manifest: Manifest) -> None:
|
||||
""" This method adds additional edges to the DAG. For a given non-test
|
||||
executable node, add an edge from an upstream test to the given node if
|
||||
the set of nodes the test depends on is a proper/strict subset of the
|
||||
upstream nodes for the given node. """
|
||||
|
||||
# Given a graph:
|
||||
# model1 --> model2 --> model3
|
||||
# | |
|
||||
# | \/
|
||||
# \/ test 2
|
||||
# test1
|
||||
#
|
||||
# Produce the following graph:
|
||||
# model1 --> model2 --> model3
|
||||
# | | /\ /\
|
||||
# | \/ | |
|
||||
# \/ test2 ------- |
|
||||
# test1 -------------------
|
||||
|
||||
for node_id in linker.graph:
|
||||
# If node is executable (in manifest.nodes) and does _not_
|
||||
# represent a test, continue.
|
||||
if (
|
||||
node_id in manifest.nodes and
|
||||
manifest.nodes[node_id].resource_type != NodeType.Test
|
||||
):
|
||||
# Get *everything* upstream of the node
|
||||
all_upstream_nodes = nx.traversal.bfs_tree(
|
||||
linker.graph, node_id, reverse=True
|
||||
)
|
||||
# Get the set of upstream nodes not including the current node.
|
||||
upstream_nodes = set([
|
||||
n for n in all_upstream_nodes if n != node_id
|
||||
])
|
||||
|
||||
# Get all tests that depend on any upstream nodes.
|
||||
upstream_tests = []
|
||||
for upstream_node in upstream_nodes:
|
||||
upstream_tests += _get_tests_for_node(
|
||||
manifest,
|
||||
upstream_node
|
||||
)
|
||||
|
||||
for upstream_test in upstream_tests:
|
||||
# Get the set of all nodes that the test depends on
|
||||
# including the upstream_node itself. This is necessary
|
||||
# because tests can depend on multiple nodes (ex:
|
||||
# relationship tests). Test nodes do not distinguish
|
||||
# between what node the test is "testing" and what
|
||||
# node(s) it depends on.
|
||||
test_depends_on = set(
|
||||
manifest.nodes[upstream_test].depends_on_nodes
|
||||
)
|
||||
|
||||
# If the set of nodes that an upstream test depends on
|
||||
# is a proper (or strict) subset of all upstream nodes of
|
||||
# the current node, add an edge from the upstream test
|
||||
# to the current node. Must be a proper/strict subset to
|
||||
# avoid adding a circular dependency to the graph.
|
||||
if (test_depends_on < upstream_nodes):
|
||||
linker.graph.add_edge(
|
||||
upstream_test,
|
||||
node_id
|
||||
)
|
||||
|
||||
def compile(self, manifest: Manifest, write=True) -> Graph:
|
||||
self.initialize()
|
||||
linker = Linker()
|
||||
|
||||
@@ -84,7 +84,8 @@ def read_user_config(directory: str) -> UserConfig:
|
||||
|
||||
# The Profile class is included in RuntimeConfig, so any attribute
|
||||
# additions must also be set where the RuntimeConfig class is created
|
||||
@dataclass
|
||||
# `init=False` is a workaround for https://bugs.python.org/issue45081
|
||||
@dataclass(init=False)
|
||||
class Profile(HasCredentials):
|
||||
profile_name: str
|
||||
target_name: str
|
||||
@@ -92,6 +93,23 @@ class Profile(HasCredentials):
|
||||
threads: int
|
||||
credentials: Credentials
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
profile_name: str,
|
||||
target_name: str,
|
||||
config: UserConfig,
|
||||
threads: int,
|
||||
credentials: Credentials
|
||||
):
|
||||
"""Explicitly defining `__init__` to work around bug in Python 3.9.7
|
||||
https://bugs.python.org/issue45081
|
||||
"""
|
||||
self.profile_name = profile_name
|
||||
self.target_name = target_name
|
||||
self.config = config
|
||||
self.threads = threads
|
||||
self.credentials = credentials
|
||||
|
||||
def to_profile_info(
|
||||
self, serialize_credentials: bool = False
|
||||
) -> Dict[str, Any]:
|
||||
|
||||
@@ -645,13 +645,24 @@ class Project:
|
||||
def hashed_name(self):
|
||||
return hashlib.md5(self.project_name.encode('utf-8')).hexdigest()
|
||||
|
||||
def get_selector(self, name: str) -> SelectionSpec:
|
||||
def get_selector(self, name: str) -> Union[SelectionSpec, bool]:
|
||||
if name not in self.selectors:
|
||||
raise RuntimeException(
|
||||
f'Could not find selector named {name}, expected one of '
|
||||
f'{list(self.selectors)}'
|
||||
)
|
||||
return self.selectors[name]
|
||||
return self.selectors[name]["definition"]
|
||||
|
||||
def get_default_selector_name(self) -> Union[str, None]:
|
||||
"""This function fetch the default selector to use on `dbt run` (if any)
|
||||
:return: either a selector if default is set or None
|
||||
:rtype: Union[SelectionSpec, None]
|
||||
"""
|
||||
for selector_name, selector in self.selectors.items():
|
||||
if selector["default"] is True:
|
||||
return selector_name
|
||||
|
||||
return None
|
||||
|
||||
def get_macro_search_order(self, macro_namespace: str):
|
||||
for dispatch_entry in self.dispatch:
|
||||
|
||||
@@ -391,6 +391,10 @@ class UnsetCredentials(Credentials):
|
||||
def type(self):
|
||||
return None
|
||||
|
||||
@property
|
||||
def unique_field(self):
|
||||
return None
|
||||
|
||||
def connection_info(self, *args, **kwargs):
|
||||
return {}
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
from pathlib import Path
|
||||
from typing import Dict, Any
|
||||
from typing import Dict, Any, Union
|
||||
from dbt.clients.yaml_helper import ( # noqa: F401
|
||||
yaml, Loader, Dumper, load_yaml_text
|
||||
)
|
||||
@@ -29,13 +29,14 @@ Validator Error:
|
||||
"""
|
||||
|
||||
|
||||
class SelectorConfig(Dict[str, SelectionSpec]):
|
||||
class SelectorConfig(Dict[str, Dict[str, Union[SelectionSpec, bool]]]):
|
||||
|
||||
@classmethod
|
||||
def selectors_from_dict(cls, data: Dict[str, Any]) -> 'SelectorConfig':
|
||||
try:
|
||||
SelectorFile.validate(data)
|
||||
selector_file = SelectorFile.from_dict(data)
|
||||
validate_selector_default(selector_file)
|
||||
selectors = parse_from_selectors_definition(selector_file)
|
||||
except ValidationError as exc:
|
||||
yaml_sel_cfg = yaml.dump(exc.instance)
|
||||
@@ -118,6 +119,24 @@ def selector_config_from_data(
|
||||
return selectors
|
||||
|
||||
|
||||
def validate_selector_default(selector_file: SelectorFile) -> None:
|
||||
"""Check if a selector.yml file has more than 1 default key set to true"""
|
||||
default_set: bool = False
|
||||
default_selector_name: Union[str, None] = None
|
||||
|
||||
for selector in selector_file.selectors:
|
||||
if selector.default is True and default_set is False:
|
||||
default_set = True
|
||||
default_selector_name = selector.name
|
||||
continue
|
||||
if selector.default is True and default_set is True:
|
||||
raise DbtSelectorsError(
|
||||
"Error when parsing the selector file. "
|
||||
"Found multiple selectors with `default: true`:"
|
||||
f"{default_selector_name} and {selector.name}"
|
||||
)
|
||||
|
||||
|
||||
# These are utilities to clean up the dictionary created from
|
||||
# selectors.yml by turning the cli-string format entries into
|
||||
# normalized dictionary entries. It parallels the flow in
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
import abc
|
||||
import itertools
|
||||
import hashlib
|
||||
from dataclasses import dataclass, field
|
||||
from typing import (
|
||||
Any, ClassVar, Dict, Tuple, Iterable, Optional, List, Callable,
|
||||
@@ -127,6 +128,15 @@ class Credentials(
|
||||
'type not implemented for base credentials class'
|
||||
)
|
||||
|
||||
@abc.abstractproperty
|
||||
def unique_field(self) -> str:
|
||||
raise NotImplementedError(
|
||||
'type not implemented for base credentials class'
|
||||
)
|
||||
|
||||
def hashed_unique_field(self) -> str:
|
||||
return hashlib.md5(self.unique_field.encode('utf-8')).hexdigest()
|
||||
|
||||
def connection_info(
|
||||
self, *, with_aliases: bool = False
|
||||
) -> Iterable[Tuple[str, Any]]:
|
||||
|
||||
@@ -42,6 +42,7 @@ parse_file_type_to_parser = {
|
||||
class FilePath(dbtClassMixin):
|
||||
searched_path: str
|
||||
relative_path: str
|
||||
modification_time: float
|
||||
project_root: str
|
||||
|
||||
@property
|
||||
@@ -132,6 +133,10 @@ class RemoteFile(dbtClassMixin):
|
||||
def original_file_path(self):
|
||||
return 'from remote system'
|
||||
|
||||
@property
|
||||
def modification_time(self):
|
||||
return 'from remote system'
|
||||
|
||||
|
||||
@dataclass
|
||||
class BaseSourceFile(dbtClassMixin, SerializableType):
|
||||
@@ -150,8 +155,6 @@ class BaseSourceFile(dbtClassMixin, SerializableType):
|
||||
def file_id(self):
|
||||
if isinstance(self.path, RemoteFile):
|
||||
return None
|
||||
if self.checksum.name == 'none':
|
||||
return None
|
||||
return f'{self.project_name}://{self.path.original_file_path}'
|
||||
|
||||
def _serialize(self):
|
||||
|
||||
@@ -285,6 +285,9 @@ class SourceFreshnessOutput(dbtClassMixin):
|
||||
status: FreshnessStatus
|
||||
criteria: FreshnessThreshold
|
||||
adapter_response: Dict[str, Any]
|
||||
timing: List[TimingInfo]
|
||||
thread_id: str
|
||||
execution_time: float
|
||||
|
||||
|
||||
@dataclass
|
||||
@@ -333,7 +336,10 @@ def process_freshness_result(
|
||||
max_loaded_at_time_ago_in_s=result.age,
|
||||
status=result.status,
|
||||
criteria=criteria,
|
||||
adapter_response=result.adapter_response
|
||||
adapter_response=result.adapter_response,
|
||||
timing=result.timing,
|
||||
thread_id=result.thread_id,
|
||||
execution_time=result.execution_time,
|
||||
)
|
||||
|
||||
|
||||
|
||||
@@ -58,6 +58,7 @@ class RPCExecParameters(RPCParameters):
|
||||
class RPCCompileParameters(RPCParameters):
|
||||
threads: Optional[int] = None
|
||||
models: Union[None, str, List[str]] = None
|
||||
select: Union[None, str, List[str]] = None
|
||||
exclude: Union[None, str, List[str]] = None
|
||||
selector: Optional[str] = None
|
||||
state: Optional[str] = None
|
||||
@@ -71,12 +72,14 @@ class RPCListParameters(RPCParameters):
|
||||
select: Union[None, str, List[str]] = None
|
||||
selector: Optional[str] = None
|
||||
output: Optional[str] = 'json'
|
||||
output_keys: Optional[List[str]] = None
|
||||
|
||||
|
||||
@dataclass
|
||||
class RPCRunParameters(RPCParameters):
|
||||
threads: Optional[int] = None
|
||||
models: Union[None, str, List[str]] = None
|
||||
select: Union[None, str, List[str]] = None
|
||||
exclude: Union[None, str, List[str]] = None
|
||||
selector: Optional[str] = None
|
||||
state: Optional[str] = None
|
||||
@@ -118,8 +121,9 @@ class RPCDocsGenerateParameters(RPCParameters):
|
||||
|
||||
@dataclass
|
||||
class RPCBuildParameters(RPCParameters):
|
||||
resource_types: Optional[List[str]] = None
|
||||
select: Union[None, str, List[str]] = None
|
||||
threads: Optional[int] = None
|
||||
models: Union[None, str, List[str]] = None
|
||||
exclude: Union[None, str, List[str]] = None
|
||||
selector: Optional[str] = None
|
||||
state: Optional[str] = None
|
||||
|
||||
@@ -9,6 +9,7 @@ class SelectorDefinition(dbtClassMixin):
|
||||
name: str
|
||||
definition: Union[str, Dict[str, Any]]
|
||||
description: str = ''
|
||||
default: bool = False
|
||||
|
||||
|
||||
@dataclass
|
||||
|
||||
@@ -131,6 +131,14 @@ class AdapterMacroDeprecation(DBTDeprecation):
|
||||
'''
|
||||
|
||||
|
||||
class PackageRedirectDeprecation(DBTDeprecation):
|
||||
_name = 'package-redirect'
|
||||
_description = '''\
|
||||
The `{old_name}` package is deprecated in favor of `{new_name}`. Please update
|
||||
your `packages.yml` configuration to use `{new_name}` instead.
|
||||
'''
|
||||
|
||||
|
||||
_adapter_renamed_description = """\
|
||||
The adapter function `adapter.{old_name}` is deprecated and will be removed in
|
||||
a future release of dbt. Please use `adapter.{new_name}` instead.
|
||||
@@ -176,6 +184,7 @@ deprecations_list: List[DBTDeprecation] = [
|
||||
ModelsKeyNonModelDeprecation(),
|
||||
ExecuteMacrosReleaseDeprecation(),
|
||||
AdapterMacroDeprecation(),
|
||||
PackageRedirectDeprecation()
|
||||
]
|
||||
|
||||
deprecations: Dict[str, DBTDeprecation] = {
|
||||
|
||||
@@ -30,9 +30,13 @@ class RegistryPackageMixin:
|
||||
|
||||
|
||||
class RegistryPinnedPackage(RegistryPackageMixin, PinnedPackage):
|
||||
def __init__(self, package: str, version: str) -> None:
|
||||
def __init__(self,
|
||||
package: str,
|
||||
version: str,
|
||||
version_latest: str) -> None:
|
||||
super().__init__(package)
|
||||
self.version = version
|
||||
self.version_latest = version_latest
|
||||
|
||||
@property
|
||||
def name(self):
|
||||
@@ -44,6 +48,9 @@ class RegistryPinnedPackage(RegistryPackageMixin, PinnedPackage):
|
||||
def get_version(self):
|
||||
return self.version
|
||||
|
||||
def get_version_latest(self):
|
||||
return self.version_latest
|
||||
|
||||
def nice_version_name(self):
|
||||
return 'version {}'.format(self.version)
|
||||
|
||||
@@ -61,7 +68,7 @@ class RegistryPinnedPackage(RegistryPackageMixin, PinnedPackage):
|
||||
system.make_directory(os.path.dirname(tar_path))
|
||||
|
||||
download_url = metadata.downloads.tarball
|
||||
system.download(download_url, tar_path)
|
||||
system.download_with_retries(download_url, tar_path)
|
||||
deps_path = project.modules_path
|
||||
package_name = self.get_project_name(project, renderer)
|
||||
system.untar_package(tar_path, deps_path, package_name)
|
||||
@@ -124,6 +131,7 @@ class RegistryUnpinnedPackage(
|
||||
available,
|
||||
self.install_prerelease
|
||||
)
|
||||
available_latest = installable[-1]
|
||||
|
||||
# for now, pick a version and then recurse. later on,
|
||||
# we'll probably want to traverse multiple options
|
||||
@@ -132,4 +140,5 @@ class RegistryUnpinnedPackage(
|
||||
target = semver.resolve_to_specific_version(range_, installable)
|
||||
if not target:
|
||||
package_version_not_found(self.package, range_, installable)
|
||||
return RegistryPinnedPackage(package=self.package, version=target)
|
||||
return RegistryPinnedPackage(package=self.package, version=target,
|
||||
version_latest=available_latest)
|
||||
|
||||
@@ -714,7 +714,7 @@ def system_error(operation_name):
|
||||
.format(operation_name))
|
||||
|
||||
|
||||
class RegistryException(Exception):
|
||||
class ConnectionException(Exception):
|
||||
pass
|
||||
|
||||
|
||||
|
||||
@@ -18,6 +18,7 @@ WRITE_JSON = None
|
||||
PARTIAL_PARSE = None
|
||||
USE_COLORS = None
|
||||
STORE_FAILURES = None
|
||||
GREEDY = None
|
||||
|
||||
|
||||
def env_set_truthy(key: str) -> Optional[str]:
|
||||
@@ -56,7 +57,7 @@ MP_CONTEXT = _get_context()
|
||||
def reset():
|
||||
global STRICT_MODE, FULL_REFRESH, USE_CACHE, WARN_ERROR, TEST_NEW_PARSER, \
|
||||
USE_EXPERIMENTAL_PARSER, WRITE_JSON, PARTIAL_PARSE, MP_CONTEXT, USE_COLORS, \
|
||||
STORE_FAILURES
|
||||
STORE_FAILURES, GREEDY
|
||||
|
||||
STRICT_MODE = False
|
||||
FULL_REFRESH = False
|
||||
@@ -69,12 +70,13 @@ def reset():
|
||||
MP_CONTEXT = _get_context()
|
||||
USE_COLORS = True
|
||||
STORE_FAILURES = False
|
||||
GREEDY = False
|
||||
|
||||
|
||||
def set_from_args(args):
|
||||
global STRICT_MODE, FULL_REFRESH, USE_CACHE, WARN_ERROR, TEST_NEW_PARSER, \
|
||||
USE_EXPERIMENTAL_PARSER, WRITE_JSON, PARTIAL_PARSE, MP_CONTEXT, USE_COLORS, \
|
||||
STORE_FAILURES
|
||||
STORE_FAILURES, GREEDY
|
||||
|
||||
USE_CACHE = getattr(args, 'use_cache', USE_CACHE)
|
||||
|
||||
@@ -99,6 +101,7 @@ def set_from_args(args):
|
||||
USE_COLORS = use_colors_override
|
||||
|
||||
STORE_FAILURES = getattr(args, 'store_failures', STORE_FAILURES)
|
||||
GREEDY = getattr(args, 'greedy', GREEDY)
|
||||
|
||||
|
||||
# initialize everything to the defaults on module load
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
# special support for CLI argument parsing.
|
||||
from dbt import flags
|
||||
import itertools
|
||||
from dbt.clients.yaml_helper import yaml, Loader, Dumper # noqa: F401
|
||||
|
||||
@@ -66,7 +67,7 @@ def parse_union_from_default(
|
||||
def parse_difference(
|
||||
include: Optional[List[str]], exclude: Optional[List[str]]
|
||||
) -> SelectionDifference:
|
||||
included = parse_union_from_default(include, DEFAULT_INCLUDES)
|
||||
included = parse_union_from_default(include, DEFAULT_INCLUDES, greedy=bool(flags.GREEDY))
|
||||
excluded = parse_union_from_default(exclude, DEFAULT_EXCLUDES, greedy=True)
|
||||
return SelectionDifference(components=[included, excluded])
|
||||
|
||||
@@ -180,7 +181,7 @@ def parse_union_definition(definition: Dict[str, Any]) -> SelectionSpec:
|
||||
union_def_parts = _get_list_dicts(definition, 'union')
|
||||
include, exclude = _parse_include_exclude_subdefs(union_def_parts)
|
||||
|
||||
union = SelectionUnion(components=include)
|
||||
union = SelectionUnion(components=include, greedy_warning=False)
|
||||
|
||||
if exclude is None:
|
||||
union.raw = definition
|
||||
@@ -188,7 +189,8 @@ def parse_union_definition(definition: Dict[str, Any]) -> SelectionSpec:
|
||||
else:
|
||||
return SelectionDifference(
|
||||
components=[union, exclude],
|
||||
raw=definition
|
||||
raw=definition,
|
||||
greedy_warning=False
|
||||
)
|
||||
|
||||
|
||||
@@ -197,7 +199,7 @@ def parse_intersection_definition(
|
||||
) -> SelectionSpec:
|
||||
intersection_def_parts = _get_list_dicts(definition, 'intersection')
|
||||
include, exclude = _parse_include_exclude_subdefs(intersection_def_parts)
|
||||
intersection = SelectionIntersection(components=include)
|
||||
intersection = SelectionIntersection(components=include, greedy_warning=False)
|
||||
|
||||
if exclude is None:
|
||||
intersection.raw = definition
|
||||
@@ -205,7 +207,8 @@ def parse_intersection_definition(
|
||||
else:
|
||||
return SelectionDifference(
|
||||
components=[intersection, exclude],
|
||||
raw=definition
|
||||
raw=definition,
|
||||
greedy_warning=False
|
||||
)
|
||||
|
||||
|
||||
@@ -239,7 +242,7 @@ def parse_dict_definition(definition: Dict[str, Any]) -> SelectionSpec:
|
||||
if diff_arg is None:
|
||||
return base
|
||||
else:
|
||||
return SelectionDifference(components=[base, diff_arg])
|
||||
return SelectionDifference(components=[base, diff_arg], greedy_warning=False)
|
||||
|
||||
|
||||
def parse_from_definition(
|
||||
@@ -271,10 +274,12 @@ def parse_from_definition(
|
||||
|
||||
def parse_from_selectors_definition(
|
||||
source: SelectorFile
|
||||
) -> Dict[str, SelectionSpec]:
|
||||
result: Dict[str, SelectionSpec] = {}
|
||||
) -> Dict[str, Dict[str, Union[SelectionSpec, bool]]]:
|
||||
result: Dict[str, Dict[str, Union[SelectionSpec, bool]]] = {}
|
||||
selector: SelectorDefinition
|
||||
for selector in source.selectors:
|
||||
result[selector.name] = parse_from_definition(selector.definition,
|
||||
rootlevel=True)
|
||||
result[selector.name] = {
|
||||
"default": selector.default,
|
||||
"definition": parse_from_definition(selector.definition, rootlevel=True)
|
||||
}
|
||||
return result
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
|
||||
from typing import Set, List, Optional, Tuple
|
||||
|
||||
from .graph import Graph, UniqueId
|
||||
@@ -30,6 +29,24 @@ def alert_non_existence(raw_spec, nodes):
|
||||
)
|
||||
|
||||
|
||||
def alert_unused_nodes(raw_spec, node_names):
|
||||
summary_nodes_str = ("\n - ").join(node_names[:3])
|
||||
debug_nodes_str = ("\n - ").join(node_names)
|
||||
and_more_str = f"\n - and {len(node_names) - 3} more" if len(node_names) > 4 else ""
|
||||
summary_msg = (
|
||||
f"\nSome tests were excluded because at least one parent is not selected. "
|
||||
f"Use the --greedy flag to include them."
|
||||
f"\n - {summary_nodes_str}{and_more_str}"
|
||||
)
|
||||
logger.info(summary_msg)
|
||||
if len(node_names) > 4:
|
||||
debug_msg = (
|
||||
f"Full list of tests that were excluded:"
|
||||
f"\n - {debug_nodes_str}"
|
||||
)
|
||||
logger.debug(debug_msg)
|
||||
|
||||
|
||||
def can_select_indirectly(node):
|
||||
"""If a node is not selected itself, but its parent(s) are, it may qualify
|
||||
for indirect selection.
|
||||
@@ -151,16 +168,16 @@ class NodeSelector(MethodManager):
|
||||
|
||||
return direct_nodes, indirect_nodes
|
||||
|
||||
def select_nodes(self, spec: SelectionSpec) -> Set[UniqueId]:
|
||||
def select_nodes(self, spec: SelectionSpec) -> Tuple[Set[UniqueId], Set[UniqueId]]:
|
||||
"""Select the nodes in the graph according to the spec.
|
||||
|
||||
This is the main point of entry for turning a spec into a set of nodes:
|
||||
- Recurse through spec, select by criteria, combine by set operation
|
||||
- Return final (unfiltered) selection set
|
||||
"""
|
||||
|
||||
direct_nodes, indirect_nodes = self.select_nodes_recursively(spec)
|
||||
return direct_nodes
|
||||
indirect_only = indirect_nodes.difference(direct_nodes)
|
||||
return direct_nodes, indirect_only
|
||||
|
||||
def _is_graph_member(self, unique_id: UniqueId) -> bool:
|
||||
if unique_id in self.manifest.sources:
|
||||
@@ -213,6 +230,8 @@ class NodeSelector(MethodManager):
|
||||
# - If ANY parent is missing, return it separately. We'll keep it around
|
||||
# for later and see if its other parents show up.
|
||||
# We use this for INCLUSION.
|
||||
# Users can also opt in to inclusive GREEDY mode by passing --greedy flag,
|
||||
# or by specifying `greedy: true` in a yaml selector
|
||||
|
||||
direct_nodes = set(selected)
|
||||
indirect_nodes = set()
|
||||
@@ -251,15 +270,24 @@ class NodeSelector(MethodManager):
|
||||
|
||||
- node selection. Based on the include/exclude sets, the set
|
||||
of matched unique IDs is returned
|
||||
- expand the graph at each leaf node, before combination
|
||||
- selectors might override this. for example, this is where
|
||||
tests are added
|
||||
- includes direct + indirect selection (for tests)
|
||||
- filtering:
|
||||
- selectors can filter the nodes after all of them have been
|
||||
selected
|
||||
"""
|
||||
selected_nodes = self.select_nodes(spec)
|
||||
selected_nodes, indirect_only = self.select_nodes(spec)
|
||||
filtered_nodes = self.filter_selection(selected_nodes)
|
||||
|
||||
if indirect_only:
|
||||
filtered_unused_nodes = self.filter_selection(indirect_only)
|
||||
if filtered_unused_nodes and spec.greedy_warning:
|
||||
# log anything that didn't make the cut
|
||||
unused_node_names = []
|
||||
for unique_id in filtered_unused_nodes:
|
||||
name = self.manifest.nodes[unique_id].name
|
||||
unused_node_names.append(name)
|
||||
alert_unused_nodes(spec, unused_node_names)
|
||||
|
||||
return filtered_nodes
|
||||
|
||||
def get_graph_queue(self, spec: SelectionSpec) -> GraphQueue:
|
||||
|
||||
@@ -67,6 +67,7 @@ class SelectionCriteria:
|
||||
children: bool
|
||||
children_depth: Optional[int]
|
||||
greedy: bool = False
|
||||
greedy_warning: bool = False # do not raise warning for yaml selectors
|
||||
|
||||
def __post_init__(self):
|
||||
if self.children and self.childrens_parents:
|
||||
@@ -124,11 +125,11 @@ class SelectionCriteria:
|
||||
parents_depth=parents_depth,
|
||||
children=bool(dct.get('children')),
|
||||
children_depth=children_depth,
|
||||
greedy=greedy
|
||||
greedy=(greedy or bool(dct.get('greedy'))),
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def dict_from_single_spec(cls, raw: str, greedy: bool = False):
|
||||
def dict_from_single_spec(cls, raw: str):
|
||||
result = RAW_SELECTOR_PATTERN.match(raw)
|
||||
if result is None:
|
||||
return {'error': 'Invalid selector spec'}
|
||||
@@ -145,6 +146,8 @@ class SelectionCriteria:
|
||||
dct['parents'] = bool(dct.get('parents'))
|
||||
if 'children' in dct:
|
||||
dct['children'] = bool(dct.get('children'))
|
||||
if 'greedy' in dct:
|
||||
dct['greedy'] = bool(dct.get('greedy'))
|
||||
return dct
|
||||
|
||||
@classmethod
|
||||
@@ -162,10 +165,12 @@ class BaseSelectionGroup(Iterable[SelectionSpec], metaclass=ABCMeta):
|
||||
self,
|
||||
components: Iterable[SelectionSpec],
|
||||
expect_exists: bool = False,
|
||||
greedy_warning: bool = True,
|
||||
raw: Any = None,
|
||||
):
|
||||
self.components: List[SelectionSpec] = list(components)
|
||||
self.expect_exists = expect_exists
|
||||
self.greedy_warning = greedy_warning
|
||||
self.raw = raw
|
||||
|
||||
def __iter__(self) -> Iterator[SelectionSpec]:
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
{% macro get_columns_in_query(select_sql) -%}
|
||||
{{ return(adapter.dispatch('get_columns_in_query')(select_sql)) }}
|
||||
{{ return(adapter.dispatch('get_columns_in_query', 'dbt')(select_sql)) }}
|
||||
{% endmacro %}
|
||||
|
||||
{% macro default__get_columns_in_query(select_sql) %}
|
||||
@@ -15,7 +15,7 @@
|
||||
{% endmacro %}
|
||||
|
||||
{% macro create_schema(relation) -%}
|
||||
{{ adapter.dispatch('create_schema')(relation) }}
|
||||
{{ adapter.dispatch('create_schema', 'dbt')(relation) }}
|
||||
{% endmacro %}
|
||||
|
||||
{% macro default__create_schema(relation) -%}
|
||||
@@ -25,7 +25,7 @@
|
||||
{% endmacro %}
|
||||
|
||||
{% macro drop_schema(relation) -%}
|
||||
{{ adapter.dispatch('drop_schema')(relation) }}
|
||||
{{ adapter.dispatch('drop_schema', 'dbt')(relation) }}
|
||||
{% endmacro %}
|
||||
|
||||
{% macro default__drop_schema(relation) -%}
|
||||
@@ -35,7 +35,7 @@
|
||||
{% endmacro %}
|
||||
|
||||
{% macro create_table_as(temporary, relation, sql) -%}
|
||||
{{ adapter.dispatch('create_table_as')(temporary, relation, sql) }}
|
||||
{{ adapter.dispatch('create_table_as', 'dbt')(temporary, relation, sql) }}
|
||||
{%- endmacro %}
|
||||
|
||||
{% macro default__create_table_as(temporary, relation, sql) -%}
|
||||
@@ -52,7 +52,7 @@
|
||||
{% endmacro %}
|
||||
|
||||
{% macro get_create_index_sql(relation, index_dict) -%}
|
||||
{{ return(adapter.dispatch('get_create_index_sql')(relation, index_dict)) }}
|
||||
{{ return(adapter.dispatch('get_create_index_sql', 'dbt')(relation, index_dict)) }}
|
||||
{% endmacro %}
|
||||
|
||||
{% macro default__get_create_index_sql(relation, index_dict) -%}
|
||||
@@ -60,7 +60,7 @@
|
||||
{% endmacro %}
|
||||
|
||||
{% macro create_indexes(relation) -%}
|
||||
{{ adapter.dispatch('create_indexes')(relation) }}
|
||||
{{ adapter.dispatch('create_indexes', 'dbt')(relation) }}
|
||||
{%- endmacro %}
|
||||
|
||||
{% macro default__create_indexes(relation) -%}
|
||||
@@ -75,7 +75,7 @@
|
||||
{% endmacro %}
|
||||
|
||||
{% macro create_view_as(relation, sql) -%}
|
||||
{{ adapter.dispatch('create_view_as')(relation, sql) }}
|
||||
{{ adapter.dispatch('create_view_as', 'dbt')(relation, sql) }}
|
||||
{%- endmacro %}
|
||||
|
||||
{% macro default__create_view_as(relation, sql) -%}
|
||||
@@ -89,7 +89,7 @@
|
||||
|
||||
|
||||
{% macro get_catalog(information_schema, schemas) -%}
|
||||
{{ return(adapter.dispatch('get_catalog')(information_schema, schemas)) }}
|
||||
{{ return(adapter.dispatch('get_catalog', 'dbt')(information_schema, schemas)) }}
|
||||
{%- endmacro %}
|
||||
|
||||
{% macro default__get_catalog(information_schema, schemas) -%}
|
||||
@@ -104,7 +104,7 @@
|
||||
|
||||
|
||||
{% macro get_columns_in_relation(relation) -%}
|
||||
{{ return(adapter.dispatch('get_columns_in_relation')(relation)) }}
|
||||
{{ return(adapter.dispatch('get_columns_in_relation', 'dbt')(relation)) }}
|
||||
{% endmacro %}
|
||||
|
||||
{% macro sql_convert_columns_in_relation(table) -%}
|
||||
@@ -121,13 +121,13 @@
|
||||
{% endmacro %}
|
||||
|
||||
{% macro alter_column_type(relation, column_name, new_column_type) -%}
|
||||
{{ return(adapter.dispatch('alter_column_type')(relation, column_name, new_column_type)) }}
|
||||
{{ return(adapter.dispatch('alter_column_type', 'dbt')(relation, column_name, new_column_type)) }}
|
||||
{% endmacro %}
|
||||
|
||||
|
||||
|
||||
{% macro alter_column_comment(relation, column_dict) -%}
|
||||
{{ return(adapter.dispatch('alter_column_comment')(relation, column_dict)) }}
|
||||
{{ return(adapter.dispatch('alter_column_comment', 'dbt')(relation, column_dict)) }}
|
||||
{% endmacro %}
|
||||
|
||||
{% macro default__alter_column_comment(relation, column_dict) -%}
|
||||
@@ -136,7 +136,7 @@
|
||||
{% endmacro %}
|
||||
|
||||
{% macro alter_relation_comment(relation, relation_comment) -%}
|
||||
{{ return(adapter.dispatch('alter_relation_comment')(relation, relation_comment)) }}
|
||||
{{ return(adapter.dispatch('alter_relation_comment', 'dbt')(relation, relation_comment)) }}
|
||||
{% endmacro %}
|
||||
|
||||
{% macro default__alter_relation_comment(relation, relation_comment) -%}
|
||||
@@ -145,7 +145,7 @@
|
||||
{% endmacro %}
|
||||
|
||||
{% macro persist_docs(relation, model, for_relation=true, for_columns=true) -%}
|
||||
{{ return(adapter.dispatch('persist_docs')(relation, model, for_relation, for_columns)) }}
|
||||
{{ return(adapter.dispatch('persist_docs', 'dbt')(relation, model, for_relation, for_columns)) }}
|
||||
{% endmacro %}
|
||||
|
||||
{% macro default__persist_docs(relation, model, for_relation, for_columns) -%}
|
||||
@@ -180,7 +180,7 @@
|
||||
|
||||
|
||||
{% macro drop_relation(relation) -%}
|
||||
{{ return(adapter.dispatch('drop_relation')(relation)) }}
|
||||
{{ return(adapter.dispatch('drop_relation', 'dbt')(relation)) }}
|
||||
{% endmacro %}
|
||||
|
||||
|
||||
@@ -191,7 +191,7 @@
|
||||
{% endmacro %}
|
||||
|
||||
{% macro truncate_relation(relation) -%}
|
||||
{{ return(adapter.dispatch('truncate_relation')(relation)) }}
|
||||
{{ return(adapter.dispatch('truncate_relation', 'dbt')(relation)) }}
|
||||
{% endmacro %}
|
||||
|
||||
|
||||
@@ -202,7 +202,7 @@
|
||||
{% endmacro %}
|
||||
|
||||
{% macro rename_relation(from_relation, to_relation) -%}
|
||||
{{ return(adapter.dispatch('rename_relation')(from_relation, to_relation)) }}
|
||||
{{ return(adapter.dispatch('rename_relation', 'dbt')(from_relation, to_relation)) }}
|
||||
{% endmacro %}
|
||||
|
||||
{% macro default__rename_relation(from_relation, to_relation) -%}
|
||||
@@ -214,7 +214,7 @@
|
||||
|
||||
|
||||
{% macro information_schema_name(database) %}
|
||||
{{ return(adapter.dispatch('information_schema_name')(database)) }}
|
||||
{{ return(adapter.dispatch('information_schema_name', 'dbt')(database)) }}
|
||||
{% endmacro %}
|
||||
|
||||
{% macro default__information_schema_name(database) -%}
|
||||
@@ -227,7 +227,7 @@
|
||||
|
||||
|
||||
{% macro list_schemas(database) -%}
|
||||
{{ return(adapter.dispatch('list_schemas')(database)) }}
|
||||
{{ return(adapter.dispatch('list_schemas', 'dbt')(database)) }}
|
||||
{% endmacro %}
|
||||
|
||||
{% macro default__list_schemas(database) -%}
|
||||
@@ -241,7 +241,7 @@
|
||||
|
||||
|
||||
{% macro check_schema_exists(information_schema, schema) -%}
|
||||
{{ return(adapter.dispatch('check_schema_exists')(information_schema, schema)) }}
|
||||
{{ return(adapter.dispatch('check_schema_exists', 'dbt')(information_schema, schema)) }}
|
||||
{% endmacro %}
|
||||
|
||||
{% macro default__check_schema_exists(information_schema, schema) -%}
|
||||
@@ -256,7 +256,7 @@
|
||||
|
||||
|
||||
{% macro list_relations_without_caching(schema_relation) %}
|
||||
{{ return(adapter.dispatch('list_relations_without_caching')(schema_relation)) }}
|
||||
{{ return(adapter.dispatch('list_relations_without_caching', 'dbt')(schema_relation)) }}
|
||||
{% endmacro %}
|
||||
|
||||
|
||||
@@ -267,7 +267,7 @@
|
||||
|
||||
|
||||
{% macro current_timestamp() -%}
|
||||
{{ adapter.dispatch('current_timestamp')() }}
|
||||
{{ adapter.dispatch('current_timestamp', 'dbt')() }}
|
||||
{%- endmacro %}
|
||||
|
||||
|
||||
@@ -278,7 +278,7 @@
|
||||
|
||||
|
||||
{% macro collect_freshness(source, loaded_at_field, filter) %}
|
||||
{{ return(adapter.dispatch('collect_freshness')(source, loaded_at_field, filter))}}
|
||||
{{ return(adapter.dispatch('collect_freshness', 'dbt')(source, loaded_at_field, filter))}}
|
||||
{% endmacro %}
|
||||
|
||||
|
||||
@@ -296,7 +296,7 @@
|
||||
{% endmacro %}
|
||||
|
||||
{% macro make_temp_relation(base_relation, suffix='__dbt_tmp') %}
|
||||
{{ return(adapter.dispatch('make_temp_relation')(base_relation, suffix))}}
|
||||
{{ return(adapter.dispatch('make_temp_relation', 'dbt')(base_relation, suffix))}}
|
||||
{% endmacro %}
|
||||
|
||||
{% macro default__make_temp_relation(base_relation, suffix) %}
|
||||
@@ -313,7 +313,7 @@
|
||||
|
||||
|
||||
{% macro alter_relation_add_remove_columns(relation, add_columns = none, remove_columns = none) -%}
|
||||
{{ return(adapter.dispatch('alter_relation_add_remove_columns')(relation, add_columns, remove_columns)) }}
|
||||
{{ return(adapter.dispatch('alter_relation_add_remove_columns', 'dbt')(relation, add_columns, remove_columns)) }}
|
||||
{% endmacro %}
|
||||
|
||||
{% macro default__alter_relation_add_remove_columns(relation, add_columns, remove_columns) %}
|
||||
|
||||
@@ -13,6 +13,10 @@
|
||||
|
||||
#}
|
||||
{% macro generate_alias_name(custom_alias_name=none, node=none) -%}
|
||||
{% do return(adapter.dispatch('generate_alias_name', 'dbt')(custom_alias_name, node)) %}
|
||||
{%- endmacro %}
|
||||
|
||||
{% macro default__generate_alias_name(custom_alias_name=none, node=none) -%}
|
||||
|
||||
{%- if custom_alias_name is none -%}
|
||||
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
|
||||
#}
|
||||
{% macro generate_database_name(custom_database_name=none, node=none) -%}
|
||||
{% do return(adapter.dispatch('generate_database_name')(custom_database_name, node)) %}
|
||||
{% do return(adapter.dispatch('generate_database_name', 'dbt')(custom_database_name, node)) %}
|
||||
{%- endmacro %}
|
||||
|
||||
{% macro default__generate_database_name(custom_database_name=none, node=none) -%}
|
||||
|
||||
@@ -15,6 +15,10 @@
|
||||
|
||||
#}
|
||||
{% macro generate_schema_name(custom_schema_name, node) -%}
|
||||
{{ return(adapter.dispatch('generate_schema_name', 'dbt')(custom_schema_name, node)) }}
|
||||
{% endmacro %}
|
||||
|
||||
{% macro default__generate_schema_name(custom_schema_name, node) -%}
|
||||
|
||||
{%- set default_schema = target.schema -%}
|
||||
{%- if custom_schema_name is none -%}
|
||||
|
||||
@@ -0,0 +1,15 @@
|
||||
{% macro get_where_subquery(relation) -%}
|
||||
{% do return(adapter.dispatch('get_where_subquery')(relation)) %}
|
||||
{%- endmacro %}
|
||||
|
||||
{% macro default__get_where_subquery(relation) -%}
|
||||
{% set where = config.get('where', '') %}
|
||||
{% if where %}
|
||||
{%- set filtered -%}
|
||||
(select * from {{ relation }} where {{ where }}) dbt_subquery
|
||||
{%- endset -%}
|
||||
{% do return(filtered) %}
|
||||
{%- else -%}
|
||||
{% do return(relation) %}
|
||||
{%- endif -%}
|
||||
{%- endmacro %}
|
||||
@@ -1,17 +1,17 @@
|
||||
|
||||
|
||||
{% macro get_merge_sql(target, source, unique_key, dest_columns, predicates=none) -%}
|
||||
{{ adapter.dispatch('get_merge_sql')(target, source, unique_key, dest_columns, predicates) }}
|
||||
{{ adapter.dispatch('get_merge_sql', 'dbt')(target, source, unique_key, dest_columns, predicates) }}
|
||||
{%- endmacro %}
|
||||
|
||||
|
||||
{% macro get_delete_insert_merge_sql(target, source, unique_key, dest_columns) -%}
|
||||
{{ adapter.dispatch('get_delete_insert_merge_sql')(target, source, unique_key, dest_columns) }}
|
||||
{{ adapter.dispatch('get_delete_insert_merge_sql', 'dbt')(target, source, unique_key, dest_columns) }}
|
||||
{%- endmacro %}
|
||||
|
||||
|
||||
{% macro get_insert_overwrite_merge_sql(target, source, dest_columns, predicates, include_sql_header=false) -%}
|
||||
{{ adapter.dispatch('get_insert_overwrite_merge_sql')(target, source, dest_columns, predicates, include_sql_header) }}
|
||||
{{ adapter.dispatch('get_insert_overwrite_merge_sql', 'dbt')(target, source, dest_columns, predicates, include_sql_header) }}
|
||||
{%- endmacro %}
|
||||
|
||||
|
||||
|
||||
@@ -1,14 +1,6 @@
|
||||
|
||||
{% macro create_csv_table(model, agate_table) -%}
|
||||
{{ adapter.dispatch('create_csv_table')(model, agate_table) }}
|
||||
{%- endmacro %}
|
||||
|
||||
{% macro reset_csv_table(model, full_refresh, old_relation, agate_table) -%}
|
||||
{{ adapter.dispatch('reset_csv_table')(model, full_refresh, old_relation, agate_table) }}
|
||||
{%- endmacro %}
|
||||
|
||||
{% macro load_csv_rows(model, agate_table) -%}
|
||||
{{ adapter.dispatch('load_csv_rows')(model, agate_table) }}
|
||||
{{ adapter.dispatch('create_csv_table', 'dbt')(model, agate_table) }}
|
||||
{%- endmacro %}
|
||||
|
||||
{% macro default__create_csv_table(model, agate_table) %}
|
||||
@@ -33,6 +25,9 @@
|
||||
{{ return(sql) }}
|
||||
{% endmacro %}
|
||||
|
||||
{% macro reset_csv_table(model, full_refresh, old_relation, agate_table) -%}
|
||||
{{ adapter.dispatch('reset_csv_table', 'dbt')(model, full_refresh, old_relation, agate_table) }}
|
||||
{%- endmacro %}
|
||||
|
||||
{% macro default__reset_csv_table(model, full_refresh, old_relation, agate_table) %}
|
||||
{% set sql = "" %}
|
||||
@@ -47,6 +42,21 @@
|
||||
{{ return(sql) }}
|
||||
{% endmacro %}
|
||||
|
||||
{% macro get_binding_char() -%}
|
||||
{{ adapter.dispatch('get_binding_char', 'dbt')() }}
|
||||
{%- endmacro %}
|
||||
|
||||
{% macro default__get_binding_char() %}
|
||||
{{ return('%s') }}
|
||||
{% endmacro %}
|
||||
|
||||
{% macro get_batch_size() -%}
|
||||
{{ adapter.dispatch('get_batch_size', 'dbt')() }}
|
||||
{%- endmacro %}
|
||||
|
||||
{% macro default__get_batch_size() %}
|
||||
{{ return(10000) }}
|
||||
{% endmacro %}
|
||||
|
||||
{% macro get_seed_column_quoted_csv(model, column_names) %}
|
||||
{%- set quote_seed_column = model['config'].get('quote_columns', None) -%}
|
||||
@@ -59,47 +69,47 @@
|
||||
{{ return(dest_cols_csv) }}
|
||||
{% endmacro %}
|
||||
|
||||
|
||||
{% macro basic_load_csv_rows(model, batch_size, agate_table) %}
|
||||
{% set cols_sql = get_seed_column_quoted_csv(model, agate_table.column_names) %}
|
||||
{% set bindings = [] %}
|
||||
|
||||
{% set statements = [] %}
|
||||
|
||||
{% for chunk in agate_table.rows | batch(batch_size) %}
|
||||
{% set bindings = [] %}
|
||||
|
||||
{% for row in chunk %}
|
||||
{% do bindings.extend(row) %}
|
||||
{% endfor %}
|
||||
|
||||
{% set sql %}
|
||||
insert into {{ this.render() }} ({{ cols_sql }}) values
|
||||
{% for row in chunk -%}
|
||||
({%- for column in agate_table.column_names -%}
|
||||
%s
|
||||
{%- if not loop.last%},{%- endif %}
|
||||
{%- endfor -%})
|
||||
{%- if not loop.last%},{%- endif %}
|
||||
{%- endfor %}
|
||||
{% endset %}
|
||||
|
||||
{% do adapter.add_query(sql, bindings=bindings, abridge_sql_log=True) %}
|
||||
|
||||
{% if loop.index0 == 0 %}
|
||||
{% do statements.append(sql) %}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
|
||||
{# Return SQL so we can render it out into the compiled files #}
|
||||
{{ return(statements[0]) }}
|
||||
{% endmacro %}
|
||||
|
||||
{% macro load_csv_rows(model, agate_table) -%}
|
||||
{{ adapter.dispatch('load_csv_rows', 'dbt')(model, agate_table) }}
|
||||
{%- endmacro %}
|
||||
|
||||
{% macro default__load_csv_rows(model, agate_table) %}
|
||||
{{ return(basic_load_csv_rows(model, 10000, agate_table) )}}
|
||||
{% endmacro %}
|
||||
|
||||
{% set batch_size = get_batch_size() %}
|
||||
|
||||
{% set cols_sql = get_seed_column_quoted_csv(model, agate_table.column_names) %}
|
||||
{% set bindings = [] %}
|
||||
|
||||
{% set statements = [] %}
|
||||
|
||||
{% for chunk in agate_table.rows | batch(batch_size) %}
|
||||
{% set bindings = [] %}
|
||||
|
||||
{% for row in chunk %}
|
||||
{% do bindings.extend(row) %}
|
||||
{% endfor %}
|
||||
|
||||
{% set sql %}
|
||||
insert into {{ this.render() }} ({{ cols_sql }}) values
|
||||
{% for row in chunk -%}
|
||||
({%- for column in agate_table.column_names -%}
|
||||
{{ get_binding_char() }}
|
||||
{%- if not loop.last%},{%- endif %}
|
||||
{%- endfor -%})
|
||||
{%- if not loop.last%},{%- endif %}
|
||||
{%- endfor %}
|
||||
{% endset %}
|
||||
|
||||
{% do adapter.add_query(sql, bindings=bindings, abridge_sql_log=True) %}
|
||||
|
||||
{% if loop.index0 == 0 %}
|
||||
{% do statements.append(sql) %}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
|
||||
{# Return SQL so we can render it out into the compiled files #}
|
||||
{{ return(statements[0]) }}
|
||||
{% endmacro %}
|
||||
|
||||
{% materialization seed, default %}
|
||||
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
Add new columns to the table if applicable
|
||||
#}
|
||||
{% macro create_columns(relation, columns) %}
|
||||
{{ adapter.dispatch('create_columns')(relation, columns) }}
|
||||
{{ adapter.dispatch('create_columns', 'dbt')(relation, columns) }}
|
||||
{% endmacro %}
|
||||
|
||||
{% macro default__create_columns(relation, columns) %}
|
||||
@@ -15,7 +15,7 @@
|
||||
|
||||
|
||||
{% macro post_snapshot(staging_relation) %}
|
||||
{{ adapter.dispatch('post_snapshot')(staging_relation) }}
|
||||
{{ adapter.dispatch('post_snapshot', 'dbt')(staging_relation) }}
|
||||
{% endmacro %}
|
||||
|
||||
{% macro default__post_snapshot(staging_relation) %}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
|
||||
{% macro snapshot_merge_sql(target, source, insert_cols) -%}
|
||||
{{ adapter.dispatch('snapshot_merge_sql')(target, source, insert_cols) }}
|
||||
{{ adapter.dispatch('snapshot_merge_sql', 'dbt')(target, source, insert_cols) }}
|
||||
{%- endmacro %}
|
||||
|
||||
|
||||
|
||||
@@ -36,7 +36,7 @@
|
||||
Create SCD Hash SQL fields cross-db
|
||||
#}
|
||||
{% macro snapshot_hash_arguments(args) -%}
|
||||
{{ adapter.dispatch('snapshot_hash_arguments')(args) }}
|
||||
{{ adapter.dispatch('snapshot_hash_arguments', 'dbt')(args) }}
|
||||
{%- endmacro %}
|
||||
|
||||
|
||||
@@ -52,7 +52,7 @@
|
||||
Get the current time cross-db
|
||||
#}
|
||||
{% macro snapshot_get_time() -%}
|
||||
{{ adapter.dispatch('snapshot_get_time')() }}
|
||||
{{ adapter.dispatch('snapshot_get_time', 'dbt')() }}
|
||||
{%- endmacro %}
|
||||
|
||||
{% macro default__snapshot_get_time() -%}
|
||||
@@ -75,7 +75,7 @@
|
||||
table instead of assuming that the user-supplied {{ updated_at }}
|
||||
will be present in the historical data.
|
||||
|
||||
See https://github.com/fishtown-analytics/dbt/issues/2350
|
||||
See https://github.com/dbt-labs/dbt/issues/2350
|
||||
*/ #}
|
||||
{% set row_changed_expr -%}
|
||||
({{ snapshotted_rel }}.dbt_valid_from < {{ current_rel }}.{{ updated_at }})
|
||||
@@ -94,7 +94,7 @@
|
||||
|
||||
|
||||
{% macro snapshot_string_as_time(timestamp) -%}
|
||||
{{ adapter.dispatch('snapshot_string_as_time')(timestamp) }}
|
||||
{{ adapter.dispatch('snapshot_string_as_time', 'dbt')(timestamp) }}
|
||||
{%- endmacro %}
|
||||
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
{% macro get_test_sql(main_sql, fail_calc, warn_if, error_if, limit) -%}
|
||||
{{ adapter.dispatch('get_test_sql')(main_sql, fail_calc, warn_if, error_if, limit) }}
|
||||
{{ adapter.dispatch('get_test_sql', 'dbt')(main_sql, fail_calc, warn_if, error_if, limit) }}
|
||||
{%- endmacro %}
|
||||
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
|
||||
{% macro handle_existing_table(full_refresh, old_relation) %}
|
||||
{{ adapter.dispatch('handle_existing_table', macro_namespace = 'dbt')(full_refresh, old_relation) }}
|
||||
{{ adapter.dispatch('handle_existing_table', 'dbt')(full_refresh, old_relation) }}
|
||||
{% endmacro %}
|
||||
|
||||
{% macro default__handle_existing_table(full_refresh, old_relation) %}
|
||||
|
||||
@@ -7,7 +7,7 @@ with all_values as (
|
||||
count(*) as n_records
|
||||
|
||||
from {{ model }}
|
||||
group by 1
|
||||
group by {{ column_name }}
|
||||
|
||||
)
|
||||
|
||||
@@ -28,6 +28,6 @@ where value_field not in (
|
||||
|
||||
|
||||
{% test accepted_values(model, column_name, values, quote=True) %}
|
||||
{% set macro = adapter.dispatch('test_accepted_values') %}
|
||||
{% set macro = adapter.dispatch('test_accepted_values', 'dbt') %}
|
||||
{{ macro(model, column_name, values, quote) }}
|
||||
{% endtest %}
|
||||
|
||||
@@ -8,6 +8,6 @@ where {{ column_name }} is null
|
||||
|
||||
|
||||
{% test not_null(model, column_name) %}
|
||||
{% set macro = adapter.dispatch('test_not_null') %}
|
||||
{% set macro = adapter.dispatch('test_not_null', 'dbt') %}
|
||||
{{ macro(model, column_name) }}
|
||||
{% endtest %}
|
||||
|
||||
@@ -2,27 +2,29 @@
|
||||
{% macro default__test_relationships(model, column_name, to, field) %}
|
||||
|
||||
with child as (
|
||||
select * from {{ model }}
|
||||
select {{ column_name }} as from_field
|
||||
from {{ model }}
|
||||
where {{ column_name }} is not null
|
||||
),
|
||||
|
||||
parent as (
|
||||
select * from {{ to }}
|
||||
select {{ field }} as to_field
|
||||
from {{ to }}
|
||||
)
|
||||
|
||||
select
|
||||
child.{{ column_name }}
|
||||
from_field
|
||||
|
||||
from child
|
||||
left join parent
|
||||
on child.{{ column_name }} = parent.{{ field }}
|
||||
on child.from_field = parent.to_field
|
||||
|
||||
where parent.{{ field }} is null
|
||||
where parent.to_field is null
|
||||
|
||||
{% endmacro %}
|
||||
|
||||
|
||||
{% test relationships(model, column_name, to, field) %}
|
||||
{% set macro = adapter.dispatch('test_relationships') %}
|
||||
{% set macro = adapter.dispatch('test_relationships', 'dbt') %}
|
||||
{{ macro(model, column_name, to, field) }}
|
||||
{% endtest %}
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
{% macro default__test_unique(model, column_name) %}
|
||||
|
||||
select
|
||||
{{ column_name }},
|
||||
{{ column_name }} as unique_field,
|
||||
count(*) as n_records
|
||||
|
||||
from {{ model }}
|
||||
@@ -13,6 +13,6 @@ having count(*) > 1
|
||||
|
||||
|
||||
{% test unique(model, column_name) %}
|
||||
{% set macro = adapter.dispatch('test_unique') %}
|
||||
{% set macro = adapter.dispatch('test_unique', 'dbt') %}
|
||||
{{ macro(model, column_name) }}
|
||||
{% endtest %}
|
||||
|
||||
178
core/dbt/main.py
178
core/dbt/main.py
@@ -10,23 +10,23 @@ from pathlib import Path
|
||||
|
||||
import dbt.version
|
||||
import dbt.flags as flags
|
||||
import dbt.task.run as run_task
|
||||
import dbt.task.build as build_task
|
||||
import dbt.task.clean as clean_task
|
||||
import dbt.task.compile as compile_task
|
||||
import dbt.task.debug as debug_task
|
||||
import dbt.task.clean as clean_task
|
||||
import dbt.task.deps as deps_task
|
||||
import dbt.task.init as init_task
|
||||
import dbt.task.seed as seed_task
|
||||
import dbt.task.test as test_task
|
||||
import dbt.task.snapshot as snapshot_task
|
||||
import dbt.task.generate as generate_task
|
||||
import dbt.task.serve as serve_task
|
||||
import dbt.task.freshness as freshness_task
|
||||
import dbt.task.run_operation as run_operation_task
|
||||
import dbt.task.generate as generate_task
|
||||
import dbt.task.init as init_task
|
||||
import dbt.task.list as list_task
|
||||
import dbt.task.parse as parse_task
|
||||
import dbt.task.run as run_task
|
||||
import dbt.task.run_operation as run_operation_task
|
||||
import dbt.task.seed as seed_task
|
||||
import dbt.task.serve as serve_task
|
||||
import dbt.task.snapshot as snapshot_task
|
||||
import dbt.task.test as test_task
|
||||
from dbt.profiler import profiler
|
||||
from dbt.task.list import ListTask
|
||||
from dbt.task.rpc.server import RPCServerTask
|
||||
from dbt.adapters.factory import reset_adapters, cleanup_connections
|
||||
|
||||
@@ -399,6 +399,40 @@ def _build_build_subparser(subparsers, base_subparser):
|
||||
Stop execution upon a first failure.
|
||||
'''
|
||||
)
|
||||
sub.add_argument(
|
||||
'--store-failures',
|
||||
action='store_true',
|
||||
help='''
|
||||
Store test results (failing rows) in the database
|
||||
'''
|
||||
)
|
||||
sub.add_argument(
|
||||
'--greedy',
|
||||
action='store_true',
|
||||
help='''
|
||||
Select all tests that touch the selected resources,
|
||||
even if they also depend on unselected resources
|
||||
'''
|
||||
)
|
||||
resource_values: List[str] = [
|
||||
str(s) for s in build_task.BuildTask.ALL_RESOURCE_VALUES
|
||||
] + ['all']
|
||||
sub.add_argument('--resource-type',
|
||||
choices=resource_values,
|
||||
action='append',
|
||||
default=[],
|
||||
dest='resource_types')
|
||||
# explicity don't support --models
|
||||
sub.add_argument(
|
||||
'-s',
|
||||
'--select',
|
||||
dest='select',
|
||||
nargs='+',
|
||||
help='''
|
||||
Specify the nodes to include.
|
||||
''',
|
||||
)
|
||||
_add_common_selector_arguments(sub)
|
||||
return sub
|
||||
|
||||
|
||||
@@ -554,39 +588,6 @@ def _build_docs_generate_subparser(subparsers, base_subparser):
|
||||
return generate_sub
|
||||
|
||||
|
||||
def _add_models_argument(sub, help_override=None, **kwargs):
|
||||
help_str = '''
|
||||
Specify the models to include.
|
||||
'''
|
||||
if help_override is not None:
|
||||
help_str = help_override
|
||||
sub.add_argument(
|
||||
'-m',
|
||||
'--models',
|
||||
dest='models',
|
||||
nargs='+',
|
||||
help=help_str,
|
||||
**kwargs
|
||||
)
|
||||
|
||||
|
||||
def _add_select_argument(sub, dest='models', help_override=None, **kwargs):
|
||||
help_str = '''
|
||||
Specify the nodes to include.
|
||||
'''
|
||||
if help_override is not None:
|
||||
help_str = help_override
|
||||
|
||||
sub.add_argument(
|
||||
'-s',
|
||||
'--select',
|
||||
dest=dest,
|
||||
nargs='+',
|
||||
help=help_str,
|
||||
**kwargs
|
||||
)
|
||||
|
||||
|
||||
def _add_common_selector_arguments(sub):
|
||||
sub.add_argument(
|
||||
'--exclude',
|
||||
@@ -615,17 +616,26 @@ def _add_common_selector_arguments(sub):
|
||||
)
|
||||
|
||||
|
||||
def _add_selection_arguments(*subparsers, **kwargs):
|
||||
models_name = kwargs.get('models_name', 'models')
|
||||
def _add_selection_arguments(*subparsers):
|
||||
for sub in subparsers:
|
||||
if models_name == 'models':
|
||||
_add_models_argument(sub)
|
||||
elif models_name == 'select':
|
||||
# these still get stored in 'models', so they present the same
|
||||
# interface to the task
|
||||
_add_select_argument(sub)
|
||||
else:
|
||||
raise InternalException(f'Unknown models style {models_name}')
|
||||
sub.add_argument(
|
||||
'-m',
|
||||
'--models',
|
||||
dest='select',
|
||||
nargs='+',
|
||||
help='''
|
||||
Specify the nodes to include.
|
||||
''',
|
||||
)
|
||||
sub.add_argument(
|
||||
'-s',
|
||||
'--select',
|
||||
dest='select',
|
||||
nargs='+',
|
||||
help='''
|
||||
Specify the nodes to include.
|
||||
''',
|
||||
)
|
||||
_add_common_selector_arguments(sub)
|
||||
|
||||
|
||||
@@ -635,7 +645,7 @@ def _add_table_mutability_arguments(*subparsers):
|
||||
'--full-refresh',
|
||||
action='store_true',
|
||||
help='''
|
||||
If specified, DBT will drop incremental models and
|
||||
If specified, dbt will drop incremental models and
|
||||
fully-recalculate the incremental table from the model definition.
|
||||
'''
|
||||
)
|
||||
@@ -751,6 +761,14 @@ def _build_test_subparser(subparsers, base_subparser):
|
||||
Store test results (failing rows) in the database
|
||||
'''
|
||||
)
|
||||
sub.add_argument(
|
||||
'--greedy',
|
||||
action='store_true',
|
||||
help='''
|
||||
Select all tests that touch the selected resources,
|
||||
even if they also depend on unselected resources
|
||||
'''
|
||||
)
|
||||
|
||||
sub.set_defaults(cls=test_task.TestTask, which='test', rpc_method='test')
|
||||
return sub
|
||||
@@ -787,11 +805,14 @@ def _build_source_freshness_subparser(subparsers, base_subparser):
|
||||
which='source-freshness',
|
||||
rpc_method='source-freshness',
|
||||
)
|
||||
_add_select_argument(
|
||||
sub,
|
||||
sub.add_argument(
|
||||
'-s',
|
||||
'--select',
|
||||
dest='select',
|
||||
metavar='SELECTOR',
|
||||
required=False,
|
||||
nargs='+',
|
||||
help='''
|
||||
Specify the nodes to include.
|
||||
''',
|
||||
)
|
||||
_add_common_selector_arguments(sub)
|
||||
return sub
|
||||
@@ -836,9 +857,9 @@ def _build_list_subparser(subparsers, base_subparser):
|
||||
''',
|
||||
aliases=['ls'],
|
||||
)
|
||||
sub.set_defaults(cls=ListTask, which='list', rpc_method=None)
|
||||
sub.set_defaults(cls=list_task.ListTask, which='list', rpc_method=None)
|
||||
resource_values: List[str] = [
|
||||
str(s) for s in ListTask.ALL_RESOURCE_VALUES
|
||||
str(s) for s in list_task.ListTask.ALL_RESOURCE_VALUES
|
||||
] + ['default', 'all']
|
||||
sub.add_argument('--resource-type',
|
||||
choices=resource_values,
|
||||
@@ -848,22 +869,39 @@ def _build_list_subparser(subparsers, base_subparser):
|
||||
sub.add_argument('--output',
|
||||
choices=['json', 'name', 'path', 'selector'],
|
||||
default='selector')
|
||||
sub.add_argument('--output-keys')
|
||||
|
||||
_add_models_argument(
|
||||
sub,
|
||||
help_override='''
|
||||
sub.add_argument(
|
||||
'-m',
|
||||
'--models',
|
||||
dest='models',
|
||||
nargs='+',
|
||||
help='''
|
||||
Specify the models to select and set the resource-type to 'model'.
|
||||
Mutually exclusive with '--select' (or '-s') and '--resource-type'
|
||||
''',
|
||||
metavar='SELECTOR',
|
||||
required=False
|
||||
required=False,
|
||||
)
|
||||
_add_select_argument(
|
||||
sub,
|
||||
sub.add_argument(
|
||||
'-s',
|
||||
'--select',
|
||||
dest='select',
|
||||
nargs='+',
|
||||
help='''
|
||||
Specify the nodes to include.
|
||||
''',
|
||||
metavar='SELECTOR',
|
||||
required=False,
|
||||
)
|
||||
sub.add_argument(
|
||||
'--greedy',
|
||||
action='store_true',
|
||||
help='''
|
||||
Select all tests that touch the selected resources,
|
||||
even if they also depend on unselected resources
|
||||
'''
|
||||
)
|
||||
_add_common_selector_arguments(sub)
|
||||
|
||||
return sub
|
||||
@@ -1006,8 +1044,6 @@ def parse_args(args, cls=DBTArgumentParser):
|
||||
enable_help='''
|
||||
Allow for partial parsing by looking for and writing to a pickle file
|
||||
in the target directory. This overrides the user configuration file.
|
||||
|
||||
WARNING: This can result in unexpected behavior if you use env_var()!
|
||||
''',
|
||||
disable_help='''
|
||||
Disallow partial parsing. This overrides the user configuration file.
|
||||
@@ -1073,10 +1109,10 @@ def parse_args(args, cls=DBTArgumentParser):
|
||||
# --threads, --no-version-check
|
||||
_add_common_arguments(run_sub, compile_sub, generate_sub, test_sub,
|
||||
rpc_sub, seed_sub, parse_sub, build_sub)
|
||||
# --models, --exclude
|
||||
# --select, --exclude
|
||||
# list_sub sets up its own arguments.
|
||||
_add_selection_arguments(build_sub, run_sub, compile_sub, generate_sub, test_sub)
|
||||
_add_selection_arguments(snapshot_sub, seed_sub, models_name='select')
|
||||
_add_selection_arguments(
|
||||
run_sub, compile_sub, generate_sub, test_sub, snapshot_sub, seed_sub)
|
||||
# --defer
|
||||
_add_defer_argument(run_sub, test_sub, build_sub)
|
||||
# --full-refresh
|
||||
|
||||
@@ -72,10 +72,13 @@ class HookParser(SimpleParser[HookBlock, ParsedHookNode]):
|
||||
|
||||
# Hooks are only in the dbt_project.yml file for the project
|
||||
def get_path(self) -> FilePath:
|
||||
# There ought to be an existing file object for this, but
|
||||
# until that is implemented use a dummy modification time
|
||||
path = FilePath(
|
||||
project_root=self.project.project_root,
|
||||
searched_path='.',
|
||||
relative_path='dbt_project.yml',
|
||||
modification_time=0.0,
|
||||
)
|
||||
return path
|
||||
|
||||
|
||||
@@ -203,8 +203,11 @@ class ManifestLoader:
|
||||
# used to get the SourceFiles from the manifest files.
|
||||
start_read_files = time.perf_counter()
|
||||
project_parser_files = {}
|
||||
saved_files = {}
|
||||
if self.saved_manifest:
|
||||
saved_files = self.saved_manifest.files
|
||||
for project in self.all_projects.values():
|
||||
read_files(project, self.manifest.files, project_parser_files)
|
||||
read_files(project, self.manifest.files, project_parser_files, saved_files)
|
||||
self._perf_info.path_count = len(self.manifest.files)
|
||||
self._perf_info.read_files_elapsed = (time.perf_counter() - start_read_files)
|
||||
|
||||
@@ -423,7 +426,7 @@ class ManifestLoader:
|
||||
if not self.partially_parsing and HookParser in parser_types:
|
||||
hook_parser = HookParser(project, self.manifest, self.root_project)
|
||||
path = hook_parser.get_path()
|
||||
file = load_source_file(path, ParseFileType.Hook, project.project_name)
|
||||
file = load_source_file(path, ParseFileType.Hook, project.project_name, {})
|
||||
if file:
|
||||
file_block = FileBlock(file)
|
||||
hook_parser.parse_file(file_block)
|
||||
@@ -648,7 +651,7 @@ class ManifestLoader:
|
||||
macro_parser = MacroParser(project, self.manifest)
|
||||
for path in macro_parser.get_paths():
|
||||
source_file = load_source_file(
|
||||
path, ParseFileType.Macro, project.project_name)
|
||||
path, ParseFileType.Macro, project.project_name, {})
|
||||
block = FileBlock(source_file)
|
||||
# This does not add the file to the manifest.files,
|
||||
# but that shouldn't be necessary here.
|
||||
|
||||
@@ -1,15 +1,17 @@
|
||||
from dbt.context.context_config import ContextConfig
|
||||
from dbt.contracts.graph.parsed import ParsedModelNode
|
||||
import dbt.flags as flags
|
||||
import dbt.tracking
|
||||
from dbt.logger import GLOBAL_LOGGER as logger
|
||||
from dbt.node_types import NodeType
|
||||
from dbt.parser.base import SimpleSQLParser
|
||||
from dbt.parser.search import FileBlock
|
||||
import dbt.tracking as tracking
|
||||
from dbt import utils
|
||||
from dbt_extractor import ExtractionError, py_extract_from_source # type: ignore
|
||||
from functools import reduce
|
||||
from itertools import chain
|
||||
import random
|
||||
from typing import Any, Dict, List
|
||||
from typing import Any, Dict, Iterator, List, Optional, Union
|
||||
|
||||
|
||||
class ModelParser(SimpleSQLParser[ParsedModelNode]):
|
||||
@@ -26,32 +28,52 @@ class ModelParser(SimpleSQLParser[ParsedModelNode]):
|
||||
def get_compiled_path(cls, block: FileBlock):
|
||||
return block.path.relative_path
|
||||
|
||||
# TODO when this is turned on by default, simplify the nasty if/else tree inside this method.
|
||||
def render_update(
|
||||
self, node: ParsedModelNode, config: ContextConfig
|
||||
) -> None:
|
||||
self.manifest._parsing_info.static_analysis_path_count += 1
|
||||
# TODO go back to 1/100 when this is turned on by default.
|
||||
# `True` roughly 1/50 times this function is called
|
||||
sample: bool = random.randint(1, 51) == 50
|
||||
|
||||
# `True` roughly 1/100 times this function is called
|
||||
sample: bool = random.randint(1, 101) == 100
|
||||
# top-level declaration of variables
|
||||
experimentally_parsed: Optional[Union[str, Dict[str, List[Any]]]] = None
|
||||
config_call_dict: Dict[str, Any] = {}
|
||||
source_calls: List[List[str]] = []
|
||||
|
||||
# run the experimental parser if the flag is on or if we're sampling
|
||||
if flags.USE_EXPERIMENTAL_PARSER or sample:
|
||||
try:
|
||||
experimentally_parsed: Dict[str, List[Any]] = py_extract_from_source(node.raw_sql)
|
||||
if self._has_banned_macro(node):
|
||||
# this log line is used for integration testing. If you change
|
||||
# the code at the beginning of the line change the tests in
|
||||
# test/integration/072_experimental_parser_tests/test_all_experimental_parser.py
|
||||
logger.debug(
|
||||
f"1601: parser fallback to jinja because of macro override for {node.path}"
|
||||
)
|
||||
experimentally_parsed = "has_banned_macro"
|
||||
else:
|
||||
# run the experimental parser and return the results
|
||||
try:
|
||||
experimentally_parsed = py_extract_from_source(
|
||||
node.raw_sql
|
||||
)
|
||||
logger.debug(f"1699: statically parsed {node.path}")
|
||||
# if we want information on what features are barring the experimental
|
||||
# parser from reading model files, this is where we would add that
|
||||
# since that information is stored in the `ExtractionError`.
|
||||
except ExtractionError:
|
||||
experimentally_parsed = "cannot_parse"
|
||||
|
||||
# second config format
|
||||
config_call_dict: Dict[str, Any] = {}
|
||||
for c in experimentally_parsed['configs']:
|
||||
ContextConfig._add_config_call(config_call_dict, {c[0]: c[1]})
|
||||
# if the parser succeeded, extract some data in easy-to-compare formats
|
||||
if isinstance(experimentally_parsed, dict):
|
||||
# create second config format
|
||||
for c in experimentally_parsed['configs']:
|
||||
ContextConfig._add_config_call(config_call_dict, {c[0]: c[1]})
|
||||
|
||||
# format sources TODO change extractor to match this type
|
||||
source_calls: List[List[str]] = []
|
||||
for s in experimentally_parsed['sources']:
|
||||
source_calls.append([s[0], s[1]])
|
||||
experimentally_parsed['sources'] = source_calls
|
||||
|
||||
except ExtractionError as e:
|
||||
experimentally_parsed = e
|
||||
# format sources TODO change extractor to match this type
|
||||
for s in experimentally_parsed['sources']:
|
||||
source_calls.append([s[0], s[1]])
|
||||
experimentally_parsed['sources'] = source_calls
|
||||
|
||||
# normal dbt run
|
||||
if not flags.USE_EXPERIMENTAL_PARSER:
|
||||
@@ -59,57 +81,19 @@ class ModelParser(SimpleSQLParser[ParsedModelNode]):
|
||||
super().render_update(node, config)
|
||||
# if we're sampling, compare for correctness
|
||||
if sample:
|
||||
result: List[str] = []
|
||||
# experimental parser couldn't parse
|
||||
if isinstance(experimentally_parsed, Exception):
|
||||
result += ["01_experimental_parser_cannot_parse"]
|
||||
else:
|
||||
# look for false positive configs
|
||||
for k in config_call_dict.keys():
|
||||
if k not in config._config_call_dict:
|
||||
result += ["02_false_positive_config_value"]
|
||||
break
|
||||
|
||||
# look for missed configs
|
||||
for k in config._config_call_dict.keys():
|
||||
if k not in config_call_dict:
|
||||
result += ["03_missed_config_value"]
|
||||
break
|
||||
|
||||
# look for false positive sources
|
||||
for s in experimentally_parsed['sources']:
|
||||
if s not in node.sources:
|
||||
result += ["04_false_positive_source_value"]
|
||||
break
|
||||
|
||||
# look for missed sources
|
||||
for s in node.sources:
|
||||
if s not in experimentally_parsed['sources']:
|
||||
result += ["05_missed_source_value"]
|
||||
break
|
||||
|
||||
# look for false positive refs
|
||||
for r in experimentally_parsed['refs']:
|
||||
if r not in node.refs:
|
||||
result += ["06_false_positive_ref_value"]
|
||||
break
|
||||
|
||||
# look for missed refs
|
||||
for r in node.refs:
|
||||
if r not in experimentally_parsed['refs']:
|
||||
result += ["07_missed_ref_value"]
|
||||
break
|
||||
|
||||
# if there are no errors, return a success value
|
||||
if not result:
|
||||
result = ["00_exact_match"]
|
||||
|
||||
result = _get_sample_result(
|
||||
experimentally_parsed,
|
||||
config_call_dict,
|
||||
source_calls,
|
||||
node,
|
||||
config
|
||||
)
|
||||
# fire a tracking event. this fires one event for every sample
|
||||
# so that we have data on a per file basis. Not only can we expect
|
||||
# no false positives or misses, we can expect the number model
|
||||
# files parseable by the experimental parser to match our internal
|
||||
# testing.
|
||||
if dbt.tracking.active_user is not None: # None in some tests
|
||||
if tracking.active_user is not None: # None in some tests
|
||||
tracking.track_experimental_parser_sample({
|
||||
"project_id": self.root_project.hashed_name(),
|
||||
"file_id": utils.get_hash(node),
|
||||
@@ -117,7 +101,7 @@ class ModelParser(SimpleSQLParser[ParsedModelNode]):
|
||||
})
|
||||
|
||||
# if the --use-experimental-parser flag was set, and the experimental parser succeeded
|
||||
elif not isinstance(experimentally_parsed, Exception):
|
||||
elif isinstance(experimentally_parsed, Dict):
|
||||
# since it doesn't need python jinja, fit the refs, sources, and configs
|
||||
# into the node. Down the line the rest of the node will be updated with
|
||||
# this information. (e.g. depends_on etc.)
|
||||
@@ -141,7 +125,102 @@ class ModelParser(SimpleSQLParser[ParsedModelNode]):
|
||||
|
||||
self.manifest._parsing_info.static_analysis_parsed_path_count += 1
|
||||
|
||||
# the experimental parser tried and failed on this model.
|
||||
# the experimental parser didn't run on this model.
|
||||
# fall back to python jinja rendering.
|
||||
elif experimentally_parsed in ["has_banned_macro"]:
|
||||
# not logging here since the reason should have been logged above
|
||||
super().render_update(node, config)
|
||||
# the experimental parser ran on this model and failed.
|
||||
# fall back to python jinja rendering.
|
||||
else:
|
||||
logger.debug(
|
||||
f"1602: parser fallback to jinja because of extractor failure for {node.path}"
|
||||
)
|
||||
super().render_update(node, config)
|
||||
|
||||
# checks for banned macros
|
||||
def _has_banned_macro(
|
||||
self, node: ParsedModelNode
|
||||
) -> bool:
|
||||
# first check if there is a banned macro defined in scope for this model file
|
||||
root_project_name = self.root_project.project_name
|
||||
project_name = node.package_name
|
||||
banned_macros = ['ref', 'source', 'config']
|
||||
|
||||
all_banned_macro_keys: Iterator[str] = chain.from_iterable(
|
||||
map(
|
||||
lambda name: [
|
||||
f"macro.{project_name}.{name}",
|
||||
f"macro.{root_project_name}.{name}"
|
||||
],
|
||||
banned_macros
|
||||
)
|
||||
)
|
||||
|
||||
return reduce(
|
||||
lambda z, key: z or (key in self.manifest.macros),
|
||||
all_banned_macro_keys,
|
||||
False
|
||||
)
|
||||
|
||||
|
||||
# returns a list of string codes to be sent as a tracking event
|
||||
def _get_sample_result(
|
||||
sample_output: Optional[Union[str, Dict[str, Any]]],
|
||||
config_call_dict: Dict[str, Any],
|
||||
source_calls: List[List[str]],
|
||||
node: ParsedModelNode,
|
||||
config: ContextConfig
|
||||
) -> List[str]:
|
||||
result: List[str] = []
|
||||
# experimental parser didn't run
|
||||
if sample_output is None:
|
||||
result += ["09_experimental_parser_skipped"]
|
||||
# experimental parser couldn't parse
|
||||
elif (isinstance(sample_output, str)):
|
||||
if sample_output == "cannot_parse":
|
||||
result += ["01_experimental_parser_cannot_parse"]
|
||||
elif sample_output == "has_banned_macro":
|
||||
result += ["08_has_banned_macro"]
|
||||
else:
|
||||
# look for false positive configs
|
||||
for k in config_call_dict.keys():
|
||||
if k not in config._config_call_dict:
|
||||
result += ["02_false_positive_config_value"]
|
||||
break
|
||||
|
||||
# look for missed configs
|
||||
for k in config._config_call_dict.keys():
|
||||
if k not in config_call_dict:
|
||||
result += ["03_missed_config_value"]
|
||||
break
|
||||
|
||||
# look for false positive sources
|
||||
for s in sample_output['sources']:
|
||||
if s not in node.sources:
|
||||
result += ["04_false_positive_source_value"]
|
||||
break
|
||||
|
||||
# look for missed sources
|
||||
for s in node.sources:
|
||||
if s not in sample_output['sources']:
|
||||
result += ["05_missed_source_value"]
|
||||
break
|
||||
|
||||
# look for false positive refs
|
||||
for r in sample_output['refs']:
|
||||
if r not in node.refs:
|
||||
result += ["06_false_positive_ref_value"]
|
||||
break
|
||||
|
||||
# look for missed refs
|
||||
for r in node.refs:
|
||||
if r not in sample_output['refs']:
|
||||
result += ["07_missed_ref_value"]
|
||||
break
|
||||
|
||||
# if there are no errors, return a success value
|
||||
if not result:
|
||||
result = ["00_exact_match"]
|
||||
|
||||
return result
|
||||
|
||||
@@ -12,13 +12,27 @@ from typing import Optional
|
||||
# This loads the files contents and creates the SourceFile object
|
||||
def load_source_file(
|
||||
path: FilePath, parse_file_type: ParseFileType,
|
||||
project_name: str) -> Optional[AnySourceFile]:
|
||||
file_contents = load_file_contents(path.absolute_path, strip=False)
|
||||
checksum = FileHash.from_contents(file_contents)
|
||||
project_name: str, saved_files,) -> Optional[AnySourceFile]:
|
||||
|
||||
sf_cls = SchemaSourceFile if parse_file_type == ParseFileType.Schema else SourceFile
|
||||
source_file = sf_cls(path=path, checksum=checksum,
|
||||
source_file = sf_cls(path=path, checksum=FileHash.empty(),
|
||||
parse_file_type=parse_file_type, project_name=project_name)
|
||||
source_file.contents = file_contents.strip()
|
||||
|
||||
skip_loading_schema_file = False
|
||||
if (parse_file_type == ParseFileType.Schema and
|
||||
saved_files and source_file.file_id in saved_files):
|
||||
old_source_file = saved_files[source_file.file_id]
|
||||
if (source_file.path.modification_time != 0.0 and
|
||||
old_source_file.path.modification_time == source_file.path.modification_time):
|
||||
source_file.checksum = old_source_file.checksum
|
||||
source_file.dfy = old_source_file.dfy
|
||||
skip_loading_schema_file = True
|
||||
|
||||
if not skip_loading_schema_file:
|
||||
file_contents = load_file_contents(path.absolute_path, strip=False)
|
||||
source_file.checksum = FileHash.from_contents(file_contents)
|
||||
source_file.contents = file_contents.strip()
|
||||
|
||||
if parse_file_type == ParseFileType.Schema and source_file.contents:
|
||||
dfy = yaml_from_file(source_file)
|
||||
if dfy:
|
||||
@@ -69,7 +83,7 @@ def load_seed_source_file(match: FilePath, project_name) -> SourceFile:
|
||||
|
||||
# Use the FilesystemSearcher to get a bunch of FilePaths, then turn
|
||||
# them into a bunch of FileSource objects
|
||||
def get_source_files(project, paths, extension, parse_file_type):
|
||||
def get_source_files(project, paths, extension, parse_file_type, saved_files):
|
||||
# file path list
|
||||
fp_list = list(FilesystemSearcher(
|
||||
project, paths, extension
|
||||
@@ -80,17 +94,17 @@ def get_source_files(project, paths, extension, parse_file_type):
|
||||
if parse_file_type == ParseFileType.Seed:
|
||||
fb_list.append(load_seed_source_file(fp, project.project_name))
|
||||
else:
|
||||
file = load_source_file(fp, parse_file_type, project.project_name)
|
||||
file = load_source_file(fp, parse_file_type, project.project_name, saved_files)
|
||||
# only append the list if it has contents. added to fix #3568
|
||||
if file:
|
||||
fb_list.append(file)
|
||||
return fb_list
|
||||
|
||||
|
||||
def read_files_for_parser(project, files, dirs, extension, parse_ft):
|
||||
def read_files_for_parser(project, files, dirs, extension, parse_ft, saved_files):
|
||||
parser_files = []
|
||||
source_files = get_source_files(
|
||||
project, dirs, extension, parse_ft
|
||||
project, dirs, extension, parse_ft, saved_files
|
||||
)
|
||||
for sf in source_files:
|
||||
files[sf.file_id] = sf
|
||||
@@ -102,46 +116,46 @@ def read_files_for_parser(project, files, dirs, extension, parse_ft):
|
||||
# dictionary needs to be passed in. What determines the order of
|
||||
# the various projects? Is the root project always last? Do the
|
||||
# non-root projects need to be done separately in order?
|
||||
def read_files(project, files, parser_files):
|
||||
def read_files(project, files, parser_files, saved_files):
|
||||
|
||||
project_files = {}
|
||||
|
||||
project_files['MacroParser'] = read_files_for_parser(
|
||||
project, files, project.macro_paths, '.sql', ParseFileType.Macro,
|
||||
project, files, project.macro_paths, '.sql', ParseFileType.Macro, saved_files
|
||||
)
|
||||
|
||||
project_files['ModelParser'] = read_files_for_parser(
|
||||
project, files, project.source_paths, '.sql', ParseFileType.Model,
|
||||
project, files, project.source_paths, '.sql', ParseFileType.Model, saved_files
|
||||
)
|
||||
|
||||
project_files['SnapshotParser'] = read_files_for_parser(
|
||||
project, files, project.snapshot_paths, '.sql', ParseFileType.Snapshot,
|
||||
project, files, project.snapshot_paths, '.sql', ParseFileType.Snapshot, saved_files
|
||||
)
|
||||
|
||||
project_files['AnalysisParser'] = read_files_for_parser(
|
||||
project, files, project.analysis_paths, '.sql', ParseFileType.Analysis,
|
||||
project, files, project.analysis_paths, '.sql', ParseFileType.Analysis, saved_files
|
||||
)
|
||||
|
||||
project_files['DataTestParser'] = read_files_for_parser(
|
||||
project, files, project.test_paths, '.sql', ParseFileType.Test,
|
||||
project, files, project.test_paths, '.sql', ParseFileType.Test, saved_files
|
||||
)
|
||||
|
||||
project_files['SeedParser'] = read_files_for_parser(
|
||||
project, files, project.data_paths, '.csv', ParseFileType.Seed,
|
||||
project, files, project.data_paths, '.csv', ParseFileType.Seed, saved_files
|
||||
)
|
||||
|
||||
project_files['DocumentationParser'] = read_files_for_parser(
|
||||
project, files, project.docs_paths, '.md', ParseFileType.Documentation,
|
||||
project, files, project.docs_paths, '.md', ParseFileType.Documentation, saved_files
|
||||
)
|
||||
|
||||
project_files['SchemaParser'] = read_files_for_parser(
|
||||
project, files, project.all_source_paths, '.yml', ParseFileType.Schema,
|
||||
project, files, project.all_source_paths, '.yml', ParseFileType.Schema, saved_files
|
||||
)
|
||||
|
||||
# Also read .yaml files for schema files. Might be better to change
|
||||
# 'read_files_for_parser' accept an array in the future.
|
||||
yaml_files = read_files_for_parser(
|
||||
project, files, project.all_source_paths, '.yaml', ParseFileType.Schema,
|
||||
project, files, project.all_source_paths, '.yaml', ParseFileType.Schema, saved_files
|
||||
)
|
||||
project_files['SchemaParser'].extend(yaml_files)
|
||||
|
||||
|
||||
@@ -433,12 +433,8 @@ class TestBuilder(Generic[Testable]):
|
||||
|
||||
def build_model_str(self):
|
||||
targ = self.target
|
||||
cfg_where = "config.get('where')"
|
||||
if isinstance(self.target, UnparsedNodeUpdate):
|
||||
identifier = self.target.name
|
||||
target_str = f"{{{{ ref('{targ.name}') }}}}"
|
||||
target_str = f"ref('{targ.name}')"
|
||||
elif isinstance(self.target, UnpatchedSourceDefinition):
|
||||
identifier = self.target.table.name
|
||||
target_str = f"{{{{ source('{targ.source.name}', '{targ.table.name}') }}}}"
|
||||
filtered = f"(select * from {target_str} where {{{{{cfg_where}}}}}) {identifier}"
|
||||
return f"{{% if {cfg_where} %}}{filtered}{{% else %}}{target_str}{{% endif %}}"
|
||||
target_str = f"source('{targ.source.name}', '{targ.table.name}')"
|
||||
return f"{{{{ get_where_subquery({target_str}) }}}}"
|
||||
|
||||
@@ -84,6 +84,7 @@ class FilesystemSearcher(Iterable[FilePath]):
|
||||
file_match = FilePath(
|
||||
searched_path=result['searched_path'],
|
||||
relative_path=result['relative_path'],
|
||||
modification_time=result['modification_time'],
|
||||
project_root=root,
|
||||
)
|
||||
yield file_match
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
import inspect
|
||||
from abc import abstractmethod
|
||||
from copy import deepcopy
|
||||
from typing import List, Optional, Type, TypeVar, Generic, Dict, Any
|
||||
|
||||
from dbt.dataclass_schema import dbtClassMixin, ValidationError
|
||||
@@ -21,7 +20,7 @@ class RemoteMethod(Generic[Parameters, Result]):
|
||||
METHOD_NAME: Optional[str] = None
|
||||
|
||||
def __init__(self, args, config):
|
||||
self.args = deepcopy(args)
|
||||
self.args = args
|
||||
self.config = config
|
||||
|
||||
@classmethod
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
from copy import deepcopy
|
||||
import threading
|
||||
import uuid
|
||||
from datetime import datetime
|
||||
@@ -155,7 +156,7 @@ class TaskManager:
|
||||
f'Manifest should not be None if the last parse state is '
|
||||
f'{state}'
|
||||
)
|
||||
return task(self.args, self.config, self.manifest)
|
||||
return task(deepcopy(self.args), self.config, self.manifest)
|
||||
|
||||
def rpc_task(
|
||||
self, method_name: str
|
||||
@@ -167,7 +168,7 @@ class TaskManager:
|
||||
elif issubclass(task, RemoteManifestMethod):
|
||||
return self._get_manifest_callable(task)
|
||||
elif issubclass(task, RemoteMethod):
|
||||
return task(self.args, self.config)
|
||||
return task(deepcopy(self.args), self.config)
|
||||
else:
|
||||
raise dbt.exceptions.InternalException(
|
||||
f'Got a task with an invalid type! {task} with method '
|
||||
|
||||
@@ -437,11 +437,15 @@ def filter_installable(
|
||||
versions: List[str],
|
||||
install_prerelease: bool
|
||||
) -> List[str]:
|
||||
if install_prerelease:
|
||||
return versions
|
||||
installable = []
|
||||
installable_dict = {}
|
||||
for version_string in versions:
|
||||
version = VersionSpecifier.from_version_string(version_string)
|
||||
if not version.prerelease:
|
||||
installable.append(version_string)
|
||||
return installable
|
||||
if install_prerelease or not version.prerelease:
|
||||
installable.append(version)
|
||||
installable_dict[str(version)] = version_string
|
||||
sorted_installable = sorted(installable)
|
||||
sorted_installable_original_versions = [
|
||||
str(installable_dict.get(str(version))) for version in sorted_installable
|
||||
]
|
||||
return sorted_installable_original_versions
|
||||
|
||||
@@ -3,19 +3,22 @@ from .snapshot import SnapshotRunner as snapshot_model_runner
|
||||
from .seed import SeedRunner as seed_runner
|
||||
from .test import TestRunner as test_runner
|
||||
|
||||
from dbt.graph import ResourceTypeSelector
|
||||
from dbt.contracts.results import NodeStatus
|
||||
from dbt.exceptions import InternalException
|
||||
from dbt.graph import ResourceTypeSelector
|
||||
from dbt.node_types import NodeType
|
||||
from dbt.task.test import TestSelector
|
||||
|
||||
|
||||
class BuildTask(RunTask):
|
||||
"""The Build task processes all assets of a given process and attempts to 'build'
|
||||
them in an opinionated fashion. Every resource type outlined in RUNNER_MAP
|
||||
will be processed by the mapped runner class.
|
||||
"""The Build task processes all assets of a given process and attempts to
|
||||
'build' them in an opinionated fashion. Every resource type outlined in
|
||||
RUNNER_MAP will be processed by the mapped runner class.
|
||||
|
||||
I.E. a resource of type Model is handled by the ModelRunner which is imported
|
||||
as run_model_runner.
|
||||
"""
|
||||
I.E. a resource of type Model is handled by the ModelRunner which is
|
||||
imported as run_model_runner. """
|
||||
|
||||
MARK_DEPENDENT_ERRORS_STATUSES = [NodeStatus.Error, NodeStatus.Fail]
|
||||
|
||||
RUNNER_MAP = {
|
||||
NodeType.Model: run_model_runner,
|
||||
@@ -23,6 +26,20 @@ class BuildTask(RunTask):
|
||||
NodeType.Seed: seed_runner,
|
||||
NodeType.Test: test_runner,
|
||||
}
|
||||
ALL_RESOURCE_VALUES = frozenset({x for x in RUNNER_MAP.keys()})
|
||||
|
||||
@property
|
||||
def resource_types(self):
|
||||
if not self.args.resource_types:
|
||||
return list(self.ALL_RESOURCE_VALUES)
|
||||
|
||||
values = set(self.args.resource_types)
|
||||
|
||||
if 'all' in values:
|
||||
values.remove('all')
|
||||
values.update(self.ALL_RESOURCE_VALUES)
|
||||
|
||||
return list(values)
|
||||
|
||||
def get_node_selector(self) -> ResourceTypeSelector:
|
||||
if self.manifest is None or self.graph is None:
|
||||
@@ -30,11 +47,19 @@ class BuildTask(RunTask):
|
||||
'manifest and graph must be set to get node selection'
|
||||
)
|
||||
|
||||
resource_types = self.resource_types
|
||||
|
||||
if resource_types == [NodeType.Test]:
|
||||
return TestSelector(
|
||||
graph=self.graph,
|
||||
manifest=self.manifest,
|
||||
previous_state=self.previous_state,
|
||||
)
|
||||
return ResourceTypeSelector(
|
||||
graph=self.graph,
|
||||
manifest=self.manifest,
|
||||
previous_state=self.previous_state,
|
||||
resource_types=[x for x in self.RUNNER_MAP.keys()],
|
||||
resource_types=resource_types,
|
||||
)
|
||||
|
||||
def get_runner_type(self, node):
|
||||
|
||||
@@ -4,7 +4,7 @@ from .base import BaseRunner
|
||||
|
||||
from dbt.contracts.results import RunStatus, RunResult
|
||||
from dbt.exceptions import InternalException
|
||||
from dbt.graph import ResourceTypeSelector, SelectionSpec, parse_difference
|
||||
from dbt.graph import ResourceTypeSelector
|
||||
from dbt.logger import print_timestamped_line
|
||||
from dbt.node_types import NodeType
|
||||
|
||||
@@ -37,13 +37,6 @@ class CompileTask(GraphRunnableTask):
|
||||
def raise_on_first_error(self):
|
||||
return True
|
||||
|
||||
def get_selection_spec(self) -> SelectionSpec:
|
||||
if self.args.selector_name:
|
||||
spec = self.config.get_selector(self.args.selector_name)
|
||||
else:
|
||||
spec = parse_difference(self.args.models, self.args.exclude)
|
||||
return spec
|
||||
|
||||
def get_node_selector(self) -> ResourceTypeSelector:
|
||||
if self.manifest is None or self.graph is None:
|
||||
raise InternalException(
|
||||
|
||||
@@ -16,6 +16,7 @@ from dbt.context.target import generate_target_context
|
||||
from dbt.clients.yaml_helper import load_yaml_text
|
||||
from dbt.links import ProfileConfigDocs
|
||||
from dbt.ui import green, red
|
||||
from dbt.utils import pluralize
|
||||
from dbt.version import get_installed_version
|
||||
|
||||
from dbt.task.base import BaseTask, get_nearest_project_dir
|
||||
@@ -125,6 +126,11 @@ class DebugTask(BaseTask):
|
||||
self.test_dependencies()
|
||||
self.test_connection()
|
||||
|
||||
if self.any_failure:
|
||||
print(red(f"{(pluralize(len(self.messages), 'check'))} failed:"))
|
||||
else:
|
||||
print(green('All checks passed!'))
|
||||
|
||||
for message in self.messages:
|
||||
print(message)
|
||||
print('')
|
||||
@@ -321,9 +327,12 @@ class DebugTask(BaseTask):
|
||||
self.any_failure = True
|
||||
if self.project_fail_details == FILE_NOT_FOUND:
|
||||
return
|
||||
print('Project loading failed for the following reason:')
|
||||
print(self.project_fail_details)
|
||||
print('')
|
||||
msg = (
|
||||
f'Project loading failed for the following reason:'
|
||||
f'\n{self.project_fail_details}'
|
||||
f'\n'
|
||||
)
|
||||
self.messages.append(msg)
|
||||
|
||||
def _log_profile_fail(self):
|
||||
if not self.profile_fail_details:
|
||||
@@ -332,9 +341,12 @@ class DebugTask(BaseTask):
|
||||
self.any_failure = True
|
||||
if self.profile_fail_details == FILE_NOT_FOUND:
|
||||
return
|
||||
print('Profile loading failed for the following reason:')
|
||||
print(self.profile_fail_details)
|
||||
print('')
|
||||
msg = (
|
||||
f'Profile loading failed for the following reason:'
|
||||
f'\n{self.profile_fail_details}'
|
||||
f'\n'
|
||||
)
|
||||
self.messages.append(msg)
|
||||
|
||||
@staticmethod
|
||||
def attempt_connection(profile):
|
||||
@@ -368,7 +380,7 @@ class DebugTask(BaseTask):
|
||||
print('Connection:')
|
||||
for k, v in self.profile.credentials.connection_info():
|
||||
print(' {}: {}'.format(k, v))
|
||||
print(' Connection test: {}'.format(self._connection_result()))
|
||||
print(' Connection test: [{}]'.format(self._connection_result()))
|
||||
print('')
|
||||
|
||||
@classmethod
|
||||
|
||||
@@ -56,19 +56,36 @@ class DepsTask(BaseTask):
|
||||
self.config, self.config.cli_vars
|
||||
))
|
||||
|
||||
packages_to_upgrade = []
|
||||
for package in final_deps:
|
||||
package_name = package.name
|
||||
source_type = package.source_type()
|
||||
version = package.get_version()
|
||||
|
||||
logger.info('Installing {}', package)
|
||||
package.install(self.config, renderer)
|
||||
logger.info(' Installed from {}',
|
||||
package.nice_version_name())
|
||||
if source_type == 'hub':
|
||||
version_latest = package.get_version_latest()
|
||||
if version_latest != version:
|
||||
packages_to_upgrade.append(package_name)
|
||||
logger.info(' Updated version available: {}',
|
||||
version_latest)
|
||||
else:
|
||||
logger.info(' Up to date!')
|
||||
if package.get_subdirectory():
|
||||
logger.info(' and subdirectory {}\n',
|
||||
logger.info(' and subdirectory {}',
|
||||
package.get_subdirectory())
|
||||
|
||||
self.track_package_install(
|
||||
package_name=package.name,
|
||||
source_type=package.source_type(),
|
||||
version=package.get_version())
|
||||
package_name=package_name,
|
||||
source_type=source_type,
|
||||
version=version)
|
||||
if packages_to_upgrade:
|
||||
logger.info('\nUpdates available for packages: {} \
|
||||
\nUpdate your versions in packages.yml, then run dbt deps',
|
||||
packages_to_upgrade)
|
||||
|
||||
@classmethod
|
||||
def from_args(cls, args):
|
||||
|
||||
@@ -19,7 +19,7 @@ from dbt.exceptions import RuntimeException, InternalException
|
||||
from dbt.logger import print_timestamped_line
|
||||
from dbt.node_types import NodeType
|
||||
|
||||
from dbt.graph import ResourceTypeSelector, SelectionSpec, parse_difference
|
||||
from dbt.graph import ResourceTypeSelector
|
||||
from dbt.contracts.graph.parsed import ParsedSourceDefinition
|
||||
|
||||
|
||||
@@ -136,19 +136,6 @@ class FreshnessTask(GraphRunnableTask):
|
||||
def raise_on_first_error(self):
|
||||
return False
|
||||
|
||||
def get_selection_spec(self) -> SelectionSpec:
|
||||
"""Generates a selection spec from task arguments to use when
|
||||
processing graph. A SelectionSpec describes what nodes to select
|
||||
when creating queue from graph of nodes.
|
||||
"""
|
||||
if self.args.selector_name:
|
||||
# use pre-defined selector (--selector) to create selection spec
|
||||
spec = self.config.get_selector(self.args.selector_name)
|
||||
else:
|
||||
# use --select and --exclude args to create selection spec
|
||||
spec = parse_difference(self.args.select, self.args.exclude)
|
||||
return spec
|
||||
|
||||
def get_node_selector(self):
|
||||
if self.manifest is None or self.graph is None:
|
||||
raise InternalException(
|
||||
|
||||
@@ -1,15 +1,10 @@
|
||||
import json
|
||||
from typing import Type
|
||||
|
||||
from dbt.contracts.graph.parsed import (
|
||||
ParsedExposure,
|
||||
ParsedSourceDefinition
|
||||
)
|
||||
from dbt.graph import (
|
||||
parse_difference,
|
||||
ResourceTypeSelector,
|
||||
SelectionSpec,
|
||||
)
|
||||
from dbt.graph import ResourceTypeSelector
|
||||
from dbt.task.runnable import GraphRunnableTask, ManifestTask
|
||||
from dbt.task.test import TestSelector
|
||||
from dbt.node_types import NodeType
|
||||
@@ -44,7 +39,6 @@ class ListTask(GraphRunnableTask):
|
||||
|
||||
def __init__(self, args, config):
|
||||
super().__init__(args, config)
|
||||
self.args.single_threaded = True
|
||||
if self.args.models:
|
||||
if self.args.select:
|
||||
raise RuntimeException(
|
||||
@@ -113,7 +107,11 @@ class ListTask(GraphRunnableTask):
|
||||
yield json.dumps({
|
||||
k: v
|
||||
for k, v in node.to_dict(omit_none=False).items()
|
||||
if k in self.ALLOWED_KEYS
|
||||
if (
|
||||
k in self.args.output_keys
|
||||
if self.args.output_keys is not None
|
||||
else k in self.ALLOWED_KEYS
|
||||
)
|
||||
})
|
||||
|
||||
def generate_paths(self):
|
||||
@@ -162,25 +160,19 @@ class ListTask(GraphRunnableTask):
|
||||
return list(values)
|
||||
|
||||
@property
|
||||
def selector(self):
|
||||
def selection_arg(self):
|
||||
# for backwards compatibility, list accepts both --models and --select,
|
||||
# with slightly different behavior: --models implies --resource-type model
|
||||
if self.args.models:
|
||||
return self.args.models
|
||||
else:
|
||||
return self.args.select
|
||||
|
||||
def get_selection_spec(self) -> SelectionSpec:
|
||||
if self.args.selector_name:
|
||||
spec = self.config.get_selector(self.args.selector_name)
|
||||
else:
|
||||
spec = parse_difference(self.selector, self.args.exclude)
|
||||
return spec
|
||||
|
||||
def get_node_selector(self):
|
||||
if self.manifest is None or self.graph is None:
|
||||
raise InternalException(
|
||||
'manifest and graph must be set to get perform node selection'
|
||||
)
|
||||
cls: Type[ResourceTypeSelector]
|
||||
if self.resource_types == [NodeType.Test]:
|
||||
return TestSelector(
|
||||
graph=self.graph,
|
||||
|
||||
@@ -79,7 +79,10 @@ class RemoteCompileProjectTask(
|
||||
METHOD_NAME = 'compile'
|
||||
|
||||
def set_args(self, params: RPCCompileParameters) -> None:
|
||||
self.args.models = self._listify(params.models)
|
||||
if params.models:
|
||||
self.args.select = self._listify(params.models)
|
||||
else:
|
||||
self.args.select = self._listify(params.select)
|
||||
self.args.exclude = self._listify(params.exclude)
|
||||
self.args.selector_name = params.selector
|
||||
if params.threads is not None:
|
||||
@@ -94,7 +97,10 @@ class RemoteRunProjectTask(RPCCommandTask[RPCRunParameters], RunTask):
|
||||
METHOD_NAME = 'run'
|
||||
|
||||
def set_args(self, params: RPCRunParameters) -> None:
|
||||
self.args.models = self._listify(params.models)
|
||||
if params.models:
|
||||
self.args.select = self._listify(params.models)
|
||||
else:
|
||||
self.args.select = self._listify(params.select)
|
||||
self.args.exclude = self._listify(params.exclude)
|
||||
self.args.selector_name = params.selector
|
||||
|
||||
@@ -114,7 +120,7 @@ class RemoteSeedProjectTask(RPCCommandTask[RPCSeedParameters], SeedTask):
|
||||
|
||||
def set_args(self, params: RPCSeedParameters) -> None:
|
||||
# select has an argparse `dest` value of `models`.
|
||||
self.args.models = self._listify(params.select)
|
||||
self.args.select = self._listify(params.select)
|
||||
self.args.exclude = self._listify(params.exclude)
|
||||
self.args.selector_name = params.selector
|
||||
if params.threads is not None:
|
||||
@@ -129,7 +135,10 @@ class RemoteTestProjectTask(RPCCommandTask[RPCTestParameters], TestTask):
|
||||
METHOD_NAME = 'test'
|
||||
|
||||
def set_args(self, params: RPCTestParameters) -> None:
|
||||
self.args.models = self._listify(params.models)
|
||||
if params.models:
|
||||
self.args.select = self._listify(params.models)
|
||||
else:
|
||||
self.args.select = self._listify(params.select)
|
||||
self.args.exclude = self._listify(params.exclude)
|
||||
self.args.selector_name = params.selector
|
||||
self.args.data = params.data
|
||||
@@ -152,7 +161,7 @@ class RemoteDocsGenerateProjectTask(
|
||||
METHOD_NAME = 'docs.generate'
|
||||
|
||||
def set_args(self, params: RPCDocsGenerateParameters) -> None:
|
||||
self.args.models = None
|
||||
self.args.select = None
|
||||
self.args.exclude = None
|
||||
self.args.selector_name = None
|
||||
self.args.compile = params.compile
|
||||
@@ -216,7 +225,7 @@ class RemoteSnapshotTask(RPCCommandTask[RPCSnapshotParameters], SnapshotTask):
|
||||
|
||||
def set_args(self, params: RPCSnapshotParameters) -> None:
|
||||
# select has an argparse `dest` value of `models`.
|
||||
self.args.models = self._listify(params.select)
|
||||
self.args.select = self._listify(params.select)
|
||||
self.args.exclude = self._listify(params.exclude)
|
||||
self.args.selector_name = params.selector
|
||||
if params.threads is not None:
|
||||
@@ -255,7 +264,7 @@ class GetManifest(
|
||||
METHOD_NAME = 'get-manifest'
|
||||
|
||||
def set_args(self, params: GetManifestParameters) -> None:
|
||||
self.args.models = None
|
||||
self.args.select = None
|
||||
self.args.exclude = None
|
||||
self.args.selector_name = None
|
||||
|
||||
@@ -282,13 +291,14 @@ class RemoteListTask(
|
||||
METHOD_NAME = 'list'
|
||||
|
||||
def set_args(self, params: RPCListParameters) -> None:
|
||||
|
||||
self.args.output = params.output
|
||||
self.args.output_keys = params.output_keys
|
||||
self.args.resource_types = self._listify(params.resource_types)
|
||||
self.args.models = self._listify(params.models)
|
||||
self.args.exclude = self._listify(params.exclude)
|
||||
self.args.selector_name = params.selector
|
||||
self.args.select = self._listify(params.select)
|
||||
self.args.single_threaded = True
|
||||
|
||||
if self.args.models:
|
||||
if self.args.select:
|
||||
@@ -310,10 +320,12 @@ class RemoteListTask(
|
||||
|
||||
|
||||
class RemoteBuildProjectTask(RPCCommandTask[RPCBuildParameters], BuildTask):
|
||||
|
||||
METHOD_NAME = 'build'
|
||||
|
||||
def set_args(self, params: RPCBuildParameters) -> None:
|
||||
self.args.models = self._listify(params.models)
|
||||
self.args.resource_types = self._listify(params.resource_types)
|
||||
self.args.select = self._listify(params.select)
|
||||
self.args.exclude = self._listify(params.exclude)
|
||||
self.args.selector_name = params.selector
|
||||
|
||||
|
||||
@@ -41,7 +41,13 @@ from dbt.exceptions import (
|
||||
FailFastException,
|
||||
)
|
||||
|
||||
from dbt.graph import GraphQueue, NodeSelector, SelectionSpec, Graph
|
||||
from dbt.graph import (
|
||||
GraphQueue,
|
||||
NodeSelector,
|
||||
SelectionSpec,
|
||||
parse_difference,
|
||||
Graph
|
||||
)
|
||||
from dbt.parser.manifest import ManifestLoader
|
||||
|
||||
import dbt.exceptions
|
||||
@@ -83,6 +89,9 @@ class ManifestTask(ConfiguredTask):
|
||||
|
||||
|
||||
class GraphRunnableTask(ManifestTask):
|
||||
|
||||
MARK_DEPENDENT_ERRORS_STATUSES = [NodeStatus.Error]
|
||||
|
||||
def __init__(self, args, config):
|
||||
super().__init__(args, config)
|
||||
self.job_queue: Optional[GraphQueue] = None
|
||||
@@ -103,11 +112,27 @@ class GraphRunnableTask(ManifestTask):
|
||||
def index_offset(self, value: int) -> int:
|
||||
return value
|
||||
|
||||
@abstractmethod
|
||||
@property
|
||||
def selection_arg(self):
|
||||
return self.args.select
|
||||
|
||||
@property
|
||||
def exclusion_arg(self):
|
||||
return self.args.exclude
|
||||
|
||||
def get_selection_spec(self) -> SelectionSpec:
|
||||
raise NotImplementedException(
|
||||
f'get_selection_spec not implemented for task {type(self)}'
|
||||
)
|
||||
default_selector_name = self.config.get_default_selector_name()
|
||||
if self.args.selector_name:
|
||||
# use pre-defined selector (--selector)
|
||||
spec = self.config.get_selector(self.args.selector_name)
|
||||
elif not (self.selection_arg or self.exclusion_arg) and default_selector_name:
|
||||
# use pre-defined selector (--selector) with default: true
|
||||
logger.info(f"Using default selector {default_selector_name}")
|
||||
spec = self.config.get_selector(default_selector_name)
|
||||
else:
|
||||
# use --select and --exclude args
|
||||
spec = parse_difference(self.selection_arg, self.exclusion_arg)
|
||||
return spec
|
||||
|
||||
@abstractmethod
|
||||
def get_node_selector(self) -> NodeSelector:
|
||||
@@ -289,7 +314,7 @@ class GraphRunnableTask(ManifestTask):
|
||||
else:
|
||||
self.manifest.update_node(node)
|
||||
|
||||
if result.status == NodeStatus.Error:
|
||||
if result.status in self.MARK_DEPENDENT_ERRORS_STATUSES:
|
||||
if is_ephemeral:
|
||||
cause = result
|
||||
else:
|
||||
@@ -413,7 +438,7 @@ class GraphRunnableTask(ManifestTask):
|
||||
)
|
||||
|
||||
if len(self._flattened_nodes) == 0:
|
||||
logger.warning("WARNING: Nothing to do. Try checking your model "
|
||||
logger.warning("\nWARNING: Nothing to do. Try checking your model "
|
||||
"configs and model specification args")
|
||||
result = self.get_result(
|
||||
results=[],
|
||||
|
||||
@@ -21,7 +21,7 @@ sp_logger.setLevel(100)
|
||||
COLLECTOR_URL = "fishtownanalytics.sinter-collect.com"
|
||||
COLLECTOR_PROTOCOL = "https"
|
||||
|
||||
INVOCATION_SPEC = 'iglu:com.dbt/invocation/jsonschema/1-0-1'
|
||||
INVOCATION_SPEC = 'iglu:com.dbt/invocation/jsonschema/1-0-2'
|
||||
PLATFORM_SPEC = 'iglu:com.dbt/platform/jsonschema/1-0-0'
|
||||
RUN_MODEL_SPEC = 'iglu:com.dbt/run_model/jsonschema/1-0-1'
|
||||
INVOCATION_ENV_SPEC = 'iglu:com.dbt/invocation_env/jsonschema/1-0-0'
|
||||
@@ -166,10 +166,15 @@ def get_run_type(args):
|
||||
|
||||
|
||||
def get_invocation_context(user, config, args):
|
||||
# this adapter might not have implemented the type or unique_field properties
|
||||
try:
|
||||
adapter_type = config.credentials.type
|
||||
except Exception:
|
||||
adapter_type = None
|
||||
try:
|
||||
adapter_unique_id = config.credentials.hashed_unique_field()
|
||||
except Exception:
|
||||
adapter_unique_id = None
|
||||
|
||||
return {
|
||||
"project_id": None if config is None else config.hashed_name(),
|
||||
@@ -182,6 +187,7 @@ def get_invocation_context(user, config, args):
|
||||
|
||||
"run_type": get_run_type(args),
|
||||
"adapter_type": adapter_type,
|
||||
"adapter_unique_id": adapter_unique_id,
|
||||
}
|
||||
|
||||
|
||||
|
||||
@@ -9,7 +9,12 @@ import itertools
|
||||
import jinja2
|
||||
import json
|
||||
import os
|
||||
import requests
|
||||
import time
|
||||
|
||||
from contextlib import contextmanager
|
||||
from dbt.exceptions import ConnectionException
|
||||
from dbt.logger import GLOBAL_LOGGER as logger
|
||||
from enum import Enum
|
||||
from typing_extensions import Protocol
|
||||
from typing import (
|
||||
@@ -602,3 +607,19 @@ class MultiDict(Mapping[str, Any]):
|
||||
|
||||
def __contains__(self, name) -> bool:
|
||||
return any((name in entry for entry in self._itersource()))
|
||||
|
||||
|
||||
def _connection_exception_retry(fn, max_attempts: int, attempt: int = 0):
|
||||
"""Attempts to run a function that makes an external call, if the call fails
|
||||
on a connection error or timeout, it will be tried up to 5 more times.
|
||||
"""
|
||||
try:
|
||||
return fn()
|
||||
except (requests.exceptions.ConnectionError, requests.exceptions.Timeout) as exc:
|
||||
if attempt <= max_attempts - 1:
|
||||
logger.debug('Retrying external call. Attempt: ' +
|
||||
f'{attempt} Max attempts: {max_attempts}')
|
||||
time.sleep(1)
|
||||
_connection_exception_retry(fn, max_attempts, attempt + 1)
|
||||
else:
|
||||
raise ConnectionException('External connection exception occurred: ' + str(exc))
|
||||
|
||||
@@ -96,5 +96,5 @@ def _get_dbt_plugins_info():
|
||||
yield plugin_name, mod.version
|
||||
|
||||
|
||||
__version__ = '0.21.0b1'
|
||||
__version__ = '0.21.0b2'
|
||||
installed = get_installed_version()
|
||||
|
||||
@@ -130,7 +130,7 @@ default:
|
||||
|
||||
|
||||
DBTSPEC_TEMPLATE = '''
|
||||
# See https://github.com/fishtown-analytics/dbt-adapter-tests
|
||||
# See https://github.com/dbt-labs/dbt-adapter-tests
|
||||
# for installation and use
|
||||
|
||||
target:
|
||||
@@ -284,12 +284,12 @@ def parse_args(argv=None):
|
||||
parser.add_argument('adapter')
|
||||
parser.add_argument('--title-case', '-t', default=None)
|
||||
parser.add_argument('--dependency', action='append')
|
||||
parser.add_argument('--dbt-core-version', default='0.21.0b1')
|
||||
parser.add_argument('--dbt-core-version', default='0.21.0b2')
|
||||
parser.add_argument('--email')
|
||||
parser.add_argument('--author')
|
||||
parser.add_argument('--url')
|
||||
parser.add_argument('--sql', action='store_true')
|
||||
parser.add_argument('--package-version', default='0.21.0b1')
|
||||
parser.add_argument('--package-version', default='0.21.0b2')
|
||||
parser.add_argument('--project-version', default='1.0')
|
||||
parser.add_argument(
|
||||
'--no-dependency', action='store_false', dest='set_dependency'
|
||||
|
||||
@@ -24,7 +24,7 @@ def read(fname):
|
||||
|
||||
|
||||
package_name = "dbt-core"
|
||||
package_version = "0.21.0b1"
|
||||
package_version = "0.21.0b2"
|
||||
description = """dbt (data build tool) is a command line tool that helps \
|
||||
analysts and engineers transform data in their warehouse more effectively"""
|
||||
|
||||
@@ -62,7 +62,7 @@ setup(
|
||||
'minimal-snowplow-tracker==0.0.2',
|
||||
'networkx>=2.3,<3',
|
||||
'packaging~=20.9',
|
||||
'sqlparse>=0.2.3,<0.4',
|
||||
'sqlparse>=0.2.3,<0.5',
|
||||
'dbt-extractor==0.4.0',
|
||||
'typing-extensions>=3.7.4,<3.11',
|
||||
'werkzeug>=1,<3',
|
||||
|
||||
@@ -8,6 +8,7 @@ pip-tools
|
||||
pytest
|
||||
pytest-dotenv
|
||||
pytest-logbook
|
||||
pytest-csv
|
||||
pytest-xdist
|
||||
pytz
|
||||
tox>=3.13
|
||||
|
||||
@@ -19,7 +19,7 @@ services:
|
||||
GROUP_ID: ${GROUP_ID:-}
|
||||
command: "/root/.virtualenvs/dbt/bin/pytest"
|
||||
environment:
|
||||
DOCKER_TEST_DATABASE_HOST: "database"
|
||||
POSTGRES_TEST_HOST: "database"
|
||||
volumes:
|
||||
- .:/usr/app
|
||||
working_dir: /usr/app
|
||||
|
||||
@@ -5,21 +5,18 @@ ARG BASE_REQUIREMENTS_SRC_PATH
|
||||
ARG WHEEL_REQUIREMENTS_SRC_PATH
|
||||
ARG DIST_PATH
|
||||
|
||||
# We need backport packages to get a more recent version of git
|
||||
RUN printf "deb http://deb.debian.org/debian buster-backports main" > /etc/apt/sources.list.d/backports_git.list && \
|
||||
apt-get update \
|
||||
&& apt-get dist-upgrade -y \
|
||||
&& apt-get install -y --no-install-recommends \
|
||||
git-man/buster-backports \
|
||||
git/buster-backports \
|
||||
ssh-client \
|
||||
software-properties-common \
|
||||
make \
|
||||
build-essential \
|
||||
ca-certificates \
|
||||
libpq-dev \
|
||||
&& apt-get clean \
|
||||
&& rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
|
||||
RUN apt-get update \
|
||||
&& apt-get dist-upgrade -y \
|
||||
&& apt-get install -y --no-install-recommends \
|
||||
git \
|
||||
ssh-client \
|
||||
software-properties-common \
|
||||
make \
|
||||
build-essential \
|
||||
ca-certificates \
|
||||
libpq-dev \
|
||||
&& apt-get clean \
|
||||
&& rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
|
||||
|
||||
RUN echo BASE_REQUIREMENTS_SRC_PATH=$BASE_REQUIREMENTS_SRC_PATH
|
||||
RUN echo WHEEL_REQUIREMENTS_SRC_PATH=$WHEEL_REQUIREMENTS_SRC_PATH
|
||||
|
||||
75
docker/requirements/requirements.0.21.0b2.txt
Normal file
75
docker/requirements/requirements.0.21.0b2.txt
Normal file
@@ -0,0 +1,75 @@
|
||||
agate==1.6.1
|
||||
asn1crypto==1.4.0
|
||||
attrs==21.2.0
|
||||
azure-common==1.1.27
|
||||
azure-core==1.17.0
|
||||
azure-storage-blob==12.8.1
|
||||
Babel==2.9.1
|
||||
boto3==1.18.25
|
||||
botocore==1.21.25
|
||||
cachetools==4.2.2
|
||||
certifi==2021.5.30
|
||||
cffi==1.14.6
|
||||
chardet==4.0.0
|
||||
charset-normalizer==2.0.4
|
||||
colorama==0.4.4
|
||||
cryptography==3.4.7
|
||||
google-api-core==1.31.2
|
||||
google-auth==1.35.0
|
||||
google-cloud-bigquery==2.24.1
|
||||
google-cloud-core==1.7.2
|
||||
google-crc32c==1.1.2
|
||||
google-resumable-media==2.0.0
|
||||
googleapis-common-protos==1.53.0
|
||||
grpcio==1.39.0
|
||||
hologram==0.0.14
|
||||
idna==3.2
|
||||
importlib-metadata==4.6.4
|
||||
isodate==0.6.0
|
||||
jeepney==0.7.1
|
||||
Jinja2==2.11.3
|
||||
jmespath==0.10.0
|
||||
json-rpc==1.13.0
|
||||
jsonschema==3.1.1
|
||||
keyring==21.8.0
|
||||
leather==0.3.3
|
||||
Logbook==1.5.3
|
||||
MarkupSafe==2.0.1
|
||||
mashumaro==2.5
|
||||
minimal-snowplow-tracker==0.0.2
|
||||
msgpack==1.0.2
|
||||
msrest==0.6.21
|
||||
networkx==2.6.2
|
||||
oauthlib==3.1.1
|
||||
oscrypto==1.2.1
|
||||
packaging==20.9
|
||||
parsedatetime==2.6
|
||||
proto-plus==1.19.0
|
||||
protobuf==3.17.3
|
||||
psycopg2-binary==2.9.1
|
||||
pyasn1==0.4.8
|
||||
pyasn1-modules==0.2.8
|
||||
pycparser==2.20
|
||||
pycryptodomex==3.10.1
|
||||
PyJWT==2.1.0
|
||||
pyOpenSSL==20.0.1
|
||||
pyparsing==2.4.7
|
||||
pyrsistent==0.18.0
|
||||
python-dateutil==2.8.2
|
||||
python-slugify==5.0.2
|
||||
pytimeparse==1.1.8
|
||||
pytz==2021.1
|
||||
PyYAML==5.4.1
|
||||
requests==2.26.0
|
||||
requests-oauthlib==1.3.0
|
||||
rsa==4.7.2
|
||||
s3transfer==0.5.0
|
||||
SecretStorage==3.3.1
|
||||
six==1.16.0
|
||||
snowflake-connector-python==2.5.1
|
||||
sqlparse==0.3.1
|
||||
text-unidecode==1.3
|
||||
typing-extensions==3.10.0.0
|
||||
urllib3==1.26.6
|
||||
Werkzeug==2.0.1
|
||||
zipp==3.5.0
|
||||
11
docs/arch/README.md
Normal file
11
docs/arch/README.md
Normal file
@@ -0,0 +1,11 @@
|
||||
## ADRs
|
||||
|
||||
For any architectural/engineering decisions we make, we will create an ADR (Architectural Design Record) to keep track of what decision we made and why. This allows us to refer back to decisions in the future and see if the reasons we made a choice still holds true. This also allows for others to more easily understand the code. ADRs will follow this process:
|
||||
|
||||
- They will live in the repo, under a directory `docs/arch`
|
||||
- They will be written in markdown
|
||||
- They will follow the naming convention [`adr-NNN-<decision-title>.md`](http://adr-nnn.md/)
|
||||
- `NNN` will just be a counter starting at `001` and will allow us easily keep the records in chronological order.
|
||||
- The common sections that each ADR should have are:
|
||||
- Title, Context, Decision, Status, Consequences
|
||||
- Use this article as a reference: [https://cognitect.com/blog/2011/11/15/documenting-architecture-decisions](https://cognitect.com/blog/2011/11/15/documenting-architecture-decisions)
|
||||
35
docs/arch/adr-001-perf-testing.md
Normal file
35
docs/arch/adr-001-perf-testing.md
Normal file
@@ -0,0 +1,35 @@
|
||||
# Performance Regression Framework
|
||||
|
||||
## Context
|
||||
We want the ability to benchmark our perfomance overtime with new changes going forward.
|
||||
|
||||
### Options
|
||||
- Static Window: Compare the develop branch to fastest version and ensure it doesn't exceed a static window (i.e. time parse on develop and time parse on 0.20.latest and make sure it's not more than 5% slower)
|
||||
- Pro: quick to run
|
||||
- Pro: simple to implement
|
||||
- Con: rerunning a failing test could get it to pass in a large number of changes.
|
||||
- Con: several small regressions could press us up against the threshold requiring us to do unexpected additional performance work, or lower the threshold to get a release out.
|
||||
- Variance-aware Testing: Run both the develop branch and our fastest version *many times* to collect a set of timing data. We can fail on a static window based on medians, confidence interval midpoints, and even variance magnitude.
|
||||
- Pro: would catch more small performance regressions
|
||||
- Con: would take much longer to run
|
||||
- Con: Need to be very careful about making sure caching doesn't wreck the curve (or if it does, it wrecks the curve equally for all tests)
|
||||
- Stateful Tracking: For example, the rust compiler team does some [bananas performance tracking](https://perf.rust-lang.org/). This option could be done in tandem with the above options, however it would require results be stored somewhere.
|
||||
- Pro: we can graph our performance history and look really cool.
|
||||
- Pro: Variance-aware testing would run in half the time since you can just reference old runs for comparison
|
||||
- Con: state in tests sucks
|
||||
- Con: longer to build
|
||||
- Performance Profiling: Running a sampling-based profiler through a series of standardized test runs (test designed to hit as many/all of the code paths in the codebase) to determine if any particular function/class/other code has regressed in performance.
|
||||
- Pro: easy to find the cause of the perf. regression
|
||||
- Pro: should be able to run on a fairly small project size without losing much test resolution (a 5% change in a function should be evident with even a single case that runs that code path)
|
||||
- Con: complex to build
|
||||
- Con: compute intensive
|
||||
- Con: requires stored results to compare against
|
||||
|
||||
## Decision
|
||||
We decided to start with variance-aware testing with the ability to add stateful tracking by leveraging `hyperfine` which does all the variance work for us, and outputs clear json artifacts. Since we're running perfornace testing on a schedule it doesn't matter that as we add more tests it may take hours to run. The artifacts are all stored in the github action runs today, but could easily be changed to be sent somewhere in the action to track over time.
|
||||
|
||||
## Status
|
||||
Completed
|
||||
|
||||
## Consequences
|
||||
We now have the ability to more rigorously detect performance regressions, but we do not have a solid way to identify where that regression is coming from. Adding Performance Profiling cababilities will help with this, but for now just running it nightly should help us narrow it down to specific commits. As we add more performance tests, the testing matrix may take hours to run which consumes resources on GitHub Actions. Because performance testing is asynchronous, failures are easier to miss or ignore, and because it is non-deterministic it adds a non-trivial amount of complexity to our development process.
|
||||
1
performance/runner/.gitignore
vendored
1
performance/runner/.gitignore
vendored
@@ -1,2 +1,3 @@
|
||||
target/
|
||||
projects/*/logs
|
||||
plots/
|
||||
|
||||
662
performance/runner/Cargo.lock
generated
662
performance/runner/Cargo.lock
generated
@@ -2,6 +2,12 @@
|
||||
# It is not intended for manual editing.
|
||||
version = 3
|
||||
|
||||
[[package]]
|
||||
name = "adler32"
|
||||
version = "1.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234"
|
||||
|
||||
[[package]]
|
||||
name = "ansi_term"
|
||||
version = "0.11.0"
|
||||
@@ -22,12 +28,62 @@ dependencies = [
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "autocfg"
|
||||
version = "1.0.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
|
||||
|
||||
[[package]]
|
||||
name = "bitflags"
|
||||
version = "1.2.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
|
||||
|
||||
[[package]]
|
||||
name = "bumpalo"
|
||||
version = "3.7.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9c59e7af012c713f529e7a3ee57ce9b31ddd858d4b512923602f74608b009631"
|
||||
|
||||
[[package]]
|
||||
name = "bytemuck"
|
||||
version = "1.7.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "72957246c41db82b8ef88a5486143830adeb8227ef9837740bdec67724cf2c5b"
|
||||
|
||||
[[package]]
|
||||
name = "byteorder"
|
||||
version = "1.4.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610"
|
||||
|
||||
[[package]]
|
||||
name = "cc"
|
||||
version = "1.0.70"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d26a6ce4b6a484fa3edb70f7efa6fc430fd2b87285fe8b84304fd0936faa0dc0"
|
||||
|
||||
[[package]]
|
||||
name = "cfg-if"
|
||||
version = "1.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
|
||||
|
||||
[[package]]
|
||||
name = "chrono"
|
||||
version = "0.4.19"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"num-integer",
|
||||
"num-traits",
|
||||
"serde",
|
||||
"time",
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "clap"
|
||||
version = "2.33.3"
|
||||
@@ -43,12 +99,230 @@ dependencies = [
|
||||
"vec_map",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "cmake"
|
||||
version = "0.1.45"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "eb6210b637171dfba4cda12e579ac6dc73f5165ad56133e5d72ef3131f320855"
|
||||
dependencies = [
|
||||
"cc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "color_quant"
|
||||
version = "1.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b"
|
||||
|
||||
[[package]]
|
||||
name = "core-foundation"
|
||||
version = "0.9.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0a89e2ae426ea83155dccf10c0fa6b1463ef6d5fcb44cee0b224a408fa640a62"
|
||||
dependencies = [
|
||||
"core-foundation-sys",
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "core-foundation-sys"
|
||||
version = "0.8.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ea221b5284a47e40033bf9b66f35f984ec0ea2931eb03505246cd27a963f981b"
|
||||
|
||||
[[package]]
|
||||
name = "core-graphics"
|
||||
version = "0.22.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "269f35f69b542b80e736a20a89a05215c0ce80c2c03c514abb2e318b78379d86"
|
||||
dependencies = [
|
||||
"bitflags",
|
||||
"core-foundation",
|
||||
"core-graphics-types",
|
||||
"foreign-types",
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "core-graphics-types"
|
||||
version = "0.1.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3a68b68b3446082644c91ac778bf50cd4104bfb002b5a6a7c44cca5a2c70788b"
|
||||
dependencies = [
|
||||
"bitflags",
|
||||
"core-foundation",
|
||||
"foreign-types",
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "core-text"
|
||||
version = "19.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "99d74ada66e07c1cefa18f8abfba765b486f250de2e4a999e5727fc0dd4b4a25"
|
||||
dependencies = [
|
||||
"core-foundation",
|
||||
"core-graphics",
|
||||
"foreign-types",
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crc32fast"
|
||||
version = "1.2.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "deflate"
|
||||
version = "0.8.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "73770f8e1fe7d64df17ca66ad28994a0a623ea497fa69486e14984e715c5d174"
|
||||
dependencies = [
|
||||
"adler32",
|
||||
"byteorder",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "dirs-next"
|
||||
version = "2.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b98cf8ebf19c3d1b223e151f99a4f9f0690dca41414773390fc824184ac833e1"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"dirs-sys-next",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "dirs-sys-next"
|
||||
version = "0.1.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"redox_users",
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "dwrote"
|
||||
version = "0.11.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "439a1c2ba5611ad3ed731280541d36d2e9c4ac5e7fb818a27b604bdc5a6aa65b"
|
||||
dependencies = [
|
||||
"lazy_static",
|
||||
"libc",
|
||||
"winapi",
|
||||
"wio",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "either"
|
||||
version = "1.6.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457"
|
||||
|
||||
[[package]]
|
||||
name = "expat-sys"
|
||||
version = "2.1.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "658f19728920138342f68408b7cf7644d90d4784353d8ebc32e7e8663dbe45fa"
|
||||
dependencies = [
|
||||
"cmake",
|
||||
"pkg-config",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "float-ord"
|
||||
version = "0.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7bad48618fdb549078c333a7a8528acb57af271d0433bdecd523eb620628364e"
|
||||
|
||||
[[package]]
|
||||
name = "font-kit"
|
||||
version = "0.10.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "46c9a156ec38864999bc9c4156e5f3b50224d4a5578028a64e5a3875caa9ee28"
|
||||
dependencies = [
|
||||
"bitflags",
|
||||
"byteorder",
|
||||
"core-foundation",
|
||||
"core-graphics",
|
||||
"core-text",
|
||||
"dirs-next",
|
||||
"dwrote",
|
||||
"float-ord",
|
||||
"freetype",
|
||||
"lazy_static",
|
||||
"libc",
|
||||
"log",
|
||||
"pathfinder_geometry",
|
||||
"pathfinder_simd",
|
||||
"servo-fontconfig",
|
||||
"walkdir",
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "foreign-types"
|
||||
version = "0.3.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1"
|
||||
dependencies = [
|
||||
"foreign-types-shared",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "foreign-types-shared"
|
||||
version = "0.1.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"
|
||||
|
||||
[[package]]
|
||||
name = "freetype"
|
||||
version = "0.7.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "bee38378a9e3db1cc693b4f88d166ae375338a0ff75cb8263e1c601d51f35dc6"
|
||||
dependencies = [
|
||||
"freetype-sys",
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "freetype-sys"
|
||||
version = "0.13.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a37d4011c0cc628dfa766fcc195454f4b068d7afdc2adfd28861191d866e731a"
|
||||
dependencies = [
|
||||
"cmake",
|
||||
"libc",
|
||||
"pkg-config",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "getrandom"
|
||||
version = "0.2.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"libc",
|
||||
"wasi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "gif"
|
||||
version = "0.11.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5a668f699973d0f573d15749b7002a9ac9e1f9c6b220e7b165601334c173d8de"
|
||||
dependencies = [
|
||||
"color_quant",
|
||||
"weezl",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "heck"
|
||||
version = "0.3.3"
|
||||
@@ -67,6 +341,22 @@ dependencies = [
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "image"
|
||||
version = "0.23.14"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "24ffcb7e7244a9bf19d35bf2883b9c080c4ced3c07a9895572178cdb8f13f6a1"
|
||||
dependencies = [
|
||||
"bytemuck",
|
||||
"byteorder",
|
||||
"color_quant",
|
||||
"jpeg-decoder",
|
||||
"num-iter",
|
||||
"num-rational",
|
||||
"num-traits",
|
||||
"png",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "itertools"
|
||||
version = "0.10.1"
|
||||
@@ -82,6 +372,21 @@ version = "0.4.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736"
|
||||
|
||||
[[package]]
|
||||
name = "jpeg-decoder"
|
||||
version = "0.1.22"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "229d53d58899083193af11e15917b5640cd40b29ff475a1fe4ef725deb02d0f2"
|
||||
|
||||
[[package]]
|
||||
name = "js-sys"
|
||||
version = "0.3.54"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1866b355d9c878e5e607473cbe3f63282c0b7aad2db1dbebf55076c686918254"
|
||||
dependencies = [
|
||||
"wasm-bindgen",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "lazy_static"
|
||||
version = "1.4.0"
|
||||
@@ -94,6 +399,157 @@ version = "0.2.98"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "320cfe77175da3a483efed4bc0adc1968ca050b098ce4f2f1c13a56626128790"
|
||||
|
||||
[[package]]
|
||||
name = "log"
|
||||
version = "0.4.14"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "miniz_oxide"
|
||||
version = "0.3.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "791daaae1ed6889560f8c4359194f56648355540573244a5448a83ba1ecc7435"
|
||||
dependencies = [
|
||||
"adler32",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "num-integer"
|
||||
version = "0.1.44"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db"
|
||||
dependencies = [
|
||||
"autocfg",
|
||||
"num-traits",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "num-iter"
|
||||
version = "0.1.42"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b2021c8337a54d21aca0d59a92577a029af9431cb59b909b03252b9c164fad59"
|
||||
dependencies = [
|
||||
"autocfg",
|
||||
"num-integer",
|
||||
"num-traits",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "num-rational"
|
||||
version = "0.3.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "12ac428b1cb17fce6f731001d307d351ec70a6d202fc2e60f7d4c5e42d8f4f07"
|
||||
dependencies = [
|
||||
"autocfg",
|
||||
"num-integer",
|
||||
"num-traits",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "num-traits"
|
||||
version = "0.2.14"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290"
|
||||
dependencies = [
|
||||
"autocfg",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pathfinder_geometry"
|
||||
version = "0.5.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0b7b7e7b4ea703700ce73ebf128e1450eb69c3a8329199ffbfb9b2a0418e5ad3"
|
||||
dependencies = [
|
||||
"log",
|
||||
"pathfinder_simd",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pathfinder_simd"
|
||||
version = "0.5.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "39fe46acc5503595e5949c17b818714d26fdf9b4920eacf3b2947f0199f4a6ff"
|
||||
dependencies = [
|
||||
"rustc_version",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pest"
|
||||
version = "2.1.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "10f4872ae94d7b90ae48754df22fd42ad52ce740b8f370b03da4835417403e53"
|
||||
dependencies = [
|
||||
"ucd-trie",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pkg-config"
|
||||
version = "0.3.19"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c"
|
||||
|
||||
[[package]]
|
||||
name = "plotters"
|
||||
version = "0.3.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "32a3fd9ec30b9749ce28cd91f255d569591cdf937fe280c312143e3c4bad6f2a"
|
||||
dependencies = [
|
||||
"chrono",
|
||||
"font-kit",
|
||||
"image",
|
||||
"lazy_static",
|
||||
"num-traits",
|
||||
"pathfinder_geometry",
|
||||
"plotters-backend",
|
||||
"plotters-bitmap",
|
||||
"plotters-svg",
|
||||
"ttf-parser",
|
||||
"wasm-bindgen",
|
||||
"web-sys",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "plotters-backend"
|
||||
version = "0.3.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d88417318da0eaf0fdcdb51a0ee6c3bed624333bff8f946733049380be67ac1c"
|
||||
|
||||
[[package]]
|
||||
name = "plotters-bitmap"
|
||||
version = "0.3.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "21362fa905695e5618aefd169358f52e0e8bc4a8e05333cf780fda8cddc00b54"
|
||||
dependencies = [
|
||||
"gif",
|
||||
"image",
|
||||
"plotters-backend",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "plotters-svg"
|
||||
version = "0.3.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "521fa9638fa597e1dc53e9412a4f9cefb01187ee1f7413076f9e6749e2885ba9"
|
||||
dependencies = [
|
||||
"plotters-backend",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "png"
|
||||
version = "0.16.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3c3287920cb847dee3de33d301c463fba14dda99db24214ddf93f83d3021f4c6"
|
||||
dependencies = [
|
||||
"bitflags",
|
||||
"crc32fast",
|
||||
"deflate",
|
||||
"miniz_oxide",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "proc-macro-error"
|
||||
version = "1.0.4"
|
||||
@@ -136,23 +592,80 @@ dependencies = [
|
||||
"proc-macro2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "redox_syscall"
|
||||
version = "0.2.10"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8383f39639269cde97d255a32bdb68c047337295414940c68bdd30c2e13203ff"
|
||||
dependencies = [
|
||||
"bitflags",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "redox_users"
|
||||
version = "0.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "528532f3d801c87aec9def2add9ca802fe569e44a544afe633765267840abe64"
|
||||
dependencies = [
|
||||
"getrandom",
|
||||
"redox_syscall",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "runner"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"chrono",
|
||||
"itertools",
|
||||
"plotters",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"structopt",
|
||||
"thiserror",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rustc_version"
|
||||
version = "0.3.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f0dfe2087c51c460008730de8b57e6a320782fbfb312e1f4d520e6c6fae155ee"
|
||||
dependencies = [
|
||||
"semver",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ryu"
|
||||
version = "1.0.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e"
|
||||
|
||||
[[package]]
|
||||
name = "same-file"
|
||||
version = "1.0.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
|
||||
dependencies = [
|
||||
"winapi-util",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "semver"
|
||||
version = "0.11.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6"
|
||||
dependencies = [
|
||||
"semver-parser",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "semver-parser"
|
||||
version = "0.10.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "00b0bef5b7f9e0df16536d3961cfb6e84331c065b4066afb39768d0e319411f7"
|
||||
dependencies = [
|
||||
"pest",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "serde"
|
||||
version = "1.0.127"
|
||||
@@ -184,6 +697,27 @@ dependencies = [
|
||||
"serde",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "servo-fontconfig"
|
||||
version = "0.5.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c7e3e22fe5fd73d04ebf0daa049d3efe3eae55369ce38ab16d07ddd9ac5c217c"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"servo-fontconfig-sys",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "servo-fontconfig-sys"
|
||||
version = "5.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e36b879db9892dfa40f95da1c38a835d41634b825fbd8c4c418093d53c24b388"
|
||||
dependencies = [
|
||||
"expat-sys",
|
||||
"freetype-sys",
|
||||
"pkg-config",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "strsim"
|
||||
version = "0.8.0"
|
||||
@@ -254,6 +788,29 @@ dependencies = [
|
||||
"syn",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "time"
|
||||
version = "0.1.44"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"wasi",
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ttf-parser"
|
||||
version = "0.12.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7ae2f58a822f08abdaf668897e96a5656fe72f5a9ce66422423e8849384872e6"
|
||||
|
||||
[[package]]
|
||||
name = "ucd-trie"
|
||||
version = "0.1.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "56dee185309b50d1f11bfedef0fe6d036842e3fb77413abef29f8f8d1c5d4c1c"
|
||||
|
||||
[[package]]
|
||||
name = "unicode-segmentation"
|
||||
version = "1.8.0"
|
||||
@@ -284,6 +841,93 @@ version = "0.9.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5fecdca9a5291cc2b8dcf7dc02453fee791a280f3743cb0905f8822ae463b3fe"
|
||||
|
||||
[[package]]
|
||||
name = "walkdir"
|
||||
version = "2.3.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56"
|
||||
dependencies = [
|
||||
"same-file",
|
||||
"winapi",
|
||||
"winapi-util",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "wasi"
|
||||
version = "0.10.0+wasi-snapshot-preview1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f"
|
||||
|
||||
[[package]]
|
||||
name = "wasm-bindgen"
|
||||
version = "0.2.77"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5e68338db6becec24d3c7977b5bf8a48be992c934b5d07177e3931f5dc9b076c"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"wasm-bindgen-macro",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "wasm-bindgen-backend"
|
||||
version = "0.2.77"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f34c405b4f0658583dba0c1c7c9b694f3cac32655db463b56c254a1c75269523"
|
||||
dependencies = [
|
||||
"bumpalo",
|
||||
"lazy_static",
|
||||
"log",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn",
|
||||
"wasm-bindgen-shared",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "wasm-bindgen-macro"
|
||||
version = "0.2.77"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b9d5a6580be83b19dc570a8f9c324251687ab2184e57086f71625feb57ec77c8"
|
||||
dependencies = [
|
||||
"quote",
|
||||
"wasm-bindgen-macro-support",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "wasm-bindgen-macro-support"
|
||||
version = "0.2.77"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e3775a030dc6f5a0afd8a84981a21cc92a781eb429acef9ecce476d0c9113e92"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn",
|
||||
"wasm-bindgen-backend",
|
||||
"wasm-bindgen-shared",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "wasm-bindgen-shared"
|
||||
version = "0.2.77"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c279e376c7a8e8752a8f1eaa35b7b0bee6bb9fb0cdacfa97cc3f1f289c87e2b4"
|
||||
|
||||
[[package]]
|
||||
name = "web-sys"
|
||||
version = "0.3.54"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0a84d70d1ec7d2da2d26a5bd78f4bca1b8c3254805363ce743b7a05bc30d195a"
|
||||
dependencies = [
|
||||
"js-sys",
|
||||
"wasm-bindgen",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "weezl"
|
||||
version = "0.1.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d8b77fdfd5a253be4ab714e4ffa3c49caf146b4de743e97510c0656cf90f1e8e"
|
||||
|
||||
[[package]]
|
||||
name = "winapi"
|
||||
version = "0.3.9"
|
||||
@@ -300,8 +944,26 @@ version = "0.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
|
||||
|
||||
[[package]]
|
||||
name = "winapi-util"
|
||||
version = "0.1.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178"
|
||||
dependencies = [
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "winapi-x86_64-pc-windows-gnu"
|
||||
version = "0.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
|
||||
|
||||
[[package]]
|
||||
name = "wio"
|
||||
version = "0.2.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5d129932f4644ac2396cb456385cbf9e63b5b30c6e8dc4820bdca4eb082037a5"
|
||||
dependencies = [
|
||||
"winapi",
|
||||
]
|
||||
|
||||
@@ -4,7 +4,9 @@ version = "0.1.0"
|
||||
edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
chrono = { version = "0.4.19", features = ["serde"] }
|
||||
itertools = "0.10.1"
|
||||
plotters = "^0.3.1"
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
serde_json = "1.0"
|
||||
structopt = "0.3"
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
use crate::exceptions::{CalculateError, IOError};
|
||||
use chrono::prelude::*;
|
||||
use itertools::Itertools;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::fs;
|
||||
@@ -45,6 +46,7 @@ pub struct Data {
|
||||
pub struct Calculation {
|
||||
pub metric: String,
|
||||
pub regression: bool,
|
||||
pub ts: DateTime<Utc>,
|
||||
pub data: Data,
|
||||
}
|
||||
|
||||
@@ -60,6 +62,11 @@ pub struct MeasurementGroup {
|
||||
// Given two measurements, return all the calculations. Calculations are
|
||||
// flagged as regressions or not regressions.
|
||||
fn calculate(metric: &str, dev: &Measurement, baseline: &Measurement) -> Vec<Calculation> {
|
||||
// choosing the current timestamp for all calculations to be the same.
|
||||
// this timestamp is not from the time of measurement because hyperfine
|
||||
// controls that. Since calculation is run directly after, this is fine.
|
||||
let ts = Utc::now();
|
||||
|
||||
let median_threshold = 1.05; // 5% regression threshold
|
||||
let median_difference = dev.median / baseline.median;
|
||||
|
||||
@@ -70,6 +77,7 @@ fn calculate(metric: &str, dev: &Measurement, baseline: &Measurement) -> Vec<Cal
|
||||
Calculation {
|
||||
metric: ["median", metric].join("_"),
|
||||
regression: median_difference > median_threshold,
|
||||
ts: ts,
|
||||
data: Data {
|
||||
threshold: median_threshold,
|
||||
difference: median_difference,
|
||||
@@ -80,6 +88,7 @@ fn calculate(metric: &str, dev: &Measurement, baseline: &Measurement) -> Vec<Cal
|
||||
Calculation {
|
||||
metric: ["stddev", metric].join("_"),
|
||||
regression: stddev_difference > stddev_threshold,
|
||||
ts: ts,
|
||||
data: Data {
|
||||
threshold: stddev_threshold,
|
||||
difference: stddev_difference,
|
||||
|
||||
@@ -42,6 +42,28 @@ pub enum CalculateError {
|
||||
BadBranchNameErr(String, String),
|
||||
}
|
||||
|
||||
// Parent exception type for the different sub commands of the runner app.
|
||||
#[derive(Debug, Error)]
|
||||
pub enum PlotError {
|
||||
#[error("{}", .0)]
|
||||
PlotIOErr(IOError),
|
||||
#[error("FilenameNotTimestampErr: {}", .0)]
|
||||
FilenameNotTimestampErr(String),
|
||||
#[error("BadJSONErr: JSON in file cannot be deserialized as expected.\nFilepath: {}\nOriginating Exception: {}", .0.to_string_lossy().into_owned(), .1.as_ref().map_or("None".to_owned(), |e| format!("{}", e)))]
|
||||
BadJSONErr(PathBuf, Option<serde_json::Error>),
|
||||
#[error("ChartErr: {}", .0)]
|
||||
ChartErr(Box<dyn std::error::Error>),
|
||||
}
|
||||
|
||||
// Parent exception type for the different sub commands of the runner app.
|
||||
#[derive(Debug, Error)]
|
||||
pub enum RunnerError {
|
||||
#[error("CalculateErr: {}", .0)]
|
||||
CalculateErr(CalculateError),
|
||||
#[error("PlotErr: {}", .0)]
|
||||
PlotErr(PlotError),
|
||||
}
|
||||
|
||||
// Tests for exceptions
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
@@ -3,10 +3,12 @@ extern crate structopt;
|
||||
mod calculate;
|
||||
mod exceptions;
|
||||
mod measure;
|
||||
mod plot;
|
||||
|
||||
use crate::calculate::Calculation;
|
||||
use crate::exceptions::CalculateError;
|
||||
use std::fs::File;
|
||||
use crate::exceptions::{CalculateError, RunnerError};
|
||||
use chrono::offset::Utc;
|
||||
use std::fs::{metadata, File};
|
||||
use std::io::Write;
|
||||
use std::path::PathBuf;
|
||||
use structopt::StructOpt;
|
||||
@@ -29,7 +31,11 @@ enum Opt {
|
||||
#[structopt(parse(from_os_str))]
|
||||
#[structopt(short)]
|
||||
results_dir: PathBuf,
|
||||
#[structopt(parse(from_os_str))]
|
||||
#[structopt(short)]
|
||||
out_dir: PathBuf,
|
||||
},
|
||||
Plot,
|
||||
}
|
||||
|
||||
// enables proper usage of exit() in main.
|
||||
@@ -37,7 +43,7 @@ enum Opt {
|
||||
//
|
||||
// This is where all the printing should happen. Exiting happens
|
||||
// in main, and module functions should only return values.
|
||||
fn run_app() -> Result<i32, CalculateError> {
|
||||
fn run_app() -> Result<i32, RunnerError> {
|
||||
// match what the user inputs from the cli
|
||||
match Opt::from_args() {
|
||||
// measure subcommand
|
||||
@@ -48,7 +54,8 @@ fn run_app() -> Result<i32, CalculateError> {
|
||||
// if there are any nonzero exit codes from the hyperfine runs,
|
||||
// return the first one. otherwise return zero.
|
||||
measure::measure(&projects_dir, &branch_name)
|
||||
.or_else(|e| Err(CalculateError::CalculateIOError(e)))?
|
||||
.or_else(|e| Err(CalculateError::CalculateIOError(e)))
|
||||
.or_else(|e| Err(RunnerError::CalculateErr(e)))?
|
||||
.iter()
|
||||
.map(|status| status.code())
|
||||
.flatten()
|
||||
@@ -62,9 +69,21 @@ fn run_app() -> Result<i32, CalculateError> {
|
||||
}
|
||||
|
||||
// calculate subcommand
|
||||
Opt::Calculate { results_dir } => {
|
||||
Opt::Calculate {
|
||||
results_dir,
|
||||
out_dir,
|
||||
} => {
|
||||
// validate output directory and exit early if it won't work.
|
||||
let md = metadata(&out_dir)
|
||||
.expect("Main: Failed to read specified output directory metadata. Does it exist?");
|
||||
if !md.is_dir() {
|
||||
eprintln!("Main: Output directory is not a directory");
|
||||
return Ok(1);
|
||||
}
|
||||
|
||||
// get all the calculations or gracefully show the user an exception
|
||||
let calculations = calculate::regressions(&results_dir)?;
|
||||
let calculations = calculate::regressions(&results_dir)
|
||||
.or_else(|e| Err(RunnerError::CalculateErr(e)))?;
|
||||
|
||||
// print all calculations to stdout so they can be easily debugged
|
||||
// via CI.
|
||||
@@ -77,9 +96,18 @@ fn run_app() -> Result<i32, CalculateError> {
|
||||
let json_calcs = serde_json::to_string_pretty(&calculations)
|
||||
.expect("Main: Failed to serialize calculations to json");
|
||||
|
||||
// if there are any calculations, use the first timestamp, if there are none
|
||||
// just use the current time.
|
||||
let ts = calculations
|
||||
.first()
|
||||
.map_or_else(|| Utc::now(), |calc| calc.ts);
|
||||
|
||||
// create the empty destination file, and write the json string
|
||||
let outfile = &mut results_dir.into_os_string();
|
||||
outfile.push("/final_calculations.json");
|
||||
let outfile = &mut out_dir.into_os_string();
|
||||
outfile.push("/final_calculations_");
|
||||
outfile.push(ts.timestamp().to_string());
|
||||
outfile.push(".json");
|
||||
|
||||
let mut f = File::create(outfile).expect("Main: Unable to create file");
|
||||
f.write_all(json_calcs.as_bytes())
|
||||
.expect("Main: Unable to write data");
|
||||
@@ -105,6 +133,13 @@ fn run_app() -> Result<i32, CalculateError> {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// plot subcommand
|
||||
Opt::Plot => {
|
||||
plot::draw_plot().or_else(|e| Err(RunnerError::PlotErr(e)))?;
|
||||
|
||||
Ok(0)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
189
performance/runner/src/plot.rs
Normal file
189
performance/runner/src/plot.rs
Normal file
@@ -0,0 +1,189 @@
|
||||
use crate::calculate::Calculation;
|
||||
use crate::exceptions::{IOError, PlotError};
|
||||
use chrono::prelude::*;
|
||||
use itertools::Itertools;
|
||||
use plotters::prelude::*;
|
||||
use std::cmp::Ordering;
|
||||
use std::fs;
|
||||
use std::fs::DirEntry;
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
struct Graph {
|
||||
title: String,
|
||||
data: Vec<(f32, f32)>,
|
||||
}
|
||||
|
||||
impl Graph {
|
||||
const DEFAULT_MIN_Y: f32 = -15.0;
|
||||
const DEFAULT_MAX_Y: f32 = 15.0;
|
||||
const DEFAULT_X_PADDING: f32 = 86400.0;
|
||||
|
||||
fn min_x(&self) -> f32 {
|
||||
self.data
|
||||
.clone()
|
||||
.into_iter()
|
||||
.map(|(x, _)| x)
|
||||
.reduce(f32::min)
|
||||
.unwrap()
|
||||
- Graph::DEFAULT_X_PADDING
|
||||
}
|
||||
fn min_y(&self) -> f32 {
|
||||
let min_data_point = self
|
||||
.data
|
||||
.clone()
|
||||
.into_iter()
|
||||
.map(|(_, y)| y)
|
||||
.reduce(f32::min)
|
||||
.unwrap();
|
||||
f32::min(Graph::DEFAULT_MIN_Y, min_data_point)
|
||||
}
|
||||
fn max_x(&self) -> f32 {
|
||||
self.data
|
||||
.clone()
|
||||
.into_iter()
|
||||
.map(|(x, _)| x)
|
||||
.reduce(f32::max)
|
||||
.unwrap()
|
||||
+ Graph::DEFAULT_X_PADDING
|
||||
}
|
||||
fn max_y(&self) -> f32 {
|
||||
let max_data_point = self
|
||||
.data
|
||||
.clone()
|
||||
.into_iter()
|
||||
.map(|(_, y)| y)
|
||||
.reduce(f32::max)
|
||||
.unwrap();
|
||||
f32::max(Graph::DEFAULT_MAX_Y, max_data_point)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn draw_plot() -> Result<(), PlotError> {
|
||||
// TODO `as` type coersion sucks. swap it out for something safer.
|
||||
let mut sorted_data: Vec<(NaiveDateTime, Calculation)> =
|
||||
read_data(Path::new("plots/raw_data/"))?;
|
||||
sorted_data.sort_by(|(ts_x, x), (ts_y, y)| {
|
||||
// sort by calculation type, then by timestamp
|
||||
match (&x.metric).cmp(&y.metric) {
|
||||
Ordering::Equal => (&ts_x).cmp(&ts_y),
|
||||
x => x,
|
||||
}
|
||||
});
|
||||
|
||||
let data_lines: Vec<Graph> = sorted_data
|
||||
.into_iter()
|
||||
.group_by(|(_, calc)| calc.metric.clone())
|
||||
.into_iter()
|
||||
.map(|(title, line)| Graph {
|
||||
title: title.to_owned(),
|
||||
data: line
|
||||
.map(|(ts, calc)| (ts.timestamp() as f32, calc.data.difference as f32))
|
||||
.collect(),
|
||||
})
|
||||
.collect();
|
||||
|
||||
for graph in data_lines {
|
||||
let title = format!("plots/{}.png", graph.title);
|
||||
let root = BitMapBackend::new(&title, (1600, 1200)).into_drawing_area();
|
||||
root.fill(&WHITE)
|
||||
.or_else(|e| Err(PlotError::ChartErr(Box::new(e))))?;
|
||||
let root = root.margin(10, 10, 10, 10);
|
||||
|
||||
// build chart foundation
|
||||
let mut chart = ChartBuilder::on(&root)
|
||||
.caption(&graph.title, ("sans-serif", 40).into_font())
|
||||
.x_label_area_size(20)
|
||||
.y_label_area_size(40)
|
||||
.build_cartesian_2d(graph.min_x()..graph.max_x(), graph.min_y()..graph.max_y())
|
||||
.or_else(|e| Err(PlotError::ChartErr(Box::new(e))))?;
|
||||
|
||||
// Draw Mesh
|
||||
chart
|
||||
.configure_mesh()
|
||||
.x_labels(5)
|
||||
.y_labels(5)
|
||||
.y_label_formatter(&|x| format!("{:.3}", x))
|
||||
.draw()
|
||||
.or_else(|e| Err(PlotError::ChartErr(Box::new(e))))?;
|
||||
|
||||
// Draw Line
|
||||
chart
|
||||
.draw_series(LineSeries::new(graph.data.clone(), &RED))
|
||||
.or_else(|e| Err(PlotError::ChartErr(Box::new(e))))?;
|
||||
|
||||
// Draw Points on Line
|
||||
chart
|
||||
.draw_series(PointSeries::of_element(
|
||||
graph.data.clone(),
|
||||
5,
|
||||
&RED,
|
||||
&|c, s, st| {
|
||||
return EmptyElement::at(c)
|
||||
+ Circle::new((0, 0), s, st.filled())
|
||||
+ Text::new(format!("{:?}", c), (10, 0), ("sans-serif", 20).into_font());
|
||||
},
|
||||
))
|
||||
.or_else(|e| Err(PlotError::ChartErr(Box::new(e))))?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn read_data(results_directory: &Path) -> Result<Vec<(NaiveDateTime, Calculation)>, PlotError> {
|
||||
fs::read_dir(results_directory)
|
||||
.or_else(|e| Err(IOError::ReadErr(results_directory.to_path_buf(), Some(e))))
|
||||
.or_else(|e| Err(PlotError::PlotIOErr(e)))?
|
||||
.into_iter()
|
||||
.map(|entry| {
|
||||
let ent: DirEntry = entry
|
||||
.or_else(|e| Err(IOError::ReadErr(results_directory.to_path_buf(), Some(e))))
|
||||
.or_else(|e| Err(PlotError::PlotIOErr(e)))?;
|
||||
|
||||
Ok(ent.path())
|
||||
})
|
||||
.collect::<Result<Vec<PathBuf>, PlotError>>()?
|
||||
.iter()
|
||||
.filter(|path| {
|
||||
path.extension()
|
||||
.and_then(|ext| ext.to_str())
|
||||
.map_or(false, |ext| ext.ends_with("json"))
|
||||
})
|
||||
.map(|p| {
|
||||
// TODO pull this filename nonsense out into a lib fn
|
||||
let filename = p
|
||||
.file_stem()
|
||||
.ok_or_else(|| IOError::MissingFilenameErr(p.to_path_buf()))
|
||||
.and_then(|name| {
|
||||
name.to_str()
|
||||
.ok_or_else(|| IOError::FilenameNotUnicodeErr(p.to_path_buf()))
|
||||
})
|
||||
.or_else(|e| Err(PlotError::PlotIOErr(e)));
|
||||
|
||||
let timestamp: Result<NaiveDateTime, PlotError> = filename.and_then(|fname| {
|
||||
fname
|
||||
.parse::<i64>()
|
||||
// not a timestamp because it's not a number
|
||||
.or_else(|_| Err(PlotError::FilenameNotTimestampErr(fname.to_owned())))
|
||||
.and_then(|secs| {
|
||||
// not a timestamp because the number is out of range
|
||||
NaiveDateTime::from_timestamp_opt(secs, 0)
|
||||
.ok_or_else(|| PlotError::FilenameNotTimestampErr(fname.to_owned()))
|
||||
})
|
||||
});
|
||||
|
||||
let x: Result<Vec<(NaiveDateTime, Calculation)>, PlotError> =
|
||||
timestamp.and_then(|ts| {
|
||||
fs::read_to_string(p)
|
||||
.or_else(|e| Err(IOError::BadFileContentsErr(p.clone(), Some(e))))
|
||||
.or_else(|e| Err(PlotError::PlotIOErr(e)))
|
||||
.and_then(|contents| {
|
||||
serde_json::from_str::<Vec<Calculation>>(&contents)
|
||||
.or_else(|e| Err(PlotError::BadJSONErr(p.clone(), Some(e))))
|
||||
.map(|calcs| calcs.iter().map(|c| (ts, c.clone())).collect())
|
||||
})
|
||||
});
|
||||
x
|
||||
})
|
||||
.collect::<Result<Vec<Vec<(NaiveDateTime, Calculation)>>, PlotError>>()
|
||||
.map(|x| x.concat())
|
||||
}
|
||||
@@ -1,5 +1,5 @@
|
||||
<p align="center">
|
||||
<img src="https://raw.githubusercontent.com/fishtown-analytics/dbt/6c6649f9129d5d108aa3b0526f634cd8f3a9d1ed/etc/dbt-logo-full.svg" alt="dbt logo" width="500"/>
|
||||
<img src="https://raw.githubusercontent.com/dbt-labs/dbt/6c6649f9129d5d108aa3b0526f634cd8f3a9d1ed/etc/dbt-logo-full.svg" alt="dbt logo" width="500"/>
|
||||
</p>
|
||||
|
||||
**[dbt](https://www.getdbt.com/)** (data build tool) enables data analysts and engineers to transform their data using the same practices that software engineers use to build applications.
|
||||
@@ -24,8 +24,8 @@ more information on using dbt with BigQuery, consult [the docs](https://docs.get
|
||||
|
||||
## Reporting bugs and contributing code
|
||||
|
||||
- Want to report a bug or request a feature? Let us know on [Slack](http://community.getdbt.com/), or open [an issue](https://github.com/fishtown-analytics/dbt/issues/new).
|
||||
- Want to help us build dbt? Check out the [Contributing Getting Started Guide](https://github.com/fishtown-analytics/dbt/blob/HEAD/CONTRIBUTING.md)
|
||||
- Want to report a bug or request a feature? Let us know on [Slack](http://community.getdbt.com/), or open [an issue](https://github.com/dbt-labs/dbt/issues/new).
|
||||
- Want to help us build dbt? Check out the [Contributing Getting Started Guide](https://github.com/dbt-labs/dbt/blob/HEAD/CONTRIBUTING.md)
|
||||
|
||||
## Code of Conduct
|
||||
|
||||
|
||||
@@ -1 +1 @@
|
||||
version = '0.21.0b1'
|
||||
version = '0.21.0b2'
|
||||
|
||||
@@ -83,6 +83,7 @@ class BigQueryCredentials(Credentials):
|
||||
# BigQuery allows an empty database / project, where it defers to the
|
||||
# environment for the project
|
||||
database: Optional[str]
|
||||
execution_project: Optional[str] = None
|
||||
timeout_seconds: Optional[int] = 300
|
||||
location: Optional[str] = None
|
||||
priority: Optional[Priority] = None
|
||||
@@ -104,12 +105,18 @@ class BigQueryCredentials(Credentials):
|
||||
_ALIASES = {
|
||||
'project': 'database',
|
||||
'dataset': 'schema',
|
||||
'target_project': 'target_database',
|
||||
'target_dataset': 'target_schema',
|
||||
}
|
||||
|
||||
@property
|
||||
def type(self):
|
||||
return 'bigquery'
|
||||
|
||||
@property
|
||||
def unique_field(self):
|
||||
return self.database
|
||||
|
||||
def _connection_keys(self):
|
||||
return ('method', 'database', 'schema', 'location', 'priority',
|
||||
'timeout_seconds', 'maximum_bytes_billed')
|
||||
@@ -118,12 +125,15 @@ class BigQueryCredentials(Credentials):
|
||||
def __pre_deserialize__(cls, d: Dict[Any, Any]) -> Dict[Any, Any]:
|
||||
# We need to inject the correct value of the database (aka project) at
|
||||
# this stage, ref
|
||||
# https://github.com/fishtown-analytics/dbt/pull/2908#discussion_r532927436.
|
||||
# https://github.com/dbt-labs/dbt/pull/2908#discussion_r532927436.
|
||||
|
||||
# `database` is an alias of `project` in BigQuery
|
||||
if 'database' not in d:
|
||||
_, database = get_bigquery_defaults()
|
||||
d['database'] = database
|
||||
# `execution_project` default to dataset/project
|
||||
if 'execution_project' not in d:
|
||||
d['execution_project'] = d['database']
|
||||
return d
|
||||
|
||||
|
||||
@@ -246,12 +256,12 @@ class BigQueryConnectionManager(BaseConnectionManager):
|
||||
cls.get_impersonated_bigquery_credentials(profile_credentials)
|
||||
else:
|
||||
creds = cls.get_bigquery_credentials(profile_credentials)
|
||||
database = profile_credentials.database
|
||||
execution_project = profile_credentials.execution_project
|
||||
location = getattr(profile_credentials, 'location', None)
|
||||
|
||||
info = client_info.ClientInfo(user_agent=f'dbt-{dbt_version}')
|
||||
return google.cloud.bigquery.Client(
|
||||
database,
|
||||
execution_project,
|
||||
creds,
|
||||
location=location,
|
||||
client_info=info,
|
||||
@@ -458,26 +468,40 @@ class BigQueryConnectionManager(BaseConnectionManager):
|
||||
conn = self.get_thread_connection()
|
||||
client = conn.handle
|
||||
|
||||
source_ref = self.table_ref(
|
||||
source.database, source.schema, source.table, conn)
|
||||
# -------------------------------------------------------------------------------
|
||||
# BigQuery allows to use copy API using two different formats:
|
||||
# 1. client.copy_table(source_table_id, destination_table_id)
|
||||
# where source_table_id = "your-project.source_dataset.source_table"
|
||||
# 2. client.copy_table(source_table_ids, destination_table_id)
|
||||
# where source_table_ids = ["your-project.your_dataset.your_table_name", ...]
|
||||
# Let's use uniform function call and always pass list there
|
||||
# -------------------------------------------------------------------------------
|
||||
if type(source) is not list:
|
||||
source = [source]
|
||||
|
||||
source_ref_array = [self.table_ref(
|
||||
src_table.database, src_table.schema, src_table.table, conn)
|
||||
for src_table in source]
|
||||
destination_ref = self.table_ref(
|
||||
destination.database, destination.schema, destination.table, conn)
|
||||
|
||||
logger.debug(
|
||||
'Copying table "{}" to "{}" with disposition: "{}"',
|
||||
source_ref.path, destination_ref.path, write_disposition)
|
||||
'Copying table(s) "{}" to "{}" with disposition: "{}"',
|
||||
', '.join(source_ref.path for source_ref in source_ref_array),
|
||||
destination_ref.path, write_disposition)
|
||||
|
||||
def copy_and_results():
|
||||
job_config = google.cloud.bigquery.CopyJobConfig(
|
||||
write_disposition=write_disposition)
|
||||
copy_job = client.copy_table(
|
||||
source_ref, destination_ref, job_config=job_config)
|
||||
source_ref_array, destination_ref, job_config=job_config)
|
||||
iterator = copy_job.result(timeout=self.get_timeout(conn))
|
||||
return copy_job, iterator
|
||||
|
||||
self._retry_and_handle(
|
||||
msg='copy table "{}" to "{}"'.format(
|
||||
source_ref.path, destination_ref.path),
|
||||
', '.join(source_ref.path for source_ref in source_ref_array),
|
||||
destination_ref.path),
|
||||
conn=conn, fn=copy_and_results)
|
||||
|
||||
@staticmethod
|
||||
|
||||
@@ -274,7 +274,7 @@ class BigQueryAdapter(BaseAdapter):
|
||||
# next page. If that key table gets dropped before we run
|
||||
# list_relations, then this will 404. So, we avoid this
|
||||
# situation by making the page size sufficiently large.
|
||||
# see: https://github.com/fishtown-analytics/dbt/issues/726
|
||||
# see: https://github.com/dbt-labs/dbt/issues/726
|
||||
# TODO: cache the list of relations up front, and then we
|
||||
# won't need to do this
|
||||
max_results=100000)
|
||||
@@ -769,13 +769,10 @@ class BigQueryAdapter(BaseAdapter):
|
||||
return result
|
||||
|
||||
@available.parse(lambda *a, **k: {})
|
||||
def get_table_options(
|
||||
self, config: Dict[str, Any], node: Dict[str, Any], temporary: bool
|
||||
def get_common_options(
|
||||
self, config: Dict[str, Any], node: Dict[str, Any], temporary: bool = False
|
||||
) -> Dict[str, Any]:
|
||||
opts = {}
|
||||
if temporary:
|
||||
expiration = 'TIMESTAMP_ADD(CURRENT_TIMESTAMP(), INTERVAL 12 hour)'
|
||||
opts['expiration_timestamp'] = expiration
|
||||
|
||||
if (config.get('hours_to_expiration') is not None) and (not temporary):
|
||||
expiration = (
|
||||
@@ -787,13 +784,25 @@ class BigQueryAdapter(BaseAdapter):
|
||||
description = sql_escape(node['description'])
|
||||
opts['description'] = '"""{}"""'.format(description)
|
||||
|
||||
if config.get('kms_key_name') is not None:
|
||||
opts['kms_key_name'] = "'{}'".format(config.get('kms_key_name'))
|
||||
|
||||
if config.get('labels'):
|
||||
labels = config.get('labels', {})
|
||||
opts['labels'] = list(labels.items())
|
||||
|
||||
return opts
|
||||
|
||||
@available.parse(lambda *a, **k: {})
|
||||
def get_table_options(
|
||||
self, config: Dict[str, Any], node: Dict[str, Any], temporary: bool
|
||||
) -> Dict[str, Any]:
|
||||
opts = self.get_common_options(config, node, temporary)
|
||||
|
||||
if temporary:
|
||||
expiration = 'TIMESTAMP_ADD(CURRENT_TIMESTAMP(), INTERVAL 12 hour)'
|
||||
opts['expiration_timestamp'] = expiration
|
||||
|
||||
if config.get('kms_key_name') is not None:
|
||||
opts['kms_key_name'] = "'{}'".format(config.get('kms_key_name'))
|
||||
|
||||
if config.get('require_partition_filter'):
|
||||
opts['require_partition_filter'] = config.get(
|
||||
'require_partition_filter')
|
||||
@@ -804,6 +813,13 @@ class BigQueryAdapter(BaseAdapter):
|
||||
|
||||
return opts
|
||||
|
||||
@available.parse(lambda *a, **k: {})
|
||||
def get_view_options(
|
||||
self, config: Dict[str, Any], node: Dict[str, Any]
|
||||
) -> Dict[str, Any]:
|
||||
opts = self.get_common_options(config, node)
|
||||
return opts
|
||||
|
||||
@available.parse_none
|
||||
def grant_access_to(self, entity, entity_type, role, grant_target_dict):
|
||||
"""
|
||||
|
||||
@@ -27,10 +27,7 @@
|
||||
|
||||
{%- endmacro -%}
|
||||
|
||||
|
||||
{% macro bigquery_table_options(config, node, temporary) %}
|
||||
{% set opts = adapter.get_table_options(config, node, temporary) %}
|
||||
|
||||
{% macro bigquery_options(opts) %}
|
||||
{% set options -%}
|
||||
OPTIONS({% for opt_key, opt_val in opts.items() %}
|
||||
{{ opt_key }}={{ opt_val }}{{ "," if not loop.last }}
|
||||
@@ -39,6 +36,11 @@
|
||||
{%- do return(options) -%}
|
||||
{%- endmacro -%}
|
||||
|
||||
{% macro bigquery_table_options(config, node, temporary) %}
|
||||
{% set opts = adapter.get_table_options(config, node, temporary) %}
|
||||
{%- do return(bigquery_options(opts)) -%}
|
||||
{%- endmacro -%}
|
||||
|
||||
{% macro bigquery__create_table_as(temporary, relation, sql) -%}
|
||||
{%- set raw_partition_by = config.get('partition_by', none) -%}
|
||||
{%- set raw_cluster_by = config.get('cluster_by', none) -%}
|
||||
@@ -58,13 +60,18 @@
|
||||
|
||||
{%- endmacro -%}
|
||||
|
||||
{% macro bigquery_view_options(config, node) %}
|
||||
{% set opts = adapter.get_view_options(config, node) %}
|
||||
{%- do return(bigquery_options(opts)) -%}
|
||||
{%- endmacro -%}
|
||||
|
||||
{% macro bigquery__create_view_as(relation, sql) -%}
|
||||
{%- set sql_header = config.get('sql_header', none) -%}
|
||||
|
||||
{{ sql_header if sql_header is not none }}
|
||||
|
||||
create or replace view {{ relation }}
|
||||
{{ bigquery_table_options(config, model, temporary=false) }}
|
||||
{{ bigquery_view_options(config, model) }}
|
||||
as {{ sql }};
|
||||
|
||||
{% endmacro %}
|
||||
|
||||
@@ -3,31 +3,24 @@
|
||||
{# Setup #}
|
||||
{{ run_hooks(pre_hooks) }}
|
||||
|
||||
{# there should be exactly one ref or exactly one source #}
|
||||
{% set destination = this.incorporate(type='table') %}
|
||||
|
||||
{% set dependency_type = none %}
|
||||
{% if (model.refs | length) == 1 and (model.sources | length) == 0 %}
|
||||
{% set dependency_type = 'ref' %}
|
||||
{% elif (model.refs | length) == 0 and (model.sources | length) == 1 %}
|
||||
{% set dependency_type = 'source' %}
|
||||
{% else %}
|
||||
{% set msg %}
|
||||
Expected exactly one ref or exactly one source, instead got {{ model.refs | length }} models and {{ model.sources | length }} sources.
|
||||
{% endset %}
|
||||
{% do exceptions.raise_compiler_error(msg) %}
|
||||
{% endif %}
|
||||
{# there can be several ref() or source() according to BQ copy API docs #}
|
||||
{# cycle over ref() and source() to create source tables array #}
|
||||
{% set source_array = [] %}
|
||||
{% for ref_table in model.refs %}
|
||||
{{ source_array.append(ref(*ref_table)) }}
|
||||
{% endfor %}
|
||||
|
||||
{% if dependency_type == 'ref' %}
|
||||
{% set src = ref(*model.refs[0]) %}
|
||||
{% else %}
|
||||
{% set src = source(*model.sources[0]) %}
|
||||
{% endif %}
|
||||
{% for src_table in model.sources %}
|
||||
{{ source_array.append(source(*src_table)) }}
|
||||
{% endfor %}
|
||||
|
||||
{# Call adapter's copy_table function #}
|
||||
{%- set result_str = adapter.copy_table(
|
||||
src,
|
||||
source_array,
|
||||
destination,
|
||||
config.get('copy_materialization', 'table')) -%}
|
||||
config.get('copy_materialization', default = 'table')) -%}
|
||||
|
||||
{{ store_result('main', response=result_str) }}
|
||||
|
||||
|
||||
@@ -20,7 +20,7 @@ except ImportError:
|
||||
|
||||
|
||||
package_name = "dbt-bigquery"
|
||||
package_version = "0.21.0b1"
|
||||
package_version = "0.21.0b2"
|
||||
description = """The bigquery adapter plugin for dbt (data build tool)"""
|
||||
|
||||
this_directory = os.path.abspath(os.path.dirname(__file__))
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user