forked from repo-mirrors/dbt-core
Compare commits: 25 commits
adding-sem... ... ns/add-to-
| Author | SHA1 | Date |
|---|---|---|
| | dd53725f43 | |
| | 82d9b2fa87 | |
| | 3f96fad4f9 | |
| | c2c4757a2b | |
| | c65ba11ae6 | |
| | b0651b13b5 | |
| | a34521ec07 | |
| | da47b90503 | |
| | db99e2f68d | |
| | cbb9117ab9 | |
| | e2ccf011d9 | |
| | 17014bfad3 | |
| | 7b464b8a49 | |
| | 5c765bf3e2 | |
| | 93619a9a37 | |
| | a181cee6ae | |
| | 3aeab73740 | |
| | 9801eebc58 | |
| | 6954c4df1b | |
| | f841a7ca76 | |
| | 07a004b301 | |
| | b05582de39 | |
| | fa7c4d19f0 | |
| | 066346faa2 | |
| | 0a03355ceb | |
@@ -5,8 +5,10 @@ parse = (?P<major>\d+)
 	\.(?P<patch>\d+)
 	((?P<prekind>a|b|rc)
 	(?P<pre>\d+)  # pre-release version num
+	)(\.(?P<nightly>[a-z..0-9]+)
 	)?
 serialize =
+	{major}.{minor}.{patch}{prekind}{pre}.{nightly}
 	{major}.{minor}.{patch}{prekind}{pre}
 	{major}.{minor}.{patch}
 commit = False
@@ -24,6 +26,8 @@ values =

 [bumpversion:part:pre]
 first_value = 1

+[bumpversion:part:nightly]
+
 [bumpversion:file:core/setup.py]

 [bumpversion:file:core/dbt/version.py]
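For orientation, a minimal Python sketch of how the new optional `nightly` part matches a nightly version string. The full pattern is reconstructed from this hunk; the `minor` part sits outside the hunk and is an assumption, so it may differ slightly from the repo's actual config:

```python
import re

# Reconstructed from the hunk above; the minor part is assumed.
PARSE = re.compile(
    r"(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)"
    r"((?P<prekind>a|b|rc)(?P<pre>\d+)"   # pre-release version num
    r"(\.(?P<nightly>[a-z..0-9]+))?"      # new: optional nightly segment
    r")?"
)

m = PARSE.match("1.5.0b1.dev02032023")
assert m and m.group("nightly") == "dev02032023"
```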
6 .changes/unreleased/Features-20230107-003157.yaml Normal file
@@ -0,0 +1,6 @@
kind: Features
body: Have dbt debug spit out structured json logs with flags enabled.
time: 2023-01-07T00:31:57.516063-08:00
custom:
  Author: versusfacit
  Issue: "5353"

6 .changes/unreleased/Features-20230118-233801.yaml Normal file
@@ -0,0 +1,6 @@
kind: Features
body: add adapter_response to dbt test and freshness result
time: 2023-01-18T23:38:01.857342+08:00
custom:
  Author: aezomz
  Issue: "2964"

6 .changes/unreleased/Features-20230120-112921.yaml Normal file
@@ -0,0 +1,6 @@
kind: Features
body: Improve error message for packages missing `dbt_project.yml`
time: 2023-01-20T11:29:21.509967-07:00
custom:
  Author: dbeatty10
  Issue: "6663"

6 .changes/unreleased/Fixes-20230116-123645.yaml Normal file
@@ -0,0 +1,6 @@
kind: Fixes
body: Respect quoting config for dbt.ref(), dbt.source(), and dbt.this() in dbt-py models
time: 2023-01-16T12:36:45.63092+01:00
custom:
  Author: jtcohen6
  Issue: 6103 6619

6 .changes/unreleased/Fixes-20230117-101342.yaml Normal file
@@ -0,0 +1,6 @@
kind: Fixes
body: Provide backward compatibility for `get_merge_sql` arguments
time: 2023-01-17T10:13:42.118336-06:00
custom:
  Author: dave-connors-3
  Issue: "6625"

6 .changes/unreleased/Fixes-20230124-115837.yaml Normal file
@@ -0,0 +1,6 @@
kind: Fixes
body: Include adapter_response in NodeFinished run_result log event
time: 2023-01-24T11:58:37.74179-05:00
custom:
  Author: gshank
  Issue: "6703"

6 .changes/unreleased/Fixes-20230124-141943.yaml Normal file
@@ -0,0 +1,6 @@
kind: Fixes
body: Sort cli vars before hashing for partial parsing
time: 2023-01-24T14:19:43.333628-05:00
custom:
  Author: gshank
  Issue: "6710"

6 .changes/unreleased/Fixes-20230125-191739.yaml Normal file
@@ -0,0 +1,6 @@
kind: Fixes
body: '[Regression] exposure_content referenced incorrectly'
time: 2023-01-25T19:17:39.942081-05:00
custom:
  Author: Mathyoub
  Issue: "6738"

6 .changes/unreleased/Under the Hood-20230113-150700.yaml Normal file
@@ -0,0 +1,6 @@
kind: Under the Hood
body: Port docs tests to pytest
time: 2023-01-13T15:07:00.477038-05:00
custom:
  Author: peterallenwebb
  Issue: "6573"

7 .changes/unreleased/Under the Hood-20230120-172254.yaml Normal file
@@ -0,0 +1,7 @@
kind: Under the Hood
body: Replaced the EmptyLine event with a more general Formatting event, and added
  a Note event.
time: 2023-01-20T17:22:54.45828-05:00
custom:
  Author: peterallenwebb
  Issue: "6481"

6 .changes/unreleased/Under the Hood-20230122-215235.yaml Normal file
@@ -0,0 +1,6 @@
kind: Under the Hood
body: Small optimization on manifest parsing benefitting large DAGs
time: 2023-01-22T21:52:35.549814+01:00
custom:
  Author: boxysean
  Issue: "6697"

6 .changes/unreleased/Under the Hood-20230124-153553.yaml Normal file
@@ -0,0 +1,6 @@
kind: Under the Hood
body: Revised and simplified various structured logging events
time: 2023-01-24T15:35:53.065356-05:00
custom:
  Author: peterallenwebb
  Issue: 6664 6665 6666

6 .changes/unreleased/Under the Hood-20230126-135939.yaml Normal file
@@ -0,0 +1,6 @@
kind: Under the Hood
body: ' Optimized GraphQueue to remove graph analysis bottleneck in large dags.'
time: 2023-01-26T13:59:39.518345-05:00
custom:
  Author: peterallenwebb
  Issue: "6759"
@@ -88,7 +88,7 @@ custom:
 footerFormat: |
   {{- $contributorDict := dict }}
   {{- /* any names added to this list should be all lowercase for later matching purposes */}}
-  {{- $core_team := list "michelleark" "peterallenwebb" "emmyoop" "nathaniel-may" "gshank" "leahwicz" "chenyulinx" "stu-k" "iknox-fa" "versusfacit" "mcknight-42" "jtcohen6" "aranke" "dependabot[bot]" "snyk-bot" "colin-rogers-dbt" }}
+  {{- $core_team := list "michelleark" "peterallenwebb" "emmyoop" "nathaniel-may" "gshank" "leahwicz" "chenyulinx" "stu-k" "iknox-fa" "versusfacit" "mcknight-42" "jtcohen6" "aranke" "dependabot[bot]" "snyk-bot" "colin-rogers-dbt" "nssalian" }}
   {{- range $change := .Changes }}
   {{- $authorList := splitList " " $change.Custom.Author }}
   {{- /* loop through all authors for a single changelog */}}
109 .github/workflows/nightly-release.yml vendored Normal file
@@ -0,0 +1,109 @@
# **what?**
# Nightly releases to GitHub and PyPI. This workflow produces the following outcomes:
# - generate and validate data for the nightly release (commit SHA, version number, release branch);
# - pass data to the release workflow;
# - the nightly release is pushed to GitHub as a draft release;
# - the nightly build is pushed to test PyPI;
#
# **why?**
# Ensure an automated and tested release process for nightly builds
#
# **when?**
# This workflow runs on schedule or can be run manually on demand.

name: Nightly Test Release to GitHub and PyPI

on:
  workflow_dispatch: # for manual triggering
  schedule:
    - cron: 0 9 * * *

permissions:
  contents: write # this is the permission that allows creating a new release

defaults:
  run:
    shell: bash

env:
  RELEASE_BRANCH: "main"

jobs:
  aggregate-release-data:
    runs-on: ubuntu-latest

    outputs:
      commit_sha: ${{ steps.resolve-commit-sha.outputs.release_commit }}
      version_number: ${{ steps.nightly-release-version.outputs.number }}
      release_branch: ${{ steps.release-branch.outputs.name }}

    steps:
      - name: "Checkout ${{ github.repository }} Branch ${{ env.RELEASE_BRANCH }}"
        uses: actions/checkout@v3
        with:
          ref: ${{ env.RELEASE_BRANCH }}

      - name: "Resolve Commit To Release"
        id: resolve-commit-sha
        run: |
          commit_sha=$(git rev-parse HEAD)
          echo "release_commit=$commit_sha" >> $GITHUB_OUTPUT

      - name: "Get Current Version Number"
        id: version-number-sources
        run: |
          current_version=`awk -F"current_version = " '{print $2}' .bumpversion.cfg | tr '\n' ' '`
          echo "current_version=$current_version" >> $GITHUB_OUTPUT

      - name: "Audit Version And Parse Into Parts"
        id: semver
        uses: dbt-labs/actions/parse-semver@v1.1.0
        with:
          version: ${{ steps.version-number-sources.outputs.current_version }}

      - name: "Get Current Date"
        id: current-date
        run: echo "date=$(date +'%m%d%Y')" >> $GITHUB_OUTPUT

      - name: "Generate Nightly Release Version Number"
        id: nightly-release-version
        run: |
          number="${{ steps.semver.outputs.version }}.dev${{ steps.current-date.outputs.date }}+nightly"
          echo "number=$number" >> $GITHUB_OUTPUT

      - name: "Audit Nightly Release Version And Parse Into Parts"
        uses: dbt-labs/actions/parse-semver@v1.1.0
        with:
          version: ${{ steps.nightly-release-version.outputs.number }}

      - name: "Set Release Branch"
        id: release-branch
        run: |
          echo "name=${{ env.RELEASE_BRANCH }}" >> $GITHUB_OUTPUT

  log-outputs-aggregate-release-data:
    runs-on: ubuntu-latest
    needs: [aggregate-release-data]

    steps:
      - name: "[DEBUG] Log Outputs"
        run: |
          echo commit_sha    : ${{ needs.aggregate-release-data.outputs.commit_sha }}
          echo version_number: ${{ needs.aggregate-release-data.outputs.version_number }}
          echo release_branch: ${{ needs.aggregate-release-data.outputs.release_branch }}

  release-github-pypi:
    needs: [aggregate-release-data]

    uses: ./.github/workflows/release.yml
    with:
      sha: ${{ needs.aggregate-release-data.outputs.commit_sha }}
      target_branch: ${{ needs.aggregate-release-data.outputs.release_branch }}
      version_number: ${{ needs.aggregate-release-data.outputs.version_number }}
      build_script_path: "scripts/build-dist.sh"
      env_setup_script_path: "scripts/env-setup.sh"
      s3_bucket_name: "core-team-artifacts"
      package_test_command: "dbt --version"
      test_run: true
      nightly_release: true
    secrets: inherit
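A small sketch of the version string the `nightly-release-version` step produces (Python for illustration; the workflow itself does this in shell with `parse-semver` and `date +'%m%d%Y'`):

```python
from datetime import datetime, timezone

base_version = "1.5.0b1"  # current_version as read from .bumpversion.cfg
stamp = datetime.now(timezone.utc).strftime("%m%d%Y")  # mirrors date +'%m%d%Y'
nightly_version = f"{base_version}.dev{stamp}+nightly"
# e.g. "1.5.0b1.dev02032023+nightly", then re-validated by parse-semver
```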
2 .github/workflows/release-branch-tests.yml vendored
@@ -39,7 +39,7 @@ jobs:
       max-parallel: 1
       fail-fast: false
       matrix:
-        branch: [1.0.latest, 1.1.latest, 1.2.latest, 1.3.latest, main]
+        branch: [1.0.latest, 1.1.latest, 1.2.latest, 1.3.latest, 1.4.latest, main]

     steps:
       - name: Call CI workflow for ${{ matrix.branch }} branch
340 .github/workflows/release.yml vendored
@@ -1,24 +1,110 @@
 # **what?**
-# Take the given commit, run unit tests specifically on that sha, build and
-# package it, and then release to GitHub and PyPi with that specific build
+# Release workflow provides the following steps:
+# - checkout the given commit;
+# - validate version in sources and changelog file for given version;
+# - bump the version and generate a changelog if needed;
+# - merge all changes to the target branch if needed;
+# - run unit and integration tests against given commit;
+# - build and package that SHA;
+# - release it to GitHub and PyPI with that specific build;
 #
 # **why?**
 # Ensure an automated and tested release process
 #
 # **when?**
-# This will only run manually with a given sha and version
+# This workflow can be run manually on demand or can be called by other workflows

-name: Release to GitHub and PyPi
+name: Release to GitHub and PyPI

 on:
   workflow_dispatch:
     inputs:
       sha:
-        description: 'The last commit sha in the release'
-        required: true
+        description: "The last commit sha in the release"
+        type: string
+        required: true
+      target_branch:
+        description: "The branch to release from"
+        type: string
+        required: true
       version_number:
-        description: 'The release version number (i.e. 1.0.0b1)'
-        required: true
+        description: "The release version number (i.e. 1.0.0b1)"
+        type: string
+        required: true
+      build_script_path:
+        description: "Build script path"
+        type: string
+        default: "scripts/build-dist.sh"
+        required: true
+      env_setup_script_path:
+        description: "Environment setup script path"
+        type: string
+        default: "scripts/env-setup.sh"
+        required: false
+      s3_bucket_name:
+        description: "AWS S3 bucket name"
+        type: string
+        default: "core-team-artifacts"
+        required: true
+      package_test_command:
+        description: "Package test command"
+        type: string
+        default: "dbt --version"
+        required: true
+      test_run:
+        description: "Test run (Publish release as draft)"
+        type: boolean
+        default: true
+        required: false
+      nightly_release:
+        description: "Nightly release to dev environment"
+        type: boolean
+        default: false
+        required: false
+  workflow_call:
+    inputs:
+      sha:
+        description: "The last commit sha in the release"
+        type: string
+        required: true
+      target_branch:
+        description: "The branch to release from"
+        type: string
+        required: true
+      version_number:
+        description: "The release version number (i.e. 1.0.0b1)"
+        type: string
+        required: true
+      build_script_path:
+        description: "Build script path"
+        type: string
+        default: "scripts/build-dist.sh"
+        required: true
+      env_setup_script_path:
+        description: "Environment setup script path"
+        type: string
+        default: "scripts/env-setup.sh"
+        required: false
+      s3_bucket_name:
+        description: "AWS S3 bucket name"
+        type: string
+        default: "core-team-artifacts"
+        required: true
+      package_test_command:
+        description: "Package test command"
+        type: string
+        default: "dbt --version"
+        required: true
+      test_run:
+        description: "Test run (Publish release as draft)"
+        type: boolean
+        default: true
+        required: false
+      nightly_release:
+        description: "Nightly release to dev environment"
+        type: boolean
+        default: false
+        required: false

 permissions:
   contents: write # this is the permission that allows creating a new release
@@ -28,175 +114,117 @@ defaults:
     shell: bash

 jobs:
-  unit:
-    name: Unit test
+  log-inputs:
+    name: Log Inputs
     runs-on: ubuntu-latest

-    env:
-      TOXENV: "unit"
-
     steps:
-      - name: Check out the repository
-        uses: actions/checkout@v2
-        with:
-          persist-credentials: false
-          ref: ${{ github.event.inputs.sha }}
-
-      - name: Set up Python
-        uses: actions/setup-python@v2
-        with:
-          python-version: 3.8
-
-      - name: Install python dependencies
+      - name: "[DEBUG] Print Variables"
         run: |
-          pip install --user --upgrade pip
-          pip install tox
-          pip --version
-          tox --version
+          echo The last commit sha in the release: ${{ inputs.sha }}
+          echo The branch to release from: ${{ inputs.target_branch }}
+          echo The release version number: ${{ inputs.version_number }}
+          echo Build script path: ${{ inputs.build_script_path }}
+          echo Environment setup script path: ${{ inputs.env_setup_script_path }}
+          echo AWS S3 bucket name: ${{ inputs.s3_bucket_name }}
+          echo Package test command: ${{ inputs.package_test_command }}
+          echo Test run: ${{ inputs.test_run }}
+          echo Nightly release: ${{ inputs.nightly_release }}

-      - name: Run tox
-        run: tox
+  bump-version-generate-changelog:
+    name: Bump package version, Generate changelog

-  build:
-    name: build packages
+    uses: dbt-labs/dbt-release/.github/workflows/release-prep.yml@main

-    runs-on: ubuntu-latest
+    with:
+      sha: ${{ inputs.sha }}
+      version_number: ${{ inputs.version_number }}
+      target_branch: ${{ inputs.target_branch }}
+      env_setup_script_path: ${{ inputs.env_setup_script_path }}
+      test_run: ${{ inputs.test_run }}
+      nightly_release: ${{ inputs.nightly_release }}

-    steps:
-      - name: Check out the repository
-        uses: actions/checkout@v2
-        with:
-          persist-credentials: false
-          ref: ${{ github.event.inputs.sha }}
+    secrets:
+      FISHTOWN_BOT_PAT: ${{ secrets.FISHTOWN_BOT_PAT }}

-      - name: Set up Python
-        uses: actions/setup-python@v2
-        with:
-          python-version: 3.8
+  log-outputs-bump-version-generate-changelog:
+    name: "[Log output] Bump package version, Generate changelog"
+    if: ${{ !failure() && !cancelled() }}
+    needs: [bump-version-generate-changelog]
+    runs-on: ubuntu-latest

-      - name: Install python dependencies
+    steps:
+      - name: Print variables
         run: |
-          pip install --user --upgrade pip
-          pip install --upgrade setuptools wheel twine check-wheel-contents
-          pip --version
+          echo Final SHA     : ${{ needs.bump-version-generate-changelog.outputs.final_sha }}
+          echo Changelog path: ${{ needs.bump-version-generate-changelog.outputs.changelog_path }}

-      - name: Build distributions
-        run: ./scripts/build-dist.sh
+  build-test-package:
+    name: Build, Test, Package
+    if: ${{ !failure() && !cancelled() }}
+    needs: [bump-version-generate-changelog]

-      - name: Show distributions
-        run: ls -lh dist/
+    uses: dbt-labs/dbt-release/.github/workflows/build.yml@main

-      - name: Check distribution descriptions
-        run: |
-          twine check dist/*
+    with:
+      sha: ${{ needs.bump-version-generate-changelog.outputs.final_sha }}
+      version_number: ${{ inputs.version_number }}
+      changelog_path: ${{ needs.bump-version-generate-changelog.outputs.changelog_path }}
+      build_script_path: ${{ inputs.build_script_path }}
+      s3_bucket_name: ${{ inputs.s3_bucket_name }}
+      package_test_command: ${{ inputs.package_test_command }}
+      test_run: ${{ inputs.test_run }}
+      nightly_release: ${{ inputs.nightly_release }}

-      - name: Check wheel contents
-        run: |
-          check-wheel-contents dist/*.whl --ignore W007,W008
-
-      - uses: actions/upload-artifact@v2
-        with:
-          name: dist
-          path: |
-            dist/
-            !dist/dbt-${{github.event.inputs.version_number}}.tar.gz
-
-  test-build:
-    name: verify packages
-
-    needs: [build, unit]
-
-    runs-on: ubuntu-latest
-
-    steps:
-      - name: Set up Python
-        uses: actions/setup-python@v2
-        with:
-          python-version: 3.8
-
-      - name: Install python dependencies
-        run: |
-          pip install --user --upgrade pip
-          pip install --upgrade wheel
-          pip --version
-
-      - uses: actions/download-artifact@v2
-        with:
-          name: dist
-          path: dist/
-
-      - name: Show distributions
-        run: ls -lh dist/
-
-      - name: Install wheel distributions
-        run: |
-          find ./dist/*.whl -maxdepth 1 -type f | xargs pip install --force-reinstall --find-links=dist/
-
-      - name: Check wheel distributions
-        run: |
-          dbt --version
-
-      - name: Install source distributions
-        run: |
-          find ./dist/*.gz -maxdepth 1 -type f | xargs pip install --force-reinstall --find-links=dist/
-
-      - name: Check source distributions
-        run: |
-          dbt --version
+    secrets:
+      AWS_ACCESS_KEY_ID: ${{ secrets.PRODUCTION_AWS_ACCESS_KEY_ID }}
+      AWS_SECRET_ACCESS_KEY: ${{ secrets.PRODUCTION_AWS_SECRET_ACCESS_KEY }}

   github-release:
     name: GitHub Release
+    if: ${{ !failure() && !cancelled() }}

-    needs: test-build
+    needs: [bump-version-generate-changelog, build-test-package]

-    runs-on: ubuntu-latest
+    uses: dbt-labs/dbt-release/.github/workflows/github-release.yml@main

-    steps:
-      - uses: actions/download-artifact@v2
-        with:
-          name: dist
-          path: '.'
-
-      # Need to set an output variable because env variables can't be taken as input
-      # This is needed for the next step with releasing to GitHub
-      - name: Find release type
-        id: release_type
-        env:
-          IS_PRERELEASE: ${{ contains(github.event.inputs.version_number, 'rc') || contains(github.event.inputs.version_number, 'b') }}
-        run: |
-          echo "isPrerelease=$IS_PRERELEASE" >> $GITHUB_OUTPUT
-
-      - name: Creating GitHub Release
-        uses: softprops/action-gh-release@v1
-        with:
-          name: dbt-core v${{github.event.inputs.version_number}}
-          tag_name: v${{github.event.inputs.version_number}}
-          prerelease: ${{ steps.release_type.outputs.isPrerelease }}
-          target_commitish: ${{github.event.inputs.sha}}
-          body: |
-            [Release notes](https://github.com/dbt-labs/dbt-core/blob/main/CHANGELOG.md)
-          files: |
-            dbt_postgres-${{github.event.inputs.version_number}}-py3-none-any.whl
-            dbt_core-${{github.event.inputs.version_number}}-py3-none-any.whl
-            dbt-postgres-${{github.event.inputs.version_number}}.tar.gz
-            dbt-core-${{github.event.inputs.version_number}}.tar.gz
+    with:
+      sha: ${{ needs.bump-version-generate-changelog.outputs.final_sha }}
+      version_number: ${{ inputs.version_number }}
+      changelog_path: ${{ needs.bump-version-generate-changelog.outputs.changelog_path }}
+      test_run: ${{ inputs.test_run }}

   pypi-release:
-    name: Pypi release
+    name: PyPI Release

-    runs-on: ubuntu-latest
+    needs: [github-release]

-    needs: github-release
+    uses: dbt-labs/dbt-release/.github/workflows/pypi-release.yml@main

-    environment: PypiProd
-    steps:
-      - uses: actions/download-artifact@v2
-        with:
-          name: dist
-          path: 'dist'
+    with:
+      version_number: ${{ inputs.version_number }}
+      test_run: ${{ inputs.test_run }}

-      - name: Publish distribution to PyPI
-        uses: pypa/gh-action-pypi-publish@v1.4.2
-        with:
-          password: ${{ secrets.PYPI_API_TOKEN }}
+    secrets:
+      PYPI_API_TOKEN: ${{ secrets.PYPI_API_TOKEN }}
+      TEST_PYPI_API_TOKEN: ${{ secrets.TEST_PYPI_API_TOKEN }}
+
+  slack-notification:
+    name: Slack Notification
+    if: ${{ failure() }}
+
+    needs:
+      [
+        bump-version-generate-changelog,
+        build-test-package,
+        github-release,
+        pypi-release,
+      ]
+
+    uses: dbt-labs/dbt-release/.github/workflows/slack-post-notification.yml@main
+    with:
+      status: "failure"
+
+    secrets:
+      SLACK_WEBHOOK_URL: ${{ secrets.SLACK_DEV_CORE_ALERTS }}
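One detail from the removed inline steps worth keeping in mind: release type used to be derived directly from the version string. A hedged Python sketch of that check, for reference only (the reusable `dbt-release` workflows presumably carry equivalent logic; this is not the new implementation):

```python
def is_prerelease(version_number: str) -> bool:
    # Mirrors the removed step's expression:
    #   contains(version_number, 'rc') || contains(version_number, 'b')
    # e.g. "1.4.0rc1" and "1.5.0b2" are prereleases; "1.4.0" is final.
    return "rc" in version_number or "b" in version_number

assert is_prerelease("1.5.0b1") and not is_prerelease("1.5.0")
```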
@@ -17,7 +17,6 @@ from typing import (
     Iterator,
     Set,
 )

 import agate
 import pytz

@@ -54,7 +53,7 @@ from dbt.events.types import (
     CodeExecutionStatus,
     CatalogGenerationError,
 )
-from dbt.utils import filter_null_values, executor, cast_to_str
+from dbt.utils import filter_null_values, executor, cast_to_str, AttrDict

 from dbt.adapters.base.connections import Connection, AdapterResponse
 from dbt.adapters.base.meta import AdapterMeta, available
@@ -943,7 +942,7 @@ class BaseAdapter(metaclass=AdapterMeta):
         context_override: Optional[Dict[str, Any]] = None,
         kwargs: Dict[str, Any] = None,
         text_only_columns: Optional[Iterable[str]] = None,
-    ) -> agate.Table:
+    ) -> AttrDict:
         """Look macro_name up in the manifest and execute its results.

         :param macro_name: The name of the macro to execute.
@@ -1028,7 +1027,7 @@ class BaseAdapter(metaclass=AdapterMeta):
             manifest=manifest,
         )

-        results = self._catalog_filter_table(table, manifest)
+        results = self._catalog_filter_table(table, manifest)  # type: ignore[arg-type]
         return results

     def get_catalog(self, manifest: Manifest) -> Tuple[agate.Table, List[Exception]]:
@@ -1060,7 +1059,7 @@ class BaseAdapter(metaclass=AdapterMeta):
         loaded_at_field: str,
         filter: Optional[str],
         manifest: Optional[Manifest] = None,
-    ) -> Dict[str, Any]:
+    ) -> Tuple[AdapterResponse, Dict[str, Any]]:
         """Calculate the freshness of sources in dbt, and return it"""
         kwargs: Dict[str, Any] = {
             "source": source,
@@ -1069,7 +1068,8 @@ class BaseAdapter(metaclass=AdapterMeta):
         }

         # run the macro
-        table = self.execute_macro(FRESHNESS_MACRO_NAME, kwargs=kwargs, manifest=manifest)
+        result = self.execute_macro(FRESHNESS_MACRO_NAME, kwargs=kwargs, manifest=manifest)
+        adapter_response, table = result.response, result.table  # type: ignore[attr-defined]
         # now we have a 1-row table of the maximum `loaded_at_field` value and
         # the current time according to the db.
         if len(table) != 1 or len(table[0]) != 2:
@@ -1083,11 +1083,12 @@ class BaseAdapter(metaclass=AdapterMeta):

         snapshotted_at = _utc(table[0][1], source, loaded_at_field)
         age = (snapshotted_at - max_loaded_at).total_seconds()
-        return {
+        freshness = {
             "max_loaded_at": max_loaded_at,
             "snapshotted_at": snapshotted_at,
             "age": age,
         }
+        return adapter_response, freshness

     def pre_model_hook(self, config: Mapping[str, Any]) -> Any:
         """A hook for running some operation before the model materialization
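A hedged sketch of what this change means for callers: `calculate_freshness` now returns a tuple rather than the bare freshness dict, so the adapter's query response rides along with the freshness data. The names here (`adapter`, `source_relation`, the `loaded_at` field) are illustrative stand-ins:

```python
def report_source_age(adapter, source_relation) -> float:
    """Illustrative caller: calculate_freshness now returns
    (AdapterResponse, freshness dict) instead of the dict alone."""
    adapter_response, freshness = adapter.calculate_freshness(
        source_relation, loaded_at_field="loaded_at", filter=None
    )
    # adapter_response can now be surfaced in source-freshness results,
    # per the "add adapter_response to dbt test and freshness result" entry.
    return freshness["age"]
```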
@@ -1,11 +1,12 @@
-import os
-from collections import defaultdict
-from typing import List, Dict, Any, Tuple, Optional
-
+import argparse
 import networkx as nx  # type: ignore
+import os
 import pickle
 import sqlparse

+from collections import defaultdict
+from typing import List, Dict, Any, Tuple, Optional
+
 from dbt import flags
 from dbt.adapters.factory import get_adapter
 from dbt.clients import jinja
@@ -32,6 +33,7 @@ from dbt.events.contextvars import get_node_info
 from dbt.node_types import NodeType, ModelLanguage
 from dbt.events.format import pluralize
 import dbt.tracking
+import dbt.task.list as list_task

 graph_file_name = "graph.gpickle"

@@ -351,13 +353,6 @@ class Compiler:
         )

         if node.language == ModelLanguage.python:
-            # TODO could we also 'minify' this code at all? just aesthetic, not functional
-
-            # quoating seems like something very specific to sql so far
-            # for all python implementations we are seeing there's no quating.
-            # TODO try to find better way to do this, given that
-            original_quoting = self.config.quoting
-            self.config.quoting = {key: False for key in original_quoting.keys()}
             context = self._create_node_context(node, manifest, extra_context)

             postfix = jinja.get_rendered(
@@ -367,8 +362,6 @@ class Compiler:
             )
             # we should NOT jinja render the python model's 'raw code'
             node.compiled_code = f"{node.raw_code}\n\n{postfix}"
-            # restore quoting settings in the end since context is lazy evaluated
-            self.config.quoting = original_quoting

         else:
             context = self._create_node_context(node, manifest, extra_context)
@@ -482,7 +475,13 @@ class Compiler:

         if write:
             self.write_graph_file(linker, manifest)
-        print_compile_stats(stats)
+
+        # Do not print these for ListTask's
+        if not (
+            self.config.args.__class__ == argparse.Namespace
+            and self.config.args.cls == list_task.ListTask
+        ):
+            print_compile_stats(stats)

         return Graph(linker.graph)
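A hedged illustration of the guard added above (names simplified; dbt's real args plumbing is more involved). Compile statistics are console noise for `dbt ls`, whose output is meant to be machine-consumable, so printing is skipped when the invoking task is the list task:

```python
import argparse

class ListTask:  # stand-in for dbt.task.list.ListTask
    pass

def should_print_compile_stats(args) -> bool:
    # Mirrors the guard: suppress stats only when args came from the
    # argparse path and the selected task class is ListTask.
    return not (
        args.__class__ == argparse.Namespace
        and getattr(args, "cls", None) == ListTask
    )

assert should_print_compile_stats(argparse.Namespace(cls=ListTask)) is False
```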
@@ -75,6 +75,11 @@ Validator Error:
 {error}
 """

+MISSING_DBT_PROJECT_ERROR = """\
+No dbt_project.yml found at expected path {path}
+Verify that each entry within packages.yml (and their transitive dependencies) contains a file named dbt_project.yml
+"""
+

 @runtime_checkable
 class IsFQNResource(Protocol):
@@ -163,9 +168,7 @@ def _raw_project_from(project_root: str) -> Dict[str, Any]:

     # get the project.yml contents
     if not path_exists(project_yaml_filepath):
-        raise DbtProjectError(
-            "no dbt_project.yml found at expected path {}".format(project_yaml_filepath)
-        )
+        raise DbtProjectError(MISSING_DBT_PROJECT_ERROR.format(path=project_yaml_filepath))

     project_dict = _load_yaml(project_yaml_filepath)
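For a concrete sense of the improved message (a quick illustration; the path below is made up):

```python
MISSING_DBT_PROJECT_ERROR = """\
No dbt_project.yml found at expected path {path}
Verify that each entry within packages.yml (and their transitive dependencies) contains a file named dbt_project.yml
"""

print(MISSING_DBT_PROJECT_ERROR.format(path="dbt_packages/my_package/dbt_project.yml"))
```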
@@ -13,7 +13,7 @@ from dbt.events.types import TimingInfoCollected
 from dbt.events.proto_types import RunResultMsg, TimingInfoMsg
 from dbt.events.contextvars import get_node_info
 from dbt.logger import TimingProcessor
-from dbt.utils import lowercase, cast_to_str, cast_to_int
+from dbt.utils import lowercase, cast_to_str, cast_to_int, cast_dict_to_dict_of_strings
 from dbt.dataclass_schema import dbtClassMixin, StrEnum

 import agate
@@ -130,7 +130,6 @@ class BaseResult(dbtClassMixin):
         return data

     def to_msg(self):
-        # TODO: add more fields
         msg = RunResultMsg()
         msg.status = str(self.status)
         msg.message = cast_to_str(self.message)
@@ -138,7 +137,7 @@ class BaseResult(dbtClassMixin):
         msg.execution_time = self.execution_time
         msg.num_failures = cast_to_int(self.failures)
         msg.timing_info = [ti.to_msg() for ti in self.timing]
-        # adapter_response
+        msg.adapter_response = cast_dict_to_dict_of_strings(self.adapter_response)
         return msg
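The cast is needed because the protobuf field is a string-to-string map, while `adapter_response` can hold ints and bools. A hedged sketch of what `cast_dict_to_dict_of_strings` plausibly does (the real helper lives in `dbt.utils`; this is an assumption, not its verbatim source):

```python
from typing import Any, Dict

def cast_dict_to_dict_of_strings(dct: Dict[str, Any]) -> Dict[str, str]:
    # e.g. {"rows_affected": 10, "code": "SELECT"}
    #   -> {"rows_affected": "10", "code": "SELECT"}
    return {str(k): str(v) for k, v in dct.items()}
```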
@@ -283,7 +283,7 @@ def upgrade_manifest_json(manifest: dict) -> dict:
         if "root_path" in exposure_content:
             del exposure_content["root_path"]
     for source_content in manifest.get("sources", {}).values():
-        if "root_path" in exposure_content:
+        if "root_path" in source_content:
             del source_content["root_path"]
     for macro_content in manifest.get("macros", {}).values():
         if "root_path" in macro_content:
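This is the fix for the "[Regression] exposure_content referenced incorrectly" changelog entry above: the sources loop tested the variable left over from the exposures loop, so a source's `root_path` was only deleted when the last exposure happened to have one. A minimal reproduction (simplified shapes, not dbt's real manifest schema):

```python
manifest = {
    "exposures": {"e1": {}},                        # last exposure has no root_path
    "sources": {"s1": {"root_path": "/proj"}},
}

exposure_content = {}
for exposure_content in manifest["exposures"].values():
    if "root_path" in exposure_content:
        del exposure_content["root_path"]

for source_content in manifest["sources"].values():
    if "root_path" in exposure_content:             # bug: stale loop variable
        del source_content["root_path"]

assert "root_path" in manifest["sources"]["s1"]     # root_path wrongly survives
```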
BIN core/dbt/docs/build/doctrees/environment.pickle vendored (binary file not shown)
BIN core/dbt/docs/build/doctrees/index.doctree vendored (binary file not shown)
@@ -419,7 +419,9 @@ table.footnote td {
 }

 dl {
-    margin: 0;
+    margin-left: 0;
+    margin-right: 0;
+    margin-top: 0;
     padding: 0;
 }
2 core/dbt/docs/build/html/_static/basic.css vendored
@@ -4,7 +4,7 @@
  *
  * Sphinx stylesheet -- basic theme.
  *
- * :copyright: Copyright 2007-2022 by the Sphinx team, see AUTHORS.
+ * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS.
  * :license: BSD, see LICENSE for details.
  *
  */

2 core/dbt/docs/build/html/_static/doctools.js vendored
@@ -4,7 +4,7 @@
  *
  * Base JavaScript utilities for all Sphinx HTML documentation.
  *
- * :copyright: Copyright 2007-2022 by the Sphinx team, see AUTHORS.
+ * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS.
  * :license: BSD, see LICENSE for details.
  *
  */

@@ -5,7 +5,7 @@
  * This script contains the language-specific data used by searchtools.js,
  * namely the list of stopwords, stemmer, scorer and splitter.
  *
- * :copyright: Copyright 2007-2022 by the Sphinx team, see AUTHORS.
+ * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS.
  * :license: BSD, see LICENSE for details.
  *
  */

@@ -4,7 +4,7 @@
  *
  * Sphinx JavaScript utilities for the full-text search.
  *
- * :copyright: Copyright 2007-2022 by the Sphinx team, see AUTHORS.
+ * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS.
  * :license: BSD, see LICENSE for details.
  *
  */

4 core/dbt/docs/build/html/genindex.html vendored
@@ -87,8 +87,8 @@
       ©2022, dbt Labs.

       |
-      Powered by <a href="http://sphinx-doc.org/">Sphinx 6.0.0</a>
-      & <a href="https://github.com/bitprophet/alabaster">Alabaster 0.7.12</a>
+      Powered by <a href="http://sphinx-doc.org/">Sphinx 6.1.3</a>
+      & <a href="https://github.com/bitprophet/alabaster">Alabaster 0.7.13</a>

     </div>

6 core/dbt/docs/build/html/index.html vendored
@@ -837,8 +837,8 @@
       ©2022, dbt Labs.

       |
-      Powered by <a href="http://sphinx-doc.org/">Sphinx 6.0.0</a>
-      & <a href="https://github.com/bitprophet/alabaster">Alabaster 0.7.12</a>
+      Powered by <a href="http://sphinx-doc.org/">Sphinx 6.1.3</a>
+      & <a href="https://github.com/bitprophet/alabaster">Alabaster 0.7.13</a>

       |
       <a href="_sources/index.rst.txt"
@@ -849,4 +849,4 @@

 </body>
-</html>
+</html>

4 core/dbt/docs/build/html/search.html vendored
@@ -106,8 +106,8 @@
       ©2022, dbt Labs.

       |
-      Powered by <a href="http://sphinx-doc.org/">Sphinx 6.0.0</a>
-      & <a href="https://github.com/bitprophet/alabaster">Alabaster 0.7.12</a>
+      Powered by <a href="http://sphinx-doc.org/">Sphinx 6.1.3</a>
+      & <a href="https://github.com/bitprophet/alabaster">Alabaster 0.7.13</a>

     </div>

2 core/dbt/docs/build/html/searchindex.js vendored
File diff suppressed because one or more lines are too long
@@ -3,7 +3,7 @@ from dbt.constants import METADATA_ENV_PREFIX
 from dbt.events.base_types import BaseEvent, Cache, EventLevel, NoFile, NoStdOut, EventMsg
 from dbt.events.eventmgr import EventManager, LoggerConfig, LineFormat, NoFilter
 from dbt.events.helpers import env_secrets, scrub_secrets
-from dbt.events.types import EmptyLine
+from dbt.events.types import Formatting
 import dbt.flags as flags
 from dbt.logger import GLOBAL_LOGGER, make_log_dir_if_missing
 from functools import partial
@@ -65,7 +65,7 @@ def _stdout_filter(
         and (not isinstance(msg.data, Cache) or log_cache_events)
         and (EventLevel(msg.info.level) != EventLevel.DEBUG or debug_mode)
         and (EventLevel(msg.info.level) == EventLevel.ERROR or not quiet_mode)
-        and not (flags.LOG_FORMAT == "json" and type(msg.data) == EmptyLine)
+        and not (flags.LOG_FORMAT == "json" and type(msg.data) == Formatting)
     )

@@ -85,7 +85,7 @@ def _logfile_filter(log_cache_events: bool, msg: EventMsg) -> bool:
     return (
         not isinstance(msg.data, NoFile)
         and not (isinstance(msg.data, Cache) and not log_cache_events)
-        and not (flags.LOG_FORMAT == "json" and type(msg.data) == EmptyLine)
+        and not (flags.LOG_FORMAT == "json" and type(msg.data) == Formatting)
     )
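The rename matters for structured logging: purely cosmetic output (blank lines, separator rules) is now a `Formatting` event that carries its text in `msg`, and both filters above drop it when `LOG_FORMAT` is json. A toy sketch of the predicate (simplified; the real events are the betterproto messages shown in the next file):

```python
from dataclasses import dataclass

@dataclass
class Formatting:  # stand-in for dbt.events.types.Formatting
    msg: str = ""

def keep_for_json_stream(event, log_format: str) -> bool:
    # json consumers get machine-readable events only; cosmetic
    # Formatting events are filtered out of the stream.
    return not (log_format == "json" and type(event) is Formatting)

assert keep_for_json_stream(Formatting(""), "json") is False
assert keep_for_json_stream(Formatting(""), "text") is True
```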
@@ -1055,6 +1055,23 @@ class UnableToPartialParseMsg(betterproto.Message):
     data: "UnableToPartialParse" = betterproto.message_field(2)


+@dataclass
+class StateCheckVarsHash(betterproto.Message):
+    """I025"""
+
+    checksum: str = betterproto.string_field(1)
+    vars: str = betterproto.string_field(2)
+    profile: str = betterproto.string_field(3)
+    target: str = betterproto.string_field(4)
+    version: str = betterproto.string_field(5)
+
+
+@dataclass
+class StateCheckVarsHashMsg(betterproto.Message):
+    info: "EventInfo" = betterproto.message_field(1)
+    data: "StateCheckVarsHash" = betterproto.message_field(2)
+
+
 @dataclass
 class PartialParsingNotEnabled(betterproto.Message):
     """I028"""
@@ -1083,123 +1100,6 @@ class ParsedFileLoadFailedMsg(betterproto.Message):
     data: "ParsedFileLoadFailed" = betterproto.message_field(2)


-@dataclass
-class StaticParserCausedJinjaRendering(betterproto.Message):
-    """I031"""
-
-    path: str = betterproto.string_field(1)
-
-
-@dataclass
-class StaticParserCausedJinjaRenderingMsg(betterproto.Message):
-    info: "EventInfo" = betterproto.message_field(1)
-    data: "StaticParserCausedJinjaRendering" = betterproto.message_field(2)
-
-
-@dataclass
-class UsingExperimentalParser(betterproto.Message):
-    """I032"""
-
-    path: str = betterproto.string_field(1)
-
-
-@dataclass
-class UsingExperimentalParserMsg(betterproto.Message):
-    info: "EventInfo" = betterproto.message_field(1)
-    data: "UsingExperimentalParser" = betterproto.message_field(2)
-
-
-@dataclass
-class SampleFullJinjaRendering(betterproto.Message):
-    """I033"""
-
-    path: str = betterproto.string_field(1)
-
-
-@dataclass
-class SampleFullJinjaRenderingMsg(betterproto.Message):
-    info: "EventInfo" = betterproto.message_field(1)
-    data: "SampleFullJinjaRendering" = betterproto.message_field(2)
-
-
-@dataclass
-class StaticParserFallbackJinjaRendering(betterproto.Message):
-    """I034"""
-
-    path: str = betterproto.string_field(1)
-
-
-@dataclass
-class StaticParserFallbackJinjaRenderingMsg(betterproto.Message):
-    info: "EventInfo" = betterproto.message_field(1)
-    data: "StaticParserFallbackJinjaRendering" = betterproto.message_field(2)
-
-
-@dataclass
-class StaticParsingMacroOverrideDetected(betterproto.Message):
-    """I035"""
-
-    path: str = betterproto.string_field(1)
-
-
-@dataclass
-class StaticParsingMacroOverrideDetectedMsg(betterproto.Message):
-    info: "EventInfo" = betterproto.message_field(1)
-    data: "StaticParsingMacroOverrideDetected" = betterproto.message_field(2)
-
-
-@dataclass
-class StaticParserSuccess(betterproto.Message):
-    """I036"""
-
-    path: str = betterproto.string_field(1)
-
-
-@dataclass
-class StaticParserSuccessMsg(betterproto.Message):
-    info: "EventInfo" = betterproto.message_field(1)
-    data: "StaticParserSuccess" = betterproto.message_field(2)
-
-
-@dataclass
-class StaticParserFailure(betterproto.Message):
-    """I037"""
-
-    path: str = betterproto.string_field(1)
-
-
-@dataclass
-class StaticParserFailureMsg(betterproto.Message):
-    info: "EventInfo" = betterproto.message_field(1)
-    data: "StaticParserFailure" = betterproto.message_field(2)
-
-
-@dataclass
-class ExperimentalParserSuccess(betterproto.Message):
-    """I038"""
-
-    path: str = betterproto.string_field(1)
-
-
-@dataclass
-class ExperimentalParserSuccessMsg(betterproto.Message):
-    info: "EventInfo" = betterproto.message_field(1)
-    data: "ExperimentalParserSuccess" = betterproto.message_field(2)
-
-
-@dataclass
-class ExperimentalParserFailure(betterproto.Message):
-    """I039"""
-
-    path: str = betterproto.string_field(1)
-
-
-@dataclass
-class ExperimentalParserFailureMsg(betterproto.Message):
-    info: "EventInfo" = betterproto.message_field(1)
-    data: "ExperimentalParserFailure" = betterproto.message_field(2)
-
-
 @dataclass
 class PartialParsingEnabled(betterproto.Message):
     """I040"""
@@ -1408,6 +1308,34 @@ class JinjaLogWarningMsg(betterproto.Message):
     data: "JinjaLogWarning" = betterproto.message_field(2)


+@dataclass
+class JinjaLogInfo(betterproto.Message):
+    """I062"""
+
+    node_info: "NodeInfo" = betterproto.message_field(1)
+    msg: str = betterproto.string_field(2)
+
+
+@dataclass
+class JinjaLogInfoMsg(betterproto.Message):
+    info: "EventInfo" = betterproto.message_field(1)
+    data: "JinjaLogInfo" = betterproto.message_field(2)
+
+
+@dataclass
+class JinjaLogDebug(betterproto.Message):
+    """I063"""
+
+    node_info: "NodeInfo" = betterproto.message_field(1)
+    msg: str = betterproto.string_field(2)
+
+
+@dataclass
+class JinjaLogDebugMsg(betterproto.Message):
+    info: "EventInfo" = betterproto.message_field(1)
+    data: "JinjaLogDebug" = betterproto.message_field(2)
+
+
 @dataclass
 class GitSparseCheckoutSubdirectory(betterproto.Message):
     """M001"""
@@ -1542,34 +1470,6 @@ class SelectorReportInvalidSelectorMsg(betterproto.Message):
     data: "SelectorReportInvalidSelector" = betterproto.message_field(2)


-@dataclass
-class JinjaLogInfo(betterproto.Message):
-    """M011"""
-
-    node_info: "NodeInfo" = betterproto.message_field(1)
-    msg: str = betterproto.string_field(2)
-
-
-@dataclass
-class JinjaLogInfoMsg(betterproto.Message):
-    info: "EventInfo" = betterproto.message_field(1)
-    data: "JinjaLogInfo" = betterproto.message_field(2)
-
-
-@dataclass
-class JinjaLogDebug(betterproto.Message):
-    """M012"""
-
-    node_info: "NodeInfo" = betterproto.message_field(1)
-    msg: str = betterproto.string_field(2)
-
-
-@dataclass
-class JinjaLogDebugMsg(betterproto.Message):
-    info: "EventInfo" = betterproto.message_field(1)
-    data: "JinjaLogDebug" = betterproto.message_field(2)
-
-
 @dataclass
 class DepsNoPackagesFound(betterproto.Message):
     """M013"""
@@ -1859,19 +1759,6 @@ class SeedHeaderMsg(betterproto.Message):
     data: "SeedHeader" = betterproto.message_field(2)


-@dataclass
-class SeedHeaderSeparator(betterproto.Message):
-    """Q005"""
-
-    len_header: int = betterproto.int32_field(1)
-
-
-@dataclass
-class SeedHeaderSeparatorMsg(betterproto.Message):
-    info: "EventInfo" = betterproto.message_field(1)
-    data: "SeedHeaderSeparator" = betterproto.message_field(2)
-
-
 @dataclass
 class SQLRunnerException(betterproto.Message):
     """Q006"""
@@ -2511,16 +2398,16 @@ class OpenCommandMsg(betterproto.Message):


 @dataclass
-class EmptyLine(betterproto.Message):
+class Formatting(betterproto.Message):
     """Z017"""

-    pass
+    msg: str = betterproto.string_field(1)


 @dataclass
-class EmptyLineMsg(betterproto.Message):
+class FormattingMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    data: "EmptyLine" = betterproto.message_field(2)
+    data: "Formatting" = betterproto.message_field(2)


 @dataclass
@@ -2847,6 +2734,58 @@ class RunResultWarningMessageMsg(betterproto.Message):
     data: "RunResultWarningMessage" = betterproto.message_field(2)


+@dataclass
+class DebugCmdOut(betterproto.Message):
+    """Z047"""
+
+    msg: str = betterproto.string_field(1)
+
+
+@dataclass
+class DebugCmdOutMsg(betterproto.Message):
+    info: "EventInfo" = betterproto.message_field(1)
+    data: "DebugCmdOut" = betterproto.message_field(2)
+
+
+@dataclass
+class DebugCmdResult(betterproto.Message):
+    """Z048"""
+
+    msg: str = betterproto.string_field(1)
+
+
+@dataclass
+class DebugCmdResultMsg(betterproto.Message):
+    info: "EventInfo" = betterproto.message_field(1)
+    data: "DebugCmdResult" = betterproto.message_field(2)
+
+
+@dataclass
+class ListCmdOut(betterproto.Message):
+    """Z049"""
+
+    msg: str = betterproto.string_field(1)
+
+
+@dataclass
+class ListCmdOutMsg(betterproto.Message):
+    info: "EventInfo" = betterproto.message_field(1)
+    data: "ListCmdOut" = betterproto.message_field(2)
+
+
+@dataclass
+class Note(betterproto.Message):
+    """Z050"""
+
+    msg: str = betterproto.string_field(1)
+
+
+@dataclass
+class NoteMsg(betterproto.Message):
+    info: "EventInfo" = betterproto.message_field(1)
+    data: "Note" = betterproto.message_field(2)
+
+
 @dataclass
 class IntegrationTestInfo(betterproto.Message):
     """T001"""
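Since these classes are betterproto-generated dataclasses, a short hedged sketch of how one of the new events round-trips on the wire (standard betterproto behavior, not a dbt-specific API; assumes `betterproto` is installed and `Note` is importable from this module):

```python
from dbt.events.proto_types import Note  # generated class shown above

note = Note(msg="Nothing to do.")
payload = bytes(note)              # betterproto: bytes() serializes a message
roundtrip = Note().parse(payload)  # ...and parse() deserializes in place
assert roundtrip.msg == "Nothing to do."
```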
@@ -839,7 +839,21 @@ message UnableToPartialParseMsg {
|
||||
UnableToPartialParse data = 2;
|
||||
}
|
||||
|
||||
// Skipped I025, I026, I027
|
||||
// I025
|
||||
message StateCheckVarsHash {
|
||||
string checksum = 1;
|
||||
string vars = 2;
|
||||
string profile = 3;
|
||||
string target = 4;
|
||||
string version = 5;
|
||||
}
|
||||
|
||||
message StateCheckVarsHashMsg {
|
||||
EventInfo info = 1;
|
||||
StateCheckVarsHash data = 2;
|
||||
}
|
||||
|
||||
// Skipped I026, I027
|
||||
|
||||
|
||||
// I028
|
||||
@@ -863,98 +877,7 @@ message ParsedFileLoadFailedMsg {
|
||||
ParsedFileLoadFailed data = 2;
|
||||
}
|
||||
|
||||
// Skipping I030
|
||||
|
||||
|
||||
// I031
|
||||
message StaticParserCausedJinjaRendering {
|
||||
string path = 1;
|
||||
}
|
||||
|
||||
message StaticParserCausedJinjaRenderingMsg {
|
||||
EventInfo info = 1;
|
||||
StaticParserCausedJinjaRendering data = 2;
|
||||
}
|
||||
|
||||
// I032
|
||||
message UsingExperimentalParser {
|
||||
string path = 1;
|
||||
}
|
||||
|
||||
message UsingExperimentalParserMsg {
|
||||
EventInfo info = 1;
|
||||
UsingExperimentalParser data = 2;
|
||||
}
|
||||
|
||||
// I033
|
||||
message SampleFullJinjaRendering {
|
||||
string path = 1;
|
||||
}
|
||||
|
||||
message SampleFullJinjaRenderingMsg {
|
||||
EventInfo info = 1;
|
||||
SampleFullJinjaRendering data = 2;
|
||||
}
|
||||
|
||||
// I034
|
||||
message StaticParserFallbackJinjaRendering {
|
||||
string path = 1;
|
||||
}
|
||||
|
||||
message StaticParserFallbackJinjaRenderingMsg {
|
||||
EventInfo info = 1;
|
||||
StaticParserFallbackJinjaRendering data = 2;
|
||||
}
|
||||
|
||||
// I035
|
||||
message StaticParsingMacroOverrideDetected {
|
||||
string path = 1;
|
||||
}
|
||||
|
||||
message StaticParsingMacroOverrideDetectedMsg {
|
||||
EventInfo info = 1;
|
||||
StaticParsingMacroOverrideDetected data = 2;
|
||||
}
|
||||
|
||||
// I036
|
||||
message StaticParserSuccess {
|
||||
string path = 1;
|
||||
}
|
||||
|
||||
message StaticParserSuccessMsg {
|
||||
EventInfo info = 1;
|
||||
StaticParserSuccess data = 2;
|
||||
}
|
||||
|
||||
// I037
|
||||
message StaticParserFailure {
|
||||
string path = 1;
|
||||
}
|
||||
|
||||
message StaticParserFailureMsg {
|
||||
EventInfo info = 1;
|
||||
StaticParserFailure data = 2;
|
||||
}
|
||||
|
||||
// I038
|
||||
message ExperimentalParserSuccess {
|
||||
string path = 1;
|
||||
}
|
||||
|
||||
message ExperimentalParserSuccessMsg {
|
||||
EventInfo info = 1;
|
||||
ExperimentalParserSuccess data = 2;
|
||||
}
|
||||
|
||||
// I039
|
||||
message ExperimentalParserFailure {
|
||||
string path = 1;
|
||||
}
|
||||
|
||||
message ExperimentalParserFailureMsg {
|
||||
EventInfo info = 1;
|
||||
ExperimentalParserFailure data = 2;
|
||||
}
|
||||
// Skipping I030 - I039
|
||||
|
||||
// I040
|
||||
message PartialParsingEnabled {
|
||||
@@ -1124,6 +1047,28 @@ message JinjaLogWarningMsg {
|
||||
JinjaLogWarning data = 2;
|
||||
}
|
||||
|
||||
// I062
|
||||
message JinjaLogInfo {
|
||||
NodeInfo node_info = 1;
|
||||
string msg = 2;
|
||||
}
|
||||
|
||||
message JinjaLogInfoMsg {
|
||||
EventInfo info = 1;
|
||||
JinjaLogInfo data = 2;
|
||||
}
|
||||
|
||||
// I063
|
||||
message JinjaLogDebug {
|
||||
NodeInfo node_info = 1;
|
||||
string msg = 2;
|
||||
}
|
||||
|
||||
message JinjaLogDebugMsg {
|
||||
EventInfo info = 1;
|
||||
JinjaLogDebug data = 2;
|
||||
}
|
||||
|
||||
// M - Deps generation
|
||||
|
||||
// M001
|
||||
@@ -1230,27 +1175,7 @@ message SelectorReportInvalidSelectorMsg {
|
||||
SelectorReportInvalidSelector data = 2;
|
||||
}
|
||||
|
||||
// M011
|
||||
message JinjaLogInfo {
|
||||
NodeInfo node_info = 1;
|
||||
string msg = 2;
|
||||
}
|
||||
|
||||
message JinjaLogInfoMsg {
|
||||
EventInfo info = 1;
|
||||
JinjaLogInfo data = 2;
|
||||
}
|
||||
|
||||
// M012
|
||||
message JinjaLogDebug {
|
||||
NodeInfo node_info = 1;
|
||||
string msg = 2;
|
||||
}
|
||||
|
||||
message JinjaLogDebugMsg {
|
||||
EventInfo info = 1;
|
||||
JinjaLogDebug data = 2;
|
||||
}
|
||||
// Skipped M011 and M012
|
||||
|
||||
// M013
|
||||
message DepsNoPackagesFound {
|
||||
@@ -1473,15 +1398,7 @@ message SeedHeaderMsg {
|
||||
SeedHeader data = 2;
|
||||
}
|
||||
|
||||
// Q005
|
||||
message SeedHeaderSeparator {
|
||||
int32 len_header = 1;
|
||||
}
|
||||
|
||||
message SeedHeaderSeparatorMsg {
|
||||
EventInfo info = 1;
|
||||
SeedHeaderSeparator data = 2;
|
||||
}
|
||||
// Skipped Q005
|
||||
|
||||
// Q006
|
||||
message SQLRunnerException {
|
||||
@@ -2004,12 +1921,13 @@ message OpenCommandMsg {
|
||||
}
|
||||
|
||||
// Z017
|
||||
message EmptyLine {
|
||||
message Formatting {
|
||||
string msg = 1;
|
||||
}
|
||||
|
||||
message EmptyLineMsg {
|
||||
message FormattingMsg {
|
||||
EventInfo info = 1;
|
||||
EmptyLine data = 2;
|
||||
Formatting data = 2;
|
||||
}
|
||||
|
||||
// Z018
|
||||
@@ -2258,6 +2176,46 @@ message RunResultWarningMessageMsg {
|
||||
RunResultWarningMessage data = 2;
|
||||
}
|
||||
|
||||
// Z047
|
||||
message DebugCmdOut {
|
||||
string msg = 1;
|
||||
}
|
||||
|
||||
message DebugCmdOutMsg {
|
||||
EventInfo info = 1;
|
||||
DebugCmdOut data = 2;
|
||||
}
|
||||
|
||||
// Z048
|
||||
message DebugCmdResult {
|
||||
string msg = 1;
|
||||
}
|
||||
|
||||
message DebugCmdResultMsg {
|
||||
EventInfo info = 1;
|
||||
DebugCmdResult data = 2;
|
||||
}
|
||||
|
||||
// Z049
|
||||
message ListCmdOut {
|
||||
string msg = 1;
|
||||
}
|
||||
|
||||
message ListCmdOutMsg {
|
||||
EventInfo info = 1;
|
||||
ListCmdOut data = 2;
|
||||
}
|
||||
|
||||
// Z050
|
||||
message Note {
|
||||
string msg = 1;
|
||||
}
|
||||
|
||||
message NoteMsg {
|
||||
EventInfo info = 1;
|
||||
Note data = 2;
|
||||
}
|
||||
|
||||
// T - Integration tests
|
||||
|
||||
// T001
|
||||
|
||||
@@ -843,6 +843,15 @@ class UnableToPartialParse(InfoLevel, pt.UnableToPartialParse):
         return f"Unable to do partial parsing because {self.reason}"


+@dataclass
+class StateCheckVarsHash(DebugLevel, pt.StateCheckVarsHash):
+    def code(self):
+        return "I025"
+
+    def message(self) -> str:
+        return f"checksum: {self.checksum}, vars: {self.vars}, profile: {self.profile}, target: {self.target}, version: {self.version}"
+
+
-# Skipped I025, I026, I027
+# Skipped I026, I027
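The new `StateCheckVarsHash` event pairs with the "Sort cli vars before hashing for partial parsing" fix above. A hedged sketch of the idea (dbt's real state check hashes more inputs and lives elsewhere; this only illustrates why sorting matters):

```python
from hashlib import md5

def vars_checksum(cli_vars: dict) -> str:
    # Sorting first makes the checksum insensitive to the order in which
    # --vars were supplied, so equivalent invocations reuse the parse cache.
    return md5(str(sorted(cli_vars.items())).encode()).hexdigest()

assert vars_checksum({"a": 1, "b": 2}) == vars_checksum({"b": 2, "a": 1})
```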
@@ -864,90 +873,7 @@ class ParsedFileLoadFailed(DebugLevel, pt.ParsedFileLoadFailed): # noqa
|
||||
return f"Failed to load parsed file from disk at {self.path}: {self.exc}"
|
||||
|
||||
|
||||
# Skipped I030
|
||||
|
||||
|
||||
@dataclass
|
||||
class StaticParserCausedJinjaRendering(DebugLevel, pt.StaticParserCausedJinjaRendering):
|
||||
def code(self):
|
||||
return "I031"
|
||||
|
||||
def message(self) -> str:
|
||||
return f"1605: jinja rendering because of STATIC_PARSER flag. file: {self.path}"
|
||||
|
||||
|
||||
# TODO: Experimental/static parser uses these for testing and some may be a good use case for
|
||||
# the `TestLevel` logger once we implement it. Some will probably stay `DebugLevel`.
|
||||
@dataclass
|
||||
class UsingExperimentalParser(DebugLevel, pt.UsingExperimentalParser):
|
||||
def code(self):
|
||||
return "I032"
|
||||
|
||||
def message(self) -> str:
|
||||
return f"1610: conducting experimental parser sample on {self.path}"
|
||||
|
||||
|
||||
@dataclass
|
||||
class SampleFullJinjaRendering(DebugLevel, pt.SampleFullJinjaRendering):
|
||||
def code(self):
|
||||
return "I033"
|
||||
|
||||
def message(self) -> str:
|
||||
return f"1611: conducting full jinja rendering sample on {self.path}"
|
||||
|
||||
|
||||
@dataclass
|
||||
class StaticParserFallbackJinjaRendering(DebugLevel, pt.StaticParserFallbackJinjaRendering):
|
||||
def code(self):
|
||||
return "I034"
|
||||
|
||||
def message(self) -> str:
|
||||
return f"1602: parser fallback to jinja rendering on {self.path}"
|
||||
|
||||
|
||||
@dataclass
|
||||
class StaticParsingMacroOverrideDetected(DebugLevel, pt.StaticParsingMacroOverrideDetected):
|
||||
def code(self):
|
||||
return "I035"
|
||||
|
||||
def message(self) -> str:
|
||||
return f"1601: detected macro override of ref/source/config in the scope of {self.path}"
|
||||
|
||||
|
||||
@dataclass
|
||||
class StaticParserSuccess(DebugLevel, pt.StaticParserSuccess):
|
||||
def code(self):
|
||||
return "I036"
|
||||
|
||||
def message(self) -> str:
|
||||
return f"1699: static parser successfully parsed {self.path}"
|
||||
|
||||
|
||||
@dataclass
|
||||
class StaticParserFailure(DebugLevel, pt.StaticParserFailure):
|
||||
def code(self):
|
||||
return "I037"
|
||||
|
||||
def message(self) -> str:
|
||||
return f"1603: static parser failed on {self.path}"
|
||||
|
||||
|
||||
@dataclass
|
||||
class ExperimentalParserSuccess(DebugLevel, pt.ExperimentalParserSuccess):
|
||||
def code(self):
|
||||
return "I038"
|
||||
|
||||
def message(self) -> str:
|
||||
return f"1698: experimental parser successfully parsed {self.path}"
|
||||
|
||||
|
||||
@dataclass
|
||||
class ExperimentalParserFailure(DebugLevel, pt.ExperimentalParserFailure):
|
||||
def code(self):
|
||||
return "I039"
|
||||
|
||||
def message(self) -> str:
|
||||
return f"1604: experimental parser failed on {self.path}"
|
||||
# Skipped I030-I039
|
||||
|
||||
|
||||
@dataclass
|
||||
@@ -1162,6 +1088,26 @@ class JinjaLogWarning(WarnLevel, pt.JinjaLogWarning):
        return self.msg


@dataclass
class JinjaLogInfo(InfoLevel, EventStringFunctor, pt.JinjaLogInfo):
    def code(self):
        return "I062"

    def message(self) -> str:
        # This is for the log method used in macros so msg cannot be built here
        return self.msg


@dataclass
class JinjaLogDebug(DebugLevel, EventStringFunctor, pt.JinjaLogDebug):
    def code(self):
        return "I063"

    def message(self) -> str:
        # This is for the log method used in macros so msg cannot be built here
        return self.msg


# =======================================================
# M - Deps generation
# =======================================================
@@ -1173,7 +1119,7 @@ class GitSparseCheckoutSubdirectory(DebugLevel, pt.GitSparseCheckoutSubdirectory
        return "M001"

    def message(self) -> str:
        return f" Subdirectory specified: {self.subdir}, using sparse checkout."
        return f"Subdirectory specified: {self.subdir}, using sparse checkout."


@dataclass
@@ -1182,7 +1128,7 @@ class GitProgressCheckoutRevision(DebugLevel, pt.GitProgressCheckoutRevision):
        return "M002"

    def message(self) -> str:
        return f" Checking out revision {self.revision}."
        return f"Checking out revision {self.revision}."


@dataclass
@@ -1218,7 +1164,7 @@ class GitProgressUpdatedCheckoutRange(DebugLevel, pt.GitProgressUpdatedCheckoutR
        return "M006"

    def message(self) -> str:
        return f" Updated checkout from {self.start_sha} to {self.end_sha}."
        return f"Updated checkout from {self.start_sha} to {self.end_sha}."


@dataclass
@@ -1227,7 +1173,7 @@ class GitProgressCheckedOutAt(DebugLevel, pt.GitProgressCheckedOutAt):
        return "M007"

    def message(self) -> str:
        return f" Checked out at {self.end_sha}."
        return f"Checked out at {self.end_sha}."


@dataclass
@@ -1260,26 +1206,6 @@ class SelectorReportInvalidSelector(InfoLevel, pt.SelectorReportInvalidSelector)
        )


@dataclass
class JinjaLogInfo(InfoLevel, EventStringFunctor, pt.JinjaLogInfo):
    def code(self):
        return "M011"

    def message(self) -> str:
        # This is for the log method used in macros so msg cannot be built here
        return self.msg


@dataclass
class JinjaLogDebug(DebugLevel, EventStringFunctor, pt.JinjaLogDebug):
    def code(self):
        return "M012"

    def message(self) -> str:
        # This is for the log method used in macros so msg cannot be built here
        return self.msg


@dataclass
class DepsNoPackagesFound(InfoLevel, pt.DepsNoPackagesFound):
    def code(self):
@@ -1304,7 +1230,7 @@ class DepsInstallInfo(InfoLevel, pt.DepsInstallInfo):
        return "M015"

    def message(self) -> str:
        return f" Installed from {self.version_name}"
        return f"Installed from {self.version_name}"


@dataclass
@@ -1313,7 +1239,7 @@ class DepsUpdateAvailable(InfoLevel, pt.DepsUpdateAvailable):
        return "M016"

    def message(self) -> str:
        return f" Updated version available: {self.version_latest}"
        return f"Updated version available: {self.version_latest}"


@dataclass
@@ -1322,7 +1248,7 @@ class DepsUpToDate(InfoLevel, pt.DepsUpToDate):
        return "M017"

    def message(self) -> str:
        return " Up to date!"
        return "Up to date!"


@dataclass
@@ -1331,7 +1257,7 @@ class DepsListSubdirectory(InfoLevel, pt.DepsListSubdirectory):
        return "M018"

    def message(self) -> str:
        return f" and subdirectory {self.subdirectory}"
        return f"and subdirectory {self.subdirectory}"


@dataclass
@@ -1498,15 +1424,6 @@ class SeedHeader(InfoLevel, pt.SeedHeader):
        return self.header


@dataclass
class SeedHeaderSeparator(InfoLevel, pt.SeedHeaderSeparator):
    def code(self):
        return "Q005"

    def message(self) -> str:
        return "-" * self.len_header


@dataclass
class SQLRunnerException(DebugLevel, pt.SQLRunnerException):  # noqa
    def code(self):
@@ -2084,13 +2001,18 @@ class OpenCommand(InfoLevel, pt.OpenCommand):
        return msg


# We use events to create console output, but also think of them as a sequence of important and
# meaningful occurrences to be used for debugging and monitoring. The Formatting event helps ease
# the tension between these two goals by allowing empty lines, heading separators, and other
# formatting to be written to the console, while they can be ignored for other purposes. For
# general information that isn't simple formatting, the Note event should be used instead.
@dataclass
class EmptyLine(InfoLevel, pt.EmptyLine):
class Formatting(InfoLevel, pt.Formatting):
    def code(self):
        return "Z017"

    def message(self) -> str:
        return ""
        return self.msg


@dataclass
@@ -2266,7 +2188,7 @@ class DepsCreatingLocalSymlink(DebugLevel, pt.DepsCreatingLocalSymlink):
        return "Z037"

    def message(self) -> str:
        return " Creating symlink to local dependency."
        return "Creating symlink to local dependency."


@dataclass
@@ -2275,7 +2197,7 @@ class DepsSymlinkNotAvailable(DebugLevel, pt.DepsSymlinkNotAvailable):
        return "Z038"

    def message(self) -> str:
        return " Symlinks are not available on this OS, copying dependency."
        return "Symlinks are not available on this OS, copying dependency."


@dataclass
@@ -2345,3 +2267,41 @@ class RunResultWarningMessage(WarnLevel, EventStringFunctor, pt.RunResultWarning
    def message(self) -> str:
        # This is the message on the result object, cannot be formatted in event
        return self.msg


@dataclass
class DebugCmdOut(InfoLevel, pt.DebugCmdOut):
    def code(self):
        return "Z047"

    def message(self) -> str:
        return self.msg


@dataclass
class DebugCmdResult(InfoLevel, pt.DebugCmdResult):
    def code(self):
        return "Z048"

    def message(self) -> str:
        return self.msg


@dataclass
class ListCmdOut(InfoLevel, pt.ListCmdOut):
    def code(self):
        return "Z049"

    def message(self) -> str:
        return self.msg


# The Note event provides a way to log messages which aren't likely to be useful as more structured events.
# For console formatting text like empty lines and separator bars, use the Formatting event instead.
@dataclass
class Note(InfoLevel, pt.Note):
    def code(self):
        return "Z050"

    def message(self) -> str:
        return self.msg

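As the comments above spell out, Formatting carries cosmetic console output that downstream consumers may discard, while Note carries real information. A minimal sketch of how a structured-log consumer might exploit that split (the event names mirror this diff; the consumer function itself is hypothetical):

```python
from dataclasses import dataclass


@dataclass
class Formatting:
    """Cosmetic console output (blank lines, separator bars); safe to drop."""
    msg: str = ""


@dataclass
class Note:
    """General information that is not mere formatting."""
    msg: str = ""


def structured_messages(events):
    # A JSON-log consumer can drop Formatting events outright and keep
    # every Note, since Formatting exists only for human-readable output.
    return [e.msg for e in events if not isinstance(e, Formatting)]


print(structured_messages([Formatting(""), Note(msg="All checks passed!"), Formatting("-" * 20)]))
# ['All checks passed!']
```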
@@ -40,7 +40,7 @@ class GraphQueue:
        # store the 'score' of each node as a number. Lower is higher priority.
        self._scores = self._get_scores(self.graph)
        # populate the initial queue
        self._find_new_additions()
        self._find_new_additions(list(self.graph.nodes()))
        # awaits after task end
        self.some_task_done = threading.Condition(self.lock)

@@ -156,12 +156,12 @@ class GraphQueue:
        """
        return node in self.in_progress or node in self.queued

    def _find_new_additions(self) -> None:
    def _find_new_additions(self, candidates) -> None:
        """Find any nodes in the graph that need to be added to the internal
        queue and add them.
        """
        for node, in_degree in self.graph.in_degree():
            if not self._already_known(node) and in_degree == 0:
        for node in candidates:
            if self.graph.in_degree(node) == 0 and not self._already_known(node):
                self.inner.put((self._scores[node], node))
                self.queued.add(node)

@@ -174,8 +174,9 @@ class GraphQueue:
        """
        with self.lock:
            self.in_progress.remove(node_id)
            successors = list(self.graph.successors(node_id))
            self.graph.remove_node(node_id)
            self._find_new_additions()
            self._find_new_additions(successors)
            self.inner.task_done()
            self.some_task_done.notify_all()


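The shift to passing candidates is a targeted optimization: when a node finishes, only its direct successors can have dropped to in-degree zero, so the queue re-checks just those instead of rescanning every node in the graph. A stripped-down sketch of the same idea (no locking or scoring; `networkx` as in dbt's graph layer):

```python
import networkx as nx
from queue import SimpleQueue


def mark_done(graph: nx.DiGraph, ready: SimpleQueue, queued: set, node_id) -> None:
    # Capture successors before removal: they are the only nodes whose
    # in-degree changes, so the re-check is O(out-degree) rather than O(V).
    successors = list(graph.successors(node_id))
    graph.remove_node(node_id)
    for node in successors:
        if graph.in_degree(node) == 0 and node not in queued:
            ready.put(node)
            queued.add(node)
```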
@@ -12,5 +12,5 @@
        where {{ filter }}
      {% endif %}
    {% endcall %}
    {{ return(load_result('collect_freshness').table) }}
    {{ return(load_result('collect_freshness')) }}
{% endmacro %}

@@ -1,8 +1,10 @@
{% macro get_merge_sql(target, source, unique_key, dest_columns, incremental_predicates) -%}
{% macro get_merge_sql(target, source, unique_key, dest_columns, incremental_predicates=none) -%}
  -- back compat for old kwarg name
  {% set incremental_predicates = kwargs.get('predicates', incremental_predicates) %}
  {{ adapter.dispatch('get_merge_sql', 'dbt')(target, source, unique_key, dest_columns, incremental_predicates) }}
{%- endmacro %}

{% macro default__get_merge_sql(target, source, unique_key, dest_columns, incremental_predicates) -%}
{% macro default__get_merge_sql(target, source, unique_key, dest_columns, incremental_predicates=none) -%}
    {%- set predicates = [] if incremental_predicates is none else [] + incremental_predicates -%}
    {%- set dest_cols_csv = get_quoted_csv(dest_columns | map(attribute="name")) -%}
    {%- set merge_update_columns = config.get('merge_update_columns') -%}

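The `kwargs.get('predicates', ...)` line is what preserves backward compatibility: callers that still pass the pre-rename `predicates` keyword reach it through Jinja's implicit `kwargs`. The same pattern in plain Python, for illustration (a hypothetical stand-in, not the real macro):

```python
def get_merge_sql(target, source, unique_key, dest_columns,
                  incremental_predicates=None, **kwargs):
    # Back compat for the old kwarg name: if a caller still passes
    # 'predicates', it takes effect just as the diff's Jinja line does.
    incremental_predicates = kwargs.get("predicates", incremental_predicates)
    return [] if incremental_predicates is None else list(incremental_predicates)


# Old and new call styles resolve identically:
assert get_merge_sql(None, None, None, [], predicates=["x > 1"]) == ["x > 1"]
assert get_merge_sql(None, None, None, [], incremental_predicates=["x > 1"]) == ["x > 1"]
```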
@@ -3,7 +3,7 @@
{%- set ref_dict = {} -%}
{%- for _ref in model.refs -%}
    {%- set resolved = ref(*_ref) -%}
    {%- do ref_dict.update({_ref | join("."): resolved.quote(database=False, schema=False, identifier=False) | string}) -%}
    {%- do ref_dict.update({_ref | join("."): resolved | string | replace('"', '\"')}) -%}
{%- endfor -%}

def ref(*args,dbt_load_df_function):
@@ -18,7 +18,7 @@ def ref(*args,dbt_load_df_function):
{%- set source_dict = {} -%}
{%- for _source in model.sources -%}
    {%- set resolved = source(*_source) -%}
    {%- do source_dict.update({_source | join("."): resolved.quote(database=False, schema=False, identifier=False) | string}) -%}
    {%- do source_dict.update({_source | join("."): resolved | string | replace('"', '\"')}) -%}
{%- endfor -%}

def source(*args, dbt_load_df_function):
@@ -33,8 +33,8 @@ def source(*args, dbt_load_df_function):
{% set config_dbt_used = zip(model.config.config_keys_used, model.config.config_keys_defaults) | list %}
{%- for key, default in config_dbt_used -%}
    {# weird type testing with enum, would be much easier to write this logic in Python! #}
    {%- if key == 'language' -%}
        {%- set value = 'python' -%}
    {%- if key == "language" -%}
        {%- set value = "python" -%}
    {%- endif -%}
    {%- set value = model.config.get(key, default) -%}
    {%- do config_dict.update({key: value}) -%}
@@ -62,11 +62,12 @@ class config:

class this:
    """dbt.this() or dbt.this.identifier"""
    database = '{{ this.database }}'
    schema = '{{ this.schema }}'
    identifier = '{{ this.identifier }}'
    database = "{{ this.database }}"
    schema = "{{ this.schema }}"
    identifier = "{{ this.identifier }}"
    {% set this_relation_name = this | string | replace('"', '\\"') %}
    def __repr__(self):
        return '{{ this }}'
        return "{{ this_relation_name }}"


class dbtObj:

@@ -229,15 +229,15 @@ def run_from_args(parsed):
    if task.config is not None:
        log_path = getattr(task.config, "log_path", None)
    log_manager.set_path(log_path)
    # if 'list' task: set stdout to WARN instead of INFO
    level_override = parsed.cls.pre_init_hook(parsed)
    setup_event_logger(log_path or "logs", level_override)
    setup_event_logger(log_path or "logs")

    fire_event(MainReportVersion(version=str(dbt.version.installed), log_version=LOG_VERSION))
    fire_event(MainReportArgs(args=args_to_dict(parsed)))
    # For the ListTask, filter out system report logs to allow piping ls output to jq, etc
    if not list_task.ListTask == parsed.cls:
        fire_event(MainReportVersion(version=str(dbt.version.installed), log_version=LOG_VERSION))
        fire_event(MainReportArgs(args=args_to_dict(parsed)))

    if dbt.tracking.active_user is not None:  # mypy appeasement, always true
        fire_event(MainTrackingUserState(user_state=dbt.tracking.active_user.state()))
    if dbt.tracking.active_user is not None:  # mypy appeasement, always true
        fire_event(MainTrackingUserState(user_state=dbt.tracking.active_user.state()))

    results = None

@@ -486,7 +486,7 @@ def _build_snapshot_subparser(subparsers, base_subparser):
    return sub


def _add_defer_argument(*subparsers):
def _add_defer_arguments(*subparsers):
    for sub in subparsers:
        sub.add_optional_argument_inverse(
            "--defer",
@@ -499,10 +499,6 @@ def _add_defer_argument(*subparsers):
            """,
            default=flags.DEFER_MODE,
        )


def _add_favor_state_argument(*subparsers):
    for sub in subparsers:
        sub.add_optional_argument_inverse(
            "--favor-state",
            enable_help="""
@@ -580,7 +576,7 @@ def _build_docs_generate_subparser(subparsers, base_subparser):
            Do not run "dbt compile" as part of docs generation
            """,
    )
    _add_defer_argument(generate_sub)
    _add_defer_arguments(generate_sub)
    return generate_sub


@@ -1192,9 +1188,7 @@ def parse_args(args, cls=DBTArgumentParser):
    # list_sub sets up its own arguments.
    _add_selection_arguments(run_sub, compile_sub, generate_sub, test_sub, snapshot_sub, seed_sub)
    # --defer
    _add_defer_argument(run_sub, test_sub, build_sub, snapshot_sub, compile_sub)
    # --favor-state
    _add_favor_state_argument(run_sub, test_sub, build_sub, snapshot_sub)
    _add_defer_arguments(run_sub, test_sub, build_sub, snapshot_sub, compile_sub)
    # --full-refresh
    _add_table_mutability_arguments(run_sub, compile_sub, build_sub)


@@ -8,6 +8,7 @@ from typing import Dict, Optional, Mapping, Callable, Any, List, Type, Union, Tu
from itertools import chain
import time
from dbt.events.base_types import EventLevel
import pprint

import dbt.exceptions
import dbt.tracking
@@ -29,6 +30,8 @@ from dbt.events.types import (
    ParsedFileLoadFailed,
    InvalidDisabledTargetInTestNode,
    NodeNotFoundOrDisabled,
    StateCheckVarsHash,
    Note,
)
from dbt.logger import DbtProcessState
from dbt.node_types import NodeType
@@ -569,6 +572,12 @@ class ManifestLoader:
                        reason="config vars, config profile, or config target have changed"
                    )
                )
                fire_event(
                    Note(
                        msg=f"previous checksum: {self.manifest.state_check.vars_hash.checksum}, current checksum: {manifest.state_check.vars_hash.checksum}"
                    ),
                    level=EventLevel.DEBUG,
                )
                valid = False
                reparse_reason = ReparseReason.vars_changed
        if self.manifest.state_check.profile_hash != manifest.state_check.profile_hash:
@@ -702,16 +711,28 @@ class ManifestLoader:
        # arg vars, but since any changes to that file will cause state_check
        # to not pass, it doesn't matter. If we move to more granular checking
        # of env_vars, that would need to change.
        # We are using the parsed cli_vars instead of config.args.vars, in order
        # to sort them and avoid reparsing because of ordering issues.
        stringified_cli_vars = pprint.pformat(config.cli_vars)
        vars_hash = FileHash.from_contents(
            "\x00".join(
                [
                    getattr(config.args, "vars", "{}") or "{}",
                    stringified_cli_vars,
                    getattr(config.args, "profile", "") or "",
                    getattr(config.args, "target", "") or "",
                    __version__,
                ]
            )
        )
        fire_event(
            StateCheckVarsHash(
                checksum=vars_hash.checksum,
                vars=stringified_cli_vars,
                profile=config.args.profile,
                target=config.args.target,
                version=__version__,
            )
        )

        # Create a FileHash of the env_vars in the project
        key_list = list(config.project_env_vars.keys())

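The sorting happens inside `pprint.pformat`, which renders dict keys in sorted order, so two invocations passing the same `--vars` in different orders hash identically and no longer trigger a reparse. A quick standalone check of that property (`hashlib` standing in for dbt's `FileHash`):

```python
import hashlib
import pprint


def vars_checksum(cli_vars: dict) -> str:
    # pprint.pformat sorts dict keys, so key order cannot affect the hash.
    return hashlib.sha256(pprint.pformat(cli_vars).encode()).hexdigest()


assert vars_checksum({"a": 1, "b": 2}) == vars_checksum({"b": 2, "a": 1})
```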
@@ -1,19 +1,10 @@
from copy import deepcopy
from dbt.context.context_config import ContextConfig
from dbt.contracts.graph.nodes import ModelNode
import dbt.flags as flags
from dbt.events.base_types import EventLevel
from dbt.events.types import Note
from dbt.events.functions import fire_event
from dbt.events.types import (
    StaticParserCausedJinjaRendering,
    UsingExperimentalParser,
    SampleFullJinjaRendering,
    StaticParserFallbackJinjaRendering,
    StaticParsingMacroOverrideDetected,
    StaticParserSuccess,
    StaticParserFailure,
    ExperimentalParserSuccess,
    ExperimentalParserFailure,
)
import dbt.flags as flags
from dbt.node_types import NodeType, ModelLanguage
from dbt.parser.base import SimpleSQLParser
from dbt.parser.search import FileBlock
@@ -261,7 +252,10 @@ class ModelParser(SimpleSQLParser[ModelNode]):
        elif not flags.STATIC_PARSER:
            # jinja rendering
            super().render_update(node, config)
            fire_event(StaticParserCausedJinjaRendering(path=node.path))
            fire_event(
                Note(f"1605: jinja rendering because of STATIC_PARSER flag. file: {node.path}"),
                EventLevel.DEBUG,
            )
            return

        # only sample for experimental parser correctness on normal runs,
@@ -295,7 +289,10 @@ class ModelParser(SimpleSQLParser[ModelNode]):

        # sample the experimental parser only during a normal run
        if exp_sample and not flags.USE_EXPERIMENTAL_PARSER:
            fire_event(UsingExperimentalParser(path=node.path))
            fire_event(
                Note(f"1610: conducting experimental parser sample on {node.path}"),
                EventLevel.DEBUG,
            )
            experimental_sample = self.run_experimental_parser(node)
            # if the experimental parser succeeded, make a full copy of model parser
            # and populate _everything_ into it so it can be compared apples-to-apples
@@ -325,7 +322,10 @@ class ModelParser(SimpleSQLParser[ModelNode]):
        # sampling rng here, but the effect would be the same since we would only roll
        # it 40% of the time. So I've opted to keep all the rng code colocated above.
        if stable_sample and not flags.USE_EXPERIMENTAL_PARSER:
            fire_event(SampleFullJinjaRendering(path=node.path))
            fire_event(
                Note(f"1611: conducting full jinja rendering sample on {node.path}"),
                EventLevel.DEBUG,
            )
            # if this will _never_ mutate anything `self` we could avoid these deep copies,
            # but we can't really guarantee that going forward.
            model_parser_copy = self.partial_deepcopy()
@@ -360,7 +360,9 @@ class ModelParser(SimpleSQLParser[ModelNode]):
        else:
            # jinja rendering
            super().render_update(node, config)
            fire_event(StaticParserFallbackJinjaRendering(path=node.path))
            fire_event(
                Note(f"1602: parser fallback to jinja rendering on {node.path}"), EventLevel.DEBUG
            )

        # if sampling, add the correct messages for tracking
        if exp_sample and isinstance(experimental_sample, str):
@@ -396,19 +398,26 @@ class ModelParser(SimpleSQLParser[ModelNode]):
            # this log line is used for integration testing. If you change
            # the code at the beginning of the line change the tests in
            # test/integration/072_experimental_parser_tests/test_all_experimental_parser.py
            fire_event(StaticParsingMacroOverrideDetected(path=node.path))
            fire_event(
                Note(
                    f"1601: detected macro override of ref/source/config in the scope of {node.path}"
                ),
                EventLevel.DEBUG,
            )
            return "has_banned_macro"

        # run the stable static parser and return the results
        try:
            statically_parsed = py_extract_from_source(node.raw_code)
            fire_event(StaticParserSuccess(path=node.path))
            fire_event(
                Note(f"1699: static parser successfully parsed {node.path}"), EventLevel.DEBUG
            )
            return _shift_sources(statically_parsed)
        # if we want information on what features are barring the static
        # parser from reading model files, this is where we would add that
        # since that information is stored in the `ExtractionError`.
        except ExtractionError:
            fire_event(StaticParserFailure(path=node.path))
            fire_event(Note(f"1603: static parser failed on {node.path}"), EventLevel.DEBUG)
            return "cannot_parse"

    def run_experimental_parser(
@@ -419,7 +428,12 @@ class ModelParser(SimpleSQLParser[ModelNode]):
            # this log line is used for integration testing. If you change
            # the code at the beginning of the line change the tests in
            # test/integration/072_experimental_parser_tests/test_all_experimental_parser.py
            fire_event(StaticParsingMacroOverrideDetected(path=node.path))
            fire_event(
                Note(
                    f"1601: detected macro override of ref/source/config in the scope of {node.path}"
                ),
                EventLevel.DEBUG,
            )
            return "has_banned_macro"

        # run the experimental parser and return the results
@@ -428,13 +442,16 @@ class ModelParser(SimpleSQLParser[ModelNode]):
            # experimental features. Change `py_extract_from_source` to the new
            # experimental call when we add additional features.
            experimentally_parsed = py_extract_from_source(node.raw_code)
            fire_event(ExperimentalParserSuccess(path=node.path))
            fire_event(
                Note(f"1698: experimental parser successfully parsed {node.path}"),
                EventLevel.DEBUG,
            )
            return _shift_sources(experimentally_parsed)
        # if we want information on what features are barring the experimental
        # parser from reading model files, this is where we would add that
        # since that information is stored in the `ExtractionError`.
        except ExtractionError:
            fire_event(ExperimentalParserFailure(path=node.path))
            fire_event(Note(f"1604: experimental parser failed on {node.path}"), EventLevel.DEBUG)
            return "cannot_parse"

    # checks for banned macros

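Every call site in this file follows the same recipe: a dedicated DebugLevel event class is swapped for the generic `Note` fired at `EventLevel.DEBUG`, keeping the old numeric prefix in the message so the integration tests referenced in the comments can still match on it. The recurring shape, condensed (names taken from this diff; the wrapper function is hypothetical):

```python
from dbt.events.base_types import EventLevel
from dbt.events.functions import fire_event
from dbt.events.types import Note


def log_static_parser_result(path: str, ok: bool) -> None:
    # One generic event type, distinguished by message prefix, replaces the
    # StaticParserSuccess / StaticParserFailure classes removed above.
    if ok:
        fire_event(Note(msg=f"1699: static parser successfully parsed {path}"), EventLevel.DEBUG)
    else:
        fire_event(Note(msg=f"1603: static parser failed on {path}"), EventLevel.DEBUG)
```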
@@ -83,6 +83,7 @@ class CompileTask(GraphRunnableTask):
                adapter=adapter,
                other=deferred_manifest,
                selected=selected_uids,
                favor_state=bool(self.args.favor_state),
            )
            # TODO: is it wrong to write the manifest here? I think it's right...
            self.write_manifest()

@@ -5,7 +5,11 @@ import sys
from typing import Optional, Dict, Any, List

from dbt.events.functions import fire_event
from dbt.events.types import OpenCommand
from dbt.events.types import (
    OpenCommand,
    DebugCmdOut,
    DebugCmdResult,
)
from dbt import flags
import dbt.clients.system
import dbt.exceptions
@@ -99,25 +103,25 @@ class DebugTask(BaseTask):
            return not self.any_failure

        version = get_installed_version().to_version_string(skip_matcher=True)
        print("dbt version: {}".format(version))
        print("python version: {}".format(sys.version.split()[0]))
        print("python path: {}".format(sys.executable))
        print("os info: {}".format(platform.platform()))
        print("Using profiles.yml file at {}".format(self.profile_path))
        print("Using dbt_project.yml file at {}".format(self.project_path))
        print("")
        fire_event(DebugCmdOut(msg="dbt version: {}".format(version)))
        fire_event(DebugCmdOut(msg="python version: {}".format(sys.version.split()[0])))
        fire_event(DebugCmdOut(msg="python path: {}".format(sys.executable)))
        fire_event(DebugCmdOut(msg="os info: {}".format(platform.platform())))
        fire_event(DebugCmdOut(msg="Using profiles.yml file at {}".format(self.profile_path)))
        fire_event(DebugCmdOut(msg="Using dbt_project.yml file at {}".format(self.project_path)))
        self.test_configuration()
        self.test_dependencies()
        self.test_connection()

        if self.any_failure:
            print(red(f"{(pluralize(len(self.messages), 'check'))} failed:"))
            fire_event(
                DebugCmdResult(msg=red(f"{(pluralize(len(self.messages), 'check'))} failed:"))
            )
        else:
            print(green("All checks passed!"))
            fire_event(DebugCmdResult(msg=green("All checks passed!")))

        for message in self.messages:
            print(message)
            print("")
            fire_event(DebugCmdResult(msg=f"{message}\n"))

        return not self.any_failure

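Because every `print` in `dbt debug` is now routed through `fire_event`, the command's output respects the global log formatter and arrives as structured records when JSON logging is enabled. A hedged sketch of a downstream consumer (the JSON field names here are assumptions about the log schema, not confirmed by this diff):

```python
import json


def debug_messages(log_lines):
    # Parse structured `dbt debug` output line by line. The "info" -> "msg"
    # path is illustrative; verify against your dbt version's log schema.
    messages = []
    for line in log_lines:
        record = json.loads(line)
        messages.append(record.get("info", {}).get("msg", ""))
    return messages
```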
@@ -273,21 +277,33 @@ class DebugTask(BaseTask):
        return green("OK found")

    def test_dependencies(self):
        print("Required dependencies:")
        print(" - git [{}]".format(self.test_git()))
        print("")
        fire_event(DebugCmdOut(msg="Required dependencies:"))

        logline_msg = self.test_git()
        fire_event(DebugCmdResult(msg=f" - git [{logline_msg}]\n"))

    def test_configuration(self):
        fire_event(DebugCmdOut(msg="Configuration:"))

        profile_status = self._load_profile()
        fire_event(DebugCmdOut(msg=f" profiles.yml file [{profile_status}]"))

        project_status = self._load_project()
        print("Configuration:")
        print(" profiles.yml file [{}]".format(profile_status))
        print(" dbt_project.yml file [{}]".format(project_status))
        fire_event(DebugCmdOut(msg=f" dbt_project.yml file [{project_status}]"))

        # skip profile stuff if we can't find a profile name
        if self.profile_name is not None:
            print(" profile: {} [{}]".format(self.profile_name, self._profile_found()))
            print(" target: {} [{}]".format(self.target_name, self._target_found()))
            print("")
            fire_event(
                DebugCmdOut(
                    msg=" profile: {} [{}]\n".format(self.profile_name, self._profile_found())
                )
            )
            fire_event(
                DebugCmdOut(
                    msg=" target: {} [{}]\n".format(self.target_name, self._target_found())
                )
            )

        self._log_project_fail()
        self._log_profile_fail()

@@ -348,11 +364,12 @@ class DebugTask(BaseTask):
    def test_connection(self):
        if not self.profile:
            return
        print("Connection:")
        fire_event(DebugCmdOut(msg="Connection:"))
        for k, v in self.profile.credentials.connection_info():
            print(" {}: {}".format(k, v))
        print(" Connection test: [{}]".format(self._connection_result()))
        print("")
            fire_event(DebugCmdOut(msg=f" {k}: {v}"))

        res = self._connection_result()
        fire_event(DebugCmdOut(msg=f" Connection test: [{res}]\n"))

    @classmethod
    def validate_connection(cls, target_dict):

@@ -20,7 +20,7 @@ from dbt.events.types import (
    DepsInstallInfo,
    DepsListSubdirectory,
    DepsNotifyUpdatesAvailable,
    EmptyLine,
    Formatting,
)
from dbt.clients import system

@@ -88,7 +88,7 @@ class DepsTask(BaseTask):
                package_name=package_name, source_type=source_type, version=version
            )
        if packages_to_upgrade:
            fire_event(EmptyLine())
            fire_event(Formatting(""))
            fire_event(DepsNotifyUpdatesAvailable(packages=ListOfStrings(packages_to_upgrade)))

    @classmethod

@@ -105,10 +105,10 @@ class FreshnessRunner(BaseRunner):
        )

        relation = self.adapter.Relation.create_from_source(compiled_node)
        # given a Source, calculate its fresnhess.
        # given a Source, calculate its freshness.
        with self.adapter.connection_for(compiled_node):
            self.adapter.clear_transaction()
            freshness = self.adapter.calculate_freshness(
            adapter_response, freshness = self.adapter.calculate_freshness(
                relation,
                compiled_node.loaded_at_field,
                compiled_node.freshness.filter,
@@ -124,7 +124,7 @@ class FreshnessRunner(BaseRunner):
            timing=[],
            execution_time=0,
            message=None,
            adapter_response={},
            adapter_response=adapter_response.to_dict(omit_none=True),
            failures=None,
            **freshness,
        )

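`calculate_freshness` now returns the adapter response alongside the freshness data, and the runner serializes it onto the result with `to_dict(omit_none=True)`. The change in miniature (stand-in types, not the real adapter interface):

```python
from dataclasses import asdict, dataclass
from typing import Any, Dict, Optional, Tuple


@dataclass
class FakeAdapterResponse:
    """Stand-in for the adapter response object returned by the warehouse."""
    _message: str
    code: Optional[str] = None

    def to_dict(self, omit_none: bool = False) -> Dict[str, Any]:
        data = asdict(self)
        return {k: v for k, v in data.items() if not (omit_none and v is None)}


def calculate_freshness_stub() -> Tuple[FakeAdapterResponse, Dict[str, Any]]:
    # New contract: (adapter_response, freshness) instead of freshness alone.
    return FakeAdapterResponse(_message="SELECT 1"), {"age": 0.0}


adapter_response, freshness = calculate_freshness_stub()
result = {"adapter_response": adapter_response.to_dict(omit_none=True), **freshness}
```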
@@ -1,15 +1,21 @@
import json

import dbt.flags

from dbt.contracts.graph.nodes import Exposure, SourceDefinition, Metric
from dbt.graph import ResourceTypeSelector
from dbt.task.runnable import GraphRunnableTask, ManifestTask
from dbt.task.test import TestSelector
from dbt.node_types import NodeType
from dbt.events.functions import warn_or_error
from dbt.events.types import NoNodesSelected
from dbt.events.functions import (
    fire_event,
    warn_or_error,
)
from dbt.events.types import (
    NoNodesSelected,
    ListCmdOut,
)
from dbt.exceptions import DbtRuntimeError, DbtInternalError
from dbt.logger import log_manager
from dbt.events.eventmgr import EventLevel


class ListTask(GraphRunnableTask):
@@ -50,20 +56,6 @@ class ListTask(GraphRunnableTask):
                '"models" and "resource_type" are mutually exclusive ' "arguments"
            )

    @classmethod
    def pre_init_hook(cls, args):
        """A hook called before the task is initialized."""
        # Filter out all INFO-level logging to allow piping ls output to jq, etc
        # WARN level will still include all warnings + errors
        # Do this by:
        # - returning the log level so that we can pass it into the 'level_override'
        #   arg of events.functions.setup_event_logger() -- good!
        # - mutating the initialized, not-yet-configured STDOUT event logger
        #   because it's being configured too late -- bad! TODO refactor!
        log_manager.stderr_console()
        super().pre_init_hook(args)
        return EventLevel.WARN

    def _iterate_selected_nodes(self):
        selector = self.get_node_selector()
        spec = self.get_selection_spec()
@@ -148,9 +140,14 @@ class ListTask(GraphRunnableTask):
        return self.output_results(generator())

    def output_results(self, results):
        """Log, or output a plain, newline-delimited, and ready-to-pipe list of nodes found."""
        for result in results:
            self.node_results.append(result)
            print(result)
            if dbt.flags.LOG_FORMAT == "json":
                fire_event(ListCmdOut(msg=result))
            else:
                # Cleaner to leave as print than to mutate the logger not to print timestamps.
                print(result)
        return self.node_results

    @property

@@ -5,7 +5,7 @@ from dbt.logger import (
)
from dbt.events.functions import fire_event
from dbt.events.types import (
    EmptyLine,
    Formatting,
    RunResultWarning,
    RunResultWarningMessage,
    RunResultFailure,
@@ -72,14 +72,14 @@ def print_run_status_line(results) -> None:
        stats["total"] += 1

    with TextOnly():
        fire_event(EmptyLine())
        fire_event(Formatting(""))
    fire_event(StatsLine(stats=stats))


def print_run_result_error(result, newline: bool = True, is_warning: bool = False) -> None:
    if newline:
        with TextOnly():
            fire_event(EmptyLine())
            fire_event(Formatting(""))

    if result.status == NodeStatus.Fail or (is_warning and result.status == NodeStatus.Warn):
        if is_warning:
@@ -109,12 +109,12 @@ def print_run_result_error(result, newline: bool = True, is_warning: bool = Fals

    if result.node.build_path is not None:
        with TextOnly():
            fire_event(EmptyLine())
            fire_event(Formatting(""))
        fire_event(SQLCompiledPath(path=result.node.compiled_path))

    if result.node.should_store_failures:
        with TextOnly():
            fire_event(EmptyLine())
            fire_event(Formatting(""))
        fire_event(CheckNodeTestFailure(relation_name=result.node.relation_name))

    elif result.message is not None:
@@ -143,7 +143,7 @@ def print_run_end_messages(results, keyboard_interrupt: bool = False) -> None:

    with DbtStatusMessage(), InvocationProcessor():
        with TextOnly():
            fire_event(EmptyLine())
            fire_event(Formatting(""))
        fire_event(
            EndOfRunSummary(
                num_errors=len(errors),

@@ -30,7 +30,7 @@ from dbt.exceptions import (
from dbt.events.functions import fire_event, get_invocation_id
from dbt.events.types import (
    DatabaseErrorRunningHook,
    EmptyLine,
    Formatting,
    HooksRunning,
    FinishedRunningStats,
    LogModelResult,
@@ -335,7 +335,7 @@ class RunTask(CompileTask):
        num_hooks = len(ordered_hooks)

        with TextOnly():
            fire_event(EmptyLine())
            fire_event(Formatting(""))
        fire_event(HooksRunning(num_hooks=num_hooks, hook_type=hook_type))

        startctx = TimestampNamed("node_started_at")
@@ -388,7 +388,7 @@ class RunTask(CompileTask):
        self._total_executed += len(ordered_hooks)

        with TextOnly():
            fire_event(EmptyLine())
            fire_event(Formatting(""))

    def safe_run_hooks(
        self, adapter, hook_type: RunHookType, extra_context: Dict[str, Any]
@@ -419,7 +419,7 @@ class RunTask(CompileTask):
        execution = utils.humanize_execution_time(execution_time=execution_time)

        with TextOnly():
            fire_event(EmptyLine())
            fire_event(Formatting(""))
        fire_event(
            FinishedRunningStats(
                stat_line=stat_line, execution=execution, execution_time=execution_time

@@ -28,7 +28,7 @@ from dbt.logger import (
)
from dbt.events.functions import fire_event, warn_or_error
from dbt.events.types import (
    EmptyLine,
    Formatting,
    LogCancelLine,
    DefaultSelector,
    NodeStart,
@@ -377,7 +377,7 @@ class GraphRunnableTask(ManifestTask):
                )
            )
            with TextOnly():
                fire_event(EmptyLine())
                fire_event(Formatting(""))

        pool = ThreadPool(num_threads)
        try:
@@ -458,7 +458,7 @@ class GraphRunnableTask(ManifestTask):

        if len(self._flattened_nodes) == 0:
            with TextOnly():
                fire_event(EmptyLine())
                fire_event(Formatting(""))
            warn_or_error(NothingToDo())
            result = self.get_result(
                results=[],
@@ -467,7 +467,7 @@ class GraphRunnableTask(ManifestTask):
            )
        else:
            with TextOnly():
                fire_event(EmptyLine())
                fire_event(Formatting(""))
            selected_uids = frozenset(n.unique_id for n in self._flattened_nodes)
            result = self.execute_with_hooks(selected_uids)


@@ -12,8 +12,7 @@ from dbt.logger import TextOnly
from dbt.events.functions import fire_event
from dbt.events.types import (
    SeedHeader,
    SeedHeaderSeparator,
    EmptyLine,
    Formatting,
    LogSeedResult,
    LogStartLine,
)
@@ -99,13 +98,13 @@ class SeedTask(RunTask):

        header = "Random sample of table: {}.{}".format(schema, alias)
        with TextOnly():
            fire_event(EmptyLine())
            fire_event(Formatting(""))
        fire_event(SeedHeader(header=header))
        fire_event(SeedHeaderSeparator(len_header=len(header)))
        fire_event(Formatting("-" * len(header)))

        rand_table.print_table(max_rows=10, max_columns=None)
        with TextOnly():
            fire_event(EmptyLine())
            fire_event(Formatting(""))

    def show_tables(self, results):
        for result in results:

@@ -6,7 +6,12 @@ from dbt.include.global_project import DOCS_INDEX_FILE_PATH
from http.server import SimpleHTTPRequestHandler
from socketserver import TCPServer
from dbt.events.functions import fire_event
from dbt.events.types import ServingDocsPort, ServingDocsAccessInfo, ServingDocsExitInfo, EmptyLine
from dbt.events.types import (
    ServingDocsPort,
    ServingDocsAccessInfo,
    ServingDocsExitInfo,
    Formatting,
)

from dbt.task.base import ConfiguredTask

@@ -22,8 +27,8 @@ class ServeTask(ConfiguredTask):

        fire_event(ServingDocsPort(address=address, port=port))
        fire_event(ServingDocsAccessInfo(port=port))
        fire_event(EmptyLine())
        fire_event(EmptyLine())
        fire_event(Formatting(""))
        fire_event(Formatting(""))
        fire_event(ServingDocsExitInfo())

        # mypy doesn't think SimpleHTTPRequestHandler is ok here, but it is

@@ -5,6 +5,7 @@ from dbt.utils import _coerce_decimal
from dbt.events.format import pluralize
from dbt.dataclass_schema import dbtClassMixin
import threading
from typing import Dict, Any

from .compile import CompileRunner
from .run import RunTask
@@ -38,6 +39,7 @@ class TestResultData(dbtClassMixin):
    failures: int
    should_warn: bool
    should_error: bool
    adapter_response: Dict[str, Any]

    @classmethod
    def validate(cls, data):
@@ -137,6 +139,7 @@ class TestRunner(CompileRunner):
                map(_coerce_decimal, table.rows[0]),
            )
        )
        test_result_dct["adapter_response"] = result["response"].to_dict(omit_none=True)
        TestResultData.validate(test_result_dct)
        return TestResultData.from_dict(test_result_dct)

@@ -171,7 +174,7 @@ class TestRunner(CompileRunner):
            thread_id=thread_id,
            execution_time=0,
            message=message,
            adapter_response={},
            adapter_response=result.adapter_response,
            failures=failures,
        )


6
scripts/env-setup.sh
Normal file
@@ -0,0 +1,6 @@
#!/bin/bash
# Set environment variables required for integration tests
echo "DBT_INVOCATION_ENV=github-actions" >> $GITHUB_ENV
echo "DBT_TEST_USER_1=dbt_test_user_1" >> $GITHUB_ENV
echo "DBT_TEST_USER_2=dbt_test_user_2" >> $GITHUB_ENV
echo "DBT_TEST_USER_3=dbt_test_user_3" >> $GITHUB_ENV
@@ -1,184 +0,0 @@
import json
import os

from test.integration.base import DBTIntegrationTest, use_profile

import dbt.exceptions

class TestGoodDocsBlocks(DBTIntegrationTest):
    @property
    def schema(self):
        return 'docs_blocks_035'

    @staticmethod
    def dir(path):
        return os.path.normpath(path)

    @property
    def models(self):
        return self.dir("models")

    @use_profile('postgres')
    def test_postgres_valid_doc_ref(self):
        self.assertEqual(len(self.run_dbt()), 1)

        self.assertTrue(os.path.exists('./target/manifest.json'))

        with open('./target/manifest.json') as fp:
            manifest = json.load(fp)

        model_data = manifest['nodes']['model.test.model']
        self.assertEqual(
            model_data['description'],
            'My model is just a copy of the seed'
        )
        self.assertEqual(
            {
                'name': 'id',
                'description': 'The user ID number',
                'data_type': None,
                'meta': {},
                'quote': None,
                'tags': [],
            },
            model_data['columns']['id']
        )
        self.assertEqual(
            {
                'name': 'first_name',
                'description': "The user's first name",
                'data_type': None,
                'meta': {},
                'quote': None,
                'tags': [],
            },
            model_data['columns']['first_name']
        )

        self.assertEqual(
            {
                'name': 'last_name',
                'description': "The user's last name",
                'data_type': None,
                'meta': {},
                'quote': None,
                'tags': [],
            },
            model_data['columns']['last_name']
        )
        self.assertEqual(len(model_data['columns']), 3)

    @use_profile('postgres')
    def test_postgres_alternative_docs_path(self):
        self.use_default_project({"docs-paths": [self.dir("docs")]})
        self.assertEqual(len(self.run_dbt()), 1)

        self.assertTrue(os.path.exists('./target/manifest.json'))

        with open('./target/manifest.json') as fp:
            manifest = json.load(fp)

        model_data = manifest['nodes']['model.test.model']
        self.assertEqual(
            model_data['description'],
            'Alt text about the model'
        )
        self.assertEqual(
            {
                'name': 'id',
                'description': 'The user ID number with alternative text',
                'data_type': None,
                'meta': {},
                'quote': None,
                'tags': [],
            },
            model_data['columns']['id']
        )
        self.assertEqual(
            {
                'name': 'first_name',
                'description': "The user's first name",
                'data_type': None,
                'meta': {},
                'quote': None,
                'tags': [],
            },
            model_data['columns']['first_name']
        )

        self.assertEqual(
            {
                'name': 'last_name',
                'description': "The user's last name in this other file",
                'data_type': None,
                'meta': {},
                'quote': None,
                'tags': [],
            },
            model_data['columns']['last_name']
        )
        self.assertEqual(len(model_data['columns']), 3)

    @use_profile('postgres')
    def test_postgres_alternative_docs_path_missing(self):
        self.use_default_project({"docs-paths": [self.dir("not-docs")]})
        with self.assertRaises(dbt.exceptions.CompilationError):
            self.run_dbt()


class TestMissingDocsBlocks(DBTIntegrationTest):
    @property
    def schema(self):
        return 'docs_blocks_035'

    @staticmethod
    def dir(path):
        return os.path.normpath(path)

    @property
    def models(self):
        return self.dir("missing_docs_models")

    @use_profile('postgres')
    def test_postgres_missing_doc_ref(self):
        # The run should fail since we could not find the docs reference.
        with self.assertRaises(dbt.exceptions.CompilationError):
            self.run_dbt()


class TestBadDocsBlocks(DBTIntegrationTest):
    @property
    def schema(self):
        return 'docs_blocks_035'

    @staticmethod
    def dir(path):
        return os.path.normpath(path)

    @property
    def models(self):
        return self.dir("invalid_name_models")

    @use_profile('postgres')
    def test_postgres_invalid_doc_ref(self):
        # The run should fail since we could not find the docs reference.
        with self.assertRaises(dbt.exceptions.CompilationError):
            self.run_dbt(expect_pass=False)

class TestDuplicateDocsBlock(DBTIntegrationTest):
    @property
    def schema(self):
        return 'docs_blocks_035'

    @staticmethod
    def dir(path):
        return os.path.normpath(path)

    @property
    def models(self):
        return self.dir("duplicate_docs")

    @use_profile('postgres')
    def test_postgres_duplicate_doc_ref(self):
        with self.assertRaises(dbt.exceptions.CompilationError):
            self.run_dbt(expect_pass=False)
@@ -1,67 +0,0 @@
from test.integration.base import DBTIntegrationTest, use_profile
from dbt.adapters.factory import FACTORY

class TestBaseCaching(DBTIntegrationTest):
    @property
    def schema(self):
        return "caching_038"

    @property
    def project_config(self):
        return {
            'config-version': 2,
            'quoting': {
                'identifier': False,
                'schema': False,
            }
        }

    def run_and_get_adapter(self):
        # we want to inspect the adapter that dbt used for the run, which is
        # not self.adapter. You can't do this until after you've run dbt once.
        self.run_dbt(['run'])
        return FACTORY.adapters[self.adapter_type]

    def cache_run(self):
        adapter = self.run_and_get_adapter()
        self.assertEqual(len(adapter.cache.relations), 1)
        relation = next(iter(adapter.cache.relations.values()))
        self.assertEqual(relation.inner.schema, self.unique_schema())
        self.assertEqual(relation.schema, self.unique_schema().lower())

        self.run_dbt(['run'])
        self.assertEqual(len(adapter.cache.relations), 1)
        second_relation = next(iter(adapter.cache.relations.values()))
        self.assertEqual(relation, second_relation)

class TestCachingLowercaseModel(TestBaseCaching):
    @property
    def models(self):
        return "models"

    @use_profile('postgres')
    def test_postgres_cache(self):
        self.cache_run()

class TestCachingUppercaseModel(TestBaseCaching):
    @property
    def models(self):
        return "shouting_models"

    @use_profile('postgres')
    def test_postgres_cache(self):
        self.cache_run()

class TestCachingSelectedSchemaOnly(TestBaseCaching):
    @property
    def models(self):
        return "models_multi_schemas"

    def run_and_get_adapter(self):
        # select only the 'model' in the default schema
        self.run_dbt(['--cache-selected-only', 'run', '--select', 'model'])
        return FACTORY.adapters[self.adapter_type]

    @use_profile('postgres')
    def test_postgres_cache(self):
        self.cache_run()
@@ -1,755 +0,0 @@
import os
import shutil
from unittest import mock
from unittest.mock import Mock, call
from pathlib import Path

import click

from test.integration.base import DBTIntegrationTest, use_profile
from pytest import mark

class TestInit(DBTIntegrationTest):
    def tearDown(self):
        project_name = self.get_project_name()

        if os.path.exists(project_name):
            shutil.rmtree(project_name)

        super().tearDown()

    def get_project_name(self):
        return 'my_project_{}'.format(self.unique_schema())

    @property
    def schema(self):
        return 'init_040'

    @property
    def models(self):
        return 'models'

    # See CT-570 / GH 5180
    @mark.skip(
        reason="Broken because of https://github.com/dbt-labs/dbt-core/pull/5171"
    )
    @use_profile('postgres')
    @mock.patch('dbt.task.init._get_adapter_plugin_names')
    @mock.patch('click.confirm')
    @mock.patch('click.prompt')
    def test_postgres_init_task_in_project_with_existing_profiles_yml(self, mock_prompt, mock_confirm, mock_get_adapter):
        manager = Mock()
        manager.attach_mock(mock_prompt, 'prompt')
        manager.attach_mock(mock_confirm, 'confirm')
        manager.confirm.side_effect = ["y"]
        manager.prompt.side_effect = [
            1,
            'localhost',
            5432,
            'test_user',
            'test_password',
            'test_db',
            'test_schema',
            4,
        ]
        mock_get_adapter.return_value = [1]

        self.run_dbt(['init'])

        manager.assert_has_calls([
            call.confirm(f"The profile test already exists in {os.path.join(self.test_root_dir, 'profiles.yml')}. Continue and overwrite it?"),
            call.prompt("Which database would you like to use?\n[1] postgres\n\n(Don't see the one you want? https://docs.getdbt.com/docs/available-adapters)\n\nEnter a number", type=click.INT),
            call.prompt('host (hostname for the instance)', default=None, hide_input=False, type=None),
            call.prompt('port', default=5432, hide_input=False, type=click.INT),
            call.prompt('user (dev username)', default=None, hide_input=False, type=None),
            call.prompt('pass (dev password)', default=None, hide_input=True, type=None),
            call.prompt('dbname (default database that dbt will build objects in)', default=None, hide_input=False, type=None),
            call.prompt('schema (default schema that dbt will build objects in)', default=None, hide_input=False, type=None),
            call.prompt('threads (1 or more)', default=1, hide_input=False, type=click.INT),
        ])

        with open(os.path.join(self.test_root_dir, 'profiles.yml'), 'r') as f:
            assert f.read() == """config:
  send_anonymous_usage_stats: false
test:
  outputs:
    dev:
      dbname: test_db
      host: localhost
      pass: test_password
      port: 5432
      schema: test_schema
      threads: 4
      type: postgres
      user: test_user
  target: dev
"""

    # See CT-570 / GH 5180
    @mark.skip(
        reason="Broken because of https://github.com/dbt-labs/dbt-core/pull/5171"
    )
    @use_profile('postgres')
    @mock.patch('dbt.task.init._get_adapter_plugin_names')
    @mock.patch('click.confirm')
    @mock.patch('click.prompt')
    @mock.patch.object(Path, 'exists', autospec=True)
    def test_postgres_init_task_in_project_without_existing_profiles_yml(self, exists, mock_prompt, mock_confirm, mock_get_adapter):

        def exists_side_effect(path):
            # Override responses on specific files, default to 'real world' if not overridden
            return {
                'profiles.yml': False
            }.get(path.name, os.path.exists(path))

        exists.side_effect = exists_side_effect
        manager = Mock()
        manager.attach_mock(mock_prompt, 'prompt')
        manager.prompt.side_effect = [
            1,
            'localhost',
            5432,
            'test_user',
            'test_password',
            'test_db',
            'test_schema',
            4,
        ]
        mock_get_adapter.return_value = [1]

        self.run_dbt(['init'])

        manager.assert_has_calls([
            call.prompt("Which database would you like to use?\n[1] postgres\n\n(Don't see the one you want? https://docs.getdbt.com/docs/available-adapters)\n\nEnter a number", type=click.INT),
            call.prompt('host (hostname for the instance)', default=None, hide_input=False, type=None),
            call.prompt('port', default=5432, hide_input=False, type=click.INT),
            call.prompt('user (dev username)', default=None, hide_input=False, type=None),
            call.prompt('pass (dev password)', default=None, hide_input=True, type=None),
            call.prompt('dbname (default database that dbt will build objects in)', default=None, hide_input=False, type=None),
            call.prompt('schema (default schema that dbt will build objects in)', default=None, hide_input=False, type=None),
            call.prompt('threads (1 or more)', default=1, hide_input=False, type=click.INT)
        ])

        with open(os.path.join(self.test_root_dir, 'profiles.yml'), 'r') as f:
            assert f.read() == """test:
  outputs:
    dev:
      dbname: test_db
      host: localhost
      pass: test_password
      port: 5432
      schema: test_schema
      threads: 4
      type: postgres
      user: test_user
  target: dev
"""

    # See CT-570 / GH 5180
    @mark.skip(
        reason="Broken because of https://github.com/dbt-labs/dbt-core/pull/5171"
    )
    @use_profile('postgres')
    @mock.patch('dbt.task.init._get_adapter_plugin_names')
    @mock.patch('click.confirm')
    @mock.patch('click.prompt')
    @mock.patch.object(Path, 'exists', autospec=True)
    def test_postgres_init_task_in_project_without_existing_profiles_yml_or_profile_template(self, exists, mock_prompt, mock_confirm, mock_get_adapter):

        def exists_side_effect(path):
            # Override responses on specific files, default to 'real world' if not overridden
            return {
                'profiles.yml': False,
                'profile_template.yml': False,
            }.get(path.name, os.path.exists(path))

        exists.side_effect = exists_side_effect
        manager = Mock()
        manager.attach_mock(mock_prompt, 'prompt')
        manager.attach_mock(mock_confirm, 'confirm')
        manager.prompt.side_effect = [
            1,
        ]
        mock_get_adapter.return_value = [1]
        self.run_dbt(['init'])
        manager.assert_has_calls([
            call.prompt("Which database would you like to use?\n[1] postgres\n\n(Don't see the one you want? https://docs.getdbt.com/docs/available-adapters)\n\nEnter a number", type=click.INT),
        ])

        with open(os.path.join(self.test_root_dir, 'profiles.yml'), 'r') as f:
            assert f.read() == """test:
  outputs:

    dev:
      type: postgres
      threads: [1 or more]
      host: [host]
      port: [port]
      user: [dev_username]
      pass: [dev_password]
      dbname: [dbname]
      schema: [dev_schema]

    prod:
      type: postgres
      threads: [1 or more]
      host: [host]
      port: [port]
      user: [prod_username]
      pass: [prod_password]
      dbname: [dbname]
      schema: [prod_schema]

  target: dev
"""

    @use_profile('postgres')
    @mock.patch('dbt.task.init._get_adapter_plugin_names')
    @mock.patch('click.confirm')
    @mock.patch('click.prompt')
    @mock.patch.object(Path, 'exists', autospec=True)
    def test_postgres_init_task_in_project_with_profile_template_without_existing_profiles_yml(self, exists, mock_prompt, mock_confirm, mock_get_adapter):

        def exists_side_effect(path):
            # Override responses on specific files, default to 'real world' if not overridden
return {
|
||||
'profiles.yml': False,
|
||||
}.get(path.name, os.path.exists(path))
|
||||
exists.side_effect = exists_side_effect
|
||||
|
||||
with open("profile_template.yml", 'w') as f:
|
||||
f.write("""fixed:
|
||||
type: postgres
|
||||
threads: 4
|
||||
host: localhost
|
||||
dbname: my_db
|
||||
schema: my_schema
|
||||
target: my_target
|
||||
prompts:
|
||||
target:
|
||||
hint: 'The target name'
|
||||
type: string
|
||||
port:
|
||||
hint: 'The port (for integer test purposes)'
|
||||
type: int
|
||||
default: 5432
|
||||
user:
|
||||
hint: 'Your username'
|
||||
pass:
|
||||
hint: 'Your password'
|
||||
hide_input: true""")
|
||||
|
||||
manager = Mock()
|
||||
manager.attach_mock(mock_prompt, 'prompt')
|
||||
manager.attach_mock(mock_confirm, 'confirm')
|
||||
manager.prompt.side_effect = [
|
||||
'my_target',
|
||||
5432,
|
||||
'test_username',
|
||||
'test_password'
|
||||
]
|
||||
mock_get_adapter.return_value = [1]
|
||||
self.run_dbt(['init'])
|
||||
manager.assert_has_calls([
|
||||
call.prompt('target (The target name)', default=None, hide_input=False, type=click.STRING),
|
||||
call.prompt('port (The port (for integer test purposes))', default=5432, hide_input=False, type=click.INT),
|
||||
call.prompt('user (Your username)', default=None, hide_input=False, type=None),
|
||||
call.prompt('pass (Your password)', default=None, hide_input=True, type=None)
|
||||
])
|
||||
|
||||
with open(os.path.join(self.test_root_dir, 'profiles.yml'), 'r') as f:
|
||||
assert f.read() == """test:
|
||||
outputs:
|
||||
my_target:
|
||||
dbname: my_db
|
||||
host: localhost
|
||||
pass: test_password
|
||||
port: 5432
|
||||
schema: my_schema
|
||||
threads: 4
|
||||
type: postgres
|
||||
user: test_username
|
||||
target: my_target
|
||||
"""
    # See CT-570 / GH 5180
    @mark.skip(
        reason="Broken because of https://github.com/dbt-labs/dbt-core/pull/5171"
    )
    @use_profile('postgres')
    @mock.patch('dbt.task.init._get_adapter_plugin_names')
    @mock.patch('click.confirm')
    @mock.patch('click.prompt')
    def test_postgres_init_task_in_project_with_invalid_profile_template(self, mock_prompt, mock_confirm, mock_get_adapter):
        """Test that when an invalid profile_template.yml is provided in the project,
        init command falls back to the target's profile_template.yml"""

        with open("profile_template.yml", 'w') as f:
            f.write("""invalid template""")

        manager = Mock()
        manager.attach_mock(mock_prompt, 'prompt')
        manager.attach_mock(mock_confirm, 'confirm')
        manager.confirm.side_effect = ["y"]
        manager.prompt.side_effect = [
            1,
            'localhost',
            5432,
            'test_username',
            'test_password',
            'test_db',
            'test_schema',
            4,
        ]
        mock_get_adapter.return_value = [1]

        self.run_dbt(['init'])

        manager.assert_has_calls([
            call.confirm(f"The profile test already exists in {os.path.join(self.test_root_dir, 'profiles.yml')}. Continue and overwrite it?"),
            call.prompt("Which database would you like to use?\n[1] postgres\n\n(Don't see the one you want? https://docs.getdbt.com/docs/available-adapters)\n\nEnter a number", type=click.INT),
            call.prompt('host (hostname for the instance)', default=None, hide_input=False, type=None),
            call.prompt('port', default=5432, hide_input=False, type=click.INT),
            call.prompt('user (dev username)', default=None, hide_input=False, type=None),
            call.prompt('pass (dev password)', default=None, hide_input=True, type=None),
            call.prompt('dbname (default database that dbt will build objects in)', default=None, hide_input=False, type=None),
            call.prompt('schema (default schema that dbt will build objects in)', default=None, hide_input=False, type=None),
            call.prompt('threads (1 or more)', default=1, hide_input=False, type=click.INT)
        ])

        with open(os.path.join(self.test_root_dir, 'profiles.yml'), 'r') as f:
            assert f.read() == """config:
  send_anonymous_usage_stats: false
test:
  outputs:
    dev:
      dbname: test_db
      host: localhost
      pass: test_password
      port: 5432
      schema: test_schema
      threads: 4
      type: postgres
      user: test_username
  target: dev
"""
    # See CT-570 / GH 5180
    @mark.skip(
        reason="Broken because of https://github.com/dbt-labs/dbt-core/pull/5171"
    )
    @use_profile('postgres')
    @mock.patch('dbt.task.init._get_adapter_plugin_names')
    @mock.patch('click.confirm')
    @mock.patch('click.prompt')
    def test_postgres_init_task_outside_of_project(self, mock_prompt, mock_confirm, mock_get_adapter):
        manager = Mock()
        manager.attach_mock(mock_prompt, 'prompt')
        manager.attach_mock(mock_confirm, 'confirm')

        # Start by removing the dbt_project.yml so that we're not in an existing project
        os.remove('dbt_project.yml')

        project_name = self.get_project_name()
        manager.prompt.side_effect = [
            project_name,
            1,
            'localhost',
            5432,
            'test_username',
            'test_password',
            'test_db',
            'test_schema',
            4,
        ]
        mock_get_adapter.return_value = [1]
        self.run_dbt(['init'])
        manager.assert_has_calls([
            call.prompt("Enter a name for your project (letters, digits, underscore)"),
            call.prompt("Which database would you like to use?\n[1] postgres\n\n(Don't see the one you want? https://docs.getdbt.com/docs/available-adapters)\n\nEnter a number", type=click.INT),
            call.prompt('host (hostname for the instance)', default=None, hide_input=False, type=None),
            call.prompt('port', default=5432, hide_input=False, type=click.INT),
            call.prompt('user (dev username)', default=None, hide_input=False, type=None),
            call.prompt('pass (dev password)', default=None, hide_input=True, type=None),
            call.prompt('dbname (default database that dbt will build objects in)', default=None, hide_input=False, type=None),
            call.prompt('schema (default schema that dbt will build objects in)', default=None, hide_input=False, type=None),
            call.prompt('threads (1 or more)', default=1, hide_input=False, type=click.INT),
        ])

        with open(os.path.join(self.test_root_dir, 'profiles.yml'), 'r') as f:
            assert f.read() == f"""config:
  send_anonymous_usage_stats: false
{project_name}:
  outputs:
    dev:
      dbname: test_db
      host: localhost
      pass: test_password
      port: 5432
      schema: test_schema
      threads: 4
      type: postgres
      user: test_username
  target: dev
test:
  outputs:
    default2:
      dbname: dbt
      host: localhost
      pass: password
      port: 5432
      schema: {self.unique_schema()}
      threads: 4
      type: postgres
      user: root
    noaccess:
      dbname: dbt
      host: localhost
      pass: password
      port: 5432
      schema: {self.unique_schema()}
      threads: 4
      type: postgres
      user: noaccess
  target: default2
"""

        with open(os.path.join(self.test_root_dir, project_name, 'dbt_project.yml'), 'r') as f:
            assert f.read() == f"""
# Name your project! Project names should contain only lowercase characters
# and underscores. A good package name should reflect your organization's
# name or the intended use of these models
name: '{project_name}'
version: '1.0.0'
config-version: 2

# This setting configures which "profile" dbt uses for this project.
profile: '{project_name}'

# These configurations specify where dbt should look for different types of files.
# The `model-paths` config, for example, states that models in this project can be
# found in the "models/" directory. You probably won't need to change these!
model-paths: ["models"]
analysis-paths: ["analyses"]
test-paths: ["tests"]
seed-paths: ["seeds"]
macro-paths: ["macros"]
snapshot-paths: ["snapshots"]

target-path: "target" # directory which will store compiled SQL files
clean-targets: # directories to be removed by `dbt clean`
  - "target"
  - "dbt_packages"


# Configuring models
# Full documentation: https://docs.getdbt.com/docs/configuring-models

# In this example config, we tell dbt to build all models in the example/
# directory as views. These settings can be overridden in the individual model
# files using the `{{{{ config(...) }}}}` macro.
models:
  {project_name}:
    # Config indicated by + and applies to all files under models/example/
    example:
      +materialized: view
"""
    # See CT-570 / GH 5180
    @mark.skip(
        reason="Broken because of https://github.com/dbt-labs/dbt-core/pull/5171"
    )
    @use_profile('postgres')
    @mock.patch('dbt.task.init._get_adapter_plugin_names')
    @mock.patch('click.confirm')
    @mock.patch('click.prompt')
    def test_postgres_init_with_provided_project_name(self, mock_prompt, mock_confirm, mock_get_adapter):
        manager = Mock()
        manager.attach_mock(mock_prompt, 'prompt')
        manager.attach_mock(mock_confirm, 'confirm')

        # Start by removing the dbt_project.yml so that we're not in an existing project
        os.remove('dbt_project.yml')

        manager.prompt.side_effect = [
            1,
            'localhost',
            5432,
            'test_username',
            'test_password',
            'test_db',
            'test_schema',
            4,
        ]
        mock_get_adapter.return_value = [1]

        # Provide project name through the init command.
        project_name = self.get_project_name()
        self.run_dbt(['init', project_name])
        manager.assert_has_calls([
            call.prompt("Which database would you like to use?\n[1] postgres\n\n(Don't see the one you want? https://docs.getdbt.com/docs/available-adapters)\n\nEnter a number", type=click.INT),
            call.prompt('host (hostname for the instance)', default=None, hide_input=False, type=None),
            call.prompt('port', default=5432, hide_input=False, type=click.INT),
            call.prompt('user (dev username)', default=None, hide_input=False, type=None),
            call.prompt('pass (dev password)', default=None, hide_input=True, type=None),
            call.prompt('dbname (default database that dbt will build objects in)', default=None, hide_input=False, type=None),
            call.prompt('schema (default schema that dbt will build objects in)', default=None, hide_input=False, type=None),
            call.prompt('threads (1 or more)', default=1, hide_input=False, type=click.INT),
        ])

        with open(os.path.join(self.test_root_dir, 'profiles.yml'), 'r') as f:
            assert f.read() == f"""config:
  send_anonymous_usage_stats: false
{project_name}:
  outputs:
    dev:
      dbname: test_db
      host: localhost
      pass: test_password
      port: 5432
      schema: test_schema
      threads: 4
      type: postgres
      user: test_username
  target: dev
test:
  outputs:
    default2:
      dbname: dbt
      host: localhost
      pass: password
      port: 5432
      schema: {self.unique_schema()}
      threads: 4
      type: postgres
      user: root
    noaccess:
      dbname: dbt
      host: localhost
      pass: password
      port: 5432
      schema: {self.unique_schema()}
      threads: 4
      type: postgres
      user: noaccess
  target: default2
"""

        with open(os.path.join(self.test_root_dir, project_name, 'dbt_project.yml'), 'r') as f:
            assert f.read() == f"""
# Name your project! Project names should contain only lowercase characters
# and underscores. A good package name should reflect your organization's
# name or the intended use of these models
name: '{project_name}'
version: '1.0.0'
config-version: 2

# This setting configures which "profile" dbt uses for this project.
profile: '{project_name}'

# These configurations specify where dbt should look for different types of files.
# The `model-paths` config, for example, states that models in this project can be
# found in the "models/" directory. You probably won't need to change these!
model-paths: ["models"]
analysis-paths: ["analyses"]
test-paths: ["tests"]
seed-paths: ["seeds"]
macro-paths: ["macros"]
snapshot-paths: ["snapshots"]

target-path: "target" # directory which will store compiled SQL files
clean-targets: # directories to be removed by `dbt clean`
  - "target"
  - "dbt_packages"


# Configuring models
# Full documentation: https://docs.getdbt.com/docs/configuring-models

# In this example config, we tell dbt to build all models in the example/
# directory as views. These settings can be overridden in the individual model
# files using the `{{{{ config(...) }}}}` macro.
models:
  {project_name}:
    # Config indicated by + and applies to all files under models/example/
    example:
      +materialized: view
"""

    @use_profile('postgres')
    @mock.patch('dbt.task.init._get_adapter_plugin_names')
    @mock.patch('click.confirm')
    @mock.patch('click.prompt')
    def test_postgres_init_invalid_project_name_cli(self, mock_prompt, mock_confirm, mock_get_adapter):
        manager = Mock()
        manager.attach_mock(mock_prompt, 'prompt')
        manager.attach_mock(mock_confirm, 'confirm')

        os.remove('dbt_project.yml')
        invalid_name = 'name-with-hyphen'
        valid_name = self.get_project_name()
        manager.prompt.side_effect = [
            valid_name
        ]
        mock_get_adapter.return_value = [1]

        self.run_dbt(['init', invalid_name, '-s'])
        manager.assert_has_calls([
            call.prompt("Enter a name for your project (letters, digits, underscore)"),
        ])

    @use_profile('postgres')
    @mock.patch('dbt.task.init._get_adapter_plugin_names')
    @mock.patch('click.confirm')
    @mock.patch('click.prompt')
    def test_postgres_init_invalid_project_name_prompt(self, mock_prompt, mock_confirm, mock_get_adapter):
        manager = Mock()
        manager.attach_mock(mock_prompt, 'prompt')
        manager.attach_mock(mock_confirm, 'confirm')

        os.remove('dbt_project.yml')

        invalid_name = 'name-with-hyphen'
        valid_name = self.get_project_name()
        manager.prompt.side_effect = [
            invalid_name, valid_name
        ]
        mock_get_adapter.return_value = [1]

        self.run_dbt(['init', '-s'])
        manager.assert_has_calls([
            call.prompt("Enter a name for your project (letters, digits, underscore)"),
            call.prompt("Enter a name for your project (letters, digits, underscore)"),
        ])

    @use_profile('postgres')
    @mock.patch('dbt.task.init._get_adapter_plugin_names')
    @mock.patch('click.confirm')
    @mock.patch('click.prompt')
    def test_postgres_init_skip_profile_setup(self, mock_prompt, mock_confirm, mock_get_adapter):
        manager = Mock()
        manager.attach_mock(mock_prompt, 'prompt')
        manager.attach_mock(mock_confirm, 'confirm')

        # Start by removing the dbt_project.yml so that we're not in an existing project
        os.remove('dbt_project.yml')

        project_name = self.get_project_name()
        manager.prompt.side_effect = [
            project_name,
        ]
        mock_get_adapter.return_value = [1]

        # no project name is passed to the init command, so it should be prompted for
        self.run_dbt(['init', '-s'])
        manager.assert_has_calls([
            call.prompt("Enter a name for your project (letters, digits, underscore)")
        ])

        with open(os.path.join(self.test_root_dir, project_name, 'dbt_project.yml'), 'r') as f:
            assert f.read() == f"""
# Name your project! Project names should contain only lowercase characters
# and underscores. A good package name should reflect your organization's
# name or the intended use of these models
name: '{project_name}'
version: '1.0.0'
config-version: 2

# This setting configures which "profile" dbt uses for this project.
profile: '{project_name}'

# These configurations specify where dbt should look for different types of files.
# The `model-paths` config, for example, states that models in this project can be
# found in the "models/" directory. You probably won't need to change these!
model-paths: ["models"]
analysis-paths: ["analyses"]
test-paths: ["tests"]
seed-paths: ["seeds"]
macro-paths: ["macros"]
snapshot-paths: ["snapshots"]

target-path: "target" # directory which will store compiled SQL files
clean-targets: # directories to be removed by `dbt clean`
  - "target"
  - "dbt_packages"


# Configuring models
# Full documentation: https://docs.getdbt.com/docs/configuring-models

# In this example config, we tell dbt to build all models in the example/
# directory as views. These settings can be overridden in the individual model
# files using the `{{{{ config(...) }}}}` macro.
models:
  {project_name}:
    # Config indicated by + and applies to all files under models/example/
    example:
      +materialized: view
"""

    @use_profile('postgres')
    @mock.patch('dbt.task.init._get_adapter_plugin_names')
    @mock.patch('click.confirm')
    @mock.patch('click.prompt')
    def test_postgres_init_provided_project_name_and_skip_profile_setup(self, mock_prompt, mock_confirm, mock_get_adapter):
        manager = Mock()
        manager.attach_mock(mock_prompt, 'prompt')
        manager.attach_mock(mock_confirm, 'confirm')

        # Start by removing the dbt_project.yml so that we're not in an existing project
        os.remove('dbt_project.yml')

        manager.prompt.side_effect = [
            1,
            'localhost',
            5432,
            'test_username',
            'test_password',
            'test_db',
            'test_schema',
            4,
        ]
        mock_get_adapter.return_value = [1]

        # provide project name through the init command
        project_name = self.get_project_name()
        self.run_dbt(['init', project_name, '-s'])
        manager.assert_not_called()

        with open(os.path.join(self.test_root_dir, project_name, 'dbt_project.yml'), 'r') as f:
            assert f.read() == f"""
# Name your project! Project names should contain only lowercase characters
# and underscores. A good package name should reflect your organization's
# name or the intended use of these models
name: '{project_name}'
version: '1.0.0'
config-version: 2

# This setting configures which "profile" dbt uses for this project.
profile: '{project_name}'

# These configurations specify where dbt should look for different types of files.
# The `model-paths` config, for example, states that models in this project can be
# found in the "models/" directory. You probably won't need to change these!
model-paths: ["models"]
analysis-paths: ["analyses"]
test-paths: ["tests"]
seed-paths: ["seeds"]
macro-paths: ["macros"]
snapshot-paths: ["snapshots"]

target-path: "target" # directory which will store compiled SQL files
clean-targets: # directories to be removed by `dbt clean`
  - "target"
  - "dbt_packages"


# Configuring models
# Full documentation: https://docs.getdbt.com/docs/configuring-models

# In this example config, we tell dbt to build all models in the example/
# directory as views. These settings can be overridden in the individual model
# files using the `{{{{ config(...) }}}}` macro.
models:
  {project_name}:
    # Config indicated by + and applies to all files under models/example/
    example:
      +materialized: view
"""
@@ -1,12 +0,0 @@
{% set col_a = '"col_a"' %}
{% set col_b = '"col_b"' %}

{{config(
    materialized = 'incremental',
    unique_key = col_a,
    incremental_strategy = var('strategy')
)}}

select
    {{ col_a }}, {{ col_b }}
from {{ref('seed')}}
@@ -1,12 +0,0 @@
{% set col_a = '"col_A"' %}
{% set col_b = '"col_B"' %}

{{config(
    materialized = 'incremental',
    unique_key = col_a,
    incremental_strategy = var('strategy')
)}}

select
    {{ col_a }}, {{ col_b }}
from {{ref('seed')}}
@@ -1,4 +0,0 @@
col_A,col_B
1,2
3,4
5,6
@@ -1,78 +0,0 @@
from test.integration.base import DBTIntegrationTest, use_profile
import os


class BaseColumnQuotingTest(DBTIntegrationTest):
    def column_quoting(self):
        raise NotImplementedError('column_quoting not implemented')

    @property
    def schema(self):
        return 'dbt_column_quoting_052'

    @staticmethod
    def dir(value):
        return os.path.normpath(value)

    def _run_columnn_quotes(self, strategy='delete+insert'):
        strategy_vars = '{{"strategy": "{}"}}'.format(strategy)
        self.run_dbt(['seed', '--vars', strategy_vars])
        self.run_dbt(['run', '--vars', strategy_vars])
        self.run_dbt(['run', '--vars', strategy_vars])


class TestColumnQuotingDefault(BaseColumnQuotingTest):
    @property
    def project_config(self):
        return {
            'config-version': 2
        }

    @property
    def models(self):
        return self.dir('models')

    def run_dbt(self, *args, **kwargs):
        return super().run_dbt(*args, **kwargs)

    @use_profile('postgres')
    def test_postgres_column_quotes(self):
        self._run_columnn_quotes()


class TestColumnQuotingDisabled(BaseColumnQuotingTest):
    @property
    def models(self):
        return self.dir('models-unquoted')

    @property
    def project_config(self):
        return {
            'config-version': 2,
            'seeds': {
                'quote_columns': False,
            },
        }

    @use_profile('postgres')
    def test_postgres_column_quotes(self):
        self._run_columnn_quotes()


class TestColumnQuotingEnabled(BaseColumnQuotingTest):
    @property
    def models(self):
        return self.dir('models')

    @property
    def project_config(self):
        return {
            'config-version': 2,
            'seeds': {
                'quote_columns': True,
            },
        }

    @use_profile('postgres')
    def test_postgres_column_quotes(self):
        self._run_columnn_quotes()
@@ -1,5 +0,0 @@
-- Macro to alter a column type
{% macro test_alter_column_type(model_name, column_name, new_column_type) %}
    {% set relation = ref(model_name) %}
    {{ alter_column_type(relation, column_name, new_column_type) }}
{% endmacro %}
@@ -1,9 +0,0 @@
select
    1::smallint as smallint_col,
    2::integer as int_col,
    3::bigint as bigint_col,
    4.0::real as real_col,
    5.0::double precision as double_col,
    6.0::numeric as numeric_col,
    '7'::text as text_col,
    '8'::varchar(20) as varchar_col
@@ -1,14 +0,0 @@
version: 2
models:
  - name: model
    tests:
      - is_type:
          column_map:
            smallint_col: ['integer', 'number']
            int_col: ['integer', 'number']
            bigint_col: ['integer', 'number']
            real_col: ['float', 'number']
            double_col: ['float', 'number']
            numeric_col: ['numeric', 'number']
            text_col: ['string', 'not number']
            varchar_col: ['string', 'not number']
@@ -1,13 +0,0 @@
from test.integration.base import DBTIntegrationTest, use_profile
import yaml


class TestAlterColumnTypes(DBTIntegrationTest):
    @property
    def schema(self):
        return '056_alter_column_types'

    def run_and_alter_and_test(self, alter_column_type_args):
        self.assertEqual(len(self.run_dbt(['run'])), 1)
        self.run_dbt(['run-operation', 'test_alter_column_type', '--args', alter_column_type_args])
        self.assertEqual(len(self.run_dbt(['test'])), 1)
@@ -1,22 +0,0 @@
from test.integration.base import DBTIntegrationTest, use_profile


class TestColumnTypes(DBTIntegrationTest):
    @property
    def schema(self):
        return '056_column_types'

    def run_and_test(self):
        self.assertEqual(len(self.run_dbt(['run'])), 1)
        self.assertEqual(len(self.run_dbt(['test'])), 1)


class TestPostgresColumnTypes(TestColumnTypes):
    @property
    def models(self):
        return 'pg_models'

    @use_profile('postgres')
    def test_postgres_column_types(self):
        self.run_and_test()
@@ -1,2 +0,0 @@
{{ config(materialized='ephemeral') }}
select * from {{ ref('view_model') }}
@@ -1,9 +0,0 @@
version: 2
models:
  - name: view_model
    columns:
      - name: id
        tests:
          - unique
          - not_null
      - name: name
@@ -1,5 +0,0 @@
{{ config(materialized='table') }}
select * from {{ ref('ephemeral_model') }}

-- establish a macro dependency to trigger state:modified.macros
-- depends on: {{ my_macro() }}
@@ -1 +0,0 @@
select * from no.such.table
@@ -1,2 +0,0 @@
{{ config(materialized='ephemeral') }}
select * from no.such.table
@@ -1,9 +0,0 @@
version: 2
models:
  - name: view_model
    columns:
      - name: id
        tests:
          - unique
          - not_null
      - name: name
@@ -1,5 +0,0 @@
{{ config(materialized='table') }}
select * from {{ ref('ephemeral_model') }}

-- establish a macro dependency to trigger state:modified.macros
-- depends on: {{ my_macro() }}
@@ -1 +0,0 @@
select * from no.such.table
@@ -1,9 +0,0 @@
version: 2
models:
  - name: view_model
    columns:
      - name: id
        tests:
          - unique
          - not_null
      - name: name
@@ -1,2 +0,0 @@
{{ config(materialized='table') }}
select 1 as fun
@@ -1 +0,0 @@
select * from {{ ref('seed') }}
@@ -1,13 +0,0 @@
{# trigger infinite recursion if not handled #}

{% macro my_infinitely_recursive_macro() %}
    {{ return(adapter.dispatch('my_infinitely_recursive_macro')()) }}
{% endmacro %}

{% macro default__my_infinitely_recursive_macro() %}
    {% if unmet_condition %}
        {{ my_infinitely_recursive_macro() }}
    {% else %}
        {{ return('') }}
    {% endif %}
{% endmacro %}
@@ -1,3 +0,0 @@
{% macro my_macro() %}
    {% do log('in a macro' ) %}
{% endmacro %}
@@ -1,2 +0,0 @@
{{ config(materialized='ephemeral') }}
select * from {{ ref('view_model') }}
@@ -1,8 +0,0 @@
version: 2
exposures:
  - name: my_exposure
    type: application
    depends_on:
      - ref('view_model')
    owner:
      email: test@example.com
@@ -1,10 +0,0 @@
version: 2
models:
  - name: view_model
    columns:
      - name: id
        tests:
          - unique:
              severity: error
          - not_null
      - name: name
@@ -1,5 +0,0 @@
{{ config(materialized='table') }}
select * from {{ ref('ephemeral_model') }}

-- establish a macro dependency to trigger state:modified.macros
-- depends on: {{ my_macro() }}
@@ -1,4 +0,0 @@
select * from {{ ref('seed') }}

-- establish a macro dependency that trips infinite recursion if not handled
-- depends on: {{ my_infinitely_recursive_macro() }}
@@ -1,6 +0,0 @@
{
  "metadata": {
    "dbt_schema_version": "https://schemas.getdbt.com/dbt/manifest/v3.json",
    "dbt_version": "0.21.1"
  }
}
@@ -1,3 +0,0 @@
id,name
1,Alice
2,Bob
@@ -1,14 +0,0 @@
{% snapshot my_cool_snapshot %}

    {{
        config(
            target_database=database,
            target_schema=schema,
            unique_key='id',
            strategy='check',
            check_cols=['id'],
        )
    }}

    select * from {{ ref('view_model') }}

{% endsnapshot %}
@@ -1,354 +0,0 @@
from test.integration.base import DBTIntegrationTest, use_profile
import copy
import json
import os
import shutil

import pytest
import dbt.exceptions


class TestDeferState(DBTIntegrationTest):
    @property
    def schema(self):
        return "defer_state_062"

    @property
    def models(self):
        return "models"

    def setUp(self):
        self.other_schema = None
        super().setUp()
        self._created_schemas.add(self.other_schema)

    @property
    def project_config(self):
        return {
            'config-version': 2,
            'seeds': {
                'test': {
                    'quote_columns': False,
                }
            }
        }

    def get_profile(self, adapter_type):
        if self.other_schema is None:
            self.other_schema = self.unique_schema() + '_other'
        profile = super().get_profile(adapter_type)
        default_name = profile['test']['target']
        profile['test']['outputs']['otherschema'] = copy.deepcopy(profile['test']['outputs'][default_name])
        profile['test']['outputs']['otherschema']['schema'] = self.other_schema
        return profile

    def copy_state(self):
        assert not os.path.exists('state')
        os.makedirs('state')
        shutil.copyfile('target/manifest.json', 'state/manifest.json')
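        # preserve the manifest so later invocations can compare against it via --state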

    def run_and_compile_defer(self):
        results = self.run_dbt(['seed'])
        assert len(results) == 1
        assert not any(r.node.deferred for r in results)
        results = self.run_dbt(['run'])
        assert len(results) == 2
        assert not any(r.node.deferred for r in results)
        results = self.run_dbt(['test'])
        assert len(results) == 2

        # copy files
        self.copy_state()

        # defer test, it succeeds
        results, success = self.run_dbt_and_check(['compile', '--state', 'state', '--defer'])
        self.assertEqual(len(results.results), 6)
        self.assertEqual(results.results[0].node.name, "seed")
        self.assertTrue(success)

    def run_and_snapshot_defer(self):
        results = self.run_dbt(['seed'])
        assert len(results) == 1
        assert not any(r.node.deferred for r in results)
        results = self.run_dbt(['run'])
        assert len(results) == 2
        assert not any(r.node.deferred for r in results)
        results = self.run_dbt(['test'])
        assert len(results) == 2

        # snapshot succeeds without --defer
        results = self.run_dbt(['snapshot'])

        # no state, snapshot fails
        with pytest.raises(dbt.exceptions.DbtRuntimeError):
            results = self.run_dbt(['snapshot', '--state', 'state', '--defer'])

        # copy files
        self.copy_state()

        # defer test, it succeeds
        results = self.run_dbt(['snapshot', '--state', 'state', '--defer'])

        # favor_state test, it succeeds
        results = self.run_dbt(['snapshot', '--state', 'state', '--defer', '--favor-state'])

    def run_and_defer(self):
        results = self.run_dbt(['seed'])
        assert len(results) == 1
        assert not any(r.node.deferred for r in results)
        results = self.run_dbt(['run'])
        assert len(results) == 2
        assert not any(r.node.deferred for r in results)
        results = self.run_dbt(['test'])
        assert len(results) == 2

        # copy files over from the happy times when we had a good target
        self.copy_state()

        # test tests first, because run will change things
        # no state, wrong schema, failure.
        self.run_dbt(['test', '--target', 'otherschema'], expect_pass=False)

        # test generate docs
        # no state, wrong schema, empty nodes
        catalog = self.run_dbt(['docs', 'generate', '--target', 'otherschema'])
        assert not catalog.nodes

        # no state, run also fails
        self.run_dbt(['run', '--target', 'otherschema'], expect_pass=False)

        # defer test, it succeeds
        results = self.run_dbt(['test', '-m', 'view_model+', '--state', 'state', '--defer', '--target', 'otherschema'])

        # defer docs generate with state; the catalog refers to the schema from the happy times
        catalog = self.run_dbt(['docs', 'generate', '-m', 'view_model+', '--state', 'state', '--defer', '--target', 'otherschema'])
        assert self.other_schema not in catalog.nodes["seed.test.seed"].metadata.schema
        assert self.unique_schema() in catalog.nodes["seed.test.seed"].metadata.schema

        # with state it should work though
        results = self.run_dbt(['run', '-m', 'view_model', '--state', 'state', '--defer', '--target', 'otherschema'])
        assert self.other_schema not in results[0].node.compiled_code
        assert self.unique_schema() in results[0].node.compiled_code

        with open('target/manifest.json') as fp:
            data = json.load(fp)
        assert data['nodes']['seed.test.seed']['deferred']

        assert len(results) == 1

    def run_and_defer_favor_state(self):
        results = self.run_dbt(['seed'])
        assert len(results) == 1
        assert not any(r.node.deferred for r in results)
        results = self.run_dbt(['run'])
        assert len(results) == 2
        assert not any(r.node.deferred for r in results)
        results = self.run_dbt(['test'])
        assert len(results) == 2

        # copy files over from the happy times when we had a good target
        self.copy_state()

        # test tests first, because run will change things
        # no state, wrong schema, failure.
        self.run_dbt(['test', '--target', 'otherschema'], expect_pass=False)

        # no state, run also fails
        self.run_dbt(['run', '--target', 'otherschema'], expect_pass=False)

        # defer test, it succeeds
        results = self.run_dbt(['test', '-m', 'view_model+', '--state', 'state', '--defer', '--favor-state', '--target', 'otherschema'])

        # with state it should work though
        results = self.run_dbt(['run', '-m', 'view_model', '--state', 'state', '--defer', '--favor-state', '--target', 'otherschema'])
        assert self.other_schema not in results[0].node.compiled_code
        assert self.unique_schema() in results[0].node.compiled_code

        with open('target/manifest.json') as fp:
            data = json.load(fp)
        assert data['nodes']['seed.test.seed']['deferred']

        assert len(results) == 1

    def run_switchdirs_defer(self):
        results = self.run_dbt(['seed'])
        assert len(results) == 1
        results = self.run_dbt(['run'])
        assert len(results) == 2

        # copy files over from the happy times when we had a good target
        self.copy_state()

        self.use_default_project({'model-paths': ['changed_models']})
        # the sql here is just wrong, so it should fail
        self.run_dbt(
            ['run', '-m', 'view_model', '--state', 'state', '--defer', '--target', 'otherschema'],
            expect_pass=False,
        )
        # but this should work since we just use the old happy model
        self.run_dbt(
            ['run', '-m', 'table_model', '--state', 'state', '--defer', '--target', 'otherschema'],
            expect_pass=True,
        )

        self.use_default_project({'model-paths': ['changed_models_bad']})
        # this should fail because the table model refs a broken ephemeral
        # model, which it should see
        self.run_dbt(
            ['run', '-m', 'table_model', '--state', 'state', '--defer', '--target', 'otherschema'],
            expect_pass=False,
        )

    def run_switchdirs_defer_favor_state(self):
        results = self.run_dbt(['seed'])
        assert len(results) == 1
        results = self.run_dbt(['run'])
        assert len(results) == 2

        # copy files over from the happy times when we had a good target
        self.copy_state()

        self.use_default_project({'model-paths': ['changed_models']})
        # the sql here is just wrong, so it should fail
        self.run_dbt(
            ['run', '-m', 'view_model', '--state', 'state', '--defer', '--favor-state', '--target', 'otherschema'],
            expect_pass=False,
        )
        # but this should work since we just use the old happy model
        self.run_dbt(
            ['run', '-m', 'table_model', '--state', 'state', '--defer', '--favor-state', '--target', 'otherschema'],
            expect_pass=True,
        )

        self.use_default_project({'model-paths': ['changed_models_bad']})
        # this should fail because the table model refs a broken ephemeral
        # model, which it should see
        self.run_dbt(
            ['run', '-m', 'table_model', '--state', 'state', '--defer', '--favor-state', '--target', 'otherschema'],
            expect_pass=False,
        )

    def run_defer_iff_not_exists(self):
        results = self.run_dbt(['seed', '--target', 'otherschema'])
        assert len(results) == 1
        results = self.run_dbt(['run', '--target', 'otherschema'])
        assert len(results) == 2

        # copy files over from the happy times when we had a good target
        self.copy_state()
        results = self.run_dbt(['seed'])
        assert len(results) == 1
        results = self.run_dbt(['run', '--state', 'state', '--defer'])
        assert len(results) == 2

        # because the seed now exists in our schema, we shouldn't defer it
        assert self.other_schema not in results[0].node.compiled_code
        assert self.unique_schema() in results[0].node.compiled_code

    def run_defer_iff_not_exists_favor_state(self):
        results = self.run_dbt(['seed'])
        assert len(results) == 1
        results = self.run_dbt(['run'])
        assert len(results) == 2

        # copy files over from the happy times when we had a good target
        self.copy_state()
        results = self.run_dbt(['seed'])
        assert len(results) == 1
        results = self.run_dbt(['run', '--state', 'state', '--defer', '--favor-state', '--target', 'otherschema'])
        assert len(results) == 2

        # because the seed exists in the other schema, we should defer it
        assert self.other_schema not in results[0].node.compiled_code
        assert self.unique_schema() in results[0].node.compiled_code

    def run_defer_deleted_upstream(self):
        results = self.run_dbt(['seed'])
        assert len(results) == 1
        results = self.run_dbt(['run'])
        assert len(results) == 2

        # copy files over from the happy times when we had a good target
        self.copy_state()

        self.use_default_project({'model-paths': ['changed_models_missing']})
        # ephemeral_model is now gone. previously this caused a
        # keyerror (dbt#2875), now it should pass
        self.run_dbt(
            ['run', '-m', 'view_model', '--state', 'state', '--defer', '--target', 'otherschema'],
            expect_pass=True,
        )

        # despite deferral, test should use models just created in our schema
        results = self.run_dbt(['test', '--state', 'state', '--defer'])
        assert self.other_schema not in results[0].node.compiled_code
        assert self.unique_schema() in results[0].node.compiled_code

    def run_defer_deleted_upstream_favor_state(self):
        results = self.run_dbt(['seed'])
        assert len(results) == 1
        results = self.run_dbt(['run'])
        assert len(results) == 2

        # copy files over from the happy times when we had a good target
        self.copy_state()

        self.use_default_project({'model-paths': ['changed_models_missing']})

        self.run_dbt(
            ['run', '-m', 'view_model', '--state', 'state', '--defer', '--favor-state', '--target', 'otherschema'],
            expect_pass=True,
        )

        # despite deferral, test should use models just created in our schema
        results = self.run_dbt(['test', '--state', 'state', '--defer', '--favor-state'])
        assert self.other_schema not in results[0].node.compiled_code
        assert self.unique_schema() in results[0].node.compiled_code

    @use_profile('postgres')
    def test_postgres_state_changetarget(self):
        self.run_and_defer()

        # make sure these commands don't work with --defer
        with pytest.raises(SystemExit):
            self.run_dbt(['seed', '--defer'])

    @use_profile('postgres')
    def test_postgres_state_changetarget_favor_state(self):
        self.run_and_defer_favor_state()

        # make sure these commands don't work with --defer
        with pytest.raises(SystemExit):
            self.run_dbt(['seed', '--defer'])

    @use_profile('postgres')
    def test_postgres_state_changedir(self):
        self.run_switchdirs_defer()

    @use_profile('postgres')
    def test_postgres_state_changedir_favor_state(self):
        self.run_switchdirs_defer_favor_state()

    @use_profile('postgres')
    def test_postgres_state_defer_iffnotexists(self):
        self.run_defer_iff_not_exists()

    @use_profile('postgres')
    def test_postgres_state_defer_iffnotexists_favor_state(self):
        self.run_defer_iff_not_exists_favor_state()

    @use_profile('postgres')
    def test_postgres_state_defer_deleted_upstream(self):
        self.run_defer_deleted_upstream()

    @use_profile('postgres')
    def test_postgres_state_defer_deleted_upstream_favor_state(self):
        self.run_defer_deleted_upstream_favor_state()

    @use_profile('postgres')
    def test_postgres_state_snapshot_defer(self):
        self.run_and_snapshot_defer()

    @use_profile('postgres')
    def test_postgres_state_compile_defer(self):
        self.run_and_compile_defer()
@@ -1,211 +0,0 @@
from test.integration.base import DBTIntegrationTest, use_profile
import os
import random
import shutil
import string

import pytest

from dbt.exceptions import CompilationError, IncompatibleSchemaError


class TestModifiedState(DBTIntegrationTest):
    @property
    def schema(self):
        return "modified_state_062"

    @property
    def models(self):
        return "models"

    @property
    def project_config(self):
        return {
            'config-version': 2,
            'macro-paths': ['macros'],
            'seeds': {
                'test': {
                    'quote_columns': True,
                }
            }
        }

    def _symlink_test_folders(self):
        # dbt's normal symlink behavior breaks this test. Copy the files
        # so we can freely modify them.
        for entry in os.listdir(self.test_original_source_path):
            src = os.path.join(self.test_original_source_path, entry)
            tst = os.path.join(self.test_root_dir, entry)
            if entry in {'models', 'seeds', 'macros', 'previous_state'}:
                shutil.copytree(src, tst)
            elif os.path.isdir(entry) or entry.endswith('.sql'):
                os.symlink(src, tst)

    def copy_state(self):
        assert not os.path.exists('state')
        os.makedirs('state')
        shutil.copyfile('target/manifest.json', 'state/manifest.json')

    def setUp(self):
        super().setUp()
        self.run_dbt(['seed'])
        self.run_dbt(['run'])
        self.copy_state()

    @use_profile('postgres')
    def test_postgres_changed_seed_contents_state(self):
        results = self.run_dbt(['ls', '--resource-type', 'seed', '--select', 'state:modified', '--state', './state'], expect_pass=True)
        assert len(results) == 0
        with open('seeds/seed.csv') as fp:
            fp.readline()
            newline = fp.newlines
        with open('seeds/seed.csv', 'a') as fp:
            fp.write(f'3,carl{newline}')

        results = self.run_dbt(['ls', '--resource-type', 'seed', '--select', 'state:modified', '--state', './state'])
        assert len(results) == 1
        assert results[0] == 'test.seed'

        results = self.run_dbt(['ls', '--select', 'state:modified', '--state', './state'])
        assert len(results) == 1
        assert results[0] == 'test.seed'

        results = self.run_dbt(['ls', '--select', 'state:modified+', '--state', './state'])
        assert len(results) == 7
        assert set(results) == {'test.seed', 'test.table_model', 'test.view_model', 'test.ephemeral_model', 'test.not_null_view_model_id', 'test.unique_view_model_id', 'exposure:test.my_exposure'}

        shutil.rmtree('./state')
        self.copy_state()

        with open('seeds/seed.csv', 'a') as fp:
            # assume each line is ~2 bytes + len(name)
            target_size = 1*1024*1024
            line_size = 64

            num_lines = target_size // line_size

            maxlines = num_lines + 4

            for idx in range(4, maxlines):
                value = ''.join(random.choices(string.ascii_letters, k=62))
                fp.write(f'{idx},{value}{newline}')

        # now if we run again, we should get a warning
        results = self.run_dbt(['ls', '--resource-type', 'seed', '--select', 'state:modified', '--state', './state'])
        assert len(results) == 1
        assert results[0] == 'test.seed'

        with pytest.raises(CompilationError) as exc:
            self.run_dbt(['--warn-error', 'ls', '--resource-type', 'seed', '--select', 'state:modified', '--state', './state'])
        assert '>1MB' in str(exc.value)

        shutil.rmtree('./state')
        self.copy_state()

        # once it's in path mode, we don't mark it as modified if it changes
        with open('seeds/seed.csv', 'a') as fp:
            fp.write(f'{random},test{newline}')

        results = self.run_dbt(['ls', '--resource-type', 'seed', '--select', 'state:modified', '--state', './state'], expect_pass=True)
        assert len(results) == 0

    @use_profile('postgres')
    def test_postgres_changed_seed_config(self):
        results = self.run_dbt(['ls', '--resource-type', 'seed', '--select', 'state:modified', '--state', './state'], expect_pass=True)
        assert len(results) == 0

        self.use_default_project({'seeds': {'test': {'quote_columns': False}}})

        # quoting change -> seed changed
        results = self.run_dbt(['ls', '--resource-type', 'seed', '--select', 'state:modified', '--state', './state'])
        assert len(results) == 1
        assert results[0] == 'test.seed'

    @use_profile('postgres')
    def test_postgres_unrendered_config_same(self):
        results = self.run_dbt(['ls', '--resource-type', 'model', '--select', 'state:modified', '--state', './state'], expect_pass=True)
        assert len(results) == 0

        # although this is the default value, dbt will recognize it as a change
        # for previously-unconfigured models, because it's been explicitly set
        self.use_default_project({'models': {'test': {'materialized': 'view'}}})
        results = self.run_dbt(['ls', '--resource-type', 'model', '--select', 'state:modified', '--state', './state'])
        assert len(results) == 1
        assert results[0] == 'test.view_model'

    @use_profile('postgres')
    def test_postgres_changed_model_contents(self):
        results = self.run_dbt(['run', '--models', 'state:modified', '--state', './state'])
        assert len(results) == 0

        with open('models/table_model.sql') as fp:
            fp.readline()
            newline = fp.newlines

        with open('models/table_model.sql', 'w') as fp:
            fp.write("{{ config(materialized='table') }}")
            fp.write(newline)
            fp.write("select * from {{ ref('seed') }}")
            fp.write(newline)

        results = self.run_dbt(['run', '--models', 'state:modified', '--state', './state'])
        assert len(results) == 1
        assert results[0].node.name == 'table_model'

    @use_profile('postgres')
    def test_postgres_new_macro(self):
        with open('macros/macros.sql') as fp:
            fp.readline()
            newline = fp.newlines

        new_macro = '{% macro my_other_macro() %}{% endmacro %}' + newline

        # add a new macro to a new file
        with open('macros/second_macro.sql', 'w') as fp:
            fp.write(new_macro)

        results, stdout = self.run_dbt_and_capture(['run', '--models', 'state:modified', '--state', './state'])
        assert len(results) == 0

        os.remove('macros/second_macro.sql')
        # add a new macro to the existing file
        with open('macros/macros.sql', 'a') as fp:
            fp.write(new_macro)

        results, stdout = self.run_dbt_and_capture(['run', '--models', 'state:modified', '--state', './state'])
        assert len(results) == 0

    @use_profile('postgres')
    def test_postgres_changed_macro_contents(self):
        with open('macros/macros.sql') as fp:
            fp.readline()
            newline = fp.newlines

        # modify an existing macro
        with open('macros/macros.sql', 'w') as fp:
            fp.write("{% macro my_macro() %}")
            fp.write(newline)
            fp.write(" {% do log('in a macro', info=True) %}")
            fp.write(newline)
            fp.write('{% endmacro %}')
            fp.write(newline)

        # table_model calls this macro
        results, stdout = self.run_dbt_and_capture(['run', '--models', 'state:modified', '--state', './state'])
        assert len(results) == 1

    @use_profile('postgres')
    def test_postgres_changed_exposure(self):
        with open('models/exposures.yml', 'a') as fp:
            fp.write(' name: John Doe\n')

        results, stdout = self.run_dbt_and_capture(['run', '--models', '+state:modified', '--state', './state'])
        assert len(results) == 1
        assert results[0].node.name == 'view_model'

    @use_profile('postgres')
    def test_postgres_previous_version_manifest(self):
        # This tests that a different schema version in the file throws an error
        with self.assertRaises(IncompatibleSchemaError) as exc:
            results = self.run_dbt(['ls', '-s', 'state:modified', '--state', './previous_state'])
        self.assertEqual(exc.CODE, 10014)
@@ -1,434 +0,0 @@
from test.integration.base import DBTIntegrationTest, use_profile
import os
import random
import shutil
import string

import pytest


class TestRunResultsState(DBTIntegrationTest):
    @property
    def schema(self):
        return "run_results_state_062"

    @property
    def models(self):
        return "models"

    @property
    def project_config(self):
        return {
            'config-version': 2,
            'macro-paths': ['macros'],
            'seeds': {
                'test': {
                    'quote_columns': True,
                }
            }
        }

    def _symlink_test_folders(self):
        # dbt's normal symlink behavior breaks this test. Copy the files
        # so we can freely modify them.
        for entry in os.listdir(self.test_original_source_path):
            src = os.path.join(self.test_original_source_path, entry)
            tst = os.path.join(self.test_root_dir, entry)
            if entry in {'models', 'seeds', 'macros'}:
                shutil.copytree(src, tst)
            elif os.path.isdir(entry) or entry.endswith('.sql'):
                os.symlink(src, tst)

    def copy_state(self):
        assert not os.path.exists('state')
        os.makedirs('state')
        shutil.copyfile('target/manifest.json', 'state/manifest.json')
        shutil.copyfile('target/run_results.json', 'state/run_results.json')
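        # the result:* selectors below need run_results.json alongside the manifest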
|
||||
|
||||
def setUp(self):
|
||||
super().setUp()
|
||||
self.run_dbt(['build'])
|
||||
self.copy_state()
|
||||
|
||||
def rebuild_run_dbt(self, expect_pass=True):
|
||||
shutil.rmtree('./state')
|
||||
self.run_dbt(['build'], expect_pass=expect_pass)
|
||||
self.copy_state()
|
||||
|
||||
@use_profile('postgres')
|
||||
def test_postgres_seed_run_results_state(self):
|
||||
shutil.rmtree('./state')
|
||||
self.run_dbt(['seed'])
|
||||
self.copy_state()
|
||||
results = self.run_dbt(['ls', '--resource-type', 'seed', '--select', 'result:success', '--state', './state'], expect_pass=True)
|
||||
assert len(results) == 1
|
||||
assert results[0] == 'test.seed'
|
||||
|
||||
results = self.run_dbt(['ls', '--select', 'result:success', '--state', './state'])
|
||||
assert len(results) == 1
|
||||
assert results[0] == 'test.seed'
|
||||
|
||||
results = self.run_dbt(['ls', '--select', 'result:success+', '--state', './state'])
|
||||
assert len(results) == 7
|
||||
assert set(results) == {'test.seed', 'test.table_model', 'test.view_model', 'test.ephemeral_model', 'test.not_null_view_model_id', 'test.unique_view_model_id', 'exposure:test.my_exposure'}
|
||||
|
||||
with open('seeds/seed.csv') as fp:
|
||||
fp.readline()
|
||||
newline = fp.newlines
|
||||
with open('seeds/seed.csv', 'a') as fp:
|
||||
fp.write(f'\"\'\'3,carl{newline}')
|
||||
shutil.rmtree('./state')
|
||||
self.run_dbt(['seed'], expect_pass=False)
|
||||
self.copy_state()
|
||||
|
||||
results = self.run_dbt(['ls', '--resource-type', 'seed', '--select', 'result:error', '--state', './state'], expect_pass=True)
|
||||
assert len(results) == 1
|
||||
assert results[0] == 'test.seed'
|
||||
|
||||
results = self.run_dbt(['ls', '--select', 'result:error', '--state', './state'])
|
||||
assert len(results) == 1
|
||||
assert results[0] == 'test.seed'
|
||||
|
||||
results = self.run_dbt(['ls', '--select', 'result:error+', '--state', './state'])
|
||||
assert len(results) == 7
|
||||
assert set(results) == {'test.seed', 'test.table_model', 'test.view_model', 'test.ephemeral_model', 'test.not_null_view_model_id', 'test.unique_view_model_id', 'exposure:test.my_exposure'}
|
||||
|
||||
|
||||
with open('seeds/seed.csv') as fp:
|
||||
fp.readline()
|
||||
newline = fp.newlines
|
||||
with open('seeds/seed.csv', 'a') as fp:
|
||||
# assume each line is ~2 bytes + len(name)
|
||||
target_size = 1*1024*1024
|
||||
line_size = 64
|
||||
|
||||
num_lines = target_size // line_size
|
||||
|
||||
maxlines = num_lines + 4
|
||||
|
||||
for idx in range(4, maxlines):
|
||||
value = ''.join(random.choices(string.ascii_letters, k=62))
|
||||
fp.write(f'{idx},{value}{newline}')
|
||||
shutil.rmtree('./state')
|
||||
self.run_dbt(['seed'], expect_pass=False)
|
||||
self.copy_state()
|
||||
|
||||
results = self.run_dbt(['ls', '--resource-type', 'seed', '--select', 'result:error', '--state', './state'], expect_pass=True)
|
||||
assert len(results) == 1
|
||||
assert results[0] == 'test.seed'
|
||||
|
||||
results = self.run_dbt(['ls', '--select', 'result:error', '--state', './state'])
|
||||
assert len(results) == 1
|
||||
assert results[0] == 'test.seed'
|
||||
|
||||
results = self.run_dbt(['ls', '--select', 'result:error+', '--state', './state'])
|
||||
assert len(results) == 7
|
||||
assert set(results) == {'test.seed', 'test.table_model', 'test.view_model', 'test.ephemeral_model', 'test.not_null_view_model_id', 'test.unique_view_model_id', 'exposure:test.my_exposure'}
|
||||
|
||||
@use_profile('postgres')
|
||||
def test_postgres_build_run_results_state(self):
|
||||
results = self.run_dbt(['build', '--select', 'result:error', '--state', './state'])
|
||||
assert len(results) == 0
|
||||
|
||||
with open('models/view_model.sql') as fp:
|
||||
fp.readline()
|
||||
newline = fp.newlines
|
||||
|
||||
with open('models/view_model.sql', 'w') as fp:
|
||||
fp.write(newline)
|
||||
fp.write("select * from forced_error")
|
||||
fp.write(newline)
|
||||
|
||||
self.rebuild_run_dbt(expect_pass=False)
|
||||
|
||||
results = self.run_dbt(['build', '--select', 'result:error', '--state', './state'], expect_pass=False)
|
||||
assert len(results) == 3
|
||||
nodes = set([elem.node.name for elem in results])
|
||||
assert nodes == {'view_model', 'not_null_view_model_id','unique_view_model_id'}
|
||||
|
||||
results = self.run_dbt(['ls', '--select', 'result:error', '--state', './state'])
|
||||
assert len(results) == 3
|
||||
assert set(results) == {'test.view_model', 'test.not_null_view_model_id', 'test.unique_view_model_id'}
|
||||
|
||||
results = self.run_dbt(['build', '--select', 'result:error+', '--state', './state'], expect_pass=False)
|
||||
assert len(results) == 4
|
||||
nodes = set([elem.node.name for elem in results])
|
||||
assert nodes == {'table_model','view_model', 'not_null_view_model_id','unique_view_model_id'}
|
||||
|
||||
results = self.run_dbt(['ls', '--select', 'result:error+', '--state', './state'])
|
||||
assert len(results) == 6 # includes exposure
|
||||
assert set(results) == {'test.table_model', 'test.view_model', 'test.ephemeral_model', 'test.not_null_view_model_id', 'test.unique_view_model_id', 'exposure:test.my_exposure'}
|
||||
|
||||
# test failure on build tests
|
||||
# fail the unique test
|
||||
with open('models/view_model.sql', 'w') as fp:
|
||||
fp.write(newline)
|
||||
fp.write("select 1 as id union all select 1 as id")
|
||||
fp.write(newline)
|
||||
|
||||
self.rebuild_run_dbt(expect_pass=False)
|
||||
|
||||
results = self.run_dbt(['build', '--select', 'result:fail', '--state', './state'], expect_pass=False)
|
||||
assert len(results) == 1
|
||||
assert results[0].node.name == 'unique_view_model_id'
|
||||
|
||||
results = self.run_dbt(['ls', '--select', 'result:fail', '--state', './state'])
|
||||
assert len(results) == 1
|
||||
assert results[0] == 'test.unique_view_model_id'
|
||||
|
||||
results = self.run_dbt(['build', '--select', 'result:fail+', '--state', './state'], expect_pass=False)
|
||||
assert len(results) == 2
|
||||
nodes = set([elem.node.name for elem in results])
|
||||
assert nodes == {'table_model', 'unique_view_model_id'}
|
||||
|
||||
results = self.run_dbt(['ls', '--select', 'result:fail+', '--state', './state'])
|
||||
assert len(results) == 1
|
||||
assert set(results) == {'test.unique_view_model_id'}
|
        # change the unique test severity from error to warn and reuse the same view_model.sql changes above
        with open('models/schema.yml', 'r') as f:
            filedata = f.read()
        newdata = filedata.replace('error', 'warn')
        with open('models/schema.yml', 'w') as f:
            f.write(newdata)

        self.rebuild_run_dbt(expect_pass=True)

        results = self.run_dbt(['build', '--select', 'result:warn', '--state', './state'], expect_pass=True)
        assert len(results) == 1
        assert results[0].node.name == 'unique_view_model_id'

        results = self.run_dbt(['ls', '--select', 'result:warn', '--state', './state'])
        assert len(results) == 1
        assert results[0] == 'test.unique_view_model_id'

        results = self.run_dbt(['build', '--select', 'result:warn+', '--state', './state'], expect_pass=True)
        assert len(results) == 2  # includes table_model to be run
        nodes = {elem.node.name for elem in results}
        assert nodes == {'table_model', 'unique_view_model_id'}

        results = self.run_dbt(['ls', '--select', 'result:warn+', '--state', './state'])
        assert len(results) == 1
        assert set(results) == {'test.unique_view_model_id'}
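        # The severity swap earlier in this test assumes a schema.yml fixture
        # along these lines (illustrative; the actual fixture lives in the test
        # project):
        #
        #     models:
        #       - name: view_model
        #         columns:
        #           - name: id
        #             tests:
        #               - unique:
        #                   severity: error   # becomes `warn` after the replace
        #               - not_null:
        #                   severity: error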
    @use_profile('postgres')
    def test_postgres_run_run_results_state(self):
        results = self.run_dbt(['run', '--select', 'result:success', '--state', './state'], expect_pass=True)
        assert len(results) == 2
        assert results[0].node.name == 'view_model'
        assert results[1].node.name == 'table_model'

        # clear state and rerun upstream view model to test + operator
        shutil.rmtree('./state')
        self.run_dbt(['run', '--select', 'view_model'], expect_pass=True)
        self.copy_state()
        results = self.run_dbt(['run', '--select', 'result:success+', '--state', './state'], expect_pass=True)
        assert len(results) == 2
        assert results[0].node.name == 'view_model'
        assert results[1].node.name == 'table_model'

        # check we are starting from a place with 0 errors
        results = self.run_dbt(['run', '--select', 'result:error', '--state', './state'])
        assert len(results) == 0
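        # copy_state() comes from the base test class; conceptually it snapshots
        # the artifacts that state selectors compare against, roughly:
        #
        #     os.makedirs('state', exist_ok=True)
        #     shutil.copyfile('target/manifest.json', 'state/manifest.json')
        #     shutil.copyfile('target/run_results.json', 'state/run_results.json')
        #
        # (a sketch, not the exact implementation)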
        # force an error in the view model to test error and skipped states
        with open('models/view_model.sql') as fp:
            fp.readline()
            newline = fp.newlines  # newline convention seen while reading, e.g. '\n'

        with open('models/view_model.sql', 'w') as fp:
            fp.write(newline)
            fp.write("select * from forced_error")
            fp.write(newline)

        shutil.rmtree('./state')
        self.run_dbt(['run'], expect_pass=False)
        self.copy_state()

        # test single result selector on error
        results = self.run_dbt(['run', '--select', 'result:error', '--state', './state'], expect_pass=False)
        assert len(results) == 1
        assert results[0].node.name == 'view_model'

        # test + operator selection on error
        results = self.run_dbt(['run', '--select', 'result:error+', '--state', './state'], expect_pass=False)
        assert len(results) == 2
        assert results[0].node.name == 'view_model'
        assert results[1].node.name == 'table_model'

        # single result selector on skipped. Expect this to pass because the underlying view is already defined above
        results = self.run_dbt(['run', '--select', 'result:skipped', '--state', './state'], expect_pass=True)
        assert len(results) == 1
        assert results[0].node.name == 'table_model'
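        # `result:skipped` matches here because dbt records nodes downstream of
        # an errored node with status "skipped" in run_results.json: view_model
        # errored above, so table_model was recorded as skipped.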
        # add a downstream model that depends on table_model for skipped+ selector
        with open('models/table_model_downstream.sql', 'w') as fp:
            fp.write("select * from {{ref('table_model')}}")

        shutil.rmtree('./state')
        self.run_dbt(['run'], expect_pass=False)
        self.copy_state()

        results = self.run_dbt(['run', '--select', 'result:skipped+', '--state', './state'], expect_pass=True)
        assert len(results) == 2
        assert results[0].node.name == 'table_model'
        assert results[1].node.name == 'table_model_downstream'
    @use_profile('postgres')
    def test_postgres_test_run_results_state(self):
        # run passed nodes
        results = self.run_dbt(['test', '--select', 'result:pass', '--state', './state'], expect_pass=True)
        assert len(results) == 2
        nodes = {elem.node.name for elem in results}
        assert nodes == {'unique_view_model_id', 'not_null_view_model_id'}

        # run passed nodes with + operator
        results = self.run_dbt(['test', '--select', 'result:pass+', '--state', './state'], expect_pass=True)
        assert len(results) == 2
        nodes = {elem.node.name for elem in results}
        assert nodes == {'unique_view_model_id', 'not_null_view_model_id'}

        # update view model to generate a failure case
        os.remove('./models/view_model.sql')
        with open('models/view_model.sql', 'w') as fp:
            fp.write("select 1 as id union all select 1 as id")

        self.rebuild_run_dbt(expect_pass=False)

        # test with failure selector
        results = self.run_dbt(['test', '--select', 'result:fail', '--state', './state'], expect_pass=False)
        assert len(results) == 1
        assert results[0].node.name == 'unique_view_model_id'

        # test with failure selector and + operator
        results = self.run_dbt(['test', '--select', 'result:fail+', '--state', './state'], expect_pass=False)
        assert len(results) == 1
        assert results[0].node.name == 'unique_view_model_id'

        # change the unique test severity from error to warn and reuse the same view_model.sql changes above
        with open('models/schema.yml', 'r+') as f:
            filedata = f.read()
            newdata = filedata.replace('error', 'warn')
            f.seek(0)
            f.write(newdata)
            f.truncate()

        # rebuild - expect_pass=True because we changed the error to a warning this time around
        self.rebuild_run_dbt(expect_pass=True)

        # test with warn selector
        results = self.run_dbt(['test', '--select', 'result:warn', '--state', './state'], expect_pass=True)
        assert len(results) == 1
        assert results[0].node.name == 'unique_view_model_id'

        # test with warn selector and + operator
        results = self.run_dbt(['test', '--select', 'result:warn+', '--state', './state'], expect_pass=True)
        assert len(results) == 1
        assert results[0].node.name == 'unique_view_model_id'
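    # The duplicated id in the fixture above makes the `unique` test fail while
    # `not_null` keeps passing: dbt's compiled unique test reduces to something
    # like (illustrative; the real test macro differs):
    #
    #     select id from view_model group by id having count(*) > 1
    #
    # Any returned row is a failure, or a warning once severity is set to warn.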
    @use_profile('postgres')
    def test_postgres_concurrent_selectors_run_run_results_state(self):
        results = self.run_dbt(['run', '--select', 'state:modified+', 'result:error+', '--state', './state'])
        assert len(results) == 0

        # force an error on a dbt model
        with open('models/view_model.sql') as fp:
            fp.readline()
            newline = fp.newlines

        with open('models/view_model.sql', 'w') as fp:
            fp.write(newline)
            fp.write("select * from forced_error")
            fp.write(newline)

        shutil.rmtree('./state')
        self.run_dbt(['run'], expect_pass=False)
        self.copy_state()

        # modify another dbt model
        with open('models/table_model_modified_example.sql', 'w') as fp:
            fp.write(newline)
            fp.write("select * from forced_error")
            fp.write(newline)

        results = self.run_dbt(['run', '--select', 'state:modified+', 'result:error+', '--state', './state'], expect_pass=False)
        assert len(results) == 3
        nodes = {elem.node.name for elem in results}
        assert nodes == {'view_model', 'table_model_modified_example', 'table_model'}
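    # Space-delimited criteria passed to --select are unioned: the selection in
    # this test is (state:modified+) OR (result:error+), i.e. the CLI call
    #
    #     dbt run --select state:modified+ result:error+ --state ./state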
    @use_profile('postgres')
    def test_postgres_concurrent_selectors_test_run_results_state(self):
        # create failure test case for result:fail selector
        os.remove('./models/view_model.sql')
        with open('./models/view_model.sql', 'w') as f:
            f.write('select 1 as id union all select 1 as id union all select null as id')

        # run dbt build again to trigger test errors
        self.rebuild_run_dbt(expect_pass=False)

        # get the failures from the previous run results, excluding the not_null test
        results = self.run_dbt(['test', '--select', 'result:fail', '--exclude', 'not_null_view_model_id', '--state', './state'], expect_pass=False)
        assert len(results) == 1
        nodes = {elem.node.name for elem in results}
        assert nodes == {'unique_view_model_id'}
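    # --exclude is applied after selection: result:fail matches both failing
    # tests here (null and duplicate ids), and the exclusion then drops
    # not_null_view_model_id, leaving only the unique test.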
    @use_profile('postgres')
    def test_postgres_concurrent_selectors_build_run_results_state(self):
        results = self.run_dbt(['build', '--select', 'state:modified+', 'result:error+', '--state', './state'])
        assert len(results) == 0

        # force an error on a dbt model
        with open('models/view_model.sql') as fp:
            fp.readline()
            newline = fp.newlines

        with open('models/view_model.sql', 'w') as fp:
            fp.write(newline)
            fp.write("select * from forced_error")
            fp.write(newline)

        self.rebuild_run_dbt(expect_pass=False)

        # modify another dbt model
        with open('models/table_model_modified_example.sql', 'w') as fp:
            fp.write(newline)
            fp.write("select * from forced_error")
            fp.write(newline)

        results = self.run_dbt(['build', '--select', 'state:modified+', 'result:error+', '--state', './state'], expect_pass=False)
        assert len(results) == 5
        nodes = {elem.node.name for elem in results}
        assert nodes == {'table_model_modified_example', 'view_model', 'table_model', 'not_null_view_model_id', 'unique_view_model_id'}

        # create failure test case for result:fail selector
        os.remove('./models/view_model.sql')
        with open('./models/view_model.sql', 'w') as f:
            f.write('select 1 as id union all select 1 as id')

        # create error model case for result:error selector
        with open('./models/error_model.sql', 'w') as f:
            f.write('select 1 as id from not_exists')

        # create something downstream from the error model to rerun
        with open('./models/downstream_of_error_model.sql', 'w') as f:
            f.write('select * from {{ ref("error_model") }} )')

        # regenerate build state
        self.rebuild_run_dbt(expect_pass=False)

        # modify model again to trigger the state:modified selector
        with open('models/table_model_modified_example.sql', 'w') as fp:
            fp.write(newline)
            fp.write("select * from forced_another_error")
            fp.write(newline)

        results = self.run_dbt(['build', '--select', 'state:modified+', 'result:error+', 'result:fail+', '--state', './state'], expect_pass=False)
        assert len(results) == 5
        nodes = {elem.node.name for elem in results}
        assert nodes == {'error_model', 'downstream_of_error_model', 'table_model_modified_example', 'table_model', 'unique_view_model_id'}
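    # Status vocabulary used throughout these tests: models whose SQL errors
    # are recorded as "error", tests whose assertion query returns rows are
    # "fail" (or "warn" under warn severity), and nodes downstream of an error
    # are "skipped". That is why error_model is picked up by result:error+ and
    # unique_view_model_id by result:fail+ in the final selection above.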
@@ -1 +0,0 @@
select 1 as {{ adapter.quote("2id") }}
@@ -1,9 +0,0 @@
version: 2
models:
  - name: quote_model
    description: "model to test column quotes and comments"
    columns:
      - name: 2id
        description: "XXX My description"
        quote: true
@@ -1,43 +0,0 @@
import json

from test.integration.base import DBTIntegrationTest, use_profile


class TestColumnComment(DBTIntegrationTest):
    @property
    def schema(self):
        return "column_comment_060"

    @property
    def models(self):
        return "models"

    @property
    def project_config(self):
        return {
            'config-version': 2,
            'models': {
                'test': {
                    'materialized': 'table',
                    '+persist_docs': {
                        "relation": True,
                        "columns": True,
                    },
                }
            }
        }

    def run_has_comments(self):
        self.run_dbt()
        self.run_dbt(['docs', 'generate'])
        with open('target/catalog.json') as fp:
            catalog_data = json.load(fp)
        assert 'nodes' in catalog_data
        assert len(catalog_data['nodes']) == 1
        column_node = catalog_data['nodes']['model.test.quote_model']
        column_comment = column_node['columns']['2id']['comment']
        assert column_comment.startswith('XXX')

    @use_profile('postgres')
    def test_postgres_comments(self):
        self.run_has_comments()
@@ -1,3 +0,0 @@
{{ config(materialized='table') }}

select * from {{ ref('countries') }}
@@ -1,3 +0,0 @@
{{ config(materialized='table') }}

select * from {{ ref('model_0') }}
@@ -1,4 +0,0 @@
{{ config(materialized='table') }}

select '1' as "num"
@@ -1,18 +0,0 @@
version: 2

models:
  - name: model_0
    columns:
      - name: iso3
        tests:
          - relationships:
              to: ref('model_1')
              field: iso3

  - name: model_1
    columns:
      - name: iso3
        tests:
          - relationships:
              to: ref('model_0')
              field: iso3