mirror of
https://github.com/dbt-labs/dbt-core
synced 2025-12-19 10:31:27 +00:00
Compare commits: enable-pos ... v1.10.2 (23 commits)
| SHA1 |
|---|
| 6df1c65fef |
| 71a8a08694 |
| 17806c696c |
| 4d8f730373 |
| f32eee5ff5 |
| 6602852996 |
| 92912f0ad1 |
| ca1ac9dc9f |
| 7f3ea39f98 |
| 7c8d98d240 |
| fe876a3ae2 |
| 3abb1156ed |
| cdb9b1c309 |
| 4c913966d3 |
| 8bc9b4a9e9 |
| 981058fb43 |
| 552cf1aa23 |
| 9bc130d16e |
| 40617b42e7 |
| 24a581649c |
| ae329219d9 |
| cbb5d3e803 |
| 49bba06ec6 |
37 .bumpversion.cfg Normal file
@@ -0,0 +1,37 @@
[bumpversion]
current_version = 1.10.2
parse = (?P<major>[\d]+) # major version number
    \.(?P<minor>[\d]+) # minor version number
    \.(?P<patch>[\d]+) # patch version number
    (?P<prerelease> # optional pre-release - ex: a1, b2, rc25
    (?P<prekind>a|b|rc) # pre-release type
    (?P<num>[\d]+) # pre-release version number
    )?
    ( # optional nightly release indicator
    \.(?P<nightly>dev[0-9]+) # ex: .dev02142023
    )? # expected matches: `1.15.0`, `1.5.0a11`, `1.5.0a1.dev123`, `1.5.0.dev123457`, expected failures: `1`, `1.5`, `1.5.2-a1`, `text1.5.0`
serialize =
    {major}.{minor}.{patch}{prekind}{num}.{nightly}
    {major}.{minor}.{patch}.{nightly}
    {major}.{minor}.{patch}{prekind}{num}
    {major}.{minor}.{patch}
commit = False
tag = False

[bumpversion:part:prekind]
first_value = a
optional_value = final
values =
    a
    b
    rc
    final

[bumpversion:part:num]
first_value = 1

[bumpversion:part:nightly]

[bumpversion:file:core/setup.py]

[bumpversion:file:core/dbt/version.py]
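A quick way to sanity-check that `parse` pattern is to compile it and run the expected matches and failures from its trailing comment through it. A minimal sketch, assuming verbose-mode compilation (how bump2version treats multi-line `parse` values with inline `#` comments) and full-string matching:

```python
import re

# The parse pattern from the config above, compiled in verbose mode,
# which is what makes the inline comments legal.
PARSE = re.compile(
    r"""
    (?P<major>[\d]+)              # major version number
    \.(?P<minor>[\d]+)            # minor version number
    \.(?P<patch>[\d]+)            # patch version number
    (?P<prerelease>               # optional pre-release - ex: a1, b2, rc25
        (?P<prekind>a|b|rc)       # pre-release type
        (?P<num>[\d]+)            # pre-release version number
    )?
    (                             # optional nightly release indicator
        \.(?P<nightly>dev[0-9]+)  # ex: .dev02142023
    )?
    """,
    re.VERBOSE,
)

# Expected matches and failures, straight from the comment in the config.
for version in ["1.15.0", "1.5.0a11", "1.5.0a1.dev123", "1.5.0.dev123457"]:
    assert PARSE.fullmatch(version), version
for version in ["1", "1.5", "1.5.2-a1", "text1.5.0"]:
    assert not PARSE.fullmatch(version), version

print(PARSE.fullmatch("1.5.0a1.dev123").groupdict())
# {'major': '1', 'minor': '5', 'patch': '0', 'prerelease': 'a1',
#  'prekind': 'a', 'num': '1', 'nightly': 'dev123'}
```

The four `serialize` formats then re-emit whichever of those optional groups matched, falling through from most specific (`{major}.{minor}.{patch}{prekind}{num}.{nightly}`) to plain `{major}.{minor}.{patch}`.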
@@ -3,7 +3,6 @@
For information on prior major and minor releases, see their changelogs:

* [1.10](https://github.com/dbt-labs/dbt-core/blob/1.10.latest/CHANGELOG.md)
* [1.9](https://github.com/dbt-labs/dbt-core/blob/1.9.latest/CHANGELOG.md)
* [1.8](https://github.com/dbt-labs/dbt-core/blob/1.8.latest/CHANGELOG.md)
* [1.7](https://github.com/dbt-labs/dbt-core/blob/1.7.latest/CHANGELOG.md)
128 .changes/1.10.0.md Normal file
@@ -0,0 +1,128 @@
## dbt-core 1.10.0 - June 16, 2025

### Breaking Changes

- Add invocations_started_at field to artifact metadata ([#11272](https://github.com/dbt-labs/dbt-core/issues/11272))
- Flip behavior flag `source-freshness-run-project-hooks` to true ([#11609](https://github.com/dbt-labs/dbt-core/issues/11609))
- Flip behavior flag to disallow spaces in resource names ([#11610](https://github.com/dbt-labs/dbt-core/issues/11610))

### Features

- Add new hard_deletes="new_record" mode for snapshots. ([#10235](https://github.com/dbt-labs/dbt-core/issues/10235))
- Add `batch` context object to model jinja context ([#11025](https://github.com/dbt-labs/dbt-core/issues/11025))
- Ensure pre/post hooks only run on first/last batch respectively for microbatch model batches ([#11094](https://github.com/dbt-labs/dbt-core/issues/11094), [#11104](https://github.com/dbt-labs/dbt-core/issues/11104))
- Support "tags" in Saved Queries ([#11155](https://github.com/dbt-labs/dbt-core/issues/11155))
- Calculate source freshness via a SQL query ([#8797](https://github.com/dbt-labs/dbt-core/issues/8797))
- Add freshness definition on model for adaptive job ([#11123](https://github.com/dbt-labs/dbt-core/issues/11123))
- Meta config for dimensions, measures, and entities ([#None](https://github.com/dbt-labs/dbt-core/issues/None))
- Add doc_blocks to manifest for nodes and columns ([#11000](https://github.com/dbt-labs/dbt-core/issues/11000), [#11001](https://github.com/dbt-labs/dbt-core/issues/11001))
- Initial implementation of sample mode ([#11227](https://github.com/dbt-labs/dbt-core/issues/11227), [#11230](https://github.com/dbt-labs/dbt-core/issues/11230), [#11231](https://github.com/dbt-labs/dbt-core/issues/11231), [#11248](https://github.com/dbt-labs/dbt-core/issues/11248), [#11252](https://github.com/dbt-labs/dbt-core/issues/11252), [#11254](https://github.com/dbt-labs/dbt-core/issues/11254), [#11258](https://github.com/dbt-labs/dbt-core/issues/11258))
- Combine `--sample` and `--sample-window` CLI params ([#11299](https://github.com/dbt-labs/dbt-core/issues/11299))
- Allow for sampling of ref'd seeds ([#11300](https://github.com/dbt-labs/dbt-core/issues/11300))
- Enable sample mode for 'build' command ([#11298](https://github.com/dbt-labs/dbt-core/issues/11298))
- Allow sampling nodes snapshots depend on and of snapshots as a dependency ([#11301](https://github.com/dbt-labs/dbt-core/issues/11301))
- Add opt-in validation of macro argument names and types ([#11274](https://github.com/dbt-labs/dbt-core/issues/11274))
- Add support for Python 3.13! ([#11401](https://github.com/dbt-labs/dbt-core/issues/11401))
- Support artifact upload to dbt Cloud ([#11418](https://github.com/dbt-labs/dbt-core/issues/11418))
- Show summaries for deprecations and add ability to toggle seeing all deprecation violation instances ([#11429](https://github.com/dbt-labs/dbt-core/issues/11429))
- Add behavior flag for handling all warnings via warn_error logic ([#11116](https://github.com/dbt-labs/dbt-core/issues/11116))
- Basic jsonschema validation of `dbt_project.yml` ([#11503](https://github.com/dbt-labs/dbt-core/issues/11503))
- Begin checking YAML files for duplicate keys ([#11296](https://github.com/dbt-labs/dbt-core/issues/11296))
- Add deprecation warnings for unexpected blocks in jinja. ([#11393](https://github.com/dbt-labs/dbt-core/issues/11393))
- Begin validating the jsonschema of resource YAML files ([#11504](https://github.com/dbt-labs/dbt-core/issues/11504))
- Add deprecation warning for custom top level keys in YAML files. ([#11338](https://github.com/dbt-labs/dbt-core/issues/11338))
- Begin emitting deprecation warnings for custom keys in config blocks ([#11337](https://github.com/dbt-labs/dbt-core/issues/11337))
- Begin emitting deprecation events for custom properties found in objects ([#11336](https://github.com/dbt-labs/dbt-core/issues/11336))
- Create a singular deprecations summary event ([#11536](https://github.com/dbt-labs/dbt-core/issues/11536))
- Deprecate --output/-o usage in source freshness ([#11559](https://github.com/dbt-labs/dbt-core/issues/11559))
- Deprecate usage of `include`/`exclude` terminology with warn-error-options ([#11557](https://github.com/dbt-labs/dbt-core/issues/11557))
- Support description and config.meta on groups ([#11319](https://github.com/dbt-labs/dbt-core/issues/11319))
- Add Quoting to manifest metadata ([#na](https://github.com/dbt-labs/dbt-core/issues/na))
- Include event names in deprecation warning messages ([#11471](https://github.com/dbt-labs/dbt-core/issues/11471))
- Support config on columns ([#11651](https://github.com/dbt-labs/dbt-core/issues/11651))
- Add file_format to catalog integration config ([#11695](https://github.com/dbt-labs/dbt-core/issues/11695))
- Deprecate `--models`, `--model`, and `-m` flags ([#11561](https://github.com/dbt-labs/dbt-core/issues/11561))

### Fixes

- datetime.datetime.utcnow() is deprecated as of Python 3.12 ([#9791](https://github.com/dbt-labs/dbt-core/issues/9791))
- dbt retry does not respect --threads ([#10584](https://github.com/dbt-labs/dbt-core/issues/10584))
- update adapter version messages ([#10230](https://github.com/dbt-labs/dbt-core/issues/10230))
- Catch DbtRuntimeError for hooks ([#11012](https://github.com/dbt-labs/dbt-core/issues/11012))
- Access DBUG flag more consistently with the rest of the codebase in ManifestLoader ([#11068](https://github.com/dbt-labs/dbt-core/issues/11068))
- Improve the performance characteristics of add_test_edges() ([#10950](https://github.com/dbt-labs/dbt-core/issues/10950))
- Implement partial parsing for singular data test configs in yaml files ([#10801](https://github.com/dbt-labs/dbt-core/issues/10801))
- Fix debug log messages for microbatch batch execution information ([#11111](https://github.com/dbt-labs/dbt-core/issues/11111))
- Fix running of extra "last" batch when there is only one batch ([#11112](https://github.com/dbt-labs/dbt-core/issues/11112))
- Fix interpretation of `PartialSuccess` to result in non-zero exit code ([#11114](https://github.com/dbt-labs/dbt-core/issues/11114))
- Warn about invalid usages of `concurrent_batches` config ([#11122](https://github.com/dbt-labs/dbt-core/issues/11122))
- Error writing generic test at run time ([#11110](https://github.com/dbt-labs/dbt-core/issues/11110))
- Run check_modified_contract for state:modified ([#11034](https://github.com/dbt-labs/dbt-core/issues/11034))
- Fix unrendered_config for tests from dbt_project.yml ([#11146](https://github.com/dbt-labs/dbt-core/issues/11146))
- Make partial parsing reparse referencing nodes of newly versioned models. ([#8872](https://github.com/dbt-labs/dbt-core/issues/8872))
- Ensure warning about microbatch lacking filter inputs is always fired ([#11159](https://github.com/dbt-labs/dbt-core/issues/11159))
- Fix microbatch dbt list --output json ([#10556](https://github.com/dbt-labs/dbt-core/issues/10556), [#11098](https://github.com/dbt-labs/dbt-core/issues/11098))
- Fix for custom fields in generic test config for not_null and unique tests ([#11208](https://github.com/dbt-labs/dbt-core/issues/11208))
- Allow copying asset when dbt docs command is run outside the dbt project ([#9308](https://github.com/dbt-labs/dbt-core/issues/9308))
- Loosen validation on freshness to accommodate previously wrong but harmless config. ([#11123](https://github.com/dbt-labs/dbt-core/issues/11123))
- Handle `--limit -1` properly in `ShowTaskDirect` so that it propagates None instead of a negative int ([#None](https://github.com/dbt-labs/dbt-core/issues/None))
- _get_doc_blocks is crashing parsing if .format is called ([#11310](https://github.com/dbt-labs/dbt-core/issues/11310))
- Fix microbatch execution to not block main thread nor hang ([#11243](https://github.com/dbt-labs/dbt-core/issues/11243), [#11306](https://github.com/dbt-labs/dbt-core/issues/11306))
- Fixes parsing errors when using the new YAML format for snapshots ([#11164](https://github.com/dbt-labs/dbt-core/issues/11164))
- Update ConfigFolderDirectory dir to use str. ([#9768](https://github.com/dbt-labs/dbt-core/issues/9768), [#11305](https://github.com/dbt-labs/dbt-core/issues/11305))
- Fix microbatch models counting as success when only having one batch (and that batch failing) ([#11390](https://github.com/dbt-labs/dbt-core/issues/11390))
- Add pre-commit installation to Docker container for testing compatibility ([#11498](https://github.com/dbt-labs/dbt-core/issues/11498))
- Fix duplicate macro error message with multiple macros and multiple patches ([#4233](https://github.com/dbt-labs/dbt-core/issues/4233))
- Fix seed path for partial parsing if project directory name changes ([#11550](https://github.com/dbt-labs/dbt-core/issues/11550))
- Add `pre-commit` installation to Docker container for testing compatibility ([#11498](https://github.com/dbt-labs/dbt-core/issues/11498))
- Ensure the right key is associated with the `CustomKeyInConfigDeprecation` deprecation ([#11576](https://github.com/dbt-labs/dbt-core/issues/11576))
- Add tags and meta config to exposures ([#11428](https://github.com/dbt-labs/dbt-core/issues/11428))
- Add freshness config to sources ([#11506](https://github.com/dbt-labs/dbt-core/issues/11506))
- Add freshness config to models ([#11506](https://github.com/dbt-labs/dbt-core/issues/11506))
- require count and period on model freshness.build_after ([#11669](https://github.com/dbt-labs/dbt-core/issues/11669))
- Don't warn for metricflow_time_spine with non-day grain ([#11690](https://github.com/dbt-labs/dbt-core/issues/11690))
- Fix source freshness set via config to handle explicit nulls ([#11685](https://github.com/dbt-labs/dbt-core/issues/11685))
- Ensure build_after is present in model freshness in parsing, otherwise skip freshness definition ([#11709](https://github.com/dbt-labs/dbt-core/issues/11709))
- Ensure source node `.freshness` is equal to node's `.config.freshness` ([#11717](https://github.com/dbt-labs/dbt-core/issues/11717))
- ignore invalid model freshness configs in inline model configs ([#11728](https://github.com/dbt-labs/dbt-core/issues/11728))
- Fix store_failures hierarchical config parsing ([#10165](https://github.com/dbt-labs/dbt-core/issues/10165))
- Remove model freshness property support in favor of config level support ([#11713](https://github.com/dbt-labs/dbt-core/issues/11713))

### Under the Hood

- Create a no-op exposure runner ([#](https://github.com/dbt-labs/dbt-core/issues/), [#](https://github.com/dbt-labs/dbt-core/issues/))
- Improve selection performance by optimizing the select_children() and select_parents() functions. ([#11099](https://github.com/dbt-labs/dbt-core/issues/11099))
- Change exception type from DbtInternalException to UndefinedMacroError when macro not found in 'run operation' command ([#11192](https://github.com/dbt-labs/dbt-core/issues/11192))
- Create LogNodeResult event ([#](https://github.com/dbt-labs/dbt-core/issues/), [#](https://github.com/dbt-labs/dbt-core/issues/))
- Fix error counts for exposures ([#](https://github.com/dbt-labs/dbt-core/issues/), [#](https://github.com/dbt-labs/dbt-core/issues/))
- Misc fixes for group info in logging ([#11218](https://github.com/dbt-labs/dbt-core/issues/11218))
- Add node_checksum to node_info on structured logs ([#11372](https://github.com/dbt-labs/dbt-core/issues/11372))
- Parse catalogs.yml ([#XPLAT-242](https://github.com/dbt-labs/dbt-core/issues/XPLAT-242))
- Add package 'name' to lock file ([#11487](https://github.com/dbt-labs/dbt-core/issues/11487))
- Allow for deprecation previews ([#11597](https://github.com/dbt-labs/dbt-core/issues/11597))
- Move core_types.proto into shared dbt-protos library ([#11608](https://github.com/dbt-labs/dbt-core/issues/11608))
- Prevent overcounting PropertyMovedToConfigDeprecation for source freshness ([#11660](https://github.com/dbt-labs/dbt-core/issues/11660))

### Dependencies

- Upgrading dbt-semantic-interfaces to 0.8.3 for custom grain support in offset windows ([#None](https://github.com/dbt-labs/dbt-core/issues/None))
- Bump codecov/codecov-action from 4 to 5 ([#11009](https://github.com/dbt-labs/dbt-core/issues/11009))

### Contributors

- [@DevonFulcher](https://github.com/DevonFulcher) ([#None](https://github.com/dbt-labs/dbt-core/issues/None))
- [@Threynaud](https://github.com/Threynaud) ([#11068](https://github.com/dbt-labs/dbt-core/issues/11068))
- [@WilliamDee](https://github.com/WilliamDee) ([#None](https://github.com/dbt-labs/dbt-core/issues/None), [#None](https://github.com/dbt-labs/dbt-core/issues/None))
- [@amardatar](https://github.com/amardatar) ([#11164](https://github.com/dbt-labs/dbt-core/issues/11164))
- [@aranke](https://github.com/aranke) ([#11000](https://github.com/dbt-labs/dbt-core/issues/11000), [#11001](https://github.com/dbt-labs/dbt-core/issues/11001), [#11012](https://github.com/dbt-labs/dbt-core/issues/11012), [#11310](https://github.com/dbt-labs/dbt-core/issues/11310), [#11550](https://github.com/dbt-labs/dbt-core/issues/11550), [#11428](https://github.com/dbt-labs/dbt-core/issues/11428), [#11506](https://github.com/dbt-labs/dbt-core/issues/11506), [#11506](https://github.com/dbt-labs/dbt-core/issues/11506), [#](https://github.com/dbt-labs/dbt-core/issues/), [#](https://github.com/dbt-labs/dbt-core/issues/), [#](https://github.com/dbt-labs/dbt-core/issues/), [#](https://github.com/dbt-labs/dbt-core/issues/), [#](https://github.com/dbt-labs/dbt-core/issues/), [#](https://github.com/dbt-labs/dbt-core/issues/), [#11218](https://github.com/dbt-labs/dbt-core/issues/11218), [#XPLAT-242](https://github.com/dbt-labs/dbt-core/issues/XPLAT-242), [#11660](https://github.com/dbt-labs/dbt-core/issues/11660))
- [@cedric-orange](https://github.com/cedric-orange) ([#9308](https://github.com/dbt-labs/dbt-core/issues/9308))
- [@cmcarthur](https://github.com/cmcarthur) ([#11608](https://github.com/dbt-labs/dbt-core/issues/11608))
- [@courtneyholcomb](https://github.com/courtneyholcomb) ([#11690](https://github.com/dbt-labs/dbt-core/issues/11690))
- [@d-cole](https://github.com/d-cole) ([#8872](https://github.com/dbt-labs/dbt-core/issues/8872))
- [@dave-connors-3](https://github.com/dave-connors-3) ([#10230](https://github.com/dbt-labs/dbt-core/issues/10230))
- [@donjin-master](https://github.com/donjin-master) ([#10584](https://github.com/dbt-labs/dbt-core/issues/10584))
- [@internetcoffeephone](https://github.com/internetcoffeephone) ([#10556](https://github.com/dbt-labs/dbt-core/issues/10556), [#11098](https://github.com/dbt-labs/dbt-core/issues/11098))
- [@kato1208](https://github.com/kato1208) ([#11498](https://github.com/dbt-labs/dbt-core/issues/11498), [#11498](https://github.com/dbt-labs/dbt-core/issues/11498))
- [@slothkong](https://github.com/slothkong) ([#9791](https://github.com/dbt-labs/dbt-core/issues/9791))
- [@theyostalservice](https://github.com/theyostalservice) ([#11155](https://github.com/dbt-labs/dbt-core/issues/11155))
- [@thorn14](https://github.com/thorn14) ([#9768](https://github.com/dbt-labs/dbt-core/issues/9768), [#11305](https://github.com/dbt-labs/dbt-core/issues/11305))
- [@venkaa28](https://github.com/venkaa28) ([#na](https://github.com/dbt-labs/dbt-core/issues/na))
5 .changes/1.10.1.md Normal file
@@ -0,0 +1,5 @@
## dbt-core 1.10.1 - June 16, 2025

### Dependencies

- Bump minimum jsonschema version to `4.19.1` ([#11740](https://github.com/dbt-labs/dbt-core/issues/11740))
9 .changes/1.10.2.md Normal file
@@ -0,0 +1,9 @@
## dbt-core 1.10.2 - June 20, 2025

### Features

- Update jsonschemas with builtin data test properties and exposure configs in dbt_project.yml for more accurate deprecations ([#11335](https://github.com/dbt-labs/dbt-core/issues/11335))

### Dependencies

- Allow for either pydantic v1 or v2 ([#11634](https://github.com/dbt-labs/dbt-core/issues/11634))
@@ -1,6 +0,0 @@
kind: Dependencies
body: Use EventCatcher from dbt-common instead of maintaining a local copy
time: 2025-11-18T15:53:54.284561+05:30
custom:
  Author: 3loka
  Issue: "12124"

@@ -1,6 +0,0 @@
kind: Features
body: Support partial parsing for function nodes
time: 2025-10-06T14:03:52.258104-05:00
custom:
  Author: QMalcolm
  Issue: "12072"

@@ -1,6 +0,0 @@
kind: Features
body: Allow for defining function arguments with default values
time: 2025-11-17T14:10:53.860178-06:00
custom:
  Author: QMalcolm
  Issue: "12044"

@@ -1,6 +0,0 @@
kind: Features
body: Raise jsonschema-based deprecation warnings by default
time: 2025-12-01T16:52:09.354436-05:00
custom:
  Author: michelleark
  Issue: 12240

@@ -1,6 +0,0 @@
kind: Features
body: ':bug: :snowman: Disable unit tests whose model is disabled'
time: 2025-12-03T12:29:26.209248-05:00
custom:
  Author: michelleark
  Issue: "10540"

@@ -1,6 +0,0 @@
kind: Features
body: Implement config.meta_get and config.meta_require
time: 2025-12-10T20:20:01.354288-05:00
custom:
  Author: gshank
  Issue: "12012"

@@ -1,6 +0,0 @@
kind: Fixes
body: Address Click 8.2+ deprecation warning
time: 2025-09-22T15:17:26.983151-06:00
custom:
  Author: edgarrmondragon
  Issue: "12038"

@@ -1,6 +0,0 @@
kind: Fixes
body: Include macros in unit test parsing
time: 2025-11-17T14:06:49.518566-05:00
custom:
  Author: michelleark nathanskone
  Issue: "10157"

@@ -1,6 +0,0 @@
kind: Fixes
body: Allow dbt deps to run when vars lack defaults in dbt_project.yml
time: 2025-11-17T18:50:25.759091+05:30
custom:
  Author: 3loka
  Issue: "8913"

@@ -1,6 +0,0 @@
kind: Fixes
body: Restore DuplicateResourceNameError for intra-project node name duplication, behind behavior flag `require_unique_project_resource_names`
time: 2025-11-18T17:11:06.454784-05:00
custom:
  Author: michelleark
  Issue: "12152"

@@ -1,6 +0,0 @@
kind: Fixes
body: Allow the usage of `function` with `--exclude-resource-type` flag
time: 2025-11-19T19:50:34.703236-06:00
custom:
  Author: QMalcolm
  Issue: "12143"

@@ -1,6 +0,0 @@
kind: Fixes
body: Fix bug where schemas of functions weren't guaranteed to exist
time: 2025-11-24T15:56:29.467004-06:00
custom:
  Author: QMalcolm
  Issue: "12142"

@@ -1,6 +0,0 @@
kind: Fixes
body: Fix generation of deprecations summary
time: 2025-11-24T15:57:56.544123-08:00
custom:
  Author: asiunov
  Issue: "12146"

@@ -1,6 +0,0 @@
kind: Fixes
body: ':bug: :snowman: Correctly reference foreign key references when --defer and --state provided'
time: 2025-11-24T17:08:55.387946-05:00
custom:
  Author: michellark
  Issue: "11885"

@@ -1,7 +0,0 @@
kind: Fixes
body: ':bug: :snowman: Add exception when using --state and referring to a removed
  test'
time: 2025-11-25T12:02:46.635026-05:00
custom:
  Author: emmyoop
  Issue: "10630"

@@ -1,6 +0,0 @@
kind: Fixes
body: ':bug: :snowman: Stop emitting `NoNodesForSelectionCriteria` three times during `build` command'
time: 2025-11-25T12:20:20.132379-06:00
custom:
  Author: QMalcolm
  Issue: "11627"

@@ -1,6 +0,0 @@
kind: Fixes
body: ":bug: :snowman: Fix long Python stack traces appearing when package dependencies have incompatible version requirements"
time: 2025-11-27T14:13:08.082542-05:00
custom:
  Author: emmyoop
  Issue: "12049"

@@ -1,7 +0,0 @@
kind: Fixes
body: ':bug: :snowman: Fixed issue where changing data type size/precision/scale (e.g.,
  varchar(3) to varchar(10)) incorrectly triggered a breaking change error fo'
time: 2025-11-27T14:59:29.256274-05:00
custom:
  Author: emmyoop
  Issue: "11186"

@@ -1,6 +0,0 @@
kind: Fixes
body: ':bug: :snowman: Support unit testing models that depend on sources with the same name'
time: 2025-11-27T17:01:24.193516-05:00
custom:
  Author: michelleark
  Issue: 11975 10433

@@ -1,6 +0,0 @@
kind: Fixes
body: Fix bug in partial parsing when updating a model with a schema file that is referenced by a singular test
time: 2025-11-28T10:21:29.911147Z
custom:
  Author: mattogburke
  Issue: "12223"

@@ -1,6 +0,0 @@
kind: Fixes
body: ':bug: :snowman: Avoid retrying successful run-operation commands'
time: 2025-11-28T12:28:38.546261-05:00
custom:
  Author: michelleark
  Issue: "11850"

@@ -1,7 +0,0 @@
kind: Fixes
body: ':bug: :snowman: Fix `dbt deps --add-package` crash when packages.yml contains `warn-unpinned:
  false`'
time: 2025-11-28T16:19:37.608722-05:00
custom:
  Author: emmyoop
  Issue: "9104"

@@ -1,7 +0,0 @@
kind: Fixes
body: ':bug: :snowman: Improve `dbt deps --add-package` duplicate detection with better
  cross-source matching and word boundaries'
time: 2025-11-28T16:31:44.344099-05:00
custom:
  Author: emmyoop
  Issue: "12239"

@@ -1,6 +0,0 @@
kind: Fixes
body: ':bug: :snowman: Fix false positive deprecation warning of pre/post-hook SQL configs'
time: 2025-12-02T13:37:05.012112-05:00
custom:
  Author: michelleark
  Issue: "12244"

@@ -1,6 +0,0 @@
kind: Fixes
body: Ensure recent deprecation warnings include event name in message
time: 2025-12-09T17:50:31.334618-06:00
custom:
  Author: QMalcolm
  Issue: "12264"

@@ -1,6 +0,0 @@
kind: Fixes
body: Improve error message clarity when detecting nodes with space in name
time: 2025-12-10T14:39:35.107841-08:00
custom:
  Author: michelleark
  Issue: "11835"

@@ -1,6 +0,0 @@
kind: Under the Hood
body: Update jsonschemas for schema.yml and dbt_project.yml deprecations
time: 2025-11-19T11:01:10.616676-05:00
custom:
  Author: michelleark
  Issue: "12180"

@@ -1,6 +0,0 @@
kind: Under the Hood
body: Replace setuptools and tox with hatch for build, test, and environment management.
time: 2025-11-21T14:05:15.838252-05:00
custom:
  Author: emmyoop
  Issue: "12151"

@@ -1,6 +0,0 @@
kind: Under the Hood
body: Add add_catalog_integration call even if we have a pre-existing manifest
time: 2025-12-09T13:18:57.043254-08:00
custom:
  Author: colin-rogers-dbt
  Issue: "12262"
@@ -41,26 +41,32 @@ newlines:
endOfVersion: 1

custom:
- key: Author
- key: Author
label: GitHub Username(s) (separated by a single space if multiple)
type: string
minLength: 3
- key: Issue
- key: Issue
label: GitHub Issue Number (separated by a single space if multiple)
type: string
minLength: 1

footerFormat: |
{{- $contributorDict := dict }}
{{- /* ensure we always skip snyk and dependabot */}}
{{- $bots := list "dependabot[bot]" "snyk-bot"}}
{{- /* ensure all names in this list are all lowercase for later matching purposes */}}
{{- $core_team := splitList " " .Env.CORE_TEAM }}
{{- /* ensure we always skip snyk and dependabot in addition to the core team */}}
{{- $maintainers := list "dependabot[bot]" "snyk-bot"}}
{{- range $team_member := $core_team }}
{{- $team_member_lower := lower $team_member }}
{{- $maintainers = append $maintainers $team_member_lower }}
{{- end }}
{{- range $change := .Changes }}
{{- $authorList := splitList " " $change.Custom.Author }}
{{- /* loop through all authors for a single changelog */}}
{{- range $author := $authorList }}
{{- $authorLower := lower $author }}
{{- /* we only want to include non-bot contributors */}}
{{- if not (has $authorLower $bots)}}
{{- /* we only want to include non-core team contributors */}}
{{- if not (has $authorLower $maintainers)}}
{{- $changeList := splitList " " $change.Custom.Author }}
{{- $IssueList := list }}
{{- $changeLink := $change.Kind }}
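In plain terms, this changie template collects the non-bot, non-core-team authors of each change before rendering the Contributors section seen in the changelogs above. A rough Python equivalent of just the filtering step (a sketch only; the space-separated `CORE_TEAM` environment variable and the space-separated `Author` field are taken from the template above):

```python
import os

# The template's skip lists: bots are always excluded, and the core team
# (from the CORE_TEAM env var) is folded into the maintainers list using
# lowercase matching, as the template comments note.
BOTS = ["dependabot[bot]", "snyk-bot"]


def external_contributors(changes: list[dict]) -> list[str]:
    """Collect authors that are neither bots nor core-team maintainers."""
    maintainers = BOTS + [m.lower() for m in os.environ.get("CORE_TEAM", "").split()]
    contributors = []
    for change in changes:
        # A change's Author field may hold several space-separated usernames.
        for author in change["Custom"]["Author"].split():
            if author.lower() not in maintainers:
                contributors.append(author)
    return contributors


# Example: only the community author survives the filter.
os.environ["CORE_TEAM"] = "QMalcolm"  # hypothetical core-team roster
changes = [
    {"Custom": {"Author": "dependabot[bot]"}},
    {"Custom": {"Author": "slothkong QMalcolm"}},
]
print(external_contributors(changes))  # ['slothkong']
```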
1 .flake8
@@ -10,5 +10,6 @@ ignore =
    E704 # makes Flake8 work like black
    E741
    E501 # long line checking is done in black
exclude = test/
per-file-ignores =
    */__init__.py: F401
2 .github/ISSUE_TEMPLATE/bug-report.yml vendored
@@ -62,7 +62,7 @@ body:
description: |
examples:
- **OS**: Ubuntu 24.04
- **Python**: 3.10.12 (`python3 --version`)
- **Python**: 3.9.12 (`python3 --version`)
- **dbt-core**: 1.1.1 (`dbt --version`)
value: |
- OS:
2 .github/ISSUE_TEMPLATE/regression-report.yml vendored
@@ -56,7 +56,7 @@ body:
description: |
examples:
- **OS**: Ubuntu 24.04
- **Python**: 3.10.12 (`python3 --version`)
- **Python**: 3.9.12 (`python3 --version`)
- **dbt-core (working version)**: 1.1.1 (`dbt --version`)
- **dbt-core (regression version)**: 1.2.0 (`dbt --version`)
value: |
5 .github/actions/latest-wrangler/main.py vendored
@@ -1,10 +1,9 @@
import os
from packaging.version import Version, parse
import requests
import sys
from typing import List

import requests
from packaging.version import Version, parse


def main():
    package_name: str = os.environ["INPUT_PACKAGE_NAME"]
@@ -1 +1 @@
../../../scripts/setup_db.sh
../../../test/setup_db.sh
169 .github/dbt-postgres-testing.yml vendored
@@ -1,169 +0,0 @@
# **what?**
# Runs all tests in dbt-postgres with this branch of dbt-core to ensure nothing is broken

# **why?**
# Ensure dbt-core changes do not break dbt-postgres, as a basic proxy for other adapters

# **when?**
# This will run when trying to merge a PR into main.
# It can also be manually triggered.

# This workflow can be skipped by adding the "Skip Postgres Testing" label to the PR. This is
# useful when making a change in both `dbt-postgres` and `dbt-core` where the changes are dependent
# and cause the other repository to break.

name: "dbt-postgres Tests"
run-name: >-
  ${{ (github.event_name == 'workflow_dispatch' || github.event_name == 'workflow_call')
  && format('dbt-postgres@{0} with dbt-core@{1}', inputs.dbt-postgres-ref, inputs.dbt-core-ref)
  || 'dbt-postgres@main with dbt-core branch' }}

on:
  push:
    branches:
      - "main"
      - "*.latest"
      - "releases/*"
  pull_request:
  merge_group:
    types: [checks_requested]
  workflow_dispatch:
    inputs:
      dbt-postgres-ref:
        description: "The branch of dbt-postgres to test against"
        default: "main"
      dbt-core-ref:
        description: "The branch of dbt-core to test against"
        default: "main"
  workflow_call:
    inputs:
      dbt-postgres-ref:
        description: "The branch of dbt-postgres to test against"
        type: string
        required: true
        default: "main"
      dbt-core-ref:
        description: "The branch of dbt-core to test against"
        type: string
        required: true
        default: "main"

permissions: read-all

# will cancel previous workflows triggered by the same event
# and for the same ref for PRs/merges or same SHA otherwise
# and for the same inputs on workflow_dispatch or workflow_call
concurrency:
  group: ${{ github.workflow }}-${{ github.event_name }}-${{ contains(fromJson('["pull_request", "merge_group"]'), github.event_name) && github.event.pull_request.head.ref || github.sha }}-${{ contains(fromJson('["workflow_call", "workflow_dispatch"]'), github.event_name) && github.event.inputs.dbt-postgres-ref && github.event.inputs.dbt-core-ref || github.sha }}
  cancel-in-progress: true

defaults:
  run:
    shell: bash

jobs:
  job-prep:
    # This allows us to run the workflow on pull_requests as well so we can always run unit tests
    # and only run integration tests on merge for time purposes
    name: Setup Repo Refs
    runs-on: ubuntu-latest
    outputs:
      dbt-postgres-ref: ${{ steps.core-ref.outputs.ref }}
      dbt-core-ref: ${{ steps.common-ref.outputs.ref }}

    steps:
      - name: "Input Refs"
        id: job-inputs
        run: |
          echo "inputs.dbt-postgres-ref=${{ inputs.dbt-postgres-ref }}"
          echo "inputs.dbt-core-ref=${{ inputs.dbt-core-ref }}"

      - name: "Determine dbt-postgres ref"
        id: core-ref
        run: |
          if [[ -z "${{ inputs.dbt-postgres-ref }}" ]]; then
            REF="main"
          else
            REF=${{ inputs.dbt-postgres-ref }}
          fi
          echo "ref=$REF" >> $GITHUB_OUTPUT

      - name: "Determine dbt-core ref"
        id: common-ref
        run: |
          if [[ -z "${{ inputs.dbt-core-ref }}" ]]; then
            # these will be commits instead of branches
            if [[ "${{ github.event_name }}" == "merge_group" ]]; then
              REF=${{ github.event.merge_group.head_sha }}
            else
              REF=${{ github.event.pull_request.base.sha }}
            fi
          else
            REF=${{ inputs.dbt-core-ref }}
          fi
          echo "ref=$REF" >> $GITHUB_OUTPUT

      - name: "Final Refs"
        run: |
          echo "dbt-postgres-ref=${{ steps.core-ref.outputs.ref }}"
          echo "dbt-core-ref=${{ steps.common-ref.outputs.ref }}"

  integration-tests-postgres:
    name: "dbt-postgres integration tests"
    needs: [job-prep]
    runs-on: ubuntu-latest
    defaults:
      run:
        working-directory: "./dbt-postgres"
    environment:
      name: "dbt-postgres"
    env:
      POSTGRES_TEST_HOST: ${{ vars.POSTGRES_TEST_HOST }}
      POSTGRES_TEST_PORT: ${{ vars.POSTGRES_TEST_PORT }}
      POSTGRES_TEST_USER: ${{ vars.POSTGRES_TEST_USER }}
      POSTGRES_TEST_PASS: ${{ secrets.POSTGRES_TEST_PASS }}
      POSTGRES_TEST_DATABASE: ${{ vars.POSTGRES_TEST_DATABASE }}
      POSTGRES_TEST_THREADS: ${{ vars.POSTGRES_TEST_THREADS }}
    services:
      postgres:
        image: postgres
        env:
          POSTGRES_PASSWORD: postgres
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
        ports:
          - ${{ vars.POSTGRES_TEST_PORT }}:5432
    steps:
      - name: "Check out dbt-adapters@${{ needs.job-prep.outputs.dbt-postgres-ref }}"
        uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # actions/checkout@v4
        with:
          repository: dbt-labs/dbt-adapters
          ref: ${{ needs.job-prep.outputs.dbt-postgres-ref }}

      - name: "Set up Python"
        uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # actions/setup-python@v5
        with:
          python-version: ${{ inputs.python-version }}

      - name: "Set environment variables"
        run: |
          echo "HATCH_PYTHON=${{ inputs.python-version }}" >> $GITHUB_ENV
          echo "PIP_ONLY_BINARY=psycopg2-binary" >> $GITHUB_ENV

      - name: "Setup test database"
        run: psql -f ./scripts/setup_test_database.sql
        env:
          PGHOST: ${{ vars.POSTGRES_TEST_HOST }}
          PGPORT: ${{ vars.POSTGRES_TEST_PORT }}
          PGUSER: postgres
          PGPASSWORD: postgres
          PGDATABASE: postgres

      - name: "Install hatch"
        uses: pypa/hatch@257e27e51a6a5616ed08a39a408a21c35c9931bc # pypa/hatch@install

      - name: "Run integration tests"
        run: hatch run ${{ inputs.hatch-env }}:integration-tests
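The `concurrency` block above is the densest part of this file: it builds one group key out of the workflow name, the event name, a ref-or-SHA part, and an inputs-or-SHA part, so re-runs cancel each other only when they are the same work. A Python paraphrase of the key construction (a sketch; GitHub Actions evaluates the expression natively, and its `&&`/`||` chains behave like the conditionals below):

```python
def concurrency_group(
    workflow: str,
    event_name: str,
    head_ref: str | None,  # PR head branch, when the event is a PR or merge group
    sha: str,              # commit SHA fallback
    dbt_postgres_ref: str | None = None,  # workflow_dispatch / workflow_call inputs
    dbt_core_ref: str | None = None,
) -> str:
    # PRs and merge groups share a slot per head ref; everything else is per-SHA.
    ref_part = head_ref if event_name in ("pull_request", "merge_group") else sha
    # Dispatched/called runs additionally key on their inputs, mirroring the
    # `inputs.dbt-postgres-ref && inputs.dbt-core-ref || github.sha` chain
    # (note that chain yields the *core* ref when both inputs are set).
    if event_name in ("workflow_call", "workflow_dispatch") and dbt_postgres_ref and dbt_core_ref:
        input_part = dbt_core_ref
    else:
        input_part = sha
    return f"{workflow}-{event_name}-{ref_part}-{input_part}"


# Two pushes to the same PR branch land in the same group, so the older run is cancelled:
print(concurrency_group("dbt-postgres Tests", "pull_request", "feature/x", "abc123"))
print(concurrency_group("dbt-postgres Tests", "workflow_dispatch", None, "abc123",
                        dbt_postgres_ref="main", dbt_core_ref="1.10.latest"))
```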
29 .github/workflows/artifact-reviews.yml vendored
@@ -34,7 +34,7 @@ env:
jobs:
check-reviews:
name: "Validate Additional Reviews"
runs-on: ubuntu-latest
runs-on: ${{ vars.UBUNTU_LATEST }}
steps:
- name: "Get list of changed files"
id: changed_files
@@ -114,7 +114,7 @@ jobs:

- name: "Find Comment"
if: steps.artifact_files_changed.outputs.artifact_changes == 'true' && steps.check_approvals.outputs.CORE_APPROVALS < env.required_approvals
uses: peter-evans/find-comment@a54c31d7fa095754bfef525c0c8e5e5674c4b4b1 # peter-evans/find-comment@v2
uses: peter-evans/find-comment@v2
id: find-comment
with:
issue-number: ${{ github.event.pull_request.number }}
@@ -123,7 +123,7 @@ jobs:

- name: "Create Comment"
if: steps.artifact_files_changed.outputs.artifact_changes == 'true' && steps.find-comment.outputs.comment-id == '' && steps.check_approvals.outputs.CORE_APPROVALS < env.required_approvals
uses: peter-evans/create-or-update-comment@23ff15729ef2fc348714a3bb66d2f655ca9066f2 # peter-evans/create-or-update-comment@v3
uses: peter-evans/create-or-update-comment@v3
with:
issue-number: ${{ github.event.pull_request.number }}
body: |
@@ -164,23 +164,16 @@ jobs:
fi

- name: "Post Event"
# This step posts the status of the check because the workflow is triggered by multiple events
# and we need to ensure the check is always updated. Otherwise we would end up with duplicate
# checks in the GitHub UI.
run: |
if [[ "${{ steps.status_check.outputs.current_status }}" == "success" ]]; then
state="success"
else
state="failure"
fi

gh api \
--method POST \
-H "Accept: application/vnd.github+json" \
/repos/${{ github.repository }}/statuses/${{ github.event.pull_request.base.sha }} \
-f state="$state" \
-f description="Artifact Review Check" \
-f context="Artifact Review Check" \
-f target_url="${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
/repos/${{ github.repository }}/check-runs \
-f name='Artifact Review Check' \
-f head_sha=${{ github.event.pull_request_target.head.sha || github.event.pull_request.head.sha }} \
-f status='completed' \
-f conclusion='${{ steps.status_check.outputs.current_status }}' \
-f force=true \
-f details_url="${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
env:
GH_TOKEN: ${{ secrets.FISHTOWN_BOT_PAT }}
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
2 .github/workflows/backport.yml vendored
@@ -35,6 +35,6 @@ jobs:
github.event.pull_request.merged
&& contains(github.event.label.name, 'backport')
steps:
- uses: tibdex/backport@9565281eda0731b1d20c4025c43339fb0a23812e # tibdex/backport@v2.0.4
- uses: tibdex/backport@v2.0.4
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
2 .github/workflows/bot-changelog.yml vendored
@@ -48,7 +48,7 @@ jobs:
- name: Create and commit changelog on bot PR
if: ${{ contains(github.event.pull_request.labels.*.name, matrix.label) }}
id: bot_changelog
uses: emmyoop/changie_bot@22b70618b13d0d1c64ea95212bafca2d2bf6b764 # emmyoop/changie_bot@v1.1.0
uses: emmyoop/changie_bot@v1.1.0
with:
GITHUB_TOKEN: ${{ secrets.FISHTOWN_BOT_PAT }}
commit_author_name: "Github Build Bot"
7 .github/workflows/check-artifact-changes.yml vendored
@@ -4,8 +4,7 @@ on:
pull_request:
types: [ opened, reopened, labeled, unlabeled, synchronize ]
paths-ignore: [ '.changes/**', '.github/**', 'tests/**', '**.md', '**.yml' ]
merge_group:
types: [checks_requested]

workflow_dispatch:

permissions:
@@ -17,13 +16,13 @@ jobs:
if: ${{ !contains(github.event.pull_request.labels.*.name, 'artifact_minor_upgrade') }}
steps:
- name: Checkout code
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # actions/checkout@v4
uses: actions/checkout@v4
with:
fetch-depth: 0

- name: Check for changes in core/dbt/artifacts
# https://github.com/marketplace/actions/paths-changes-filter
uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # dorny/paths-filter@v3
uses: dorny/paths-filter@v3
id: check_artifact_changes
with:
filters: |
13 .github/workflows/community-label.yml vendored
@@ -7,6 +7,7 @@
# **when?**
# When a PR is opened, not in draft or moved from draft to ready for review


name: Label community PRs

on:
@@ -28,15 +29,9 @@ jobs:
# If this PR is opened and not draft, determine if it needs to be labeled
# if the PR is converted out of draft, determine if it needs to be labeled
if: |
(
!contains(github.event.pull_request.labels.*.name, 'community')
&& (
(github.event.action == 'opened' && github.event.pull_request.draft == false)
|| github.event.action == 'ready_for_review'
)
&& github.event.pull_request.user.type != 'Bot'
&& github.event.pull_request.user.login != 'dependabot[bot]'
)
(!contains(github.event.pull_request.labels.*.name, 'community') &&
(github.event.action == 'opened' && github.event.pull_request.draft == false ) ||
github.event.action == 'ready_for_review' )
uses: dbt-labs/actions/.github/workflows/label-community.yml@main
with:
github_team: 'core-group'
388 .github/workflows/cut-release-branch.yml vendored
@@ -1,44 +1,25 @@
# **what?**
# Cuts the `*.latest` branch, bumps dependencies on it, cleans up all files in `.changes/unreleased`
# and `.changes/previous version on main and bumps main to the input version.
# Cuts a new `*.latest` branch
# Also cleans up all files in `.changes/unreleased` and `.changes/previous version on
# `main` and bumps `main` to the input version.

# **why?**
# Clean up the main branch after a release branch is cut and automate cutting the release branch.
# Generally reduces the workload of engineers and reducing error.
# Generally reduces the workload of engineers and reduces error. Allow automation.

# **when?**
# This will run when called manually or when triggered in another workflow.

# Example Usage including required permissions: TODO: update once finalized

# permissions:
#   contents: read
#   pull-requests: write
#
# name: Cut Release Branch
# jobs:
#   changelog:
#     uses: dbt-labs/actions/.github/workflows/cut-release-branch.yml@main
#     with:
#       new_branch_name: 1.7.latest
#       PR_title: "Cleanup main after cutting new 1.7.latest branch"
#       PR_body: "All adapter PRs will fail CI until the dbt-core PR has been merged due to release version conflicts."
#     secrets:
#       FISHTOWN_BOT_PAT: ${{ secrets.FISHTOWN_BOT_PAT }}

# TODOs
# add note to eventually commit changes directly and bypass checks - same as release - when we move to this model run test action after merge
# This will run when called manually.

name: Cut new release branch
run-name: "Cutting New Branch: ${{ inputs.new_branch_name }}"

on:
workflow_dispatch:
inputs:
new_branch_name:
description: "The full name of the new branch (ex. 1.5.latest)"
version_to_bump_main:
description: 'The alpha version main should bump to (ex. 1.6.0a1)'
required: true
new_branch_name:
description: 'The full name of the new branch (ex. 1.5.latest)'
required: true
type: string

defaults:
run:
@@ -46,346 +27,15 @@ defaults:

permissions:
contents: write
pull-requests: write

env:
PYTHON_TARGET_VERSION: "3.10"
PR_TITLE: "Cleanup main after cutting new ${{ inputs.new_branch_name }} branch"
PR_BODY: "All adapter PRs will fail CI until the dbt-core PR has been merged due to release version conflicts."

jobs:
prep_work:
name: "Prep Work"
runs-on: ubuntu-latest
steps:
- name: "[DEBUG] Print Inputs"
run: |
echo "new_branch_name: ${{ inputs.new_branch_name }}"
echo "PR_title: ${{ env.PR_TITLE }}"
echo "PR_body: ${{ env.PR_BODY }}"

create_temp_branch:
name: "Create Temp branch off main"
runs-on: ubuntu-latest
outputs:
temp_branch_name: ${{ steps.variables.outputs.BRANCH_NAME }}

steps:
- name: "Set Branch Value"
id: variables
run: |
echo "BRANCH_NAME=cutting_release_branch/main_cleanup_$GITHUB_RUN_ID" >> $GITHUB_OUTPUT

- name: "Checkout ${{ github.repository }}"
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # actions/checkout@v4
cut_branch:
name: "Cut branch and clean up main for dbt-core"
uses: dbt-labs/actions/.github/workflows/cut-release-branch.yml@main
with:
ref: "main"
token: ${{ secrets.FISHTOWN_BOT_PAT }}

- name: "Create PR Branch"
run: |
user="Github Build Bot"
email="buildbot@fishtownanalytics.com"
git config user.name "$user"
git config user.email "$email"
git checkout -b ${{ steps.variables.outputs.BRANCH_NAME }}
git push --set-upstream origin ${{ steps.variables.outputs.BRANCH_NAME }}

- name: "[Notification] Temp branch created"
run: |
message="Temp branch ${{ steps.variables.outputs.BRANCH_NAME }} created"
echo "::notice title="Temporary branch created": $title::$message"

cleanup_changelog:
name: "Clean Up Changelog"
needs: ["create_temp_branch"]
runs-on: ubuntu-latest
outputs:
next-version: ${{ steps.semver-current.outputs.next-minor-alpha-version }}

steps:
- name: "Checkout ${{ github.repository }}"
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # actions/checkout@v4
with:
ref: ${{ needs.create_temp_branch.outputs.temp_branch_name }}
token: ${{ secrets.FISHTOWN_BOT_PAT }}

- name: "Add Homebrew To PATH"
run: |
echo "/home/linuxbrew/.linuxbrew/bin:/home/linuxbrew/.linuxbrew/sbin" >> $GITHUB_PATH

- name: "Install Homebrew Packages"
run: |
brew install pre-commit
brew tap miniscruff/changie https://github.com/miniscruff/changie
brew install changie

- name: "Check Current Version In Code"
id: determine_version
run: |
current_version=$(grep '^version = ' core/pyproject.toml | sed 's/version = "\(.*\)"/\1/')
echo "current_version=$current_version" >> $GITHUB_OUTPUT

- name: "[Notification] Check Current Version In Code"
run: |
message="The current version is ${{ steps.determine_version.outputs.current_version }}"
echo "::notice title="Version Bump Check": $title::$message"

- name: "Parse Current Version Into Parts for Changelog Directories"
id: semver-current
uses: dbt-labs/actions/parse-semver@main
with:
version: ${{ steps.determine_version.outputs.current_version }}

- name: "[Notification] Next Alpha Version"
run: |
message="The next alpha version is ${{ steps.semver-current.outputs.next-minor-alpha-version }}"
echo "::notice title="Version Bump Check": $title::$message"

- name: "Delete Unreleased Changelog YAMLs"
# removal fails if no files exist. OK to continue since we're just cleaning up the files.
continue-on-error: true
run: |
rm .changes/unreleased/*.yaml || true

- name: "Delete Pre Release Changelogs and YAMLs"
# removal fails if no files exist. OK to continue since we're just cleaning up the files.
continue-on-error: true
run: |
rm .changes/${{ steps.semver-current.outputs.base-version }}/*.yaml || true
rm .changes/${{ steps.semver-current.outputs.major }}.${{ steps.semver-current.outputs.minor }}.*.md || true

- name: "Cleanup CHANGELOG.md"
run: |
changie merge

- name: "Commit Changelog Cleanup to Branch"
run: |
user="Github Build Bot"
email="buildbot@fishtownanalytics.com"
git config user.name "$user"
git config user.email "$email"
git status
git add .
git commit -m "Clean up changelog on main"
git push

- name: "[Notification] Changelog cleaned up"
run: |
message="Changelog on ${{ needs.create_temp_branch.outputs.temp_branch_name }} cleaned up"
echo "::notice title="Changelog cleaned up": $title::$message"

bump_version:
name: "Bump to next minor version"
needs: ["cleanup_changelog", "create_temp_branch"]
runs-on: ubuntu-latest

steps:
- name: "Checkout ${{ github.repository }}"
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # actions/checkout@v4
with:
ref: ${{ needs.create_temp_branch.outputs.temp_branch_name }}
token: ${{ secrets.FISHTOWN_BOT_PAT }}

- name: "Set up Python - ${{ env.PYTHON_TARGET_VERSION }}"
uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # actions/setup-python@v5
with:
python-version: "${{ env.PYTHON_TARGET_VERSION }}"

- name: "Install Spark Dependencies"
if: ${{ contains(github.repository, 'dbt-labs/dbt-spark') }}
run: |
sudo apt-get update
sudo apt-get install libsasl2-dev

- name: "Install Python Dependencies"
run: |
python -m pip install --upgrade pip
python -m pip install hatch

- name: "Bump Version To ${{ needs.cleanup_changelog.outputs.next-version }}"
run: |
cd core
hatch version ${{ needs.cleanup_changelog.outputs.next-version }}
hatch run dev-req
dbt --version

- name: "Commit Version Bump to Branch"
run: |
user="Github Build Bot"
email="buildbot@fishtownanalytics.com"
git config user.name "$user"
git config user.email "$email"
git status
git add .
git commit -m "Bumping version to ${{ needs.cleanup_changelog.outputs.next-version }}"
git push

- name: "[Notification] Version Bump completed"
run: |
message="Version on ${{ needs.create_temp_branch.outputs.temp_branch_name }} bumped to ${{ needs.cleanup_changelog.outputs.next-version }}"
echo "::notice title="Version Bump Completed": $title::$message"

cleanup:
name: "Cleanup Code Quality"
needs: ["create_temp_branch", "bump_version"]
runs-on: ubuntu-latest
steps:
- name: "Checkout ${{ github.repository }}"
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # actions/checkout@v4
with:
ref: ${{ needs.create_temp_branch.outputs.temp_branch_name }}
token: ${{ secrets.FISHTOWN_BOT_PAT }}

- name: "Add Homebrew To PATH"
run: |
echo "/home/linuxbrew/.linuxbrew/bin:/home/linuxbrew/.linuxbrew/sbin" >> $GITHUB_PATH

- name: "brew install pre-commit"
run: |
brew install pre-commit

# this step will fail on whitespace errors but also correct them
- name: "Cleanup - Remove Trailing Whitespace Via Pre-commit"
continue-on-error: true
run: |
pre-commit run trailing-whitespace --files CHANGELOG.md .changes/* || true

# this step will fail on newline errors but also correct them
- name: "Cleanup - Remove Extra Newlines Via Pre-commit"
continue-on-error: true
run: |
pre-commit run end-of-file-fixer --files CHANGELOG.md .changes/* || true

- name: "Commit Version Bump to Branch"
run: |
user="Github Build Bot"
email="buildbot@fishtownanalytics.com"
git config user.name "$user"
git config user.email "$email"
git status
git add .
git commit -m "Code quality cleanup"
git push

open_pr:
name: "Open PR Against main"
needs: ["cleanup_changelog", "create_temp_branch", "cleanup"]
runs-on: ubuntu-latest
outputs:
pr_number: ${{ steps.create_pr.outputs.pull-request-number }}

steps:
- name: "Checkout ${{ github.repository }}"
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # actions/checkout@v4
with:
ref: ${{ needs.create_temp_branch.outputs.temp_branch_name }}
token: ${{ secrets.FISHTOWN_BOT_PAT }}

- name: "Determine PR Title"
id: pr_title
run: |
echo "pr_title=${{ env.PR_TITLE }}" >> $GITHUB_OUTPUT
if [${{ env.PR_TITLE }} == ""]; then
echo "pr_title='Clean up changelogs and bump to version ${{ needs.cleanup_changelog.outputs.next-version }}'" >> $GITHUB_OUTPUT
fi

- name: "Determine PR Body"
id: pr_body
run: |
echo "pr_body=${{ env.PR_BODY }}" >> $GITHUB_OUTPUT
if [${{ env.PR_BODY }} == ""]; then
echo "pr_body='Clean up changelogs and bump to version ${{ needs.cleanup_changelog.outputs.next-version }}'" >> $GITHUB_OUTPUT
fi

- name: "Add Branch Details"
id: pr_body_branch
run: |
branch_details="The workflow that generated this PR also created a new branch: ${{ inputs.new_branch_name }}"
full_body="${{ steps.pr_body.outputs.pr_body }} $branch_details"
echo "pr_full_body=$full_body" >> $GITHUB_OUTPUT

- name: "Open Pull Request"
id: create_pr
run: |
pr_url=$(gh pr create -B main -H ${{ needs.create_temp_branch.outputs.temp_branch_name }} -l "Skip Changelog" -t "${{ steps.pr_title.outputs.pr_title }}" -b "${{ steps.pr_body_branch.outputs.pr_full_body }}")
echo "pr_url=$pr_url" >> $GITHUB_OUTPUT
env:
GH_TOKEN: ${{ secrets.FISHTOWN_BOT_PAT }}

- name: "[Notification] Pull Request Opened"
run: |
message="PR opened at ${{ steps.create_pr.outputs.pr_url }}"
echo "::notice title="Pull Request Opened": $title::$message"

cut_new_branch:
# don't cut the new branch until we're done opening the PR against main
name: "Cut New Branch ${{ inputs.new_branch_name }}"
needs: [open_pr]
runs-on: ubuntu-latest

steps:
- name: "Checkout ${{ github.repository }}"
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # actions/checkout@v4
with:
token: ${{ secrets.FISHTOWN_BOT_PAT }}
fetch-depth: 0

- name: "Ensure New Branch Does Not Exist"
id: check_new_branch
run: |
title="Check New Branch Existence"
if git show-ref --quiet ${{ inputs.new_branch_name }}; then
message="Branch ${{ inputs.new_branch_name }} already exists. Exiting."
echo "::error $title::$message"
exit 1
fi

- name: "Create New Release Branch"
run: |
git checkout -b ${{ inputs.new_branch_name }}

- name: "Push up New Branch"
run: |
#Data for commit
user="Github Build Bot"
email="buildbot@fishtownanalytics.com"
git config user.name "$user"
git config user.email "$email"
git push --set-upstream origin ${{ inputs.new_branch_name }}

- name: "[Notification] New branch created"
run: |
message="New branch ${{ inputs.new_branch_name }} created"
echo "::notice title="New branch created": $title::$message"

- name: "Bump dependencies via script"
# This bumps the dependency on dbt-core in the adapters
if: ${{ !contains(github.repository, 'dbt-core') }}
run: |
echo ${{ github.repository }}
echo "running update_dependencies script"
bash ${GITHUB_WORKSPACE}/.github/scripts/update_dependencies.sh ${{ inputs.new_branch_name }}
commit_message="bumping .latest branch variable in update_dependencies.sh to ${{ inputs.new_branch_name }}"
git status
git add .
git commit -m "$commit_message"
git push

- name: "Bump env variable via script"
# bumps the RELEASE_BRANCH variable in nightly-release.yml in adapters
if: ${{ !contains(github.repository, 'dbt-core') }}
run: |
file="./.github/scripts/update_release_branch.sh"
if test -f "$file"; then
echo ${{ github.repository }}
echo "running some script yet to be written now"
bash $file ${{ inputs.new_branch_name }}
commit_message="updating env variable to ${{ inputs.new_branch_name }} in nightly-release.yml"
git status
git add .
git commit -m "$commit_message"
git push
else
echo "no $file seen skipping step"
fi
version_to_bump_main: ${{ inputs.version_to_bump_main }}
new_branch_name: ${{ inputs.new_branch_name }}
PR_title: "Cleanup main after cutting new ${{ inputs.new_branch_name }} branch"
PR_body: "All adapter PRs will fail CI until the dbt-core PR has been merged due to release version conflicts."
secrets:
FISHTOWN_BOT_PAT: ${{ secrets.FISHTOWN_BOT_PAT }}
149
.github/workflows/main.yml
vendored
@@ -20,8 +20,6 @@ on:
      - "*.latest"
      - "releases/*"
  pull_request:
  merge_group:
    types: [checks_requested]
  workflow_dispatch:

permissions: read-all
@@ -44,53 +42,50 @@ jobs:
  code-quality:
    name: code-quality

    runs-on: ubuntu-latest
    runs-on: ${{ vars.UBUNTU_LATEST }}
    timeout-minutes: 10

    steps:
      - name: Check out the repository
        uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # actions/checkout@v4
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # actions/setup-python@v6
        uses: actions/setup-python@v5
        with:
          python-version: "3.10"
          python-version: '3.9'

      - name: Install python dependencies
        run: |
          python -m pip install --user --upgrade pip
          python -m pip --version
          python -m pip install hatch
          cd core
          hatch run setup

      - name: Verify dbt installation
        run: |
          cd core
          hatch run dbt --version
          make dev
          make dev_req
          mypy --version
          dbt --version

      - name: Run pre-commit hooks
        run: |
          cd core
          hatch run code-quality
        run: pre-commit run --all-files --show-diff-on-failure

  unit:
    name: "unit test / python ${{ matrix.python-version }}"
    name: unit test / python ${{ matrix.python-version }}

    runs-on: ubuntu-latest
    runs-on: ${{ vars.UBUNTU_LATEST }}
    timeout-minutes: 10

    strategy:
      fail-fast: false
      matrix:
        python-version: ["3.10", "3.11", "3.12", "3.13"]
        python-version: [ "3.9", "3.10", "3.11", "3.12", "3.13" ]

    env:
      TOXENV: "unit"

    steps:
      - name: Check out the repository
        uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # actions/checkout@v4
        uses: actions/checkout@v4

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # actions/setup-python@v6
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}

@@ -98,15 +93,15 @@ jobs:
        run: |
          python -m pip install --user --upgrade pip
          python -m pip --version
          python -m pip install hatch
          hatch --version
          python -m pip install tox
          tox --version

      - name: Run unit tests
        uses: nick-fields/retry@ce71cc2ab81d554ebbe88c79ab5975992d79ba08 # nick-fields/retry@v3
        uses: nick-fields/retry@v3
        with:
          timeout_minutes: 10
          max_attempts: 3
          command: cd core && hatch run ci:unit-tests
          command: tox -e unit

      - name: Get current date
        if: always()
@@ -117,15 +112,14 @@ jobs:

      - name: Upload Unit Test Coverage to Codecov
        if: ${{ matrix.python-version == '3.11' }}
        uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # codecov/codecov-action@v5
        uses: codecov/codecov-action@v5
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          flags: unit
          fail_ci_if_error: false

  integration-metadata:
    name: integration test metadata generation
    runs-on: ubuntu-latest
    runs-on: ${{ vars.UBUNTU_LATEST }}
    outputs:
      split-groups: ${{ steps.generate-split-groups.outputs.split-groups }}
      include: ${{ steps.generate-include.outputs.include }}
@@ -146,7 +140,7 @@ jobs:
      - name: generate include
        id: generate-include
        run: |
          INCLUDE=('"python-version":"3.10","os":"windows-latest"' '"python-version":"3.10","os":"macos-14"' )
          INCLUDE=('"python-version":"3.9","os":"${{ vars.WINDOWS_LATEST }}"' '"python-version":"3.9","os":"${{ vars.MACOS_LATEST }}"' )
          INCLUDE_GROUPS="["
          for include in ${INCLUDE[@]}; do
            for group in $(seq 1 ${{ env.PYTHON_INTEGRATION_TEST_WORKERS }}); do
@@ -159,7 +153,7 @@ jobs:
          echo "include=${INCLUDE_GROUPS}" >> $GITHUB_OUTPUT

  integration-postgres:
    name: "(${{ matrix.split-group }}) integration test / python ${{ matrix.python-version }} / ${{ matrix.os }}"
    name: (${{ matrix.split-group }}) integration test / python ${{ matrix.python-version }} / ${{ matrix.os }}

    runs-on: ${{ matrix.os }}
    timeout-minutes: 30
@@ -168,10 +162,11 @@ jobs:
    strategy:
      fail-fast: false
      matrix:
        python-version: ["3.10", "3.11", "3.12", "3.13"]
        os: ["ubuntu-latest"]
        python-version: [ "3.9", "3.10", "3.11", "3.12", "3.13" ]
        os: ["${{ vars.UBUNTU_LATEST }}"]
        split-group: ${{ fromJson(needs.integration-metadata.outputs.split-groups) }}
    env:
      TOXENV: integration
      DBT_INVOCATION_ENV: github-actions
      DBT_TEST_USER_1: dbt_test_user_1
      DBT_TEST_USER_2: dbt_test_user_2
@@ -202,16 +197,16 @@ jobs:

    steps:
      - name: Check out the repository
        uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # actions/checkout@v4
        uses: actions/checkout@v4

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # actions/setup-python@v6
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}

      - name: Run postgres setup script
        run: |
          ./scripts/setup_db.sh
          ./test/setup_db.sh
        env:
          PGHOST: localhost
          PGPORT: 5432
@@ -221,16 +216,17 @@ jobs:
        run: |
          python -m pip install --user --upgrade pip
          python -m pip --version
          python -m pip install hatch
          hatch --version
          python -m pip install tox
          tox --version

      - name: Run integration tests
        uses: nick-fields/retry@ce71cc2ab81d554ebbe88c79ab5975992d79ba08 # nick-fields/retry@v3
        uses: nick-fields/retry@v3
        with:
          timeout_minutes: 30
          max_attempts: 3
          shell: bash
          command: cd core && hatch run ci:integration-tests -- --ddtrace --splits ${{ env.PYTHON_INTEGRATION_TEST_WORKERS }} --group ${{ matrix.split-group }}
          command: tox -- --ddtrace
        env:
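          # --splits/--group come from the pytest-split plugin (our reading of the
          # flags used here): the suite is partitioned into
          # PYTHON_INTEGRATION_TEST_WORKERS timed groups and each matrix worker
          # runs exactly one group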
          PYTEST_ADDOPTS: ${{ format('--splits {0} --group {1}', env.PYTHON_INTEGRATION_TEST_WORKERS, matrix.split-group) }}

      - name: Get current date
        if: always()
@@ -239,7 +235,7 @@ jobs:
          CURRENT_DATE=$(date +'%Y-%m-%dT%H_%M_%S') # no colons allowed for artifacts
          echo "date=$CURRENT_DATE" >> $GITHUB_OUTPUT

      - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # actions/upload-artifact@v4
      - uses: actions/upload-artifact@v4
        if: always()
        with:
          name: logs_${{ matrix.python-version }}_${{ matrix.os }}_${{ matrix.split-group }}_${{ steps.date.outputs.date }}
@@ -247,11 +243,10 @@ jobs:

      - name: Upload Integration Test Coverage to Codecov
        if: ${{ matrix.python-version == '3.11' }}
        uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # codecov/codecov-action@v5
        uses: codecov/codecov-action@v5
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          flags: integration
          fail_ci_if_error: false

  integration-mac-windows:
    name: (${{ matrix.split-group }}) integration test / python ${{ matrix.python-version }} / ${{ matrix.os }}
@@ -263,9 +258,11 @@ jobs:
    strategy:
      fail-fast: false
      matrix:
        # already includes split group and runs mac + windows
        split-group: ${{ fromJson(needs.integration-metadata.outputs.split-groups) }}
        # this include is where we add the mac and windows os
        include: ${{ fromJson(needs.integration-metadata.outputs.include) }}
    env:
      TOXENV: integration
      DBT_INVOCATION_ENV: github-actions
      DBT_TEST_USER_1: dbt_test_user_1
      DBT_TEST_USER_2: dbt_test_user_2
@@ -278,21 +275,21 @@ jobs:

    steps:
      - name: Check out the repository
        uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # actions/checkout@v4
        uses: actions/checkout@v4

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # actions/setup-python@v6
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}

      - name: Set up postgres (macos)
        if: runner.os == 'macOS'

        uses: nick-fields/retry@ce71cc2ab81d554ebbe88c79ab5975992d79ba08 # nick-fields/retry@v3
        uses: nick-fields/retry@v3
        with:
          timeout_minutes: 10
          max_attempts: 3
          command: ./scripts/setup_db.sh
          command: ./test/setup_db.sh

      - name: Set up postgres (windows)
        if: runner.os == 'Windows'
@@ -302,16 +299,17 @@ jobs:
        run: |
          python -m pip install --user --upgrade pip
          python -m pip --version
          python -m pip install hatch
          hatch --version
          python -m pip install tox
          tox --version

      - name: Run integration tests
        uses: nick-fields/retry@ce71cc2ab81d554ebbe88c79ab5975992d79ba08 # nick-fields/retry@v3
        uses: nick-fields/retry@v3
        with:
          timeout_minutes: 30
          max_attempts: 3
          shell: bash
          command: cd core && hatch run ci:integration-tests -- --ddtrace --splits ${{ env.PYTHON_INTEGRATION_TEST_WORKERS }} --group ${{ matrix.split-group }}
          command: tox -- --ddtrace
        env:
          PYTEST_ADDOPTS: ${{ format('--splits {0} --group {1}', env.PYTHON_INTEGRATION_TEST_WORKERS, matrix.split-group) }}

      - name: Get current date
        if: always()
@@ -320,7 +318,7 @@ jobs:
          CURRENT_DATE=$(date +'%Y-%m-%dT%H_%M_%S') # no colons allowed for artifacts
          echo "date=$CURRENT_DATE" >> $GITHUB_OUTPUT

      - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # actions/upload-artifact@v4
      - uses: actions/upload-artifact@v4
        if: always()
        with:
          name: logs_${{ matrix.python-version }}_${{ matrix.os }}_${{ matrix.split-group }}_${{ steps.date.outputs.date }}
@@ -328,16 +326,15 @@ jobs:

      - name: Upload Integration Test Coverage to Codecov
        if: ${{ matrix.python-version == '3.11' }}
        uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # codecov/codecov-action@v5
        uses: codecov/codecov-action@v5
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          flags: integration
          fail_ci_if_error: false

  integration-report:
    if: ${{ always() }}
    name: Integration Test Suite
    runs-on: ubuntu-latest
    runs-on: ${{ vars.UBUNTU_LATEST }}
    needs: [integration-mac-windows, integration-postgres]
    steps:
      - name: "Integration Tests Failed"
@@ -354,21 +351,21 @@ jobs:
  build:
    name: build packages

    runs-on: ubuntu-latest
    runs-on: ${{ vars.UBUNTU_LATEST }}

    steps:
      - name: Check out the repository
        uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # actions/checkout@v4
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # actions/setup-python@v6
        uses: actions/setup-python@v5
        with:
          python-version: "3.10"
          python-version: '3.9'

      - name: Install python dependencies
        run: |
          python -m pip install --user --upgrade pip
          python -m pip install --upgrade hatch twine check-wheel-contents
          python -m pip install --upgrade setuptools wheel twine check-wheel-contents
          python -m pip --version

      - name: Build distributions
@@ -377,7 +374,27 @@ jobs:
      - name: Show distributions
        run: ls -lh dist/

      - name: Check and verify distributions
      - name: Check distribution descriptions
        run: |
          cd core
          hatch run build:check-all
          twine check dist/*

      - name: Check wheel contents
        run: |
          check-wheel-contents dist/*.whl --ignore W007,W008

      - name: Install wheel distributions
        run: |
          find ./dist/*.whl -maxdepth 1 -type f | xargs python -m pip install --force-reinstall --find-links=dist/

      - name: Check wheel distributions
        run: |
          dbt --version

      - name: Install source distributions
        # ignore dbt-1.0.0, which intentionally raises an error when installed from source
        run: |
          find ./dist/*.gz -maxdepth 1 -type f | xargs python -m pip install --force-reinstall --find-links=dist/

      - name: Check source distributions
        run: |
          dbt --version
265
.github/workflows/model_performance.yml
vendored
Normal file
@@ -0,0 +1,265 @@
# **what?**
# This workflow models the performance characteristics of a point in time in dbt.
# It runs specific dbt commands on committed projects multiple times to create and
# commit information about the distribution to the current branch. For more information
# see the readme in the performance module at /performance/README.md.
#
# **why?**
# When developing new features, we can take quick performance samples and compare
# them against the committed baseline measurements produced by this workflow to detect
# some performance regressions at development time before they reach users.
#
# **when?**
# This is only run once directly after each release (for non-prereleases). If for some
# reason the results of a run are not satisfactory, it can also be triggered manually.

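# The measurement itself happens in the "Run Measurement" step below, which invokes
# the compiled Rust runner roughly as
#   performance/app model -v <version> -b <baselines dir> -p <projects dir> -t <tmp dir> -n 20
# (20 samples; see /performance/README.md for the full contract)
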
name: Model Performance Characteristics

on:
  # runs after non-prereleases are published.
  release:
    types: [released]
  # run manually from the actions tab
  workflow_dispatch:
    inputs:
      release_id:
        description: 'dbt version to model (must be non-prerelease in Pypi)'
        type: string
        required: true

env:
  RUNNER_CACHE_PATH: performance/runner/target/release/runner

# both jobs need to write
permissions:
  contents: write
  pull-requests: write

jobs:
  set-variables:
    name: Setting Variables
    runs-on: ${{ vars.UBUNTU_LATEST }}
    outputs:
      cache_key: ${{ steps.variables.outputs.cache_key }}
      release_id: ${{ steps.semver.outputs.base-version }}
      release_branch: ${{ steps.variables.outputs.release_branch }}
    steps:

      # explicitly checkout the performance runner from main regardless of which
      # version we are modeling.
      - name: Checkout
        uses: actions/checkout@v4
        with:
          ref: main

      - name: Parse version into parts
        id: semver
        uses: dbt-labs/actions/parse-semver@v1
        with:
          version: ${{ github.event.inputs.release_id || github.event.release.tag_name }}

      # collect all the variables that need to be used in subsequent jobs
      - name: Set variables
        id: variables
        run: |
          # create a cache key that will be used in the next job. without this the
          # next job would have to checkout from main and hash the files itself.
          echo "cache_key=${{ runner.os }}-${{ hashFiles('performance/runner/Cargo.toml')}}-${{ hashFiles('performance/runner/src/*') }}" >> $GITHUB_OUTPUT

          branch_name="${{steps.semver.outputs.major}}.${{steps.semver.outputs.minor}}.latest"
          echo "release_branch=$branch_name" >> $GITHUB_OUTPUT
          echo "release branch is inferred to be ${branch_name}"

  latest-runner:
    name: Build or Fetch Runner
    runs-on: ${{ vars.UBUNTU_LATEST }}
    needs: [set-variables]
    env:
      RUSTFLAGS: "-D warnings"
    steps:
      - name: '[DEBUG] print variables'
        run: |
          echo "all variables defined in set-variables"
          echo "cache_key: ${{ needs.set-variables.outputs.cache_key }}"
          echo "release_id: ${{ needs.set-variables.outputs.release_id }}"
          echo "release_branch: ${{ needs.set-variables.outputs.release_branch }}"

      # explicitly checkout the performance runner from main regardless of which
      # version we are modeling.
      - name: Checkout
        uses: actions/checkout@v4
        with:
          ref: main

      # attempts to access a previously cached runner
      - uses: actions/cache@v4
        id: cache
        with:
          path: ${{ env.RUNNER_CACHE_PATH }}
          key: ${{ needs.set-variables.outputs.cache_key }}

      - name: Fetch Rust Toolchain
        if: steps.cache.outputs.cache-hit != 'true'
        uses: actions-rs/toolchain@v1
        with:
          profile: minimal
          toolchain: stable
          override: true

      - name: Add fmt
        if: steps.cache.outputs.cache-hit != 'true'
        run: rustup component add rustfmt

      - name: Cargo fmt
        if: steps.cache.outputs.cache-hit != 'true'
        uses: actions-rs/cargo@v1
        with:
          command: fmt
          args: --manifest-path performance/runner/Cargo.toml --all -- --check

      - name: Test
        if: steps.cache.outputs.cache-hit != 'true'
        uses: actions-rs/cargo@v1
        with:
          command: test
          args: --manifest-path performance/runner/Cargo.toml

      - name: Build (optimized)
        if: steps.cache.outputs.cache-hit != 'true'
        uses: actions-rs/cargo@v1
        with:
          command: build
          args: --release --manifest-path performance/runner/Cargo.toml
      # the cache action automatically caches this binary at the end of the job

  model:
    # depends on `latest-runner` as a separate job so that failures in this job do not prevent
    # a successfully tested and built binary from being cached.
    needs: [set-variables, latest-runner]
    name: Model a release
    runs-on: ${{ vars.UBUNTU_LATEST }}
    steps:

      - name: '[DEBUG] print variables'
        run: |
          echo "all variables defined in set-variables"
          echo "cache_key: ${{ needs.set-variables.outputs.cache_key }}"
          echo "release_id: ${{ needs.set-variables.outputs.release_id }}"
          echo "release_branch: ${{ needs.set-variables.outputs.release_branch }}"

      - name: Setup Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.9"

      - name: Install dbt
        run: pip install dbt-postgres==${{ needs.set-variables.outputs.release_id }}

      - name: Install Hyperfine
        run: wget https://github.com/sharkdp/hyperfine/releases/download/v1.11.0/hyperfine_1.11.0_amd64.deb && sudo dpkg -i hyperfine_1.11.0_amd64.deb

      # explicitly checkout main to get the latest project definitions
      - name: Checkout
        uses: actions/checkout@v4
        with:
          ref: main

      # this was built in the previous job so it will be there.
      - name: Fetch Runner
        uses: actions/cache@v4
        id: cache
        with:
          path: ${{ env.RUNNER_CACHE_PATH }}
          key: ${{ needs.set-variables.outputs.cache_key }}

      - name: Move Runner
        run: mv performance/runner/target/release/runner performance/app

      - name: Change Runner Permissions
        run: chmod +x ./performance/app

      - name: '[DEBUG] ls baseline directory before run'
        run: ls -R performance/baselines/

      # `${{ github.workspace }}` is used to pass the absolute path
      - name: Create directories
        run: |
          mkdir ${{ github.workspace }}/performance/tmp/
          mkdir -p performance/baselines/${{ needs.set-variables.outputs.release_id }}/

      # Run modeling, taking 20 samples
      - name: Run Measurement
        run: |
          performance/app model -v ${{ needs.set-variables.outputs.release_id }} -b ${{ github.workspace }}/performance/baselines/ -p ${{ github.workspace }}/performance/projects/ -t ${{ github.workspace }}/performance/tmp/ -n 20

      - name: '[DEBUG] ls baseline directory after run'
        run: ls -R performance/baselines/

      - uses: actions/upload-artifact@v4
        with:
          name: baseline
          path: performance/baselines/${{ needs.set-variables.outputs.release_id }}/

  create-pr:
    name: Open PR for ${{ matrix.base-branch }}

    # depends on `model` as a separate job so that the baseline can be committed to more than one branch
    # i.e. release branch and main
    needs: [set-variables, latest-runner, model]
    runs-on: ${{ vars.UBUNTU_LATEST }}

    strategy:
      matrix:
        include:
          - base-branch: refs/heads/main
            target-branch: performance-bot/main_${{ needs.set-variables.outputs.release_id }}_${{GITHUB.RUN_ID}}
          - base-branch: refs/heads/${{ needs.set-variables.outputs.release_branch }}
            target-branch: performance-bot/release_${{ needs.set-variables.outputs.release_id }}_${{GITHUB.RUN_ID}}

    steps:
      - name: '[DEBUG] print variables'
        run: |
          echo "all variables defined in set-variables"
          echo "cache_key: ${{ needs.set-variables.outputs.cache_key }}"
          echo "release_id: ${{ needs.set-variables.outputs.release_id }}"
          echo "release_branch: ${{ needs.set-variables.outputs.release_branch }}"

      - name: Checkout
        uses: actions/checkout@v4
        with:
          ref: ${{ matrix.base-branch }}

      - name: Create PR branch
        run: |
          git checkout -b ${{ matrix.target-branch }}
          git push origin ${{ matrix.target-branch }}
          git branch --set-upstream-to=origin/${{ matrix.target-branch }} ${{ matrix.target-branch }}

      - uses: actions/download-artifact@v4
        with:
          name: baseline
          path: performance/baselines/${{ needs.set-variables.outputs.release_id }}

      - name: '[DEBUG] ls baselines after artifact download'
        run: ls -R performance/baselines/

      - name: Commit baseline
        uses: EndBug/add-and-commit@v9
        with:
          add: 'performance/baselines/*'
          author_name: 'Github Build Bot'
          author_email: 'buildbot@fishtownanalytics.com'
          message: 'adding performance baseline for ${{ needs.set-variables.outputs.release_id }}'
          push: 'origin origin/${{ matrix.target-branch }}'

      - name: Create Pull Request
        uses: peter-evans/create-pull-request@v7
        with:
          author: 'Github Build Bot <buildbot@fishtownanalytics.com>'
          base: ${{ matrix.base-branch }}
          branch: '${{ matrix.target-branch }}'
          title: 'Adding performance modeling for ${{needs.set-variables.outputs.release_id}} to ${{ matrix.base-branch }}'
          body: 'Committing perf results for tracking for the ${{needs.set-variables.outputs.release_id}}'
          labels: |
            Skip Changelog
            Performance
4
.github/workflows/nightly-release.yml
vendored
@@ -39,14 +39,14 @@ jobs:

    steps:
      - name: "Checkout ${{ github.repository }} Branch ${{ env.RELEASE_BRANCH }}"
        uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # actions/checkout@v4
        uses: actions/checkout@v4
        with:
          ref: ${{ env.RELEASE_BRANCH }}

      - name: "Get Current Version Number"
        id: version-number-sources
        run: |
          current_version=$(grep '^version = ' core/dbt/__version__.py | sed 's/version = "\(.*\)"/\1/')
          current_version=`awk -F"current_version = " '{print $2}' .bumpversion.cfg | tr '\n' ' '`
          echo "current_version=$current_version" >> $GITHUB_OUTPUT

      - name: "Audit Version And Parse Into Parts"

47
.github/workflows/release.yml
vendored
@@ -72,15 +72,12 @@ defaults:
  run:
    shell: bash

env:
  MIN_HATCH_VERSION: "1.11.0"

jobs:
  job-setup:
    name: Log Inputs
    runs-on: ${{ vars.UBUNTU_LATEST }}
    outputs:
      use_hatch: ${{ steps.use_hatch.outputs.use_hatch }}
      starting_sha: ${{ steps.set_sha.outputs.starting_sha }}
    steps:
      - name: "[DEBUG] Print Variables"
        run: |
@@ -91,29 +88,19 @@ jobs:
          echo Nightly release: ${{ inputs.nightly_release }}
          echo Only Docker: ${{ inputs.only_docker }}

      # In version env.MIN_HATCH_VERSION we started to use hatch for build tooling; before that we used setuptools.
      # This checks which one applies to the version being released: greater than or equal to
      # env.MIN_HATCH_VERSION means hatch, anything older means setuptools.
      - name: "Check if using hatch"
        id: use_hatch
        run: |
          # Extract major.minor from versions like 1.11.0a1 -> 1.11
          INPUT_MAJ_MIN=$(echo "${{ inputs.version_number }}" | sed -E 's/^([0-9]+\.[0-9]+).*/\1/')
          HATCH_MAJ_MIN=$(echo "${{ env.MIN_HATCH_VERSION }}" | sed -E 's/^([0-9]+\.[0-9]+).*/\1/')
      - name: "Checkout target branch"
        uses: actions/checkout@v4
        with:
          ref: ${{ inputs.target_branch }}

          if [ $(echo "$INPUT_MAJ_MIN >= $HATCH_MAJ_MIN" | bc) -eq 1 ]; then
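            # worked example: version_number=1.10.2 gives INPUT_MAJ_MIN=1.10;
            # "1.10 >= 1.11" | bc prints 0, so 1.10.x releases skip this branch
            # and fall back to setuptools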
echo "use_hatch=true" >> $GITHUB_OUTPUT
|
||||
else
|
||||
echo "use_hatch=false" >> $GITHUB_OUTPUT
|
||||
fi
|
||||
|
||||
- name: "Notify if using hatch"
|
||||
# release-prep.yml really shouldn't take in the sha but since core + all adapters
|
||||
# depend on it now this workaround lets us not input it manually with risk of error.
|
||||
# The changes always get merged into the head so we can't use a specific commit for
|
||||
# releases anyways.
|
||||
- name: "Capture sha"
|
||||
id: set_sha
|
||||
run: |
|
||||
if [ ${{ steps.use_hatch.outputs.use_hatch }} = "true" ]; then
|
||||
echo "::notice title="Using Hatch": $title::Using Hatch for release"
|
||||
else
|
||||
echo "::notice title="Using Setuptools": $title::Using Setuptools for release"
|
||||
fi
|
||||
echo "starting_sha=$(git rev-parse HEAD)" >> $GITHUB_OUTPUT
|
||||
|
||||
bump-version-generate-changelog:
|
||||
name: Bump package version, Generate changelog
|
||||
@@ -123,13 +110,12 @@ jobs:
|
||||
uses: dbt-labs/dbt-release/.github/workflows/release-prep.yml@main
|
||||
|
||||
with:
|
||||
sha: ${{ needs.job-setup.outputs.starting_sha }}
|
||||
version_number: ${{ inputs.version_number }}
|
||||
hatch_directory: "core"
|
||||
target_branch: ${{ inputs.target_branch }}
|
||||
env_setup_script_path: "scripts/env-setup.sh"
|
||||
test_run: ${{ inputs.test_run }}
|
||||
nightly_release: ${{ inputs.nightly_release }}
|
||||
use_hatch: ${{ needs.job-setup.outputs.use_hatch == 'true' }} # workflow outputs are strings...
|
||||
|
||||
secrets: inherit
|
||||
|
||||
@@ -157,13 +143,16 @@ jobs:
|
||||
with:
|
||||
sha: ${{ needs.bump-version-generate-changelog.outputs.final_sha }}
|
||||
version_number: ${{ inputs.version_number }}
|
||||
hatch_directory: "core"
|
||||
changelog_path: ${{ needs.bump-version-generate-changelog.outputs.changelog_path }}
|
||||
build_script_path: "scripts/build-dist.sh"
|
||||
s3_bucket_name: "core-team-artifacts"
|
||||
package_test_command: "dbt --version"
|
||||
test_run: ${{ inputs.test_run }}
|
||||
nightly_release: ${{ inputs.nightly_release }}
|
||||
use_hatch: ${{ needs.job-setup.outputs.use_hatch == 'true' }} # workflow outputs are strings...
|
||||
|
||||
secrets:
|
||||
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
|
||||
github-release:
|
||||
name: GitHub Release
|
||||
|
||||
30
.github/workflows/schema-check.yml
vendored
@@ -22,7 +22,7 @@ on:
      target_branch:
        description: "The branch to check against"
        type: string
        default: "main"
        default: 'main'
        required: true

# no special access is needed
@@ -41,19 +41,19 @@ jobs:

    steps:
      - name: Set up Python
        uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # actions/setup-python@v6
        uses: actions/setup-python@v5
        with:
          python-version: "3.10"
          python-version: 3.9

      - name: Checkout dbt repo
        uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # actions/checkout@v4
        uses: actions/checkout@v4
        with:
          path: ${{ env.DBT_REPO_DIRECTORY }}
          ref: ${{ inputs.target_branch }}

      - name: Check for changes in core/dbt/artifacts
        # https://github.com/marketplace/actions/paths-changes-filter
        uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # dorny/paths-filter@v3
        uses: dorny/paths-filter@v3
        id: check_artifact_changes
        with:
          filters: |
@@ -69,19 +69,21 @@ jobs:

      - name: Checkout schemas.getdbt.com repo
        if: steps.check_artifact_changes.outputs.artifacts_changed == 'true'
        uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # actions/checkout@v4
        uses: actions/checkout@v4
        with:
          repository: dbt-labs/schemas.getdbt.com
          ref: "main"
          ref: 'main'
          path: ${{ env.SCHEMA_REPO_DIRECTORY }}

      - name: Generate current schema
        if: steps.check_artifact_changes.outputs.artifacts_changed == 'true'
        run: |
          cd ${{ env.DBT_REPO_DIRECTORY }}/core
          pip install --upgrade pip hatch
          hatch run setup
          hatch run json-schema -- --path ${{ env.LATEST_SCHEMA_PATH }}
          cd ${{ env.DBT_REPO_DIRECTORY }}
          python3 -m venv env
          source env/bin/activate
          pip install --upgrade pip
          pip install -r dev-requirements.txt -r editable-requirements.txt
          python scripts/collect-artifact-schema.py --path ${{ env.LATEST_SCHEMA_PATH }}

      # Copy generated schema files into the schemas.getdbt.com repo
      # Do a git diff to find any changes
@@ -94,8 +96,8 @@ jobs:
          git diff -I='*[0-9]{4}-[0-9]{2}-[0-9]{2}' -I='*[0-9]+\.[0-9]+\.[0-9]+' --exit-code > ${{ env.SCHEMA_DIFF_ARTIFACT }}

      - name: Upload schema diff
        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # actions/upload-artifact@v4
        uses: actions/upload-artifact@v4
        if: ${{ failure() && steps.check_artifact_changes.outputs.artifacts_changed == 'true' }}
        with:
          name: "schema_changes.txt"
          path: "${{ env.SCHEMA_DIFF_ARTIFACT }}"
          name: 'schema_changes.txt'
          path: '${{ env.SCHEMA_DIFF_ARTIFACT }}'

@@ -14,8 +14,6 @@ on:
      - "*.latest"
      - "releases/*"
  pull_request:
  merge_group:
    types: [checks_requested]
  workflow_dispatch:

permissions: read-all
@@ -28,7 +26,7 @@ env:
jobs:
  integration-metadata:
    name: integration test metadata generation
    runs-on: ubuntu-latest
    runs-on: ${{ vars.UBUNTU_LATEST }}
    outputs:
      split-groups: ${{ steps.generate-split-groups.outputs.split-groups }}

@@ -47,7 +45,7 @@ jobs:
  # run the performance measurements on the current or default branch
  test-schema:
    name: Test Log Schema
    runs-on: ubuntu-latest
    runs-on: ${{ vars.UBUNTU_LATEST }}
    timeout-minutes: 30
    needs:
      - integration-metadata
@@ -89,25 +87,25 @@ jobs:

    steps:
      - name: checkout dev
        uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # actions/checkout@v4
        uses: actions/checkout@v4
        with:
          persist-credentials: false

      - name: Setup Python
        uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # actions/setup-python@v6
        uses: actions/setup-python@v5
        with:
          python-version: "3.10"
          python-version: "3.9"

      - name: Install python dependencies
        run: |
          pip install --user --upgrade pip
          pip --version
          pip install hatch
          hatch --version
          pip install tox
          tox --version

      - name: Run postgres setup script
        run: |
          ./scripts/setup_db.sh
          ./test/setup_db.sh
        env:
          PGHOST: localhost
          PGPORT: 5432
@@ -119,17 +117,17 @@ jobs:
      # integration tests generate a ton of logs in different files. the next step will find them all.
      # we actually care if these pass, because the normal test run doesn't usually include many json log outputs
      - name: Run integration tests
        uses: nick-fields/retry@ce71cc2ab81d554ebbe88c79ab5975992d79ba08 # nick-fields/retry@v3
        uses: nick-fields/retry@v3
        with:
          timeout_minutes: 30
          max_attempts: 3
          command: cd core && hatch run ci:integration-tests -- -nauto
          command: tox -e integration -- -nauto
        env:
          PYTEST_ADDOPTS: ${{ format('--splits {0} --group {1}', env.PYTHON_INTEGRATION_TEST_WORKERS, matrix.split-group) }}

  test-schema-report:
    name: Log Schema Test Suite
    runs-on: ubuntu-latest
    runs-on: ${{ vars.UBUNTU_LATEST }}
    needs: test-schema
    steps:
      - name: "[Notification] Log test suite passes"

46
.github/workflows/test-repeater.yml
vendored
@@ -14,33 +14,34 @@ on:
  workflow_dispatch:
    inputs:
      branch:
        description: "Branch to check out"
        description: 'Branch to check out'
        type: string
        required: true
        default: "main"
        default: 'main'
      test_path:
        description: "Path to single test to run (ex: tests/functional/retry/test_retry.py::TestRetry::test_fail_fast)"
        description: 'Path to single test to run (ex: tests/functional/retry/test_retry.py::TestRetry::test_fail_fast)'
        type: string
        required: true
        default: "tests/functional/..."
        default: 'tests/functional/...'
      python_version:
        description: "Version of Python to Test Against"
        description: 'Version of Python to Test Against'
        type: choice
        options:
          - "3.10"
          - "3.11"
          - '3.9'
          - '3.10'
          - '3.11'
      os:
        description: "OS to run test in"
        description: 'OS to run test in'
        type: choice
        options:
          - "ubuntu-latest"
          - "macos-14"
          - "windows-latest"
          - 'ubuntu-latest'
          - 'macos-14'
          - 'windows-latest'
      num_runs_per_batch:
        description: "Max number of times to run the test per batch. We always run 10 batches."
        description: 'Max number of times to run the test per batch. We always run 10 batches.'
        type: number
        required: true
        default: "50"
        default: '50'

permissions: read-all

@@ -81,37 +82,30 @@ jobs:

    steps:
      - name: "Checkout code"
        uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # actions/checkout@v4
        uses: actions/checkout@v4
        with:
          ref: ${{ inputs.branch }}

      - name: "Setup Python"
        uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # actions/setup-python@v6
        uses: actions/setup-python@v5
        with:
          python-version: "${{ inputs.python_version }}"

      - name: "Install hatch"
        run: python -m pip install --user --upgrade pip hatch

      - name: "Setup Dev Environment"
        run: |
          cd core
          hatch run setup
        run: make dev

      - name: "Set up postgres (linux)"
        if: inputs.os == '${{ vars.UBUNTU_LATEST }}'
        run: |
          cd core
          hatch run setup-db
        run: make setup-db

      # mac and windows don't use make due to limitations with docker with those runners in GitHub
      - name: Set up postgres (macos)
        if: runner.os == 'macOS'
        uses: nick-fields/retry@ce71cc2ab81d554ebbe88c79ab5975992d79ba08 # nick-fields/retry@v3
        uses: nick-fields/retry@v3
        with:
          timeout_minutes: 10
          max_attempts: 3
          command: ./scripts/setup_db.sh
          command: ./test/setup_db.sh

      - name: "Set up postgres (windows)"
        if: inputs.os == 'windows-latest'

2
.gitignore
vendored
@@ -15,7 +15,6 @@ build/
!core/dbt/docs/build
develop-eggs/
dist/
dist-*/
downloads/
eggs/
.eggs/
@@ -96,7 +95,6 @@ target/
# pycharm
.idea/
venv/
.venv*/

# AWS credentials
.aws/

@@ -3,12 +3,12 @@

exclude: ^(core/dbt/docs/build/|core/dbt/common/events/types_pb2.py|core/dbt/adapters/events/adapter_types_pb2.py)

# Force all unspecified python hooks to run python 3.10
# Force all unspecified python hooks to run python 3.9
default_language_version:
  python: python3

repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v3.2.0
    hooks:
      - id: check-yaml
@@ -20,72 +20,52 @@ repos:
        exclude_types:
          - "markdown"
      - id: check-case-conflict
  # local hooks are used to run the hooks in the local environment instead of a pre-commit isolated one.
  # This ensures that the hooks are run with the same version of the dependencies as the local environment
  # without having to manually keep them in sync.
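  # The `-check` aliases below are gated with `stages: [manual]`: they run only when
  # requested explicitly, e.g. `pre-commit run --all-files --hook-stage manual`
  # (a standard pre-commit flag), while a plain `pre-commit run --all-files`
  # runs the in-place formatting variants.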
  - repo: local
  - repo: https://github.com/pycqa/isort
    # rev must match what's in dev-requirements.txt
    rev: 5.13.2
    hooks:
      # Formatter/linter/type-checker pins live in the pyproject.dev optional dependency.
      - id: isort
        name: isort
        entry: python -m isort
        language: system
        types: [python]
  - repo: https://github.com/psf/black
    # rev must match what's in dev-requirements.txt
    rev: 24.3.0
    hooks:
      - id: black
        name: black
        entry: python -m black
        language: system
        types: [python]
      - id: black-check
        name: black-check
        entry: python -m black
      - id: black
        alias: black-check
        stages: [manual]
        args:
          - "--check"
          - "--diff"
        language: system
        stages: [manual]
        types: [python]
  - repo: https://github.com/pycqa/flake8
    # rev must match what's in dev-requirements.txt
    rev: 4.0.1
    hooks:
      - id: flake8
        name: flake8
        entry: python -m flake8
        language: system
        types: [python]
      - id: flake8-check
        name: flake8-check
        entry: python -m flake8
        language: system
      - id: flake8
        alias: flake8-check
        stages: [manual]
        types: [python]
  - repo: https://github.com/pre-commit/mirrors-mypy
    # rev must match what's in dev-requirements.txt
    rev: v1.4.1
    hooks:
      - id: mypy
        # N.B.: Mypy is... a bit fragile.
        #
        # By using `language: system` we run this hook in the local
        # environment instead of a pre-commit isolated one. This is needed
        # to ensure mypy correctly parses the project.
        #
        # It may cause trouble in that it adds environmental variables out
        # of our control to the mix. Unfortunately, there's nothing we can
        # do about it, per pre-commit's author.
        # See https://github.com/pre-commit/pre-commit/issues/730 for details.
      - id: mypy
        name: mypy
        entry: python -m mypy
        args: [--show-error-codes]
        files: ^core/dbt/
        language: system
        types: [python]
      - id: mypy-check
        name: mypy-check
        entry: python -m mypy
      - id: mypy
        alias: mypy-check
        stages: [manual]
        args: [--show-error-codes, --pretty]
        files: ^core/dbt/
        language: system
        stages: [manual]
        types: [python]
      - id: no_versioned_artifact_resource_imports
        name: no_versioned_artifact_resource_imports
        entry: python scripts/pre-commit-hooks/no_versioned_artifact_resource_imports.py
        language: system
        files: ^core/dbt/
        types: [python]
        pass_filenames: true

@@ -6,6 +6,7 @@ Most of the python code in the repository is within the `core/dbt` directory.
- [`single python files`](core/dbt/README.md): A number of individual files, such as 'compilation.py' and 'exceptions.py'

The main subdirectories of core/dbt:
- [`adapters`](core/dbt/adapters/README.md): Define base classes for behavior that is likely to differ across databases
- [`clients`](core/dbt/clients/README.md): Interface with dependencies (agate, jinja) or across operating systems
- [`config`](core/dbt/config/README.md): Reconcile user-supplied configuration from connection profiles, project files, and Jinja macros
- [`context`](core/dbt/context/README.md): Build and expose dbt-specific Jinja functionality
@@ -17,6 +18,10 @@ The main subdirectories of core/dbt:
- [`parser`](core/dbt/parser/README.md): Read project files, validate, and construct python objects
- [`task`](core/dbt/task/README.md): Set forth the actions that dbt can perform when invoked

Legacy tests are found in the 'test' directory:
- [`unit tests`](core/dbt/test/unit/README.md): Unit tests
- [`integration tests`](core/dbt/test/integration/README.md): Integration tests

### Invoking dbt

The "tasks" map to top-level dbt commands: `dbt run` => task.run.RunTask, and so on. Some are more like abstract base classes (GraphRunnableTask, for example), but all the concrete types outside of task should map to tasks. Only one task executes at a time; each task kicks off its "Runners", and those do execute in parallel. The parallelism is managed via a thread pool, in GraphRunnableTask.
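In miniature, the dispatch looks like the sketch below. This is an illustration rather than dbt-core's actual code: `compile_and_execute` stands in for whatever one runner does to one node, the four-thread default is arbitrary, and the sketch ignores the DAG ordering the real thread pool respects.

```python
# Illustrative only: a GraphRunnableTask-style fan-out of runners onto a thread pool.
from concurrent.futures import ThreadPoolExecutor


def compile_and_execute(node_name: str) -> str:
    # a real runner would compile this node's SQL and run it against the warehouse
    return f"ran {node_name}"


def run_task(nodes: list[str], threads: int = 4) -> list[str]:
    # one runner per node, executed in parallel; the task itself runs alone
    with ThreadPoolExecutor(max_workers=threads) as pool:
        return list(pool.map(compile_and_execute, nodes))


print(run_task(["stg_orders", "stg_customers", "orders"]))
```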
@@ -35,15 +40,16 @@ Each adapter plugin is a standalone python package that includes:

- `dbt/include/[name]`: A "sub-global" dbt project, of YAML and SQL files, that reimplements Jinja macros to use the adapter's supported SQL syntax
- `dbt/adapters/[name]`: Python modules that inherit, and optionally reimplement, the base adapter classes defined in dbt-core
- `pyproject.toml`
- `setup.py`

The Postgres adapter code is the most central, and many of its implementations are used as the default defined in the dbt-core global project. The greater the distance of a data technology from Postgres, the more its adapter plugin may need to reimplement.
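A minimal sketch of the `dbt/adapters/[name]` piece, under the assumption that the `dbt.adapters.sql` base classes are importable as in dbt-core 1.x; everything named `mydb` is invented for illustration, and a real plugin implements much more:

```python
# Hypothetical adapter sketch; "mydb" is an invented name, not a real adapter.
from dbt.adapters.sql import SQLAdapter, SQLConnectionManager


class MyDBConnectionManager(SQLConnectionManager):
    TYPE = "mydb"  # the adapter's registered type name


class MyDBAdapter(SQLAdapter):
    ConnectionManager = MyDBConnectionManager

    @classmethod
    def date_function(cls) -> str:
        # Postgres answers now(); a dialect that spells it differently overrides this
        return "now()"
```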

## Testing dbt

The [`tests/`](tests/) subdirectory includes unit and functional tests that run as continuous integration checks against open pull requests. Unit tests check mock inputs and outputs of specific python functions. Functional tests perform end-to-end dbt invocations against real adapters (Postgres) and assert that the results match expectations. See [the contributing guide](CONTRIBUTING.md) for a step-by-step walkthrough of setting up a local development and testing environment.
The [`test/`](test/) subdirectory includes unit and integration tests that run as continuous integration checks against open pull requests. Unit tests check mock inputs and outputs of specific python functions. Integration tests perform end-to-end dbt invocations against real adapters (Postgres, Redshift, Snowflake, BigQuery) and assert that the results match expectations. See [the contributing guide](CONTRIBUTING.md) for a step-by-step walkthrough of setting up a local development and testing environment.

## Everything else

- [docker](docker/): All dbt versions are published as Docker images on DockerHub. This subfolder contains the `Dockerfile` (constant) and `requirements.txt` (one for each version).
- [etc](etc/): Images for the README
- [scripts](scripts/): Helper scripts for testing, releasing, and producing JSON schemas. These are not included in distributions of dbt, nor are they rigorously tested—they're just handy tools for the dbt maintainers :)

150
CHANGELOG.md
@@ -5,12 +5,158 @@
|
||||
- "Breaking changes" listed under a version may require action from end users or external maintainers when upgrading to that version.
|
||||
- Do not edit this file directly. This file is auto-generated using [changie](https://github.com/miniscruff/changie). For details on how to document a change, see [the contributing guide](https://github.com/dbt-labs/dbt-core/blob/main/CONTRIBUTING.md#adding-changelog-entry)
|
||||
|
||||
## dbt-core 1.10.2 - June 20, 2025
|
||||
|
||||
### Features
|
||||
|
||||
- Update jsonschemas with builtin data test properties and exposure configs in dbt_project.yml for more accurate deprecations ([#11335](https://github.com/dbt-labs/dbt-core/issues/11335))
|
||||
|
||||
### Dependencies
|
||||
|
||||
- Allow for either pydantic v1 and v2 ([#11634](https://github.com/dbt-labs/dbt-core/issues/11634))
|
||||
|
||||
|
||||
|
||||
## dbt-core 1.10.1 - June 16, 2025
|
||||
|
||||
### Dependencies
|
||||
|
||||
- Bump minimum jsonschema version to `4.19.1` ([#11740](https://github.com/dbt-labs/dbt-core/issues/11740))
|
||||
|
||||
## dbt-core 1.10.0 - June 16, 2025
|
||||
|
||||
### Breaking Changes
|
||||
|
||||
- Add invocations_started_at field to artifact metadata ([#11272](https://github.com/dbt-labs/dbt-core/issues/11272))
|
||||
- Flip behavior flag `source-freshness-run-project-hooks` to true ([#11609](https://github.com/dbt-labs/dbt-core/issues/11609))
|
||||
- Flip behavior flag to disallow spaces in resource names ([#11610](https://github.com/dbt-labs/dbt-core/issues/11610))
|
||||
|
||||
### Features
|
||||
|
||||
- Add new hard_deletes="new_record" mode for snapshots. ([#10235](https://github.com/dbt-labs/dbt-core/issues/10235))
|
||||
- Add `batch` context object to model jinja context ([#11025](https://github.com/dbt-labs/dbt-core/issues/11025))
|
||||
- Ensure pre/post hooks only run on first/last batch respectively for microbatch model batches ([#11094](https://github.com/dbt-labs/dbt-core/issues/11094), [#11104](https://github.com/dbt-labs/dbt-core/issues/11104))
|
||||
- Support "tags" in Saved Queries ([#11155](https://github.com/dbt-labs/dbt-core/issues/11155))
|
||||
- Calculate source freshness via a SQL query ([#8797](https://github.com/dbt-labs/dbt-core/issues/8797))
|
||||
- Add freshness definition on model for adaptive job ([#11123](https://github.com/dbt-labs/dbt-core/issues/11123))
|
||||
- Meta config for dimensions measures and entities ([#None](https://github.com/dbt-labs/dbt-core/issues/None))
|
||||
- Add doc_blocks to manifest for nodes and columns ([#11000](https://github.com/dbt-labs/dbt-core/issues/11000), [#11001](https://github.com/dbt-labs/dbt-core/issues/11001))
|
||||
- Initial implementation of sample mode ([#11227](https://github.com/dbt-labs/dbt-core/issues/11227), [#11230](https://github.com/dbt-labs/dbt-core/issues/11230), [#11231](https://github.com/dbt-labs/dbt-core/issues/11231), [#11248](https://github.com/dbt-labs/dbt-core/issues/11248), [#11252](https://github.com/dbt-labs/dbt-core/issues/11252), [#11254](https://github.com/dbt-labs/dbt-core/issues/11254), [#11258](https://github.com/dbt-labs/dbt-core/issues/11258))
|
||||
- Combine `--sample` and `--sample-window` CLI params ([#11299](https://github.com/dbt-labs/dbt-core/issues/11299))
|
||||
- Allow for sampling of ref'd seeds ([#11300](https://github.com/dbt-labs/dbt-core/issues/11300))
|
||||
- Enable sample mode for 'build' command ([#11298](https://github.com/dbt-labs/dbt-core/issues/11298))
|
||||
- Allow sampling nodes snapshots depend on and of snapshots as a dependency ([#11301](https://github.com/dbt-labs/dbt-core/issues/11301))
|
||||
- Add opt-in validation of macro argument names and types ([#11274](https://github.com/dbt-labs/dbt-core/issues/11274))
|
||||
- Add support for Python 3.13! ([#11401](https://github.com/dbt-labs/dbt-core/issues/11401))
|
||||
- Support artifact upload to dbt Cloud ([#11418](https://github.com/dbt-labs/dbt-core/issues/11418))
|
||||
- Show summaries for deprecations and add ability to toggle seeing all deprecation violation instances ([#11429](https://github.com/dbt-labs/dbt-core/issues/11429))
|
||||
- Add behavior flag for handling all warnings via warn_error logic ([#11116](https://github.com/dbt-labs/dbt-core/issues/11116))
|
||||
- Basic jsonschema validation of `dbt_project.yml` ([#11503](https://github.com/dbt-labs/dbt-core/issues/11503))
|
||||
- Begin checking YAML files for duplicate keys ([#11296](https://github.com/dbt-labs/dbt-core/issues/11296))
|
||||
- Add deprecation warnings for unexpected blocks in jinja. ([#11393](https://github.com/dbt-labs/dbt-core/issues/11393))
|
||||
- Begin validating the jsonschema of resource YAML files ([#11504](https://github.com/dbt-labs/dbt-core/issues/11504))
|
||||
- Add deprecation warning for custom top level keys in YAML files. ([#11338](https://github.com/dbt-labs/dbt-core/issues/11338))
|
||||
- Begin emitting deprecationw warnings for custom keys in config blocks ([#11337](https://github.com/dbt-labs/dbt-core/issues/11337))
|
||||
- Begin emitting deprecation events for custom properties found in objects ([#11336](https://github.com/dbt-labs/dbt-core/issues/11336))
|
||||
- Create a singular deprecations summary event ([#11536](https://github.com/dbt-labs/dbt-core/issues/11536))
|
||||
- Deprecate --output/-o usage in source freshness ([#11559](https://github.com/dbt-labs/dbt-core/issues/11559))
|
||||
- Deprecate usage of `include`/`exclude` terminology with warn-error-options ([#11557](https://github.com/dbt-labs/dbt-core/issues/11557))
|
||||
- Support description and config.meta on groups ([#11319](https://github.com/dbt-labs/dbt-core/issues/11319))
|
||||
- Adding Quoting to manifest metadata ([#na](https://github.com/dbt-labs/dbt-core/issues/na))
|
||||
- Include event names in deprecation warning messages ([#11471](https://github.com/dbt-labs/dbt-core/issues/11471))
|
||||
- Support config on columns ([#11651](https://github.com/dbt-labs/dbt-core/issues/11651))
|
||||
- Add file_format to catalog integration config ([#11695](https://github.com/dbt-labs/dbt-core/issues/11695))
|
||||
- 11561 ([#deprecate](https://github.com/dbt-labs/dbt-core/issues/deprecate), [#--models,--model,](https://github.com/dbt-labs/dbt-core/issues/--models,--model,), [#and](https://github.com/dbt-labs/dbt-core/issues/and), [#-m](https://github.com/dbt-labs/dbt-core/issues/-m), [#flags](https://github.com/dbt-labs/dbt-core/issues/flags))
|
||||
|
||||
### Fixes
|
||||
|
||||
- datetime.datetime.utcnow() is deprecated as of Python 3.12 ([#9791](https://github.com/dbt-labs/dbt-core/issues/9791))
- dbt retry does not respect --threads ([#10584](https://github.com/dbt-labs/dbt-core/issues/10584))
- update adapter version messages ([#10230](https://github.com/dbt-labs/dbt-core/issues/10230))
- Catch DbtRuntimeError for hooks ([#11012](https://github.com/dbt-labs/dbt-core/issues/11012))
- Access DBUG flag more consistently with the rest of the codebase in ManifestLoader ([#11068](https://github.com/dbt-labs/dbt-core/issues/11068))
- Improve the performance characteristics of add_test_edges() ([#10950](https://github.com/dbt-labs/dbt-core/issues/10950))
- Implement partial parsing for singular data test configs in yaml files ([#10801](https://github.com/dbt-labs/dbt-core/issues/10801))
- Fix debug log messages for microbatch batch execution information ([#11111](https://github.com/dbt-labs/dbt-core/issues/11111))
- Fix running of extra "last" batch when there is only one batch ([#11112](https://github.com/dbt-labs/dbt-core/issues/11112))
- Fix interpretation of `PartialSuccess` to result in non-zero exit code ([#11114](https://github.com/dbt-labs/dbt-core/issues/11114))
- Warn about invalid usages of `concurrent_batches` config ([#11122](https://github.com/dbt-labs/dbt-core/issues/11122))
- Error writing generic test at run time ([#11110](https://github.com/dbt-labs/dbt-core/issues/11110))
- Run check_modified_contract for state:modified ([#11034](https://github.com/dbt-labs/dbt-core/issues/11034))
- Fix unrendered_config for tests from dbt_project.yml ([#11146](https://github.com/dbt-labs/dbt-core/issues/11146))
- Make partial parsing reparse referencing nodes of newly versioned models. ([#8872](https://github.com/dbt-labs/dbt-core/issues/8872))
- Ensure warning about microbatch lacking filter inputs is always fired ([#11159](https://github.com/dbt-labs/dbt-core/issues/11159))
- Fix microbatch dbt list --output json ([#10556](https://github.com/dbt-labs/dbt-core/issues/10556), [#11098](https://github.com/dbt-labs/dbt-core/issues/11098))
- Fix for custom fields in generic test config for not_null and unique tests ([#11208](https://github.com/dbt-labs/dbt-core/issues/11208))
- Allow copying asset when dbt docs command is run outside the dbt project ([#9308](https://github.com/dbt-labs/dbt-core/issues/9308))
- Loosen validation on freshness to accommodate previously wrong but harmless config. ([#11123](https://github.com/dbt-labs/dbt-core/issues/11123))
- Handle `--limit -1` properly in `ShowTaskDirect` so that it propagates None instead of a negative int ([#None](https://github.com/dbt-labs/dbt-core/issues/None))
- _get_doc_blocks is crashing parsing if .format is called ([#11310](https://github.com/dbt-labs/dbt-core/issues/11310))
- Fix microbatch execution to not block main thread nor hang ([#11243](https://github.com/dbt-labs/dbt-core/issues/11243), [#11306](https://github.com/dbt-labs/dbt-core/issues/11306))
- Fixes parsing errors when using the new YAML format for snapshots ([#11164](https://github.com/dbt-labs/dbt-core/issues/11164))
- Update ConfigFolderDirectory dir to use str. ([#9768](https://github.com/dbt-labs/dbt-core/issues/9768), [#11305](https://github.com/dbt-labs/dbt-core/issues/11305))
- Fix microbatch models counting as success when only having one batch (and that batch failing) ([#11390](https://github.com/dbt-labs/dbt-core/issues/11390))
- Add pre-commit installation to Docker container for testing compatibility ([#11498](https://github.com/dbt-labs/dbt-core/issues/11498))
- Fix duplicate macro error message with multiple macros and multiple patches ([#4233](https://github.com/dbt-labs/dbt-core/issues/4233))
- Fix seed path for partial parsing if project directory name changes ([#11550](https://github.com/dbt-labs/dbt-core/issues/11550))
- Add `pre-commit` installation to Docker container for testing compatibility ([#11498](https://github.com/dbt-labs/dbt-core/issues/11498))
- Ensure the right key is associated with the `CustomKeyInConfigDeprecation` deprecation ([#11576](https://github.com/dbt-labs/dbt-core/issues/11576))
- Add tags and meta config to exposures ([#11428](https://github.com/dbt-labs/dbt-core/issues/11428))
- Add freshness config to sources ([#11506](https://github.com/dbt-labs/dbt-core/issues/11506))
- Add freshness config to models ([#11506](https://github.com/dbt-labs/dbt-core/issues/11506))
- require count and period on model freshness.build_after ([#11669](https://github.com/dbt-labs/dbt-core/issues/11669))
- Don't warn for metricflow_time_spine with non-day grain ([#11690](https://github.com/dbt-labs/dbt-core/issues/11690))
- Fix source freshness set via config to handle explicit nulls ([#11685](https://github.com/dbt-labs/dbt-core/issues/11685))
- Ensure build_after is present in model freshness in parsing, otherwise skip freshness definition ([#11709](https://github.com/dbt-labs/dbt-core/issues/11709))
- Ensure source node `.freshness` is equal to node's `.config.freshness` ([#11717](https://github.com/dbt-labs/dbt-core/issues/11717))
- ignore invalid model freshness configs in inline model configs ([#11728](https://github.com/dbt-labs/dbt-core/issues/11728))
- Fix store_failures hierarchical config parsing ([#10165](https://github.com/dbt-labs/dbt-core/issues/10165))
- Remove model freshness property support in favor of config level support ([#11713](https://github.com/dbt-labs/dbt-core/issues/11713))
### Under the Hood

- Create a no-op exposure runner ([#](https://github.com/dbt-labs/dbt-core/issues/), [#](https://github.com/dbt-labs/dbt-core/issues/))
- Improve selection performance by optimizing the select_children() and select_parents() functions. ([#11099](https://github.com/dbt-labs/dbt-core/issues/11099))
- Change exception type from DbtInternalException to UndefinedMacroError when macro not found in 'run operation' command ([#11192](https://github.com/dbt-labs/dbt-core/issues/11192))
- Create LogNodeResult event ([#](https://github.com/dbt-labs/dbt-core/issues/), [#](https://github.com/dbt-labs/dbt-core/issues/))
- Fix error counts for exposures ([#](https://github.com/dbt-labs/dbt-core/issues/), [#](https://github.com/dbt-labs/dbt-core/issues/))
- Misc fixes for group info in logging ([#11218](https://github.com/dbt-labs/dbt-core/issues/11218))
- Add node_checksum to node_info on structured logs ([#11372](https://github.com/dbt-labs/dbt-core/issues/11372))
- Parse catalogs.yml ([#XPLAT-242](https://github.com/dbt-labs/dbt-core/issues/XPLAT-242))
- Add package 'name' to lock file ([#11487](https://github.com/dbt-labs/dbt-core/issues/11487))
- Allow for deprecation previews ([#11597](https://github.com/dbt-labs/dbt-core/issues/11597))
- Move core_types.proto into shared dbt-protos library ([#11608](https://github.com/dbt-labs/dbt-core/issues/11608))
- Prevent overcounting PropertyMovedToConfigDeprecation for source freshness ([#11660](https://github.com/dbt-labs/dbt-core/issues/11660))

### Dependencies

- Upgrading dbt-semantic-interfaces to 0.8.3 for custom grain support in offset windows ([#None](https://github.com/dbt-labs/dbt-core/issues/None))
- Bump codecov/codecov-action from 4 to 5 ([#11009](https://github.com/dbt-labs/dbt-core/issues/11009))

### Contributors

- [@DevonFulcher](https://github.com/DevonFulcher) ([#None](https://github.com/dbt-labs/dbt-core/issues/None))
- [@Threynaud](https://github.com/Threynaud) ([#11068](https://github.com/dbt-labs/dbt-core/issues/11068))
- [@WilliamDee](https://github.com/WilliamDee) ([#None](https://github.com/dbt-labs/dbt-core/issues/None), [#None](https://github.com/dbt-labs/dbt-core/issues/None))
- [@amardatar](https://github.com/amardatar) ([#11164](https://github.com/dbt-labs/dbt-core/issues/11164))
- [@aranke](https://github.com/aranke) ([#11000](https://github.com/dbt-labs/dbt-core/issues/11000), [#11001](https://github.com/dbt-labs/dbt-core/issues/11001), [#11012](https://github.com/dbt-labs/dbt-core/issues/11012), [#11310](https://github.com/dbt-labs/dbt-core/issues/11310), [#11550](https://github.com/dbt-labs/dbt-core/issues/11550), [#11428](https://github.com/dbt-labs/dbt-core/issues/11428), [#11506](https://github.com/dbt-labs/dbt-core/issues/11506), [#11506](https://github.com/dbt-labs/dbt-core/issues/11506), [#](https://github.com/dbt-labs/dbt-core/issues/), [#](https://github.com/dbt-labs/dbt-core/issues/), [#](https://github.com/dbt-labs/dbt-core/issues/), [#](https://github.com/dbt-labs/dbt-core/issues/), [#](https://github.com/dbt-labs/dbt-core/issues/), [#](https://github.com/dbt-labs/dbt-core/issues/), [#11218](https://github.com/dbt-labs/dbt-core/issues/11218), [#XPLAT-242](https://github.com/dbt-labs/dbt-core/issues/XPLAT-242), [#11660](https://github.com/dbt-labs/dbt-core/issues/11660))
- [@cedric-orange](https://github.com/cedric-orange) ([#9308](https://github.com/dbt-labs/dbt-core/issues/9308))
- [@cmcarthur](https://github.com/cmcarthur) ([#11608](https://github.com/dbt-labs/dbt-core/issues/11608))
- [@courtneyholcomb](https://github.com/courtneyholcomb) ([#11690](https://github.com/dbt-labs/dbt-core/issues/11690))
- [@d-cole](https://github.com/d-cole) ([#8872](https://github.com/dbt-labs/dbt-core/issues/8872))
- [@dave-connors-3](https://github.com/dave-connors-3) ([#10230](https://github.com/dbt-labs/dbt-core/issues/10230))
- [@donjin-master](https://github.com/donjin-master) ([#10584](https://github.com/dbt-labs/dbt-core/issues/10584))
- [@internetcoffeephone](https://github.com/internetcoffeephone) ([#10556](https://github.com/dbt-labs/dbt-core/issues/10556), [#11098](https://github.com/dbt-labs/dbt-core/issues/11098))
- [@kato1208](https://github.com/kato1208) ([#11498](https://github.com/dbt-labs/dbt-core/issues/11498), [#11498](https://github.com/dbt-labs/dbt-core/issues/11498))
- [@slothkong](https://github.com/slothkong) ([#9791](https://github.com/dbt-labs/dbt-core/issues/9791))
- [@theyostalservice](https://github.com/theyostalservice) ([#11155](https://github.com/dbt-labs/dbt-core/issues/11155))
- [@thorn14](https://github.com/thorn14) ([#9768](https://github.com/dbt-labs/dbt-core/issues/9768), [#11305](https://github.com/dbt-labs/dbt-core/issues/11305))
- [@venkaa28](https://github.com/venkaa28) ([#na](https://github.com/dbt-labs/dbt-core/issues/na))
## Previous Releases

For information on prior major and minor releases, see their changelogs:

* [1.11](https://github.com/dbt-labs/dbt-core/blob/1.11.latest/CHANGELOG.md)
* [1.10](https://github.com/dbt-labs/dbt-core/blob/1.10.latest/CHANGELOG.md)
* [1.9](https://github.com/dbt-labs/dbt-core/blob/1.9.latest/CHANGELOG.md)
* [1.8](https://github.com/dbt-labs/dbt-core/blob/1.8.latest/CHANGELOG.md)
* [1.7](https://github.com/dbt-labs/dbt-core/blob/1.7.latest/CHANGELOG.md)
157
CONTRIBUTING.md
@@ -2,39 +2,21 @@

`dbt-core` is open source software. It is what it is today because community members have opened issues, provided feedback, and [contributed to the knowledge loop](https://www.getdbt.com/dbt-labs/values/). Whether you are a seasoned open source contributor or a first-time committer, we welcome and encourage you to contribute code, documentation, ideas, or problem statements to this project.

- [Contributing to `dbt-core`](#contributing-to-dbt-core)
  - [About this document](#about-this-document)
    - [Notes](#notes)
  - [Getting the code](#getting-the-code)
    - [Installing git](#installing-git)
    - [External contributors](#external-contributors)
    - [dbt Labs contributors](#dbt-labs-contributors)
  - [Setting up an environment](#setting-up-an-environment)
    - [Tools](#tools)
    - [Virtual environments](#virtual-environments)
    - [Docker and `docker-compose`](#docker-and-docker-compose)
    - [Postgres (optional)](#postgres-optional)
  - [Running `dbt-core` in development](#running-dbt-core-in-development)
    - [Installation](#installation)
    - [Running `dbt-core`](#running-dbt-core)
  - [Testing](#testing)
    - [Initial setup](#initial-setup)
    - [Test commands](#test-commands)
      - [Hatch scripts](#hatch-scripts)
      - [`pre-commit`](#pre-commit)
      - [`pytest`](#pytest)
    - [Unit, Integration, Functional?](#unit-integration-functional)
  - [Debugging](#debugging)
    - [Assorted development tips](#assorted-development-tips)
  - [Adding or modifying a CHANGELOG Entry](#adding-or-modifying-a-changelog-entry)
  - [Submitting a Pull Request](#submitting-a-pull-request)
  - [Troubleshooting Tips](#troubleshooting-tips)
1. [About this document](#about-this-document)
2. [Getting the code](#getting-the-code)
3. [Setting up an environment](#setting-up-an-environment)
4. [Running dbt-core in development](#running-dbt-core-in-development)
5. [Testing dbt-core](#testing)
6. [Debugging](#debugging)
7. [Adding or modifying a changelog entry](#adding-or-modifying-a-changelog-entry)
8. [Submitting a Pull Request](#submitting-a-pull-request)
9. [Troubleshooting Tips](#troubleshooting-tips)

## About this document

There are many ways to contribute to the ongoing development of `dbt-core`, such as by participating in discussions and issues. We encourage you to first read our higher-level document: ["Expectations for Open Source Contributors"](https://docs.getdbt.com/docs/contributing/oss-expectations).

The rest of this document serves as a more granular guide for contributing code changes to `dbt-core` (this repository). It is not intended as a guide for using `dbt-core`, and some pieces assume a level of familiarity with Python development and package managers. Specific code snippets in this guide assume you are using macOS or Linux and are comfortable with the command line.
The rest of this document serves as a more granular guide for contributing code changes to `dbt-core` (this repository). It is not intended as a guide for using `dbt-core`, and some pieces assume a level of familiarity with Python development (virtualenvs, `pip`, etc). Specific code snippets in this guide assume you are using macOS or Linux and are comfortable with the command line.

If you get stuck, we're happy to help! Drop us a line in the `#dbt-core-development` channel in the [dbt Community Slack](https://community.getdbt.com).
@@ -73,22 +55,28 @@ There are some tools that will be helpful to you in developing locally. While th

These are the tools used in `dbt-core` development and testing:

- [`hatch`](https://hatch.pypa.io/) for build backend, environment management, and running tests across Python versions (3.10, 3.11, 3.12, and 3.13)
- [`tox`](https://tox.readthedocs.io/en/latest/) to manage virtualenvs across python versions. We currently target the latest patch releases for Python 3.8, 3.9, 3.10 and 3.11
- [`pytest`](https://docs.pytest.org/en/latest/) to define, discover, and run tests
- [`flake8`](https://flake8.pycqa.org/en/latest/) for code linting
- [`black`](https://github.com/psf/black) for code formatting
- [`mypy`](https://mypy.readthedocs.io/en/stable/) for static type checking
- [`pre-commit`](https://pre-commit.com) to easily run those checks
- [`changie`](https://changie.dev/) to create changelog entries, without merge conflicts
- [`make`](https://users.cs.duke.edu/~ola/courses/programming/Makefiles/Makefiles.html) to run multiple setup or test steps in combination. Don't worry too much, nobody _really_ understands how `make` works, and our Makefile aims to be super simple.
- [GitHub Actions](https://github.com/features/actions) for automating tests and checks, once a PR is pushed to the `dbt-core` repository

A deep understanding of these tools is not required to effectively contribute to `dbt-core`, but we recommend checking out the attached documentation if you're interested in learning more about each one.

#### Virtual environments

dbt-core uses [Hatch](https://hatch.pypa.io/) for dependency and environment management. Hatch automatically creates and manages isolated environments for development, testing, and building, so you don't need to manually create virtual environments.
We strongly recommend using virtual environments when developing code in `dbt-core`. We recommend creating this virtualenv in the root of the `dbt-core` repository. To create a new virtualenv, run:
```sh
python3 -m venv env
source env/bin/activate
```

For more information on how Hatch manages environments, see the [Hatch environment documentation](https://hatch.pypa.io/latest/environment/).
This will create and activate a new Python virtual environment.

#### Docker and `docker-compose`

@@ -107,42 +95,22 @@ brew install postgresql

### Installation

First make sure you have Python 3.10 or later installed. Ensure you have the latest version of pip installed with `pip install --upgrade pip`. Next, install `hatch`. Finally set up `dbt-core` for development:
First make sure that you set up your `virtualenv` as described in [Setting up an environment](#setting-up-an-environment). Also ensure you have the latest version of pip installed with `pip install --upgrade pip`. Next, install `dbt-core` (and its dependencies):

```sh
cd core
hatch run setup
make dev
```

This will install all development dependencies and set up pre-commit hooks.

By default, hatch will use whatever Python version is active in your environment. To specify a particular Python version, set the `HATCH_PYTHON` environment variable:

or, alternatively:
```sh
export HATCH_PYTHON=3.12
hatch env create
pip install -r dev-requirements.txt -r editable-requirements.txt
pre-commit install
```

Or add it to your shell profile (e.g., `~/.zshrc` or `~/.bashrc`) for persistence.
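
For example, a minimal sketch (adjust the profile file to whichever shell you use):

```sh
# persist the pinned interpreter for future shells (illustrative version)
echo 'export HATCH_PYTHON=3.12' >> ~/.zshrc
```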

When installed in this way, any changes you make to your local copy of the source code will be reflected immediately in your next `dbt` run.

#### Building dbt-core

dbt-core uses [Hatch](https://hatch.pypa.io/) (specifically `hatchling`) as its build backend. To build distribution packages:

```sh
cd core
hatch build
```

This will create both wheel (`.whl`) and source distribution (`.tar.gz`) files in the `dist/` directory.

The build configuration is defined in `core/pyproject.toml`. You can also use the standard `python -m build` command if you prefer.

### Running `dbt-core`

Once you've run `hatch run setup`, the `dbt` command will be available in your PATH. You can verify this by running `which dbt`.
With your virtualenv activated, the `dbt` script should point back to the source code you've cloned on your machine. You can verify this by running `which dbt`. This command should show you a path to an executable in your virtualenv.

Configure your [profile](https://docs.getdbt.com/docs/configure-your-profile) as necessary to connect to your target databases. It may be a good idea to add a new profile pointing to a local Postgres instance, or a specific test sandbox within your data warehouse if appropriate. Make sure to create a profile before running integration tests.
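
As one hedged example, a local profile pointing at the docker-compose Postgres used below might look like this (the profile name, port, schema, and thread count are assumptions, not project conventions; the credentials mirror the `setup_db.sh` invocation later in this guide):

```sh
mkdir -p ~/.dbt
cat >> ~/.dbt/profiles.yml <<'EOF'
# hypothetical local profile; credentials match the docker-compose database
dbt_core_dev:
  target: dev
  outputs:
    dev:
      type: postgres
      host: localhost
      port: 5432          # assumed default Postgres port
      user: root
      password: password
      dbname: postgres
      schema: dev_schema  # any scratch schema works
      threads: 4
EOF
```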

@@ -160,78 +128,45 @@ Although `dbt-core` works with a number of different databases, you won't need t

Postgres offers the easiest way to test most `dbt-core` functionality today. They are the fastest to run, and the easiest to set up. To run the Postgres integration tests, you'll have to do one extra step of setting up the test database:

```sh
cd core
hatch run setup-db
make setup-db
```

Alternatively, you can run the setup commands directly:

or, alternatively:
```sh
docker-compose up -d database
PGHOST=localhost PGUSER=root PGPASSWORD=password PGDATABASE=postgres bash scripts/setup_db.sh
PGHOST=localhost PGUSER=root PGPASSWORD=password PGDATABASE=postgres bash test/setup_db.sh
```
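
If you want to confirm the database container is reachable before running tests, one quick check (assumes a local `psql` client, which the project does not require):

```sh
PGPASSWORD=password psql -h localhost -U root -d postgres -c 'select 1;'
```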

### Test commands

There are a few methods for running tests locally.

#### Hatch scripts
#### Makefile

The primary way to run tests and checks is using hatch scripts (defined in `core/hatch.toml`):
There are multiple targets in the Makefile to run common test suites and code checks, most notably:

```sh
cd core

# Run all unit tests
hatch run unit-tests

# Run unit tests and all code quality checks
hatch run test

# Run integration tests
hatch run integration-tests

# Run integration tests in fail-fast mode
hatch run integration-tests-fail-fast

# Run linting checks only
hatch run lint
hatch run flake8
hatch run mypy
hatch run black

# Run all pre-commit hooks
hatch run code-quality

# Clean build artifacts
hatch run clean
# Runs unit tests with py38 and code checks in parallel.
make test
# Runs postgres integration tests with py38 in "fail fast" mode.
make integration
```

> These make targets assume you have a local installation of a recent version of [`tox`](https://tox.readthedocs.io/en/latest/) for unit/integration testing and pre-commit for code quality checks,
> unless you choose a Docker container to run tests. Run `make help` for more info.

Hatch manages isolated environments and dependencies automatically. The commands above use the `default` environment, which is recommended for most local development.

**Using the `ci` environment (optional)**

If you need to replicate exactly what runs in GitHub Actions (e.g., with coverage reporting), use the `ci` environment:

```sh
cd core

# Run unit tests with coverage
hatch run ci:unit-tests

# Run unit tests with a specific Python version
hatch run +py=3.11 ci:unit-tests
```

> **Note:** Most developers should use the default environment (`hatch run unit-tests`). The `ci` environment is primarily for debugging CI failures or running tests with coverage.
Check out the other targets in the Makefile to see other commonly used test suites.

#### `pre-commit`
[`pre-commit`](https://pre-commit.com) takes care of running all code-checks for formatting and linting. Run `make dev` to install `pre-commit` in your local environment (we recommend running this command with a python virtual environment active). This command installs several pip executables including black, mypy, and flake8. Once this is done you can use any of the linter-based make targets as well as a git pre-commit hook that will ensure proper formatting and linting.

[`pre-commit`](https://pre-commit.com) takes care of running all code-checks for formatting and linting. Run `hatch run setup` to install `pre-commit` in your local environment (we recommend running this command with a python virtual environment active). This installs several pip executables including black, mypy, and flake8. Once installed, hooks will run automatically on `git commit`, or you can run them manually with `hatch run code-quality`.
#### `tox`

[`tox`](https://tox.readthedocs.io/en/latest/) takes care of managing virtualenvs and installing dependencies in order to run tests. You can also run tests in parallel; for example, you can run unit tests for Python 3.8, 3.9, 3.10, and 3.11 in parallel with `tox -p`. Also, you can run unit tests for specific python versions with `tox -e py38`. The configuration for these tests is located in `tox.ini`.

#### `pytest`

Finally, you can also run a specific test or group of tests using [`pytest`](https://docs.pytest.org/en/latest/) directly. After running `hatch run setup`, you can run pytest commands like:
Finally, you can also run a specific test or group of tests using [`pytest`](https://docs.pytest.org/en/latest/) directly. With a virtualenv active and dev dependencies installed you can do things like:

```sh
# run all unit tests in a file
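# (illustrative selection; the scraped diff truncates the original examples here)
python3 -m pytest tests/unit/test_graph.py
# run a subset of tests by keyword expression
python3 -m pytest tests/unit -k "partial_parsing"
```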

@@ -289,9 +224,7 @@ Code can be merged into the current development branch `main` by opening a pull

Automated tests run via GitHub Actions. If you're a first-time contributor, all tests (including code checks and unit tests) will require a maintainer to approve. Changes in the `dbt-core` repository trigger integration tests against Postgres. dbt Labs also provides CI environments in which to test changes to other adapters, triggered by PRs in those adapters' repositories, as well as periodic maintenance checks of each adapter in concert with the latest `dbt-core` code changes.

We require signed git commits. See docs [here](https://docs.github.com/en/authentication/managing-commit-signature-verification/signing-commits) for setting up code signing.
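
For instance, one common setup uses GPG (a sketch that assumes a key is already generated and uploaded to your GitHub account; the key ID is a placeholder):

```sh
git config --global user.signingkey <YOUR_KEY_ID>
git config --global commit.gpgsign true   # sign every commit by default
```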

Once all tests are passing, all comments are resolved, and your PR has been approved, a `dbt-core` maintainer will merge your changes into the active development branch. And that's it! Happy developing :tada:
Once all tests are passing and your PR has been approved, a `dbt-core` maintainer will merge your changes into the active development branch. And that's it! Happy developing :tada:

## Troubleshooting Tips

@@ -33,6 +33,9 @@ RUN apt-get update \
    python-is-python3 \
    python-dev-is-python3 \
    python3-pip \
    python3.9 \
    python3.9-dev \
    python3.9-venv \
    python3.10 \
    python3.10-dev \
    python3.10-venv \
@@ -47,7 +50,7 @@ RUN curl -LO https://github.com/jwilder/dockerize/releases/download/$DOCKERIZE_V
    && tar -C /usr/local/bin -xzvf dockerize-linux-amd64-$DOCKERIZE_VERSION.tar.gz \
    && rm dockerize-linux-amd64-$DOCKERIZE_VERSION.tar.gz

RUN pip3 install -U hatch wheel pre-commit
RUN pip3 install -U tox wheel six setuptools pre-commit

# These args are passed in via docker-compose, which reads them from the .env file.
# On Linux, run `make .env` to create the .env file for the current user.
@@ -62,6 +65,7 @@ RUN if [ ${USER_ID:-0} -ne 0 ] && [ ${GROUP_ID:-0} -ne 0 ]; then \
    useradd -mU -l dbt_test_user; \
    fi
RUN mkdir /usr/app && chown dbt_test_user /usr/app
RUN mkdir /home/tox && chown dbt_test_user /home/tox

WORKDIR /usr/app
VOLUME /usr/app
161
Makefile
@@ -1,95 +1,146 @@
# ============================================================================
# DEPRECATED: This Makefile is maintained for backwards compatibility only.
#
# dbt-core now uses Hatch for task management and development workflows.
# Please migrate to using hatch commands directly:
#
#   make dev          → cd core && hatch run setup
#   make unit         → cd core && hatch run unit-tests
#   make test         → cd core && hatch run test
#   make integration  → cd core && hatch run integration-tests
#   make lint         → cd core && hatch run lint
#   make code_quality → cd core && hatch run code-quality
#   make setup-db     → cd core && hatch run setup-db
#   make clean        → cd core && hatch run clean
#
# See core/pyproject.toml [tool.hatch.envs.default.scripts] for all available
# commands and CONTRIBUTING.md for detailed usage instructions.
#
# This Makefile will be removed in a future version of dbt-core.
# ============================================================================

.DEFAULT_GOAL:=help

# Optional flag to run target in a docker container.
# (example `make test USE_DOCKER=true`)
ifeq ($(USE_DOCKER),true)
DOCKER_CMD := docker-compose run --rm test
endif

#
# To override CI_flags, create a file at this repo's root dir named `makefile.test.env`. Fill it
# with any ENV_VAR overrides required by your test environment, e.g.
#   DBT_TEST_USER_1=user
#   LOG_DIR="dir with a space in it"
#
# Warn: Restrict each line to one variable only.
#
ifeq (./makefile.test.env,$(wildcard ./makefile.test.env))
include ./makefile.test.env
endif
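
For example, a hedged sketch of such an override file (values are placeholders):

```sh
cat > makefile.test.env <<'EOF'
DBT_TEST_USER_1=my_pg_user
LOG_DIR=./artifacts/logs
EOF
```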

CI_FLAGS =\
  DBT_TEST_USER_1=$(if $(DBT_TEST_USER_1),$(DBT_TEST_USER_1),dbt_test_user_1)\
  DBT_TEST_USER_2=$(if $(DBT_TEST_USER_2),$(DBT_TEST_USER_2),dbt_test_user_2)\
  DBT_TEST_USER_3=$(if $(DBT_TEST_USER_3),$(DBT_TEST_USER_3),dbt_test_user_3)\
  RUSTFLAGS=$(if $(RUSTFLAGS),$(RUSTFLAGS),"-D warnings")\
  LOG_DIR=$(if $(LOG_DIR),$(LOG_DIR),./logs)\
  DBT_LOG_FORMAT=$(if $(DBT_LOG_FORMAT),$(DBT_LOG_FORMAT),json)

.PHONY: dev_req
dev_req: ## Installs dbt-* packages in develop mode along with only development dependencies.
	@cd core && hatch run dev-req
	@\
	pip install -r dev-requirements.txt -r editable-requirements.txt

.PHONY: dev
dev: ## Installs dbt-* packages in develop mode along with development dependencies and pre-commit.
	@cd core && hatch run setup
dev: dev_req ## Installs dbt-* packages in develop mode along with development dependencies and pre-commit.
	@\
	$(DOCKER_CMD) pre-commit install

.PHONY: dev-uninstall
dev-uninstall: ## Uninstall all packages in venv except for build tools
	@pip freeze | grep -v "^-e" | cut -d "@" -f1 | xargs pip uninstall -y; \
	@\
	pip freeze | grep -v "^-e" | cut -d "@" -f1 | xargs pip uninstall -y; \
	pip uninstall -y dbt-core

.PHONY: mypy
mypy: ## Runs mypy against staged changes for static type checking.
	@cd core && hatch run mypy
mypy: .env ## Runs mypy against staged changes for static type checking.
	@\
	$(DOCKER_CMD) pre-commit run --hook-stage manual mypy-check | grep -v "INFO"

.PHONY: flake8
flake8: ## Runs flake8 against staged changes to enforce style guide.
	@cd core && hatch run flake8
flake8: .env ## Runs flake8 against staged changes to enforce style guide.
	@\
	$(DOCKER_CMD) pre-commit run --hook-stage manual flake8-check | grep -v "INFO"

.PHONY: black
black: ## Runs black against staged changes to enforce style guide.
	@cd core && hatch run black
black: .env ## Runs black against staged changes to enforce style guide.
	@\
	$(DOCKER_CMD) pre-commit run --hook-stage manual black-check -v | grep -v "INFO"

.PHONY: lint
lint: ## Runs flake8 and mypy code checks against staged changes.
	@cd core && hatch run lint

.PHONY: code_quality
code_quality: ## Runs all pre-commit hooks against all files.
	@cd core && hatch run code-quality
lint: .env ## Runs flake8 and mypy code checks against staged changes.
	@\
	$(DOCKER_CMD) pre-commit run flake8-check --hook-stage manual | grep -v "INFO"; \
	$(DOCKER_CMD) pre-commit run mypy-check --hook-stage manual | grep -v "INFO"

.PHONY: unit
unit: ## Runs unit tests with py
	@cd core && hatch run unit-tests
unit: .env ## Runs unit tests with py
	@\
	$(DOCKER_CMD) tox -e py

.PHONY: test
test: ## Runs unit tests with py and code checks against staged changes.
	@cd core && hatch run test
test: .env ## Runs unit tests with py and code checks against staged changes.
	@\
	$(DOCKER_CMD) tox -e py; \
	$(DOCKER_CMD) pre-commit run black-check --hook-stage manual | grep -v "INFO"; \
	$(DOCKER_CMD) pre-commit run flake8-check --hook-stage manual | grep -v "INFO"; \
	$(DOCKER_CMD) pre-commit run mypy-check --hook-stage manual | grep -v "INFO"

.PHONY: integration
integration: ## Runs core integration tests using postgres with py-integration
	@cd core && hatch run integration-tests
integration: .env ## Runs core integration tests using postgres with py-integration
	@\
	$(CI_FLAGS) $(DOCKER_CMD) tox -e py-integration -- -nauto

.PHONY: integration-fail-fast
integration-fail-fast: ## Runs core integration tests using postgres with py-integration in "fail fast" mode.
	@cd core && hatch run integration-tests-fail-fast
integration-fail-fast: .env ## Runs core integration tests using postgres with py-integration in "fail fast" mode.
	@\
	$(DOCKER_CMD) tox -e py-integration -- -x -nauto

.PHONY: interop
interop: clean
	@\
	mkdir $(LOG_DIR) && \
	$(CI_FLAGS) $(DOCKER_CMD) tox -e py-integration -- -nauto && \
	LOG_DIR=$(LOG_DIR) cargo run --manifest-path test/interop/log_parsing/Cargo.toml

.PHONY: setup-db
setup-db: ## Setup Postgres database with docker-compose for system testing.
	@cd core && hatch run setup-db
	@\
	docker-compose up -d database && \
	PGHOST=localhost PGUSER=root PGPASSWORD=password PGDATABASE=postgres bash test/setup_db.sh

# This rule creates a file named .env that is used by docker-compose for passing
# the USER_ID and GROUP_ID arguments to the Docker image.
.env: ## Setup step for using docker-compose with make target.
	@touch .env
ifneq ($(OS),Windows_NT)
ifneq ($(shell uname -s), Darwin)
	@echo USER_ID=$(shell id -u) > .env
	@echo GROUP_ID=$(shell id -g) >> .env
endif
endif
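
Because the file is cheap to regenerate, switching users is just a matter of removing it and re-running the target (a sketch):

```sh
rm -f .env && make .env && cat .env
```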

.PHONY: clean
clean: ## Resets development environment.
	@cd core && hatch run clean
	@echo 'cleaning repo...'
	@rm -f .coverage
	@rm -f .coverage.*
	@rm -rf .eggs/
	@rm -f .env
	@rm -rf .tox/
	@rm -rf build/
	@rm -rf dbt.egg-info/
	@rm -f dbt_project.yml
	@rm -rf dist/
	@rm -f htmlcov/*.{css,html,js,json,png}
	@rm -rf logs/
	@rm -rf target/
	@find . -type f -name '*.pyc' -delete
	@find . -type d -name '__pycache__' -depth -delete
	@echo 'done.'

.PHONY: json_schema
json_schema: ## Update generated JSON schema using code changes.
	@cd core && hatch run json-schema

.PHONY: help
help: ## Show this help message.
	@echo 'usage: make [target]'
	@echo
	@echo 'DEPRECATED: This Makefile is a compatibility shim.'
	@echo 'Please use "cd core && hatch run <command>" directly.'
	@echo 'usage: make [target] [USE_DOCKER=true]'
	@echo
	@echo 'targets:'
	@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
	@echo
	@echo 'For more information, see CONTRIBUTING.md'
	@echo 'options:'
	@echo 'use USE_DOCKER=true to run target in a docker container'

.PHONY: json_schema
json_schema: ## Update generated JSON schema using code changes.
	scripts/collect-artifact-schema.py --path schemas

@@ -5,7 +5,6 @@
  <a href="https://github.com/dbt-labs/dbt-core/actions/workflows/main.yml">
    <img src="https://github.com/dbt-labs/dbt-core/actions/workflows/main.yml/badge.svg?event=push" alt="CI Badge"/>
  </a>
  <a href="https://www.bestpractices.dev/projects/11095"><img src="https://www.bestpractices.dev/projects/11095/badge"></a>
</p>

**[dbt](https://www.getdbt.com/)** enables data analysts and engineers to transform their data using the same practices that software engineers use to build applications.
25
codecov.yml
@@ -2,22 +2,39 @@ ignore:
  - ".github"
  - ".changes"

# Disable all status checks to prevent red X's in CI
# Coverage data is still uploaded and PR comments are still posted
coverage:
  status:
    project: off
    patch: off
    project:
      default:
        target: auto
        threshold: 0.1% # Reduce noise by ignoring rounding errors in coverage drops
        informational: true
    patch:
      default:
        target: auto
        threshold: 80%
        informational: true

comment:
  layout: "header, diff, flags, components" # show component info in the PR comment

component_management:
  default_rules: # default rules that will be inherited by all components
    statuses:
      - type: project # in this case every component that doesn't have a status defined will have a project type one
        target: auto
        threshold: 0.1%
      - type: patch
        target: 80%
  individual_components:
    - component_id: unittests
      name: "Unit Tests"
      flag_regexes:
        - "unit"
      statuses:
        - type: patch
          target: 80%
          threshold: 5%
    - component_id: integrationtests
      name: "Integration Tests"
      flag_regexes:
3
core/MANIFEST.in
Normal file
@@ -0,0 +1,3 @@
recursive-include dbt/include *.py *.sql *.yml *.html *.md .gitkeep .gitignore *.json
include dbt/py.typed
recursive-include dbt/task/docs *.html

@@ -1,5 +1,5 @@
<p align="center">
  <img src="https://raw.githubusercontent.com/dbt-labs/dbt-core/fa1ea14ddfb1d5ae319d5141844910dd53ab2834/docs/images/dbt-core.svg" alt="dbt logo" width="750"/>
  <img src="https://raw.githubusercontent.com/dbt-labs/dbt-core/fa1ea14ddfb1d5ae319d5141844910dd53ab2834/etc/dbt-core.svg" alt="dbt logo" width="750"/>
</p>
<p align="center">
  <a href="https://github.com/dbt-labs/dbt-core/actions/workflows/main.yml">

@@ -9,7 +9,7 @@

**[dbt](https://www.getdbt.com/)** enables data analysts and engineers to transform their data using the same practices that software engineers use to build applications.




## Understanding dbt

@@ -17,7 +17,7 @@ Analysts using dbt can transform their data by simply writing select statements,

These select statements, or "models", form a dbt project. Models frequently build on top of one another – dbt makes it easy to [manage relationships](https://docs.getdbt.com/docs/ref) between models, and [visualize these relationships](https://docs.getdbt.com/docs/documentation), as well as assure the quality of your transformations through [testing](https://docs.getdbt.com/docs/testing).




## Getting started

@@ -1 +0,0 @@
version = "1.12.0a1"

@@ -25,8 +25,6 @@ from dbt.artifacts.resources.v1.config import (
    NodeAndTestConfig,
    NodeConfig,
    TestConfig,
    list_str,
    metas,
)
from dbt.artifacts.resources.v1.documentation import Documentation
from dbt.artifacts.resources.v1.exposure import (
@@ -35,13 +33,6 @@ from dbt.artifacts.resources.v1.exposure import (
    ExposureType,
    MaturityType,
)
from dbt.artifacts.resources.v1.function import (
    Function,
    FunctionArgument,
    FunctionConfig,
    FunctionMandatory,
    FunctionReturns,
)
from dbt.artifacts.resources.v1.generic_test import GenericTest, TestMetadata
from dbt.artifacts.resources.v1.group import Group, GroupConfig
from dbt.artifacts.resources.v1.hook import HookNode
@@ -51,7 +42,6 @@ from dbt.artifacts.resources.v1.metric import (
    ConversionTypeParams,
    CumulativeTypeParams,
    Metric,
    MetricAggregationParams,
    MetricConfig,
    MetricInput,
    MetricInputMeasure,
@@ -59,7 +49,6 @@ from dbt.artifacts.resources.v1.metric import (
    MetricTypeParams,
)
from dbt.artifacts.resources.v1.model import (
    CustomGranularity,
    Model,
    ModelConfig,
    ModelFreshness,
@@ -77,8 +66,6 @@ from dbt.artifacts.resources.v1.saved_query import (
from dbt.artifacts.resources.v1.seed import Seed, SeedConfig
from dbt.artifacts.resources.v1.semantic_layer_components import (
    FileSlice,
    MeasureAggregationParameters,
    NonAdditiveDimension,
    SourceFileMetadata,
    WhereFilter,
    WhereFilterIntersection,
@@ -90,8 +77,9 @@ from dbt.artifacts.resources.v1.semantic_model import (
    DimensionValidityParams,
    Entity,
    Measure,
    MeasureAggregationParameters,
    NodeRelation,
    SemanticLayerElementConfig,
    NonAdditiveDimension,
    SemanticModel,
    SemanticModelConfig,
)

@@ -35,7 +35,6 @@ class NodeType(StrEnum):
    SemanticModel = "semantic_model"
    Unit = "unit_test"
    Fixture = "fixture"
    Function = "function"

    def pluralize(self) -> str:
        if self is self.Analysis:
@@ -79,15 +78,3 @@ class BatchSize(StrEnum):

    def plural(self) -> str:
        return str(self) + "s"


class FunctionType(StrEnum):
    Scalar = "scalar"
    Aggregate = "aggregate"
    Table = "table"


class FunctionVolatility(StrEnum):
    Deterministic = "deterministic"
    Stable = "stable"
    NonDeterministic = "non-deterministic"

@@ -249,7 +249,6 @@ class CompiledResource(ParsedResource):
    refs: List[RefArgs] = field(default_factory=list)
    sources: List[List[str]] = field(default_factory=list)
    metrics: List[List[str]] = field(default_factory=list)
    functions: List[List[str]] = field(default_factory=list)
    depends_on: DependsOn = field(default_factory=DependsOn)
    compiled_path: Optional[str] = None
    compiled: bool = False

@@ -1,53 +0,0 @@
from dataclasses import dataclass, field
from typing import Any, List, Literal, Optional

from dbt.artifacts.resources.types import FunctionType, FunctionVolatility, NodeType
from dbt.artifacts.resources.v1.components import CompiledResource
from dbt.artifacts.resources.v1.config import NodeConfig
from dbt_common.dataclass_schema import dbtClassMixin

# =============
# Function config, and supporting classes
# =============


@dataclass
class FunctionConfig(NodeConfig):
    # The fact that this is a property, that can be changed, seems wrong.
    # A function's materialization should never be changed, so why allow for it?
    materialized: str = "function"
    type: FunctionType = FunctionType.Scalar
    volatility: Optional[FunctionVolatility] = None
    runtime_version: Optional[str] = None
    entry_point: Optional[str] = None


# =============
# Function resource, and supporting classes
# =============


@dataclass
class FunctionArgument(dbtClassMixin):
    name: str
    data_type: str
    description: Optional[str] = None
    default_value: Optional[Any] = None


@dataclass
class FunctionReturns(dbtClassMixin):
    data_type: str
    description: Optional[str] = None


@dataclass
class FunctionMandatory(dbtClassMixin):
    returns: FunctionReturns


@dataclass
class Function(CompiledResource, FunctionMandatory):
    resource_type: Literal[NodeType.Function]
    config: FunctionConfig
    arguments: List[FunctionArgument] = field(default_factory=list)

@@ -6,8 +6,6 @@ from dbt.artifacts.resources.base import GraphResource
from dbt.artifacts.resources.types import NodeType
from dbt.artifacts.resources.v1.components import DependsOn, RefArgs
from dbt.artifacts.resources.v1.semantic_layer_components import (
    MeasureAggregationParameters,
    NonAdditiveDimension,
    SourceFileMetadata,
    WhereFilterIntersection,
)
@@ -15,7 +13,6 @@ from dbt_common.contracts.config.base import BaseConfig, CompareBehavior, MergeB
from dbt_common.dataclass_schema import dbtClassMixin
from dbt_semantic_interfaces.references import MeasureReference, MetricReference
from dbt_semantic_interfaces.type_enums import (
    AggregationType,
    ConversionCalculationType,
    MetricType,
    PeriodAggregation,
@@ -96,17 +93,6 @@ class CumulativeTypeParams(dbtClassMixin):
    window: Optional[MetricTimeWindow] = None
    grain_to_date: Optional[str] = None
    period_agg: PeriodAggregation = PeriodAggregation.FIRST
    metric: Optional[MetricInput] = None


@dataclass
class MetricAggregationParams(dbtClassMixin):
    semantic_model: str
    agg: AggregationType
    agg_params: Optional[MeasureAggregationParameters] = None
    agg_time_dimension: Optional[str] = None
    non_additive_dimension: Optional[NonAdditiveDimension] = None
    expr: Optional[str] = None


@dataclass
@@ -123,7 +109,6 @@ class MetricTypeParams(dbtClassMixin):
    metrics: Optional[List[MetricInput]] = None
    conversion_type_params: Optional[ConversionTypeParams] = None
    cumulative_type_params: Optional[CumulativeTypeParams] = None
    metric_aggregation_params: Optional[MetricAggregationParams] = None


@dataclass

@@ -13,11 +13,7 @@ from dbt.artifacts.resources.v1.config import NodeConfig
from dbt_common.contracts.config.base import MergeBehavior
from dbt_common.contracts.constraints import ModelLevelConstraint
from dbt_common.contracts.util import Mergeable
from dbt_common.dataclass_schema import (
    ExtensibleDbtClassMixin,
    ValidationError,
    dbtClassMixin,
)
from dbt_common.dataclass_schema import ExtensibleDbtClassMixin, dbtClassMixin


class ModelFreshnessUpdatesOnOptions(enum.Enum):
@@ -27,8 +23,8 @@ class ModelFreshnessUpdatesOnOptions(enum.Enum):

@dataclass
class ModelBuildAfter(ExtensibleDbtClassMixin):
    count: Optional[int] = None
    period: Optional[TimePeriod] = None
    count: int
    period: TimePeriod
    updates_on: ModelFreshnessUpdatesOnOptions = ModelFreshnessUpdatesOnOptions.any


@@ -79,25 +75,6 @@ class ModelConfig(NodeConfig):
    )
    freshness: Optional[ModelFreshness] = None

    def __post_init__(self):
        super().__post_init__()
        if (
            self.freshness
            and self.freshness.build_after.period
            and self.freshness.build_after.count is None
        ):
            raise ValidationError(
                "`freshness.build_after` must have a value for `count` if a `period` is provided"
            )
        elif (
            self.freshness
            and self.freshness.build_after.count is not None
            and not self.freshness.build_after.period
        ):
            raise ValidationError(
                "`freshness.build_after` must have a value for `period` if a `count` is provided"
            )
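
In project YAML, the shape this validation enforces corresponds to something like the following (a hypothetical sketch, not taken from the diff; the model name and the `count`/`period` values are illustrative):

```sh
cat >> models/schema.yml <<'EOF'
# hypothetical model freshness config with both count and period set
models:
  - name: my_model
    config:
      freshness:
        build_after:
          count: 4
          period: hour
EOF
```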

    @classmethod
    def __pre_deserialize__(cls, data):
        data = super().__pre_deserialize__(data)

@@ -1,13 +1,11 @@
from dataclasses import dataclass
from typing import List, Optional, Sequence, Tuple
from typing import List, Sequence, Tuple

from dbt_common.dataclass_schema import dbtClassMixin
from dbt_semantic_interfaces.call_parameter_sets import JinjaCallParameterSets
from dbt_semantic_interfaces.parsing.where_filter.jinja_object_parser import (
    JinjaObjectParser,
    QueryItemLocation,
from dbt_semantic_interfaces.call_parameter_sets import FilterCallParameterSets
from dbt_semantic_interfaces.parsing.where_filter.where_filter_parser import (
    WhereFilterParser,
)
from dbt_semantic_interfaces.type_enums import AggregationType


@dataclass
@@ -16,11 +14,9 @@ class WhereFilter(dbtClassMixin):

    def call_parameter_sets(
        self, custom_granularity_names: Sequence[str]
    ) -> JinjaCallParameterSets:
        return JinjaObjectParser.parse_call_parameter_sets(
            self.where_sql_template,
            custom_granularity_names=custom_granularity_names,
            query_item_location=QueryItemLocation.NON_ORDER_BY,
    ) -> FilterCallParameterSets:
        return WhereFilterParser.parse_call_parameter_sets(
            self.where_sql_template, custom_granularity_names=custom_granularity_names
        )


@@ -30,7 +26,7 @@ class WhereFilterIntersection(dbtClassMixin):

    def filter_expression_parameter_sets(
        self, custom_granularity_names: Sequence[str]
    ) -> Sequence[Tuple[str, JinjaCallParameterSets]]:
    ) -> Sequence[Tuple[str, FilterCallParameterSets]]:
        raise NotImplementedError


@@ -56,17 +52,3 @@ class SourceFileMetadata(dbtClassMixin):

    repo_file_path: str
    file_slice: FileSlice


@dataclass
class MeasureAggregationParameters(dbtClassMixin):
    percentile: Optional[float] = None
    use_discrete_percentile: bool = False
    use_approximate_percentile: bool = False


@dataclass
class NonAdditiveDimension(dbtClassMixin):
    name: str
    window_choice: AggregationType
    window_groupings: List[str]

@@ -5,11 +5,6 @@ from typing import Any, Dict, List, Optional, Sequence
from dbt.artifacts.resources import SourceFileMetadata
from dbt.artifacts.resources.base import GraphResource
from dbt.artifacts.resources.v1.components import DependsOn, RefArgs
from dbt.artifacts.resources.v1.metric import Metric
from dbt.artifacts.resources.v1.semantic_layer_components import (
    MeasureAggregationParameters,
    NonAdditiveDimension,
)
from dbt_common.contracts.config.base import BaseConfig, CompareBehavior, MergeBehavior
from dbt_common.dataclass_schema import dbtClassMixin
from dbt_semantic_interfaces.references import (
@@ -24,7 +19,6 @@ from dbt_semantic_interfaces.type_enums import (
    AggregationType,
    DimensionType,
    EntityType,
    MetricType,
    TimeGranularity,
)

@@ -133,11 +127,25 @@ class Entity(dbtClassMixin):


# ====================================
# Measure object
# Measure objects
# Measure protocols: https://github.com/dbt-labs/dbt-semantic-interfaces/blob/main/dbt_semantic_interfaces/protocols/measure.py
# ====================================


@dataclass
class MeasureAggregationParameters(dbtClassMixin):
    percentile: Optional[float] = None
    use_discrete_percentile: bool = False
    use_approximate_percentile: bool = False


@dataclass
class NonAdditiveDimension(dbtClassMixin):
    name: str
    window_choice: AggregationType
    window_groupings: List[str]


@dataclass
class Measure(dbtClassMixin):
    name: str
@@ -266,45 +274,6 @@ class SemanticModel(GraphResource):
        )
        return TimeDimensionReference(element_name=agg_time_dimension_name)

    def checked_agg_time_dimension_for_simple_metric(
        self, metric: Metric
    ) -> TimeDimensionReference:
        assert (
            metric.type == MetricType.SIMPLE
        ), "Only simple metrics can have an agg time dimension."
        metric_agg_params = metric.type_params.metric_aggregation_params
        # There are validations elsewhere to check this for metrics and provide messaging for it.
        assert metric_agg_params, "Simple metrics must have metric_aggregation_params."
        # This indicates a validation bug / dev error, not a user error that should appear
        # in a user's YAML.
        assert (
            metric_agg_params.semantic_model == self.name
        ), "Cannot retrieve the agg time dimension for a metric from a different model "
        f"than the one that the metric belongs to. Metric `{metric.name}` belongs to model "
        f"`{metric_agg_params.semantic_model}`, but we requested the agg time dimension from model `{self.name}`."

        metric_time_dimension_name = None
        if (
            metric.type_params
            and metric.type_params.metric_aggregation_params
            and metric.type_params.metric_aggregation_params.agg_time_dimension
        ):
            metric_time_dimension_name = (
                metric.type_params.metric_aggregation_params.agg_time_dimension
            )

        default_agg_time_dimension = (
            self.defaults.agg_time_dimension if self.defaults is not None else None
        )
        agg_time_dimension_name = metric_time_dimension_name or default_agg_time_dimension

        assert agg_time_dimension_name is not None, (
            f"Aggregation time dimension for metric {metric.name} is not set! This should either be set directly on "
            f"the metric specification in the model, or else defaulted to the time dimension in the data "
            f"source containing the metric."
        )
        return TimeDimensionReference(element_name=agg_time_dimension_name)

    @property
    def primary_entity_reference(self) -> Optional[EntityReference]:
        return (

@@ -10,7 +10,7 @@ from dbt.artifacts.resources.v1.components import (
    HasRelationMetadata,
    Quoting,
)
from dbt.artifacts.resources.v1.config import BaseConfig, MergeBehavior
from dbt.artifacts.resources.v1.config import BaseConfig
from dbt_common.contracts.config.properties import AdditionalPropertiesAllowed
from dbt_common.contracts.util import Mergeable
from dbt_common.exceptions import CompilationError
@@ -21,10 +21,6 @@ class SourceConfig(BaseConfig):
    enabled: bool = True
    event_time: Any = None
    freshness: Optional[FreshnessThreshold] = field(default_factory=FreshnessThreshold)
    loaded_at_field: Optional[str] = None
    loaded_at_query: Optional[str] = None
    meta: Dict[str, Any] = field(default_factory=dict, metadata=MergeBehavior.Update.meta())
    tags: List[str] = field(default_factory=list)
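
For reference, a sources YAML sketch exercising the config fields above (the source name and SQL are hypothetical; `loaded_at_query` and `freshness` are the fields shown in the snippet):

```sh
cat >> models/sources.yml <<'EOF'
# hypothetical source using freshness + loaded_at_query from SourceConfig
sources:
  - name: raw
    config:
      freshness:
        warn_after: {count: 12, period: hour}
      loaded_at_query: select max(loaded_at) from raw.events
EOF
```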
|
||||
|
||||
|
||||
@dataclass
|
||||
|
||||
@@ -181,11 +181,3 @@ def get_artifact_schema_version(dct: dict) -> int:
|
||||
# 4. Convert to int
|
||||
# TODO: If this gets more complicated, turn into a regex
|
||||
return int(schema_version.split("/")[-1].split(".")[0][1:])
|
||||
|
||||
|
||||
def get_artifact_dbt_version(dct: dict) -> Optional[str]:
|
||||
dbt_version = dct.get("metadata", {}).get("dbt_version", None)
|
||||
if dbt_version is None:
|
||||
return None
|
||||
|
||||
return str(dbt_version)
|
||||
|
||||
@@ -1,14 +1,11 @@
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime
|
||||
from typing import Any, Dict, Iterable, List, Mapping, Optional, Tuple, Union
|
||||
from uuid import UUID
|
||||
|
||||
from dbt import tracking
|
||||
from dbt.artifacts.resources import (
|
||||
Analysis,
|
||||
Documentation,
|
||||
Exposure,
|
||||
Function,
|
||||
GenericTest,
|
||||
Group,
|
||||
HookNode,
|
||||
@@ -28,21 +25,23 @@ from dbt.artifacts.resources.v1.components import Quoting
|
||||
from dbt.artifacts.schemas.base import (
|
||||
ArtifactMixin,
|
||||
BaseArtifactMetadata,
|
||||
    get_artifact_dbt_version,
    get_artifact_schema_version,
    schema_version,
)
from dbt.artifacts.schemas.upgrades import (
    upgrade_manifest_json,
    upgrade_manifest_json_dbt_version,
)
from dbt.version import __version__
from dbt.artifacts.schemas.upgrades import upgrade_manifest_json
from dbt_common.exceptions import DbtInternalError

NodeEdgeMap = Dict[str, List[str]]
UniqueID = str
ManifestResource = Union[
    Seed, Analysis, SingularTest, HookNode, Model, SqlOperation, GenericTest, Snapshot, Function
    Seed,
    Analysis,
    SingularTest,
    HookNode,
    Model,
    SqlOperation,
    GenericTest,
    Snapshot,
]
DisabledManifestResource = Union[
    ManifestResource,
@@ -94,10 +93,6 @@ class ManifestMetadata(BaseArtifactMetadata):
        default_factory=Quoting,
        metadata=dict(description="The quoting configuration for the project"),
    )
    run_started_at: Optional[datetime] = field(
        default=tracking.active_user.run_started_at if tracking.active_user is not None else None,
        metadata=dict(description="The timestamp when the run started"),
    )

    @classmethod
    def default(cls):
@@ -169,10 +164,6 @@ class WritableManifest(ArtifactMixin):
            description="The unit tests defined in the project",
        )
    )
    functions: Mapping[UniqueID, Function] = field(
        default_factory=dict,
        metadata=dict(description=("The functions defined in the dbt project")),
    )

    @classmethod
    def compatible_previous_versions(cls) -> Iterable[Tuple[str, int]]:
@@ -194,10 +185,6 @@ class WritableManifest(ArtifactMixin):
        manifest_schema_version = get_artifact_schema_version(data)
        if manifest_schema_version < cls.dbt_schema_version.version:
            data = upgrade_manifest_json(data, manifest_schema_version)

        manifest_dbt_version = get_artifact_dbt_version(data)
        if manifest_dbt_version and manifest_dbt_version != __version__:
            data = upgrade_manifest_json_dbt_version(data)
        return cls.from_dict(data)

    @classmethod

@@ -1,4 +1 @@
from dbt.artifacts.schemas.upgrades.upgrade_manifest import upgrade_manifest_json
from dbt.artifacts.schemas.upgrades.upgrade_manifest_dbt_version import (
    upgrade_manifest_json_dbt_version,
)

@@ -1,2 +0,0 @@
def upgrade_manifest_json_dbt_version(manifest: dict) -> dict:
    return manifest
@@ -541,7 +541,6 @@ cli.add_command(ls, "ls")
@requires.preflight
@requires.profile
@requires.project
@requires.catalogs
@requires.runtime_config
@requires.manifest(write_perf_info=True)
def parse(ctx, **kwargs):
@@ -705,7 +704,6 @@ def run_operation(ctx, **kwargs):
@requires.preflight
@requires.profile
@requires.project
@requires.catalogs
@requires.runtime_config
@requires.manifest
def seed(ctx, **kwargs):
@@ -739,7 +737,6 @@ def seed(ctx, **kwargs):
@requires.preflight
@requires.profile
@requires.project
@requires.catalogs
@requires.runtime_config
@requires.manifest
def snapshot(ctx, **kwargs):

@@ -2,13 +2,11 @@ import inspect
import typing as t

import click
from click import Context
from click.parser import OptionParser, ParsingState

from dbt.cli.option_types import ChoiceTuple

if t.TYPE_CHECKING:
    from click import Context
    from click.parser import _OptionParser, _ParsingState


# Implementation from: https://stackoverflow.com/a/48394004
# Note MultiOption options must be specified with type=tuple or type=ChoiceTuple (https://github.com/pallets/click/issues/2012)
@@ -35,8 +33,8 @@ class MultiOption(click.Option):
        else:
            assert isinstance(option_type, ChoiceTuple), msg

    def add_to_parser(self, parser: "_OptionParser", ctx: "Context"):
        def parser_process(value: str, state: "_ParsingState"):
    def add_to_parser(self, parser: OptionParser, ctx: Context):
        def parser_process(value: str, state: ParsingState):
            # method to hook to the parser.process
            done = False
            value_list = str.split(value, " ")
@@ -67,7 +65,7 @@ class MultiOption(click.Option):
                break
        return retval

    def type_cast_value(self, ctx: "Context", value: t.Any) -> t.Any:
    def type_cast_value(self, ctx: Context, value: t.Any) -> t.Any:
        def flatten(data):
            if isinstance(data, tuple):
                for x in data:

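As the note above says, options built on MultiOption must pair cls=MultiOption with type=tuple or type=ChoiceTuple. A minimal sketch of such a declaration, modeled on the select_attrs dict used in the next file (the option name and the multiple=True attribute are illustrative assumptions, not taken from this diff):

nodes = click.option(
    "--nodes",
    envvar=None,
    help="Specify one or more node selectors.",
    cls=MultiOption,
    multiple=True,  # assumed: MultiOption accumulates repeated values
    type=tuple,     # required pairing per the note above
)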
@@ -1,5 +1,4 @@
from pathlib import Path
from typing import Any, Callable, List

import click

@@ -25,65 +24,48 @@ select_attrs = {
    "type": tuple,
}

# Record of env vars associated with options
KNOWN_ENV_VARS: List[str] = []


def _create_option_and_track_env_var(
    *args: Any, **kwargs: Any
) -> Callable[[click.decorators.FC], click.decorators.FC]:
    global KNOWN_ENV_VARS

    envvar = kwargs.get("envvar", None)
    if isinstance(envvar, str):
        KNOWN_ENV_VARS.append(envvar)

    return click.option(*args, **kwargs)

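The helper records any string envvar before delegating to click.option, so merely importing this module leaves KNOWN_ENV_VARS holding every option-backed environment variable. A small sketch of the effect, using a definition that appears later in this diff:

defer = _create_option_and_track_env_var(
    "--defer/--no-defer",
    envvar="DBT_DEFER",
)
assert "DBT_DEFER" in KNOWN_ENV_VARS  # appended as a side effect of the definition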
# --- The actual option definitions --- #
add_package = _create_option_and_track_env_var(
add_package = click.option(
    "--add-package",
    help="Add a package to current package spec, specify it as package-name@version. Change the source with --source flag.",
    envvar=None,
    type=Package(),
)

args = _create_option_and_track_env_var(
args = click.option(
    "--args",
    envvar=None,
    help="Supply arguments to the macro. This dictionary will be mapped to the keyword arguments defined in the selected macro. This argument should be a YAML string, eg. '{my_variable: my_value}'",
    type=YAML(),
)

browser = _create_option_and_track_env_var(
browser = click.option(
    "--browser/--no-browser",
    envvar=None,
    help="Whether or not to open a local web browser after starting the server",
    help="Wether or not to open a local web browser after starting the server",
    default=True,
)

cache_selected_only = _create_option_and_track_env_var(
cache_selected_only = click.option(
    "--cache-selected-only/--no-cache-selected-only",
    envvar="DBT_CACHE_SELECTED_ONLY",
    help="At start of run, populate relational cache only for schemas containing selected nodes, or for all schemas of interest.",
)

clean_project_files_only = _create_option_and_track_env_var(
clean_project_files_only = click.option(
    "--clean-project-files-only / --no-clean-project-files-only",
    envvar="DBT_CLEAN_PROJECT_FILES_ONLY",
    help="If disabled, dbt clean will delete all paths specified in clean-paths, even if they're outside the dbt project.",
    default=True,
)

compile_docs = _create_option_and_track_env_var(
compile_docs = click.option(
    "--compile/--no-compile",
    envvar=None,
    help="Whether or not to run 'dbt compile' as part of docs generation",
    default=True,
)

compile_inject_ephemeral_ctes = _create_option_and_track_env_var(
compile_inject_ephemeral_ctes = click.option(
    "--inject-ephemeral-ctes/--no-inject-ephemeral-ctes",
    envvar=None,
    help="Internal flag controlling injection of referenced ephemeral models' CTEs during `compile`.",
@@ -91,21 +73,21 @@ compile_inject_ephemeral_ctes = _create_option_and_track_env_var(
    default=True,
)

config_dir = _create_option_and_track_env_var(
config_dir = click.option(
    "--config-dir",
    envvar=None,
    help="Print a system-specific command to access the directory that the current dbt project is searching for a profiles.yml. Then, exit. This flag renders other debug step flags no-ops.",
    is_flag=True,
)

debug = _create_option_and_track_env_var(
debug = click.option(
    "--debug/--no-debug",
    "-d/ ",
    envvar="DBT_DEBUG",
    help="Display debug logging during dbt execution. Useful for debugging and making bug reports.",
)

debug_connection = _create_option_and_track_env_var(
debug_connection = click.option(
    "--connection",
    envvar=None,
    help="Test the connection to the target database independent of dependency checks.",
@@ -113,13 +95,13 @@ debug_connection = _create_option_and_track_env_var(
)

# flag was previously named DEFER_MODE
defer = _create_option_and_track_env_var(
defer = click.option(
    "--defer/--no-defer",
    envvar="DBT_DEFER",
    help="If set, resolve unselected nodes by deferring to the manifest within the --state directory.",
)

defer_state = _create_option_and_track_env_var(
defer_state = click.option(
    "--defer-state",
    envvar="DBT_DEFER_STATE",
    help="Override the state directory for deferral only.",
@@ -132,7 +114,7 @@ defer_state = _create_option_and_track_env_var(
    ),
)

deprecated_defer = _create_option_and_track_env_var(
deprecated_defer = click.option(
    "--deprecated-defer",
    envvar="DBT_DEFER_TO_STATE",
    help="Internal flag for deprecating old env var.",
@@ -140,14 +122,14 @@ deprecated_defer = _create_option_and_track_env_var(
    hidden=True,
)

deprecated_favor_state = _create_option_and_track_env_var(
deprecated_favor_state = click.option(
    "--deprecated-favor-state",
    envvar="DBT_FAVOR_STATE_MODE",
    help="Internal flag for deprecating old env var.",
)

# Renamed to --export-saved-queries
deprecated_include_saved_query = _create_option_and_track_env_var(
deprecated_include_saved_query = click.option(
    "--include-saved-query/--no-include-saved-query",
    envvar="DBT_INCLUDE_SAVED_QUERY",
    help="Include saved queries in the list of resources to be selected for build command",
@@ -155,7 +137,7 @@ deprecated_include_saved_query = _create_option_and_track_env_var(
    hidden=True,
)

deprecated_print = _create_option_and_track_env_var(
deprecated_print = click.option(
    "--deprecated-print/--deprecated-no-print",
    envvar="DBT_NO_PRINT",
    help="Internal flag for deprecating old env var.",
@@ -164,7 +146,7 @@ deprecated_print = _create_option_and_track_env_var(
    callback=lambda ctx, param, value: not value,
)

deprecated_state = _create_option_and_track_env_var(
deprecated_state = click.option(
    "--deprecated-state",
    envvar="DBT_ARTIFACT_STATE_PATH",
    help="Internal flag for deprecating old env var.",
@@ -178,21 +160,21 @@ deprecated_state = _create_option_and_track_env_var(
    ),
)

empty = _create_option_and_track_env_var(
empty = click.option(
    "--empty/--no-empty",
    envvar="DBT_EMPTY",
    help="If specified, limit input refs and sources to zero rows.",
    is_flag=True,
)

empty_catalog = _create_option_and_track_env_var(
empty_catalog = click.option(
    "--empty-catalog",
    help="If specified, generate empty catalog.json file during the `dbt docs generate` command.",
    default=False,
    is_flag=True,
)

event_time_end = _create_option_and_track_env_var(
event_time_end = click.option(
    "--event-time-end",
    envvar="DBT_EVENT_TIME_END",
    help="If specified, the end datetime dbt uses to filter microbatch model inputs (exclusive).",
@@ -200,7 +182,7 @@ event_time_end = _create_option_and_track_env_var(
    default=None,
)

event_time_start = _create_option_and_track_env_var(
event_time_start = click.option(
    "--event-time-start",
    envvar="DBT_EVENT_TIME_START",
    help="If specified, the start datetime dbt uses to filter microbatch model inputs (inclusive).",
@@ -208,7 +190,7 @@ event_time_start = _create_option_and_track_env_var(
    default=None,
)

exclude = _create_option_and_track_env_var(
exclude = click.option(
    "--exclude",
    envvar=None,
    type=tuple,
@@ -217,7 +199,7 @@ exclude = _create_option_and_track_env_var(
    help="Specify the nodes to exclude.",
)

exclude_resource_type = _create_option_and_track_env_var(
exclude_resource_type = click.option(
    "--exclude-resource-types",
    "--exclude-resource-type",
    envvar="DBT_EXCLUDE_RESOURCE_TYPES",
@@ -235,7 +217,6 @@ exclude_resource_type = _create_option_and_track_env_var(
        "exposure",
        "snapshot",
        "seed",
        "function",
        "default",
    ],
    case_sensitive=False,
@@ -245,7 +226,7 @@ exclude_resource_type = _create_option_and_track_env_var(
    default=(),
)

export_saved_queries = _create_option_and_track_env_var(
export_saved_queries = click.option(
    "--export-saved-queries/--no-export-saved-queries",
    envvar="DBT_EXPORT_SAVED_QUERIES",
    help="Export saved queries within the 'build' command, otherwise no-op",
@@ -253,20 +234,20 @@ export_saved_queries = _create_option_and_track_env_var(
    hidden=True,
)

fail_fast = _create_option_and_track_env_var(
fail_fast = click.option(
    "--fail-fast/--no-fail-fast",
    "-x/ ",
    envvar="DBT_FAIL_FAST",
    help="Stop execution on first failure.",
)

favor_state = _create_option_and_track_env_var(
favor_state = click.option(
    "--favor-state/--no-favor-state",
    envvar="DBT_FAVOR_STATE",
    help="If set, defer to the argument provided to the state flag for resolving unselected nodes, even if the node(s) exist as a database object in the current environment.",
)

full_refresh = _create_option_and_track_env_var(
full_refresh = click.option(
    "--full-refresh",
    "-f",
    envvar="DBT_FULL_REFRESH",
@@ -274,7 +255,7 @@ full_refresh = _create_option_and_track_env_var(
    is_flag=True,
)

host = _create_option_and_track_env_var(
host = click.option(
    "--host",
    envvar="DBT_HOST",
    help="host to serve dbt docs on",
@@ -282,7 +263,7 @@ host = _create_option_and_track_env_var(
    default="127.0.0.1",
)

indirect_selection = _create_option_and_track_env_var(
indirect_selection = click.option(
    "--indirect-selection",
    envvar="DBT_INDIRECT_SELECTION",
    help="Choose which tests to select that are adjacent to selected resources. Eager is most inclusive, cautious is most exclusive, and buildable is in between. Empty includes no tests at all.",
@@ -290,40 +271,40 @@ indirect_selection = _create_option_and_track_env_var(
    default="eager",
)

inline = _create_option_and_track_env_var(
inline = click.option(
    "--inline",
    envvar=None,
    help="Pass SQL inline to dbt compile and show",
)

inline_direct = _create_option_and_track_env_var(
inline_direct = click.option(
    "--inline-direct",
    envvar=None,
    help="Internal flag to pass SQL inline to dbt show. Do not load the entire project or apply templating.",
    hidden=True,
)

introspect = _create_option_and_track_env_var(
introspect = click.option(
    "--introspect/--no-introspect",
    envvar="DBT_INTROSPECT",
    help="Whether to scaffold introspective queries as part of compilation",
    default=True,
)

lock = _create_option_and_track_env_var(
lock = click.option(
    "--lock",
    envvar=None,
    help="Generate the package-lock.yml file without install the packages.",
    is_flag=True,
)

log_cache_events = _create_option_and_track_env_var(
log_cache_events = click.option(
    "--log-cache-events/--no-log-cache-events",
    help="Enable verbose logging for relational cache events to help when debugging.",
    envvar="DBT_LOG_CACHE_EVENTS",
)

log_format = _create_option_and_track_env_var(
log_format = click.option(
    "--log-format",
    envvar="DBT_LOG_FORMAT",
    help="Specify the format of logging to the console and the log file. Use --log-format-file to configure the format for the log file differently than the console.",
@@ -331,7 +312,7 @@ log_format = _create_option_and_track_env_var(
    default="default",
)

log_format_file = _create_option_and_track_env_var(
log_format_file = click.option(
    "--log-format-file",
    envvar="DBT_LOG_FORMAT_FILE",
    help="Specify the format of logging to the log file by overriding the default value and the general --log-format setting.",
@@ -339,7 +320,7 @@ log_format_file = _create_option_and_track_env_var(
    default="debug",
)

log_level = _create_option_and_track_env_var(
log_level = click.option(
    "--log-level",
    envvar="DBT_LOG_LEVEL",
    help="Specify the minimum severity of events that are logged to the console and the log file. Use --log-level-file to configure the severity for the log file differently than the console.",
@@ -347,7 +328,7 @@ log_level = _create_option_and_track_env_var(
    default="info",
)

log_level_file = _create_option_and_track_env_var(
log_level_file = click.option(
    "--log-level-file",
    envvar="DBT_LOG_LEVEL_FILE",
    help="Specify the minimum severity of events that are logged to the log file by overriding the default value and the general --log-level setting.",
@@ -355,7 +336,7 @@ log_level_file = _create_option_and_track_env_var(
    default="debug",
)

log_file_max_bytes = _create_option_and_track_env_var(
log_file_max_bytes = click.option(
    "--log-file-max-bytes",
    envvar="DBT_LOG_FILE_MAX_BYTES",
    help="Configure the max file size in bytes for a single dbt.log file, before rolling over. 0 means no limit.",
@@ -364,7 +345,7 @@ log_file_max_bytes = _create_option_and_track_env_var(
    hidden=True,
)

log_path = _create_option_and_track_env_var(
log_path = click.option(
    "--log-path",
    envvar="DBT_LOG_PATH",
    help="Configure the 'log-path'. Only applies this setting for the current run. Overrides the 'DBT_LOG_PATH' if it is set.",
@@ -372,16 +353,16 @@ log_path = _create_option_and_track_env_var(
    type=click.Path(resolve_path=True, path_type=Path),
)

macro_debugging = _create_option_and_track_env_var(
macro_debugging = click.option(
    "--macro-debugging/--no-macro-debugging",
    envvar="DBT_MACRO_DEBUGGING",
    hidden=True,
)

models = _create_option_and_track_env_var(*model_decls, **select_attrs)  # type: ignore[arg-type]
models = click.option(*model_decls, **select_attrs)  # type: ignore[arg-type]

# This less standard usage of --output where output_path below is more standard
output = _create_option_and_track_env_var(
output = click.option(
    "--output",
    envvar=None,
    help="Specify the output format: either JSON or a newline-delimited list of selectors, paths, or names",
@@ -389,13 +370,12 @@ output = _create_option_and_track_env_var(
    default="selector",
)

output_keys = _create_option_and_track_env_var(
output_keys = click.option(
    "--output-keys",
    envvar=None,
    help=(
        "Space-delimited listing of node properties to include as custom keys for JSON output. "
        "Supports nested keys using dot notation "
        "(e.g. `--output json --output-keys name config.materialized resource_type`)"
        "Space-delimited listing of node properties to include as custom keys for JSON output "
        "(e.g. `--output json --output-keys name resource_type description`)"
    ),
    type=tuple,
    cls=MultiOption,
@@ -403,7 +383,7 @@ output_keys = _create_option_and_track_env_var(
    default=[],
)

output_path = _create_option_and_track_env_var(
output_path = click.option(
    "--output",
    "-o",
    envvar=None,
@@ -412,14 +392,14 @@ output_path = _create_option_and_track_env_var(
    default=None,
)

partial_parse = _create_option_and_track_env_var(
partial_parse = click.option(
    "--partial-parse/--no-partial-parse",
    envvar="DBT_PARTIAL_PARSE",
    help="Allow for partial parsing by looking for and writing to a pickle file in the target directory. This overrides the user configuration file.",
    default=True,
)

partial_parse_file_diff = _create_option_and_track_env_var(
partial_parse_file_diff = click.option(
    "--partial-parse-file-diff/--no-partial-parse-file-diff",
    envvar="DBT_PARTIAL_PARSE_FILE_DIFF",
    help="Internal flag for whether to compute a file diff during partial parsing.",
@@ -427,7 +407,7 @@ partial_parse_file_diff = _create_option_and_track_env_var(
    default=True,
)

partial_parse_file_path = _create_option_and_track_env_var(
partial_parse_file_path = click.option(
    "--partial-parse-file-path",
    envvar="DBT_PARTIAL_PARSE_FILE_PATH",
    help="Internal flag for path to partial_parse.manifest file.",
@@ -436,21 +416,21 @@ partial_parse_file_path = _create_option_and_track_env_var(
    type=click.Path(exists=True, dir_okay=False, resolve_path=True),
)

print = _create_option_and_track_env_var(
print = click.option(
    "--print/--no-print",
    envvar="DBT_PRINT",
    help="Output all {{ print() }} macro calls.",
    default=True,
)

populate_cache = _create_option_and_track_env_var(
populate_cache = click.option(
    "--populate-cache/--no-populate-cache",
    envvar="DBT_POPULATE_CACHE",
    help="At start of run, use `show` or `information_schema` queries to populate a relational cache, which can speed up subsequent materializations.",
    default=True,
)

port = _create_option_and_track_env_var(
port = click.option(
    "--port",
    envvar=None,
    help="Specify the port number for the docs server",
@@ -458,7 +438,7 @@ port = _create_option_and_track_env_var(
    type=click.INT,
)

printer_width = _create_option_and_track_env_var(
printer_width = click.option(
    "--printer-width",
    envvar="DBT_PRINTER_WIDTH",
    help="Sets the width of terminal output",
@@ -466,13 +446,13 @@ printer_width = _create_option_and_track_env_var(
    default=80,
)

profile = _create_option_and_track_env_var(
profile = click.option(
    "--profile",
    envvar="DBT_PROFILE",
    help="Which existing profile to load. Overrides setting in dbt_project.yml.",
)

profiles_dir = _create_option_and_track_env_var(
profiles_dir = click.option(
    "--profiles-dir",
    envvar="DBT_PROFILES_DIR",
    help="Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/",
@@ -483,7 +463,7 @@ profiles_dir = _create_option_and_track_env_var(
# `dbt debug` uses this because it implements custom behaviour for non-existent profiles.yml directories
# `dbt deps` does not load a profile at all
# `dbt init` will write profiles.yml if it doesn't yet exist
profiles_dir_exists_false = _create_option_and_track_env_var(
profiles_dir_exists_false = click.option(
    "--profiles-dir",
    envvar="DBT_PROFILES_DIR",
    help="Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/",
@@ -491,7 +471,7 @@ profiles_dir_exists_false = _create_option_and_track_env_var(
    type=click.Path(exists=False),
)

project_dir = _create_option_and_track_env_var(
project_dir = click.option(
    "--project-dir",
    envvar="DBT_PROJECT_DIR",
    help="Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.",
@@ -499,16 +479,16 @@ project_dir = _create_option_and_track_env_var(
    type=click.Path(exists=True),
)

quiet = _create_option_and_track_env_var(
quiet = click.option(
    "--quiet/--no-quiet",
    "-q",
    envvar="DBT_QUIET",
    help="Suppress all non-error logging to stdout. Does not affect {{ print() }} macro calls.",
)

raw_select = _create_option_and_track_env_var(*select_decls, **select_attrs)  # type: ignore[arg-type]
raw_select = click.option(*select_decls, **select_attrs)  # type: ignore[arg-type]

record_timing_info = _create_option_and_track_env_var(
record_timing_info = click.option(
    "--record-timing-info",
    "-r",
    envvar=None,
@@ -516,7 +496,7 @@ record_timing_info = _create_option_and_track_env_var(
    type=click.Path(exists=False),
)

resource_type = _create_option_and_track_env_var(
resource_type = click.option(
    "--resource-types",
    "--resource-type",
    envvar="DBT_RESOURCE_TYPES",
@@ -528,7 +508,6 @@ resource_type = _create_option_and_track_env_var(
        "saved_query",
        "source",
        "analysis",
        "function",
        "model",
        "test",
        "unit_test",
@@ -545,41 +524,42 @@ resource_type = _create_option_and_track_env_var(
    default=(),
)

sample = _create_option_and_track_env_var(
sample = click.option(
    "--sample",
    envvar="DBT_SAMPLE",
    help="Run in sample mode with given SAMPLE_WINDOW spec, such that ref/source calls are sampled by the sample window.",
    default=None,
    type=SampleType(),
    hidden=True,  # TODO: Unhide
)

# `--select` and `--models` are analogous for most commands except `dbt list` for legacy reasons.
# Most CLI arguments should use the combined `select` option that aliases `--models` to `--select`.
# However, if you need to split out these separators (like `dbt ls`), use the `models` and `raw_select` options instead.
# See https://github.com/dbt-labs/dbt-core/pull/6774#issuecomment-1408476095 for more info.
select = _create_option_and_track_env_var(*select_decls, *model_decls, **select_attrs)  # type: ignore[arg-type]
select = click.option(*select_decls, *model_decls, **select_attrs)  # type: ignore[arg-type]

selector = _create_option_and_track_env_var(
selector = click.option(
    "--selector",
    envvar=None,
    help="The selector name to use, as defined in selectors.yml",
)

send_anonymous_usage_stats = _create_option_and_track_env_var(
send_anonymous_usage_stats = click.option(
    "--send-anonymous-usage-stats/--no-send-anonymous-usage-stats",
    envvar="DBT_SEND_ANONYMOUS_USAGE_STATS",
    help="Send anonymous usage stats to dbt Labs.",
    default=True,
)

show = _create_option_and_track_env_var(
show = click.option(
    "--show",
    envvar=None,
    help="Show a sample of the loaded data in the terminal",
    is_flag=True,
)

show_limit = _create_option_and_track_env_var(
show_limit = click.option(
    "--limit",
    envvar=None,
    help="Limit the number of results returned by dbt show",
@@ -587,7 +567,7 @@ show_limit = _create_option_and_track_env_var(
    default=5,
)

show_output_format = _create_option_and_track_env_var(
show_output_format = click.option(
    "--output",
    envvar=None,
    help="Output format for dbt compile and dbt show",
@@ -595,7 +575,7 @@ show_output_format = _create_option_and_track_env_var(
    default="text",
)

show_resource_report = _create_option_and_track_env_var(
show_resource_report = click.option(
    "--show-resource-report/--no-show-resource-report",
    default=False,
    envvar="DBT_SHOW_RESOURCE_REPORT",
@@ -608,14 +588,14 @@ show_resource_report = _create_option_and_track_env_var(
# This will need to be communicated as a change to the community!
#
# N.B. This flag is only used for testing, hence it's hidden from help text.
single_threaded = _create_option_and_track_env_var(
single_threaded = click.option(
    "--single-threaded/--no-single-threaded",
    envvar="DBT_SINGLE_THREADED",
    default=False,
    hidden=True,
)

show_all_deprecations = _create_option_and_track_env_var(
show_all_deprecations = click.option(
    "--show-all-deprecations/--no-show-all-deprecations",
    envvar=None,
    help="By default, each type of a deprecation warning is only shown once. Use this flag to show all deprecation warning instances.",
@@ -623,7 +603,7 @@ show_all_deprecations = _create_option_and_track_env_var(
    default=False,
)

skip_profile_setup = _create_option_and_track_env_var(
skip_profile_setup = click.option(
    "--skip-profile-setup",
    "-s",
    envvar=None,
@@ -631,7 +611,7 @@ skip_profile_setup = _create_option_and_track_env_var(
    is_flag=True,
)

source = _create_option_and_track_env_var(
source = click.option(
    "--source",
    envvar=None,
    help="Source to download page from, must be one of hub, git, or local. Defaults to hub.",
@@ -639,7 +619,7 @@ source = _create_option_and_track_env_var(
    default="hub",
)

state = _create_option_and_track_env_var(
state = click.option(
    "--state",
    envvar="DBT_STATE",
    help="Unless overridden, use this state directory for both state comparison and deferral.",
@@ -652,42 +632,42 @@ state = _create_option_and_track_env_var(
    ),
)

static = _create_option_and_track_env_var(
static = click.option(
    "--static",
    help="Generate an additional static_index.html with manifest and catalog built-in.",
    default=False,
    is_flag=True,
)

static_parser = _create_option_and_track_env_var(
static_parser = click.option(
    "--static-parser/--no-static-parser",
    envvar="DBT_STATIC_PARSER",
    help="Use the static parser.",
    default=True,
)

store_failures = _create_option_and_track_env_var(
store_failures = click.option(
    "--store-failures",
    envvar="DBT_STORE_FAILURES",
    help="Store test results (failing rows) in the database",
    is_flag=True,
)

target = _create_option_and_track_env_var(
target = click.option(
    "--target",
    "-t",
    envvar="DBT_TARGET",
    help="Which target to load for the given profile",
)

target_path = _create_option_and_track_env_var(
target_path = click.option(
    "--target-path",
    envvar="DBT_TARGET_PATH",
    help="Configure the 'target-path'. Only applies this setting for the current run. Overrides the 'DBT_TARGET_PATH' if it is set.",
    type=click.Path(),
)

threads = _create_option_and_track_env_var(
threads = click.option(
    "--threads",
    envvar=None,
    help="Specify number of threads to use while executing models. Overrides settings in profiles.yml.",
@@ -695,41 +675,41 @@ threads = _create_option_and_track_env_var(
    type=click.INT,
)

upgrade = _create_option_and_track_env_var(
upgrade = click.option(
    "--upgrade",
    envvar=None,
    help="Upgrade packages to the latest version.",
    is_flag=True,
)

use_colors = _create_option_and_track_env_var(
use_colors = click.option(
    "--use-colors/--no-use-colors",
    envvar="DBT_USE_COLORS",
    help="Specify whether log output is colorized in the console and the log file. Use --use-colors-file/--no-use-colors-file to colorize the log file differently than the console.",
    default=True,
)

use_colors_file = _create_option_and_track_env_var(
use_colors_file = click.option(
    "--use-colors-file/--no-use-colors-file",
    envvar="DBT_USE_COLORS_FILE",
    help="Specify whether log file output is colorized by overriding the default value and the general --use-colors/--no-use-colors setting.",
    default=True,
)

use_experimental_parser = _create_option_and_track_env_var(
use_experimental_parser = click.option(
    "--use-experimental-parser/--no-use-experimental-parser",
    envvar="DBT_USE_EXPERIMENTAL_PARSER",
    help="Enable experimental parsing features.",
)

use_fast_test_edges = _create_option_and_track_env_var(
use_fast_test_edges = click.option(
    "--use-fast-test-edges/--no-use-fast-test-edges",
    envvar="DBT_USE_FAST_TEST_EDGES",
    default=False,
    hidden=True,
)

vars = _create_option_and_track_env_var(
vars = click.option(
    "--vars",
    envvar=None,
    help="Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, eg. '{my_variable: my_value}'",
@@ -747,7 +727,7 @@ def _version_callback(ctx, _param, value):
    ctx.exit()


version = _create_option_and_track_env_var(
version = click.option(
    "--version",
    "-V",
    "-v",
@@ -759,14 +739,14 @@ version = _create_option_and_track_env_var(
    is_flag=True,
)

version_check = _create_option_and_track_env_var(
version_check = click.option(
    "--version-check/--no-version-check",
    envvar="DBT_VERSION_CHECK",
    help="If set, ensure the installed dbt version matches the require-dbt-version specified in the dbt_project.yml file (if any). Otherwise, allow them to differ.",
    default=True,
)

warn_error = _create_option_and_track_env_var(
warn_error = click.option(
    "--warn-error",
    envvar="DBT_WARN_ERROR",
    help="If dbt would normally warn, instead raise an exception. Examples include --select that selects nothing, deprecations, configurations with no associated models, invalid test configurations, and missing sources/refs in tests.",
@@ -774,7 +754,7 @@ warn_error = _create_option_and_track_env_var(
    is_flag=True,
)

warn_error_options = _create_option_and_track_env_var(
warn_error_options = click.option(
    "--warn-error-options",
    envvar="DBT_WARN_ERROR_OPTIONS",
    default="{}",
@@ -783,14 +763,14 @@ warn_error_options = _create_option_and_track_env_var(
    type=WarnErrorOptionsType(),
)

write_json = _create_option_and_track_env_var(
write_json = click.option(
    "--write-json/--no-write-json",
    envvar="DBT_WRITE_JSON",
    help="Whether or not to write the manifest.json and run_results.json files to the target directory",
    default=True,
)

upload_artifacts = _create_option_and_track_env_var(
upload_artifacts = click.option(
    "--upload-to-artifacts-ingest-api/--no-upload-to-artifacts-ingest-api",
    envvar="DBT_UPLOAD_TO_ARTIFACTS_INGEST_API",
    help="Whether or not to upload the artifacts to the dbt Cloud API",

@@ -3,7 +3,7 @@ import os
import time
import traceback
from functools import update_wrapper
from typing import Dict, Optional
from typing import Optional

from click import Context

@@ -17,7 +17,6 @@ from dbt.config.runtime import UnsetProfile, load_profile, load_project
from dbt.context.providers import generate_runtime_macro_context
from dbt.context.query_header import generate_query_header_context
from dbt.deprecations import show_deprecations_summary
from dbt.env_vars import KNOWN_ENGINE_ENV_VARS, validate_engine_env_vars
from dbt.events.logging import setup_event_logger
from dbt.events.types import (
    ArtifactUploadError,
@@ -57,17 +56,6 @@ from dbt_common.record import (
from dbt_common.utils import cast_dict_to_dict_of_strings


def _cross_propagate_engine_env_vars(env_dict: Dict[str, str]) -> None:
    for env_var in KNOWN_ENGINE_ENV_VARS:
        if env_var.old_name is not None:
            # If the old name is in the env dict, and not the new name, set the new name based on the old name
            if env_var.old_name in env_dict and env_var.name not in env_dict:
                env_dict[env_var.name] = env_dict[env_var.old_name]
            # If the new name is in the env dict, override the old name with it
            elif env_var.name in env_dict:
                env_dict[env_var.old_name] = env_dict[env_var.name]
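The helper above syncs renamed engine variables in both directions: an old name fills in a missing new name, but a present new name wins and overwrites the old one. An illustrative sketch, assuming KNOWN_ENGINE_ENV_VARS pairs the new DBT_ENGINE_RECORDER_FILE_PATH with the old DBT_RECORDER_FILE_PATH (a pairing suggested by setup_record_replay below):

env = {"DBT_RECORDER_FILE_PATH": "recording.json"}  # only the old name is set
_cross_propagate_engine_env_vars(env)
assert env["DBT_ENGINE_RECORDER_FILE_PATH"] == "recording.json"  # new name filled from old

env = {"DBT_ENGINE_RECORDER_FILE_PATH": "new.json", "DBT_RECORDER_FILE_PATH": "old.json"}
_cross_propagate_engine_env_vars(env)
assert env["DBT_RECORDER_FILE_PATH"] == "new.json"  # the new name overrides the old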
def preflight(func):
    def wrapper(*args, **kwargs):
        ctx = args[0]
@@ -81,9 +69,7 @@ def preflight(func):

        # Must be set after record/replay is set up so that the env can be
        # recorded or replayed if needed.
        env_dict = get_env()
        _cross_propagate_engine_env_vars(env_dict)
        get_invocation_context()._env = env_dict
        get_invocation_context()._env = get_env()

        # Flags
        flags = Flags(ctx)
@@ -122,9 +108,6 @@ def preflight(func):
        # Adapter management
        ctx.with_resource(adapter_management())

        # Validate engine env var restricted name space
        validate_engine_env_vars()

        return func(*args, **kwargs)

    return update_wrapper(wrapper, func)
@@ -136,16 +119,12 @@ def setup_record_replay():

    recorder: Optional[Recorder] = None
    if rec_mode == RecorderMode.REPLAY:
        previous_recording_path = os.environ.get(
            "DBT_ENGINE_RECORDER_FILE_PATH"
        ) or os.environ.get("DBT_RECORDER_FILE_PATH")
        previous_recording_path = os.environ.get("DBT_RECORDER_FILE_PATH")
        recorder = Recorder(
            RecorderMode.REPLAY, types=rec_types, previous_recording_path=previous_recording_path
        )
    elif rec_mode == RecorderMode.DIFF:
        previous_recording_path = os.environ.get(
            "DBT_ENGINE_RECORDER_FILE_PATH"
        ) or os.environ.get("DBT_RECORDER_FILE_PATH")
        previous_recording_path = os.environ.get("DBT_RECORDER_FILE_PATH")
        # ensure types match the previous recording
        types = get_record_types_from_dict(previous_recording_path)
        recorder = Recorder(
@@ -271,7 +250,6 @@ def profile(func):
        threads = getattr(flags, "THREADS", None)
        profile = load_profile(flags.PROJECT_DIR, flags.VARS, flags.PROFILE, flags.TARGET, threads)
        ctx.obj["profile"] = profile
        get_invocation_context().uses_adapter(profile.credentials.type)

        return func(*args, **kwargs)

@@ -291,22 +269,8 @@ def project(func):
        flags = ctx.obj["flags"]
        # TODO deprecations warnings fired from loading the project will lack
        # the project_id in the snowplow event.

        # Determine if vars should be required during project loading.
        # Commands that don't need vars evaluated (like 'deps', 'clean')
        # should use lenient mode (require_vars=False) to allow missing vars.
        # Commands that validate or execute (like 'run', 'compile', 'build', 'debug') should use
        # strict mode (require_vars=True) to show helpful "Required var X not found" errors.
        # If adding more commands to lenient mode, update this condition.
        require_vars = flags.WHICH != "deps"

        project = load_project(
            flags.PROJECT_DIR,
            flags.VERSION_CHECK,
            ctx.obj["profile"],
            flags.VARS,
            validate=True,
            require_vars=require_vars,
            flags.PROJECT_DIR, flags.VERSION_CHECK, ctx.obj["profile"], flags.VARS, validate=True
        )
        ctx.obj["project"] = project

@@ -432,11 +396,7 @@ def setup_manifest(ctx: Context, write: bool = True, write_perf_info: bool = Fal
    # if a manifest has already been set on the context, don't overwrite it
    if ctx.obj.get("manifest") is None:
        ctx.obj["manifest"] = parse_manifest(
            runtime_config,
            write_perf_info,
            write,
            ctx.obj["flags"].write_json,
            active_integrations,
            runtime_config, write_perf_info, write, ctx.obj["flags"].write_json
        )
        adapter = get_adapter(runtime_config)
    else:
@@ -446,5 +406,6 @@ def setup_manifest(ctx: Context, write: bool = True, write_perf_info: bool = Fal
    adapter.set_macro_resolver(ctx.obj["manifest"])
    query_header_context = generate_query_header_context(adapter.config, ctx.obj["manifest"])  # type: ignore[attr-defined]
    adapter.connections.set_query_header(query_header_context)

    for integration in active_integrations:
        adapter.add_catalog_integration(integration)

@@ -26,7 +26,6 @@ from dbt.contracts.graph.nodes import (
    SeedNode,
    UnitTestDefinition,
    UnitTestNode,
    UnitTestSourceDefinition,
)
from dbt.events.types import FoundStats, WritingInjectedSQLForNode
from dbt.exceptions import (
@@ -182,8 +181,6 @@ class Linker:
            self.dependency(node.unique_id, (manifest.metrics[dependency].unique_id))
        elif dependency in manifest.semantic_models:
            self.dependency(node.unique_id, (manifest.semantic_models[dependency].unique_id))
        elif dependency in manifest.functions:
            self.dependency(node.unique_id, (manifest.functions[dependency].unique_id))
        else:
            raise GraphDependencyNotFoundError(node, dependency)

@@ -196,8 +193,6 @@ class Linker:
            self.link_node(semantic_model, manifest)
        for exposure in manifest.exposures.values():
            self.link_node(exposure, manifest)
        for function in manifest.functions.values():
            self.link_node(function, manifest)
        for metric in manifest.metrics.values():
            self.link_node(metric, manifest)
        for unit_test in manifest.unit_tests.values():
@@ -567,12 +562,7 @@ class Compiler:

            _extend_prepended_ctes(prepended_ctes, new_prepended_ctes)

            cte_name = (
                cte_model.cte_name
                if isinstance(cte_model, UnitTestSourceDefinition)
                else cte_model.identifier
            )
            new_cte_name = self.add_ephemeral_prefix(cte_name)
            new_cte_name = self.add_ephemeral_prefix(cte_model.identifier)
            rendered_sql = cte_model._pre_injected_sql or cte_model.compiled_code
            sql = f" {new_cte_name} as (\n{rendered_sql}\n)"

@@ -605,7 +595,7 @@ class Compiler:
        if extra_context is None:
            extra_context = {}

        if node.language == ModelLanguage.python and node.resource_type == NodeType.Model:
        if node.language == ModelLanguage.python:
            context = self._create_node_context(node, manifest, extra_context)

            postfix = jinja.get_rendered(
@@ -660,15 +650,8 @@ class Compiler:
            raise GraphDependencyNotFoundError(node, to_expression)

        adapter = get_adapter(self.config)

        if (
            hasattr(foreign_key_node, "defer_relation")
            and foreign_key_node.defer_relation
            and self.config.args.defer
        ):
            return str(adapter.Relation.create_from(self.config, foreign_key_node.defer_relation))
        else:
            return str(adapter.Relation.create_from(self.config, foreign_key_node))
        relation_name = str(adapter.Relation.create_from(self.config, foreign_key_node))
        return relation_name

    # This method doesn't actually "compile" any of the nodes. That is done by the
    # "compile_node" method. This creates a Linker and builds the networkx graph,

@@ -68,9 +68,7 @@ class Profile(HasCredentials):
        threads: int,
        credentials: Credentials,
    ) -> None:
        """
        TODO: Is this no longer needed now that 3.9 is no longer supported?
        Explicitly defining `__init__` to work around bug in Python 3.9.7
        """Explicitly defining `__init__` to work around bug in Python 3.9.7
        https://bugs.python.org/issue45081
        """
        self.profile_name = profile_name

@@ -206,7 +206,7 @@ def load_raw_project(project_root: str, validate: bool = False) -> Dict[str, Any
    project_dict = _load_yaml(project_yaml_filepath, validate=validate)

    if validate:
        from dbt.jsonschemas.jsonschemas import jsonschema_validate, project_schema
        from dbt.jsonschemas import jsonschema_validate, project_schema

        jsonschema_validate(
            schema=project_schema(), json=project_dict, file_path=project_yaml_filepath
@@ -420,16 +420,9 @@ class PartialProject(RenderComponents):
        test_paths: List[str] = value_or(cfg.test_paths, ["tests"])
        analysis_paths: List[str] = value_or(cfg.analysis_paths, ["analyses"])
        snapshot_paths: List[str] = value_or(cfg.snapshot_paths, ["snapshots"])
        function_paths: List[str] = value_or(cfg.function_paths, ["functions"])

        all_source_paths: List[str] = _all_source_paths(
            model_paths,
            seed_paths,
            snapshot_paths,
            analysis_paths,
            macro_paths,
            test_paths,
            function_paths,
            model_paths, seed_paths, snapshot_paths, analysis_paths, macro_paths, test_paths
        )

        docs_paths: List[str] = value_or(cfg.docs_paths, all_source_paths)
@@ -460,7 +453,6 @@ class PartialProject(RenderComponents):
        semantic_models: Dict[str, Any]
        saved_queries: Dict[str, Any]
        exposures: Dict[str, Any]
        functions: Dict[str, Any]
        vars_value: VarProvider
        dbt_cloud: Dict[str, Any]

@@ -477,7 +469,6 @@ class PartialProject(RenderComponents):
        semantic_models = cfg.semantic_models
        saved_queries = cfg.saved_queries
        exposures = cfg.exposures
        functions = cfg.functions
        if cfg.vars is None:
            vars_dict: Dict[str, Any] = {}
        else:
@@ -518,7 +509,6 @@ class PartialProject(RenderComponents):
            asset_paths=asset_paths,
            target_path=target_path,
            snapshot_paths=snapshot_paths,
            function_paths=function_paths,
            clean_targets=clean_targets,
            log_path=log_path,
            packages_install_path=packages_install_path,
@@ -542,7 +532,6 @@ class PartialProject(RenderComponents):
            semantic_models=semantic_models,
            saved_queries=saved_queries,
            exposures=exposures,
            functions=functions,
            vars=vars_value,
            config_version=cfg.config_version,
            unrendered=unrendered,
@@ -637,7 +626,6 @@ class Project:
    asset_paths: List[str]
    target_path: str
    snapshot_paths: List[str]
    function_paths: List[str]
    clean_targets: List[str]
    log_path: str
    packages_install_path: str
@@ -656,7 +644,6 @@ class Project:
    semantic_models: Dict[str, Any]
    saved_queries: Dict[str, Any]
    exposures: Dict[str, Any]
    functions: Dict[str, Any]
    vars: VarProvider
    dbt_version: List[VersionSpecifier]
    packages: PackageConfig
@@ -679,7 +666,6 @@ class Project:
            self.analysis_paths,
            self.macro_paths,
            self.test_paths,
            self.function_paths,
        )

    @property
@@ -746,7 +732,6 @@ class Project:
            "semantic-models": self.semantic_models,
            "saved-queries": self.saved_queries,
            "exposures": self.exposures,
            "functions": self.functions,
            "vars": self.vars.to_dict(),
            "require-dbt-version": [v.to_version_string() for v in self.dbt_version],
            "restrict-access": self.restrict_access,

@@ -101,10 +101,7 @@ class DbtProjectYamlRenderer(BaseRenderer):
    _KEYPATH_HANDLERS = ProjectPostprocessor()

    def __init__(
        self,
        profile: Optional[HasCredentials] = None,
        cli_vars: Optional[Dict[str, Any]] = None,
        require_vars: bool = True,
        self, profile: Optional[HasCredentials] = None, cli_vars: Optional[Dict[str, Any]] = None
    ) -> None:
        # Generate contexts here because we want to save the context
        # object in order to retrieve the env_vars. This is almost always
@@ -112,19 +109,10 @@ class DbtProjectYamlRenderer(BaseRenderer):
        # even when we don't have a profile.
        if cli_vars is None:
            cli_vars = {}
        # Store profile and cli_vars for creating strict context later
        self.profile = profile
        self.cli_vars = cli_vars

        # By default, require vars (strict mode) for proper error messages.
        # Commands that don't need vars (like 'deps') should explicitly pass
        # require_vars=False for lenient loading.
        if profile:
            self.ctx_obj = TargetContext(
                profile.to_target_dict(), cli_vars, require_vars=require_vars
            )
            self.ctx_obj = TargetContext(profile.to_target_dict(), cli_vars)
        else:
            self.ctx_obj = BaseContext(cli_vars, require_vars=require_vars)  # type:ignore
            self.ctx_obj = BaseContext(cli_vars)  # type:ignore
        context = self.ctx_obj.to_dict()
        super().__init__(context)


@@ -23,7 +23,7 @@ from dbt.adapters.contracts.connection import (
)
from dbt.adapters.contracts.relation import ComponentName
from dbt.adapters.factory import get_include_paths, get_relation_class_by_name
from dbt.artifacts.resources import Quoting
from dbt.artifacts.resources.v1.components import Quoting
from dbt.config.project import load_raw_project
from dbt.contracts.graph.manifest import ManifestMetadata
from dbt.contracts.project import Configuration
@@ -52,10 +52,9 @@ def load_project(
    profile: HasCredentials,
    cli_vars: Optional[Dict[str, Any]] = None,
    validate: bool = False,
    require_vars: bool = True,
) -> Project:
    # get the project with all of the provided information
    project_renderer = DbtProjectYamlRenderer(profile, cli_vars, require_vars=require_vars)
    project_renderer = DbtProjectYamlRenderer(profile, cli_vars)
    project = Project.from_project_root(
        project_root, project_renderer, verify_version=version_check, validate=validate
    )
@@ -156,7 +155,6 @@ class RuntimeConfig(Project, Profile, AdapterRequiredConfig):
            analysis_paths=project.analysis_paths,
            docs_paths=project.docs_paths,
            asset_paths=project.asset_paths,
            function_paths=project.function_paths,
            target_path=project.target_path,
            snapshot_paths=project.snapshot_paths,
            clean_targets=project.clean_targets,
@@ -182,7 +180,6 @@ class RuntimeConfig(Project, Profile, AdapterRequiredConfig):
            semantic_models=project.semantic_models,
            saved_queries=project.saved_queries,
            exposures=project.exposures,
            functions=project.functions,
            vars=project.vars,
            config_version=project.config_version,
            unrendered=project.unrendered,
@@ -268,14 +265,7 @@ class RuntimeConfig(Project, Profile, AdapterRequiredConfig):
            args,
        )
        flags = get_flags()
        # For dbt deps, use lenient var validation to allow missing vars
        # For all other commands, use strict validation for helpful error messages
        # If command is not set (e.g., during test setup), default to strict mode
        # unless the command is explicitly "deps"
        require_vars = getattr(flags, "WHICH", None) != "deps"
        project = load_project(
            project_root, bool(flags.VERSION_CHECK), profile, cli_vars, require_vars=require_vars
        )
        project = load_project(project_root, bool(flags.VERSION_CHECK), profile, cli_vars)
        return project, profile

    # Called in task/base.py, in BaseTask.from_args
@@ -313,9 +303,6 @@ class RuntimeConfig(Project, Profile, AdapterRequiredConfig):
                identifier=self.quoting.get("identifier", None),
                column=self.quoting.get("column", None),
            ),
            run_started_at=(
                tracking.active_user.run_started_at if tracking.active_user is not None else None
            ),
        )

    def _get_v2_config_paths(
@@ -363,7 +350,6 @@ class RuntimeConfig(Project, Profile, AdapterRequiredConfig):
            "semantic_models": self._get_config_paths(self.semantic_models),
            "saved_queries": self._get_config_paths(self.saved_queries),
            "exposures": self._get_config_paths(self.exposures),
            "functions": self._get_config_paths(self.functions),
        }

    def warn_for_unused_resource_config_paths(

@@ -12,7 +12,6 @@ from typing import Any, Callable, Dict, Iterable, List, Mapping, NoReturn, Optio
# approaches which will extend well to potentially many modules
import pytz

import dbt.deprecations as deprecations
import dbt.flags as flags_module
from dbt import tracking, utils
from dbt.clients.jinja import get_rendered
@@ -84,14 +83,7 @@ def get_itertools_module_context() -> Dict[str, Any]:
        "combinations_with_replacement",
    ]

    def deprecation_wrapper(fn):
        def deprecation_wrapper_inner(*args, **kwargs):
            deprecations.warn("modules-itertools-usage-deprecation")
            return fn(*args, **kwargs)

        return deprecation_wrapper_inner

    return {name: deprecation_wrapper(getattr(itertools, name)) for name in context_exports}
    return {name: getattr(itertools, name) for name in context_exports}

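On the removed side, each itertools export is wrapped so that use from the Jinja context first fires a deprecation event and then delegates to the real function. A sketch of the wrapped behavior, using the one export visible in this hunk:

cwr = get_itertools_module_context()["combinations_with_replacement"]
pairs = list(cwr("AB", 2))  # warns "modules-itertools-usage-deprecation", then behaves normally
# pairs == [("A", "A"), ("A", "B"), ("B", "B")]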
def get_context_modules() -> Dict[str, Dict[str, Any]]:
|
||||
@@ -152,12 +144,10 @@ class Var:
|
||||
context: Mapping[str, Any],
|
||||
cli_vars: Mapping[str, Any],
|
||||
node: Optional[Resource] = None,
|
||||
require_vars: bool = True,
|
||||
) -> None:
|
||||
self._context: Mapping[str, Any] = context
|
||||
self._cli_vars: Mapping[str, Any] = cli_vars
|
||||
self._node: Optional[Resource] = node
|
||||
self._require_vars: bool = require_vars
|
||||
self._merged: Mapping[str, Any] = self._generate_merged()
|
||||
|
||||
def _generate_merged(self) -> Mapping[str, Any]:
|
||||
@@ -170,9 +160,7 @@ class Var:
|
||||
else:
|
||||
return "<Configuration>"
|
||||
|
||||
def get_missing_var(self, var_name: str) -> None:
|
||||
# Only raise an error if vars are _required_
|
||||
if self._require_vars:
|
||||
def get_missing_var(self, var_name: str) -> NoReturn:
|
||||
# TODO function name implies a non exception resolution
|
||||
raise RequiredVarNotFoundError(var_name, dict(self._merged), self._node)
|
||||
|
||||
@@ -202,11 +190,10 @@ class BaseContext(metaclass=ContextMeta):
|
||||
_context_attrs_: Dict[str, Any]
|
||||
|
||||
# subclass is TargetContext
|
||||
def __init__(self, cli_vars: Dict[str, Any], require_vars: bool = True) -> None:
|
||||
def __init__(self, cli_vars: Dict[str, Any]) -> None:
|
||||
self._ctx: Dict[str, Any] = {}
|
||||
self.cli_vars: Dict[str, Any] = cli_vars
|
||||
self.env_vars: Dict[str, Any] = {}
|
||||
self.require_vars: bool = require_vars
|
||||
|
||||
def generate_builtins(self) -> Dict[str, Any]:
|
||||
builtins: Dict[str, Any] = {}
|
||||
@@ -312,7 +299,7 @@ class BaseContext(metaclass=ContextMeta):
|
||||
from events
|
||||
where event_type = '{{ var("event_type", "activation") }}'
|
||||
"""
|
||||
return Var(self._ctx, self.cli_vars, require_vars=self.require_vars)
|
||||
return Var(self._ctx, self.cli_vars)
|
||||
|
||||
@contextmember()
|
||||
def env_var(self, var: str, default: Optional[str] = None) -> str:
|
||||
|
||||
@@ -15,8 +15,8 @@ class ConfiguredContext(TargetContext):
|
||||
# subclasses are SchemaYamlContext, MacroResolvingContext, ManifestContext
|
||||
config: AdapterRequiredConfig
|
||||
|
||||
def __init__(self, config: AdapterRequiredConfig, require_vars: bool = True) -> None:
|
||||
super().__init__(config.to_target_dict(), config.cli_vars, require_vars=require_vars)
|
||||
def __init__(self, config: AdapterRequiredConfig) -> None:
|
||||
super().__init__(config.to_target_dict(), config.cli_vars)
|
||||
self.config = config
|
||||
|
||||
@contextproperty()
|
||||
|
||||
@@ -88,8 +88,6 @@ class RenderedConfig(ConfigSource):
|
||||
model_configs = self.project.exposures
|
||||
elif resource_type == NodeType.Unit:
|
||||
model_configs = self.project.unit_tests
|
||||
elif resource_type == NodeType.Function:
|
||||
model_configs = self.project.functions
|
||||
else:
|
||||
model_configs = self.project.models
|
||||
return model_configs
|
||||
|
||||
@@ -20,7 +20,7 @@ from typing_extensions import Protocol
|
||||
|
||||
from dbt import selected_resources
|
||||
from dbt.adapters.base.column import Column
|
||||
from dbt.adapters.base.relation import EventTimeFilter, RelationType
|
||||
from dbt.adapters.base.relation import EventTimeFilter
|
||||
from dbt.adapters.contracts.connection import AdapterResponse
|
||||
from dbt.adapters.exceptions import MissingConfigError
|
||||
from dbt.adapters.factory import (
|
||||
@@ -56,7 +56,6 @@ from dbt.contracts.graph.metrics import MetricReference, ResolvedMetricReference
|
||||
from dbt.contracts.graph.nodes import (
|
||||
AccessType,
|
||||
Exposure,
|
||||
FunctionNode,
|
||||
Macro,
|
||||
ManifestNode,
|
||||
ModelNode,
|
||||
@@ -243,59 +242,9 @@ class BaseResolver(metaclass=abc.ABCMeta):
     def resolve_limit(self) -> Optional[int]:
         return 0 if getattr(self.config.args, "EMPTY", False) else None

-    def _resolve_event_time_field_name(self, target: ManifestNode) -> str:
-        """Get the event time field name with proper quoting based on configuration."""
-        # Default to False for quoting
-        should_quote = False
-        column_found = False
-        column = None
-
-        # Check if config has event_time attribute
-        if not hasattr(target.config, "event_time") or target.config.event_time is None:
-            return ""
-
-        # Check column-level quote configuration first (overrides source-level)
-        if hasattr(target, "columns") and target.columns and isinstance(target.columns, dict):
-            for _, column_info in target.columns.items():
-                if column_info.name == target.config.event_time:
-                    column_found = True
-                    # Create the column object
-                    column = Column.create(
-                        column_info.name, column_info.data_type if column_info.data_type else ""
-                    )
-                    # Column-level quote setting takes precedence
-                    if hasattr(column_info, "quote") and column_info.quote is not None:
-                        should_quote = column_info.quote
-                    # Fallback to source-level quote setting
-                    elif (
-                        hasattr(target, "quoting")
-                        and hasattr(target.quoting, "column")
-                        and target.quoting.column is not None
-                    ):
-                        should_quote = target.quoting.column
-                    break
-
-        # If column not found, fall back to source-level quote setting
-        if not column_found:
-            if (
-                hasattr(target, "quoting")
-                and hasattr(target.quoting, "column")
-                and target.quoting.column is not None
-            ):
-                should_quote = target.quoting.column
-            # Create column object for quoting
-            column = Column.create(target.config.event_time, "")
-
-        # Apply quoting logic
-        if should_quote and column is not None:
-            return column.quoted
-        else:
-            return target.config.event_time
-
     def resolve_event_time_filter(self, target: ManifestNode) -> Optional[EventTimeFilter]:
         event_time_filter = None
         sample_mode = getattr(self.config.args, "sample", None) is not None
-        field_name = self._resolve_event_time_field_name(target)

         # TODO The number of branches here is getting rough. We should consider ways to simplify
         # what is going on to make it easier to maintain
@@ -328,7 +277,7 @@ class BaseResolver(metaclass=abc.ABCMeta):
                     else self.model.batch.event_time_end
                 )
                 event_time_filter = EventTimeFilter(
-                    field_name=field_name,
+                    field_name=target.config.event_time,
                     start=start,
                     end=end,
                 )
@@ -336,7 +285,7 @@ class BaseResolver(metaclass=abc.ABCMeta):
             # Regular microbatch models
             else:
                 event_time_filter = EventTimeFilter(
-                    field_name=field_name,
+                    field_name=target.config.event_time,
                     start=self.model.batch.event_time_start,
                     end=self.model.batch.event_time_end,
                 )
@@ -344,7 +293,7 @@ class BaseResolver(metaclass=abc.ABCMeta):
         # Sample mode _non_ microbatch models
         elif sample_mode:
             event_time_filter = EventTimeFilter(
-                field_name=field_name,
+                field_name=target.config.event_time,
                 start=self.config.args.sample.start,
                 end=self.config.args.sample.end,
             )
@@ -459,41 +408,6 @@ class BaseMetricResolver(BaseResolver):
         return self.resolve(name, package)


-class BaseFunctionResolver(BaseResolver):
-    @abc.abstractmethod
-    def resolve(self, name: str, package: Optional[str] = None): ...
-
-    def _repack_args(self, name: str, package: Optional[str]) -> List[str]:
-        if package is None:
-            return [name]
-        else:
-            return [package, name]
-
-    def validate_args(self, name: str, package: Optional[str]):
-        if not isinstance(name, str):
-            raise CompilationError(
-                f"The name argument to function() must be a string, got {type(name)}"
-            )
-
-        if package is not None and not isinstance(package, str):
-            raise CompilationError(
-                f"The package argument to function() must be a string or None, got {type(package)}"
-            )
-
-    def __call__(self, *args: str):
-        name: str
-        package: Optional[str] = None
-
-        if len(args) == 1:
-            name = args[0]
-        elif len(args) == 2:
-            package, name = args
-        else:
-            raise RefArgsError(node=self.model, args=args)
-        self.validate_args(name, package)
-        return self.resolve(name, package)


 class Config(Protocol):
     def __init__(self, model, context_config: Optional[ContextConfig]): ...

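For context on the resolver removed above: it let Jinja call either function("name") or function("package", "name"). A minimal standalone sketch of that dispatch, under simplified assumptions (plain ValueError instead of the real RefArgsError/CompilationError, and `call` as a hypothetical stand-in for `__call__`):

    from typing import List, Optional

    def repack_args(name: str, package: Optional[str]) -> List[str]:
        # Mirrors _repack_args: one-element list without a package, two with one.
        return [name] if package is None else [package, name]

    def call(*args: str) -> List[str]:
        # Mirrors __call__: dispatch on argument count, then validate types.
        if len(args) == 1:
            package, name = None, args[0]
        elif len(args) == 2:
            package, name = args
        else:
            raise ValueError(f"function() takes 1 or 2 arguments, got {len(args)}")
        if not isinstance(name, str):
            raise ValueError(f"The name argument to function() must be a string, got {type(name)}")
        if package is not None and not isinstance(package, str):
            raise ValueError(f"The package argument to function() must be a string or None, got {type(package)}")
        return repack_args(name, package)

    assert call("my_func") == ["my_func"]
    assert call("my_pkg", "my_func") == ["my_pkg", "my_func"]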
@@ -544,15 +458,9 @@ class ParseConfigObject(Config):
     def require(self, name, validator=None):
         return ""

-    def meta_require(self, name, validator=None):
-        return ""
-
     def get(self, name, default=None, validator=None):
         return ""

-    def meta_get(self, name, default=None, validator=None):
-        return ""
-
     def persist_relation_docs(self) -> bool:
         return False
@@ -584,16 +492,6 @@ class RuntimeConfigObject(Config):
             raise MissingConfigError(unique_id=self.model.unique_id, name=name)
         return result

-    def _lookup_meta(self, name, default=_MISSING):
-        # if this is a macro, there might be no `model.config`.
-        if not hasattr(self.model, "config"):
-            result = default
-        else:
-            result = self.model.config.meta_get(name, default)
-        if result is _MISSING:
-            raise MissingConfigError(unique_id=self.model.unique_id, name=name)
-        return result
-
     def require(self, name, validator=None):
         to_return = self._lookup(name)

@@ -602,12 +500,6 @@ class RuntimeConfigObject(Config):

         return to_return

-    def meta_require(self, name, validator=None):
-        to_return = self._lookup_meta(name)
-
-        if validator is not None:
-            self._validate(validator, to_return)
-
     def get(self, name, default=None, validator=None):
         to_return = self._lookup(name, default)

@@ -616,14 +508,6 @@ class RuntimeConfigObject(Config):

         return to_return

-    def meta_get(self, name, default=None, validator=None):
-        to_return = self._lookup_meta(name, default)
-
-        if validator is not None and default is not None:
-            self._validate(validator, to_return)
-
-        return to_return
-
     def persist_relation_docs(self) -> bool:
         persist_docs = self.get("persist_docs", default={})
         if not isinstance(persist_docs, dict):
@@ -884,12 +768,7 @@ class RuntimeUnitTestSourceResolver(BaseSourceResolver):
         # we just need to set_cte, but skipping it confuses typing. We *do* need
         # the relation in the "this" property.
         self.model.set_cte(target_source.unique_id, None)
-
-        identifier = self.Relation.add_ephemeral_prefix(target_source.cte_name)
-        return self.Relation.create(
-            type=self.Relation.CTE,
-            identifier=identifier,
-        ).quote(identifier=False)
+        return self.Relation.create_ephemeral_from(target_source)


 # `metric` implementations
@@ -983,51 +862,6 @@ class UnitTestVar(RuntimeVar):
         super().__init__(context, config_copy or config, node=node)


-# `function` implementations.
-class ParseFunctionResolver(BaseFunctionResolver):
-    def resolve(self, name: str, package: Optional[str] = None):
-        # When you call function(), this is what happens at parse time
-        self.model.functions.append(self._repack_args(name, package))
-        return self.Relation.create_from(self.config, self.model, type=RelationType.Function)
-
-
-class RuntimeFunctionResolver(BaseFunctionResolver):
-    def resolve(self, name: str, package: Optional[str] = None):
-        target_function = self.manifest.resolve_function(
-            name,
-            package,
-            self.current_project,
-            self.model.package_name,
-        )
-
-        if target_function is None or isinstance(target_function, Disabled):
-            raise TargetNotFoundError(
-                node=self.model,
-                target_name=name,
-                target_kind="function",
-                disabled=(isinstance(target_function, Disabled)),
-            )
-
-        # Source quoting does _not_ respect global configs in dbt_project.yml, as documented here:
-        # https://docs.getdbt.com/reference/project-configs/quoting
-        # Use an object with an empty quoting field to bypass any settings in self.
-        class SourceQuotingBaseConfig:
-            quoting: Dict[str, Any] = {}
-
-        return self.Relation.create_from(
-            SourceQuotingBaseConfig(),
-            target_function,
-            limit=self.resolve_limit,
-            event_time_filter=self.resolve_event_time_filter(target_function),
-            type=RelationType.Function,
-        )
-
-
-# TODO: Right now the RuntimeUnitTestProvider uses the RuntimeFunctionResolver for functions,
-# but for CT-12025 we'll likely need to create a separate RuntimeUnitTestFunctionResolver to
-# handle function overrides (mocking functions)
-
-
 # Providers
 class Provider(Protocol):
     execute: bool
@@ -1037,7 +871,6 @@ class Provider(Protocol):
     ref: Type[BaseRefResolver]
     source: Type[BaseSourceResolver]
     metric: Type[BaseMetricResolver]
-    function: Type[BaseFunctionResolver]


 class ParseProvider(Provider):
@@ -1048,7 +881,6 @@ class ParseProvider(Provider):
     ref = ParseRefResolver
     source = ParseSourceResolver
     metric = ParseMetricResolver
-    function = ParseFunctionResolver


 class GenerateNameProvider(Provider):
@@ -1059,7 +891,6 @@ class GenerateNameProvider(Provider):
     ref = ParseRefResolver
     source = ParseSourceResolver
     metric = ParseMetricResolver
-    function = ParseFunctionResolver


 class RuntimeProvider(Provider):
@@ -1070,7 +901,6 @@ class RuntimeProvider(Provider):
     ref = RuntimeRefResolver
     source = RuntimeSourceResolver
     metric = RuntimeMetricResolver
-    function = RuntimeFunctionResolver


 class RuntimeUnitTestProvider(Provider):
@@ -1081,7 +911,6 @@ class RuntimeUnitTestProvider(Provider):
     ref = RuntimeUnitTestRefResolver
     source = RuntimeUnitTestSourceResolver
     metric = RuntimeMetricResolver
-    function = RuntimeFunctionResolver


 class OperationProvider(RuntimeProvider):
@@ -1324,10 +1153,6 @@ class ProviderContext(ManifestContext):
     def metric(self) -> Callable:
         return self.provider.metric(self.db_wrapper, self.model, self.config, self.manifest)

-    @contextproperty()
-    def function(self) -> Callable:
-        return self.provider.function(self.db_wrapper, self.model, self.config, self.manifest)
-
     @contextproperty("config")
     def ctx_config(self) -> Config:
         """The `config` variable exists to handle end-user configuration for
@@ -1916,14 +1741,6 @@ class UnitTestContext(ModelContext):
         return None


-class FunctionContext(ModelContext):
-    model: FunctionNode
-
-    @contextproperty()
-    def this(self) -> Optional[RelationProxy]:
-        return self.db_wrapper.Relation.create_from(self.config, self.model)
-
-
 # This is called by '_context_for', used in 'render_with_context'
 def generate_parser_model_context(
     model: ManifestNode,
@@ -1940,21 +1757,6 @@ def generate_parser_model_context(
     return ctx.to_dict()


-def generate_parser_unit_test_context(
-    unit_test: UnitTestNode, config: RuntimeConfig, manifest: Manifest
-) -> Dict[str, Any]:
-    context_config = ContextConfig(
-        config,
-        unit_test.fqn,
-        NodeType.Unit,
-        config.project_name,
-    )
-
-    ctx = UnitTestContext(unit_test, config, manifest, ParseProvider(), context_config)
-
-    return ctx.to_dict()
-
-
 def generate_generate_name_macro_context(
     macro: Macro,
     config: RuntimeConfig,
@@ -2036,15 +1838,6 @@ def generate_runtime_unit_test_context(
     return ctx_dict


-def generate_runtime_function_context(
-    function: FunctionNode,
-    config: RuntimeConfig,
-    manifest: Manifest,
-) -> Dict[str, Any]:
-    ctx = FunctionContext(function, config, manifest, OperationProvider(), None)
-    return ctx.to_dict()
-
-
 class ExposureRefResolver(BaseResolver):
     def __call__(self, *args, **kwargs) -> str:
         package = None
@@ -5,10 +5,8 @@ from dbt.context.base import BaseContext, contextproperty

 class TargetContext(BaseContext):
     # subclass is ConfiguredContext
-    def __init__(
-        self, target_dict: Dict[str, Any], cli_vars: Dict[str, Any], require_vars: bool = True
-    ):
-        super().__init__(cli_vars=cli_vars, require_vars=require_vars)
+    def __init__(self, target_dict: Dict[str, Any], cli_vars: Dict[str, Any]):
+        super().__init__(cli_vars=cli_vars)
         self.target_dict = target_dict

     @contextproperty()
@@ -23,7 +23,6 @@ class ParseFileType(StrEnum):
     Schema = "schema"
     Hook = "hook"  # not a real filetype, from dbt_project.yml
     Fixture = "fixture"
-    Function = "function"


 parse_file_type_to_parser = {
@@ -38,7 +37,6 @@ parse_file_type_to_parser = {
     ParseFileType.Schema: "SchemaParser",
     ParseFileType.Hook: "HookParser",
     ParseFileType.Fixture: "FixtureParser",
-    ParseFileType.Function: "FunctionParser",
 }


@@ -161,7 +159,6 @@ class SourceFile(BaseSourceFile):
     docs: List[str] = field(default_factory=list)
     macros: List[str] = field(default_factory=list)
     env_vars: List[str] = field(default_factory=list)
-    functions: List[str] = field(default_factory=list)

     @classmethod
     def big_seed(cls, path: FilePath) -> "SourceFile":
@@ -194,7 +191,6 @@ class SchemaSourceFile(BaseSourceFile):
    data_tests: Dict[str, Any] = field(default_factory=dict)
    sources: List[str] = field(default_factory=list)
    exposures: List[str] = field(default_factory=list)
-    functions: List[str] = field(default_factory=list)
    metrics: List[str] = field(default_factory=list)
    snapshots: List[str] = field(default_factory=list)
    # The following field will no longer be used. Leaving
@@ -32,13 +32,8 @@ from dbt.adapters.exceptions import (
 from dbt.adapters.factory import get_adapter_package_names

 # to preserve import paths
-from dbt.artifacts.resources import (
-    BaseResource,
-    DeferRelation,
-    NodeConfig,
-    NodeVersion,
-    RefArgs,
-)
+from dbt.artifacts.resources import BaseResource, DeferRelation, NodeVersion, RefArgs
+from dbt.artifacts.resources.v1.config import NodeConfig
 from dbt.artifacts.schemas.manifest import ManifestMetadata, UniqueID, WritableManifest
 from dbt.clients.jinja_static import statically_parse_ref_or_source
 from dbt.contracts.files import (
@@ -53,7 +48,6 @@ from dbt.contracts.graph.nodes import (
     BaseNode,
     Documentation,
     Exposure,
-    FunctionNode,
     GenericTestNode,
     GraphMemberNode,
     Group,
@@ -178,41 +172,8 @@ class SourceLookup(dbtClassMixin):
         return manifest.sources[unique_id]


-class FunctionLookup(dbtClassMixin):
-    def __init__(self, manifest: "Manifest") -> None:
-        self.storage: Dict[str, Dict[PackageName, UniqueID]] = {}
-        self.populate(manifest)
-
-    def get_unique_id(self, search_name, package: Optional[PackageName]):
-        return find_unique_id_for_package(self.storage, search_name, package)
-
-    def find(self, search_name, package: Optional[PackageName], manifest: "Manifest"):
-        unique_id = self.get_unique_id(search_name, package)
-        if unique_id is not None:
-            return self.perform_lookup(unique_id, manifest)
-        return None
-
-    def add_function(self, function: FunctionNode):
-        if function.search_name not in self.storage:
-            self.storage[function.search_name] = {}
-
-        self.storage[function.search_name][function.package_name] = function.unique_id
-
-    def populate(self, manifest):
-        for function in manifest.functions.values():
-            if hasattr(function, "name"):
-                self.add_function(function)
-
-    def perform_lookup(self, unique_id: UniqueID, manifest: "Manifest") -> FunctionNode:
-        if unique_id not in manifest.functions:
-            raise dbt_common.exceptions.DbtInternalError(
-                f"Function {unique_id} found in cache but not found in manifest"
-            )
-        return manifest.functions[unique_id]
-
-
 class RefableLookup(dbtClassMixin):
-    # model, seed, snapshot, function
+    # model, seed, snapshot
     _lookup_types: ClassVar[set] = set(REFABLE_NODE_TYPES)
     _versioned_types: ClassVar[set] = set(VERSIONED_NODE_TYPES)

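The FunctionLookup removed above keeps a two-level mapping from search name to package name to unique id, so a function can be resolved with or without a package qualifier. A toy illustration of that storage shape (plain dicts; find_unique_id_for_package is simplified here to a local lookup):

    from typing import Dict, Optional

    storage: Dict[str, Dict[str, str]] = {}

    def add_function(search_name: str, package_name: str, unique_id: str) -> None:
        # Mirrors FunctionLookup.add_function: create the inner dict on first use.
        storage.setdefault(search_name, {})[package_name] = unique_id

    def get_unique_id(search_name: str, package: Optional[str]) -> Optional[str]:
        # Simplified stand-in for find_unique_id_for_package: the exact package
        # when one is given, otherwise the first candidate for that name.
        candidates = storage.get(search_name, {})
        if package is not None:
            return candidates.get(package)
        return next(iter(candidates.values()), None)

    add_function("my_func", "my_project", "function.my_project.my_func")
    assert get_unique_id("my_func", None) == "function.my_project.my_func"
    assert get_unique_id("my_func", "other_package") is None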
@@ -720,9 +681,6 @@ class Disabled(Generic[D]):
     target: D


-MaybeFunctionNode = Optional[Union[FunctionNode, Disabled[FunctionNode]]]
-
-
 MaybeMetricNode = Optional[Union[Metric, Disabled[Metric]]]


@@ -919,7 +877,6 @@ class Manifest(MacroMethods, dbtClassMixin):
     macros: MutableMapping[str, Macro] = field(default_factory=dict)
     docs: MutableMapping[str, Documentation] = field(default_factory=dict)
     exposures: MutableMapping[str, Exposure] = field(default_factory=dict)
-    functions: MutableMapping[str, FunctionNode] = field(default_factory=dict)
     metrics: MutableMapping[str, Metric] = field(default_factory=dict)
     groups: MutableMapping[str, Group] = field(default_factory=dict)
     selectors: MutableMapping[str, Any] = field(default_factory=dict)
@@ -962,9 +919,6 @@ class Manifest(MacroMethods, dbtClassMixin):
     _singular_test_lookup: Optional[SingularTestLookup] = field(
         default=None, metadata={"serialize": lambda x: None, "deserialize": lambda x: None}
     )
-    _function_lookup: Optional[FunctionLookup] = field(
-        default=None, metadata={"serialize": lambda x: None, "deserialize": lambda x: None}
-    )
     _parsing_info: ParsingInfo = field(
         default_factory=ParsingInfo,
         metadata={"serialize": lambda x: None, "deserialize": lambda x: None},
@@ -1001,7 +955,6 @@ class Manifest(MacroMethods, dbtClassMixin):
         """
         self.flat_graph = {
             "exposures": {k: v.to_dict(omit_none=False) for k, v in self.exposures.items()},
-            "functions": {k: v.to_dict(omit_none=False) for k, v in self.functions.items()},
             "groups": {k: v.to_dict(omit_none=False) for k, v in self.groups.items()},
             "metrics": {k: v.to_dict(omit_none=False) for k, v in self.metrics.items()},
             "nodes": {k: v.to_dict(omit_none=False) for k, v in self.nodes.items()},
@@ -1095,7 +1048,6 @@ class Manifest(MacroMethods, dbtClassMixin):
         resource_fqns: Dict[str, Set[Tuple[str, ...]]] = {}
         all_resources = chain(
             self.exposures.values(),
-            self.functions.values(),
             self.nodes.values(),
             self.sources.values(),
             self.metrics.values(),
@@ -1129,7 +1081,6 @@ class Manifest(MacroMethods, dbtClassMixin):
             macros={k: _deepcopy(v) for k, v in self.macros.items()},
             docs={k: _deepcopy(v) for k, v in self.docs.items()},
             exposures={k: _deepcopy(v) for k, v in self.exposures.items()},
-            functions={k: _deepcopy(v) for k, v in self.functions.items()},
             metrics={k: _deepcopy(v) for k, v in self.metrics.items()},
             groups={k: _deepcopy(v) for k, v in self.groups.items()},
             selectors={k: _deepcopy(v) for k, v in self.selectors.items()},
@@ -1150,7 +1101,6 @@ class Manifest(MacroMethods, dbtClassMixin):
             self.nodes.values(),
             self.sources.values(),
             self.exposures.values(),
-            self.functions.values(),
             self.metrics.values(),
             self.semantic_models.values(),
             self.saved_queries.values(),
@@ -1207,7 +1157,6 @@ class Manifest(MacroMethods, dbtClassMixin):
             macros=cls._map_resources_to_map_nodes(writable_manifest.macros),
             docs=cls._map_resources_to_map_nodes(writable_manifest.docs),
             exposures=cls._map_resources_to_map_nodes(writable_manifest.exposures),
-            functions=cls._map_resources_to_map_nodes(writable_manifest.functions),
             metrics=cls._map_resources_to_map_nodes(writable_manifest.metrics),
             groups=cls._map_resources_to_map_nodes(writable_manifest.groups),
             semantic_models=cls._map_resources_to_map_nodes(writable_manifest.semantic_models),
@@ -1216,7 +1165,6 @@ class Manifest(MacroMethods, dbtClassMixin):
                 selector_id: selector
                 for selector_id, selector in writable_manifest.selectors.items()
             },
             metadata=writable_manifest.metadata,
         )

         return manifest
@@ -1265,7 +1213,6 @@ class Manifest(MacroMethods, dbtClassMixin):
             macros=self._map_nodes_to_map_resources(self.macros),
             docs=self._map_nodes_to_map_resources(self.docs),
             exposures=self._map_nodes_to_map_resources(self.exposures),
-            functions=self._map_nodes_to_map_resources(self.functions),
             metrics=self._map_nodes_to_map_resources(self.metrics),
             groups=self._map_nodes_to_map_resources(self.groups),
             selectors=self.selectors,
@@ -1293,8 +1240,6 @@ class Manifest(MacroMethods, dbtClassMixin):
             return self.sources[unique_id]
         elif unique_id in self.exposures:
             return self.exposures[unique_id]
-        elif unique_id in self.functions:
-            return self.functions[unique_id]
         elif unique_id in self.metrics:
             return self.metrics[unique_id]
         elif unique_id in self.semantic_models:
@@ -1377,12 +1322,6 @@ class Manifest(MacroMethods, dbtClassMixin):
             self._singular_test_lookup = SingularTestLookup(self)
         return self._singular_test_lookup

-    @property
-    def function_lookup(self) -> FunctionLookup:
-        if self._function_lookup is None:
-            self._function_lookup = FunctionLookup(self)
-        return self._function_lookup
-
     @property
     def external_node_unique_ids(self):
         return [node.unique_id for node in self.nodes.values() if node.is_external_node]
@@ -1452,29 +1391,6 @@ class Manifest(MacroMethods, dbtClassMixin):
             return Disabled(disabled[0])
         return None

-    def resolve_function(
-        self,
-        target_function_name: str,
-        target_function_package: Optional[str],
-        current_project: str,
-        node_package: str,
-    ) -> MaybeFunctionNode:
-        package_candidates = _packages_to_search(
-            current_project, node_package, target_function_package
-        )
-        disabled: Optional[List[FunctionNode]] = None
-        for package in package_candidates:
-            function = self.function_lookup.find(target_function_name, package, self)
-            if function is not None and function.config.enabled:
-                return function
-
-            # it's possible that the function is disabled
-            if disabled is None:
-                disabled = self.disabled_lookup.find(target_function_name, package)
-        if disabled:
-            return Disabled(disabled[0])
-        return None
-
     def resolve_metric(
         self,
         target_metric_name: str,
@@ -1715,11 +1631,6 @@ class Manifest(MacroMethods, dbtClassMixin):
         self.exposures[exposure.unique_id] = exposure
         source_file.exposures.append(exposure.unique_id)

-    def add_function(self, source_file: SourceFile, function: FunctionNode):
-        _check_duplicates(function, self.functions)
-        self.functions[function.unique_id] = function
-        source_file.functions.append(function.unique_id)
-
     def add_metric(
         self, source_file: SchemaSourceFile, metric: Metric, generated_from: Optional[str] = None
     ):
@@ -1756,8 +1667,6 @@ class Manifest(MacroMethods, dbtClassMixin):
                 source_file.semantic_models.append(node.unique_id)
             if isinstance(node, Exposure):
                 source_file.exposures.append(node.unique_id)
-            if isinstance(node, FunctionNode):
-                source_file.functions.append(node.unique_id)
             if isinstance(node, UnitTestDefinition):
                 source_file.unit_tests.append(node.unique_id)
         elif isinstance(source_file, FixtureSourceFile):
@@ -1824,7 +1733,6 @@ class Manifest(MacroMethods, dbtClassMixin):
             self.macros,
             self.docs,
             self.exposures,
-            self.functions,
             self.metrics,
             self.groups,
             self.selectors,
@@ -3,7 +3,6 @@ from typing import Any, Dict, List, Optional, Type

 from dbt.artifacts.resources import (
     ExposureConfig,
-    FunctionConfig,
     GroupConfig,
     MetricConfig,
     ModelConfig,
@@ -53,7 +52,6 @@ RESOURCE_TYPES: Dict[NodeType, Type[BaseConfig]] = {
     NodeType.Snapshot: SnapshotConfig,
     NodeType.Unit: UnitTestConfig,
     NodeType.Group: GroupConfig,
-    NodeType.Function: FunctionConfig,
 }


@@ -32,8 +32,6 @@ from dbt.artifacts.resources import (
 from dbt.artifacts.resources import Documentation as DocumentationResource
 from dbt.artifacts.resources import Exposure as ExposureResource
 from dbt.artifacts.resources import FileHash
-from dbt.artifacts.resources import Function as FunctionResource
-from dbt.artifacts.resources import FunctionArgument, FunctionReturns
 from dbt.artifacts.resources import GenericTest as GenericTestResource
 from dbt.artifacts.resources import GraphResource
 from dbt.artifacts.resources import Group as GroupResource
@@ -47,7 +45,6 @@ from dbt.artifacts.resources import MetricInputMeasure
 from dbt.artifacts.resources import Model as ModelResource
 from dbt.artifacts.resources import (
     ModelConfig,
-    ModelFreshness,
     NodeConfig,
     NodeVersion,
     ParsedResource,
@@ -63,6 +60,7 @@ from dbt.artifacts.resources import SourceDefinition as SourceDefinitionResource
 from dbt.artifacts.resources import SqlOperation as SqlOperationResource
 from dbt.artifacts.resources import TimeSpine
 from dbt.artifacts.resources import UnitTestDefinition as UnitTestDefinitionResource
+from dbt.artifacts.resources.v1.model import ModelFreshness
 from dbt.artifacts.schemas.batch_results import BatchResults
 from dbt.clients.jinja_static import statically_extract_has_name_this
 from dbt.contracts.graph.model_config import UnitTestNodeConfig
@@ -697,36 +695,6 @@ class ModelNode(ModelResource, CompiledNode):
             )
         )

-    @staticmethod
-    def _normalize_data_type_for_comparison(data_type: Optional[str]) -> Optional[str]:
-        """
-        Normalize a data type string by removing size, precision, and scale parameters.
-        This allows comparison of base types while ignoring non-breaking parameter changes.
-
-        Examples:
-            varchar(10) -> varchar
-            VARCHAR(5) -> varchar
-            numeric(10,2) -> numeric
-            text -> text
-            decimal(5) -> decimal
-            None -> None
-
-        Per dbt documentation, changes to size/precision/scale should not be
-        considered breaking changes for contracts.
-        See: https://docs.getdbt.com/reference/resource-configs/contract#size-precision-and-scale
-
-        Note: Comparison is case-insensitive. Type aliases (e.g., 'varchar' vs
-        'character varying') are not automatically resolved - users should use
-        consistent type names in their contracts to avoid false positives.
-        """
-        if not data_type:
-            return data_type
-
-        # Split on the first '(' to get the base type without parameters
-        # Convert to lowercase for case-insensitive comparison
-        base_type, _, _ = data_type.partition("(")
-        return base_type.strip().lower()
-
     def same_contract(self, old, adapter_type=None) -> bool:
         # If the contract wasn't previously enforced:
         if old.contract.enforced is False and self.contract.enforced is False:
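The staticmethod removed above does exactly what its docstring describes: strip size/precision/scale and lowercase, so only base-type changes count as breaking for contracts. A self-contained repro of that normalization:

    from typing import Optional

    def normalize(data_type: Optional[str]) -> Optional[str]:
        # Same logic as the removed helper: split on the first "(", drop the
        # parameters, and lowercase for a case-insensitive comparison.
        if not data_type:
            return data_type
        base_type, _, _ = data_type.partition("(")
        return base_type.strip().lower()

    assert normalize("varchar(10)") == normalize("VARCHAR(5)")  # not breaking
    assert normalize("numeric(10,2)") == "numeric"
    assert normalize("text") != normalize("decimal(5)")         # breaking
    assert normalize(None) is None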
@@ -768,16 +736,6 @@ class ModelNode(ModelResource, CompiledNode):
                 columns_removed.append(old_value.name)
             # Has this column's data type changed?
             elif old_value.data_type != self.columns[old_key].data_type:
-                # Compare normalized data types (without size/precision/scale)
-                # to determine if this is a breaking change
-                old_normalized = self._normalize_data_type_for_comparison(old_value.data_type)
-                new_normalized = self._normalize_data_type_for_comparison(
-                    self.columns[old_key].data_type
-                )
-
-                # Only consider it a breaking change if the base types differ
-                # Changes like varchar(3) -> varchar(10) are not breaking
-                if old_normalized != new_normalized:
                 column_type_changes.append(
                     {
                         "column_name": str(old_value.name),
@@ -1098,10 +1056,6 @@ class UnitTestSourceDefinition(ModelNode):
     source_name: str = "undefined"
     quoting: QuotingResource = field(default_factory=QuotingResource)

-    @property
-    def cte_name(self):
-        return self.unique_id.split(".")[-1]
-
     @property
     def search_name(self):
         return f"{self.source_name}.{self.name}"
@@ -1582,19 +1536,6 @@ class Group(GroupResource, BaseNode):
     }


-# ====================================
-# Function node
-# ====================================
-
-
-@dataclass
-class FunctionNode(CompiledNode, FunctionResource):
-
-    @classmethod
-    def resource_class(cls) -> Type[FunctionResource]:
-        return FunctionResource
-
-
 # ====================================
 # SemanticModel node
 # ====================================
@@ -1766,20 +1707,6 @@ class ParsedNodePatch(ParsedPatch):
     freshness: Optional[ModelFreshness] = None


-@dataclass
-class ParsedFunctionPatchRequired:
-    returns: FunctionReturns
-
-
-# TODO: Maybe this shouldn't be a subclass of ParsedNodePatch, but ParsedPatch instead
-# Currently, `functions` have the fields like `columns`, `access`, `version`, and etc,
-# but they don't actually do anything. If we remove those properties from FunctionNode,
-# we can remove this class and use ParsedPatch instead.
-@dataclass
-class ParsedFunctionPatch(ParsedNodePatch, ParsedFunctionPatchRequired):
-    arguments: List[FunctionArgument] = field(default_factory=list)
-
-
 @dataclass
 class ParsedMacroPatch(ParsedPatch):
     arguments: List[MacroArgument] = field(default_factory=list)
@@ -1799,7 +1726,6 @@ class ParsedSingularTestPatch(ParsedPatch):
 # SQL related attributes
 ManifestSQLNode = Union[
     AnalysisNode,
-    FunctionNode,
     SingularTestNode,
     HookNode,
     ModelNode,
@@ -127,6 +127,7 @@ class SemanticManifest:
     def _get_pydantic_semantic_manifest(self) -> PydanticSemanticManifest:
         pydantic_time_spines: List[PydanticTimeSpine] = []
         minimum_time_spine_granularity: Optional[TimeGranularity] = None
+        has_legacy_time_spine_with_config: bool = False
         for node in self.manifest.nodes.values():
             if not (isinstance(node, ModelNode) and node.time_spine):
                 continue
@@ -166,6 +167,8 @@ class SemanticManifest:
                 ],
             )
             pydantic_time_spines.append(pydantic_time_spine)
+            if pydantic_time_spine.node_relation.relation_name == LEGACY_TIME_SPINE_MODEL_NAME:
+                has_legacy_time_spine_with_config = True
             if (
                 not minimum_time_spine_granularity
                 or standard_granularity_column.granularity.to_int()
@@ -193,8 +196,8 @@ class SemanticManifest:
                 PydanticSavedQuery.parse_obj(saved_query.to_dict())
             )

         legacy_time_spine_model: Optional[ModelNode] = None
-        if self.manifest.semantic_models:
+        if not has_legacy_time_spine_with_config:
             legacy_time_spine_model = self.manifest.ref_lookup.find(
                 LEGACY_TIME_SPINE_MODEL_NAME, None, None, self.manifest
             )
@@ -218,8 +221,8 @@ class SemanticManifest:
                 "(https://docs.getdbt.com/docs/build/metricflow-time-spine)."
             )

-        # For backward compatibility: if legacy time spine exists without config, include it in the manifest.
-        if legacy_time_spine_model and legacy_time_spine_model.time_spine is None:
+        # For backward compatibility: if legacy time spine exists, include it in the manifest.
+        if legacy_time_spine_model:
             legacy_time_spine = LegacyTimeSpine(
                 location=legacy_time_spine_model.relation_name,
                 column_name="date_day",
Some files were not shown because too many files have changed in this diff