mirror of https://github.com/dbt-labs/dbt-core
synced 2025-12-17 19:31:34 +00:00

Compare commits: macro-reso ... delete_sch
234 Commits
0199978a61, 4443a91c24, 8fe7d652ab, ecf9436c6e, 994a089b1f, 55aad328ea,
84cc54007e, 43d6c2f369, fe9e39dab8, 760e4cea3a, e349e01829, 0b0092842f,
04ebe0f2a3, fb6dbc848e, 65791e4d12, 3e8f2f1c27, 29b83598e3, dc744f6e8f,
8b1f1d52c4, 1bcef62e73, 487a5321df, f884eb4473, 4cb6d47bf7, 01f5dc8b85,
2e3c6fe614, a36057d6e9, 1a9fb612ed, 35062ca6af, 4e1b44e353, f6b2cb7fdc,
bcbde3ac42, 1e4bed0f1b, 9a0b714fda, ddd6506bea, 37b1fce205, 2325759ba8,
29429ecf7b, 9a7be6de67, 0dc2a2f963, 2d336553af, 1014a6d490, 27943a5ebc,
528b95cba8, 0290cf7dd0, 5c8a4ab986, 8736508617, 4811ada35a, afb2d61a08,
514647b29f, 4c587544b6, f5f9591d09, 61727ab5b6, f87964ec1c, 2edd5b3335,
668fe78e2d, fe28d9e115, 5cb127999c, 86b349f812, a70024f745, 8b5884b527,
4c1d0e92cd, 6e7e55212b, 11dbe679b9, c63ae89efb, ee74a60082, 607646b627,
7e164e3ab7, 7e72cace2b, c53d67d3b5, cb56f4fdc1, f15e128d6c, 99d033ffec,
6fee361183, 95581cc661, 3c4456ddbf, b44c2e498d, c86cec3256, a1f005789d,
d03292e8b9, ebacedd89d, fb41ce93d6, 1e4e15c023, cf08b8411a, e81f7fdbd5,
96f54264b4, b945d177d3, ebc22fa26c, a994ace2db, f2a5ad0504, fe33dcc3d6,
c95b1ea5e6, 0d87d314ac, 71f3519611, 02d7727365, f683e36468, cfaacc6e49,
a029661e23, 80b2a47d60, 9af5ec6069, e46eae1f0e, c07186855f, 3e7778c380,
68970d09fa, 8c8c6284fb, b435e26aa4, 58f9af7d58, c6c0c79216, 461e8e5323,
0fa9690e38, cc42ec39e6, 952cca8c58, 0c8a8de7cd, c52a015f36, c5eb6d70b4,
2c1926cee9, 29395ac617, 58344f4d25, 65b366bca9, c501d71645, d65bae5f05,
c48aaa03de, 7b8ae21c36, d9c36c3a57, bfb68b2619, a0abc58130, 8a395e928d,
7072a53770, deedeeb9ce, 6fd0a94729, e4fe839e45, ce10240f5b, f48a927b86,
fa993f3ea5, ef1cb97755, fc431010ef, 0d723f180a, 9d232398ee, 865b09b2f0,
7329143ffb, d50aebb117, b337e0b726, 4b6c57cede, 2b23a038d4, d1ebf9d12a,
12e40e2581, d1e400eed2, 7ea4670832, 869ba181c7, f36c4e7275, 7f9874d260,
9a32716374, 9bc80d52df, 83e51618d0, 7df747ae04, 20f904951f, 5198031d5b,
1ec5e22e2b, c7522d27ad, 6965eca079, 8a1b9276f9, 2411f93240, 5841d52792,
03a4d118f3, a1f6451090, c12f6fbf4d, f732b76dc3, edc60034a5, 63f40543ab,
6f603f6006, 581d8563cc, 2b6e2e18df, 1220fdfdd6, 07726b0047, e33b06badf,
15dcb9a19d, 7885e874c6, 93f1bd5df6, ef03ea2697, db65e627ca, 1a5d6922dd,
2d59a51874, 0836095a57, 87178287c7, dc3f60801e, 9c8b28aa64, 719a50cc91,
7a410ab228, 98bbbc126b, 06e55bb93e, 3e2ec1601b, 5ae8f6aad7, 1cbc6d333d,
3bf148c443, 77d48cc27a, 6663846026, bb2017a839, bdcf264963, 50b85a0b01,
ad723a6db8, f1f0c38c55, 07c40d6574, 2f2e0cee10, dc59c706ff, cc7170dead,
af188624d5, c547aace36, 2a5ad17e6d, cd8b652568, eecaee1fe6, d3f412daab,
0da5dfecbb, dc47f6b7b9, 321031cb47, b5a0c4c228, 15704ab3d5, a1f78a8f62,
00f4a25bdc, cff0b65b01, 0726df85eb, 1e4286a62d, 84dfb22cd5, 34d8ac7c6e,
125982a4ad, 43136bbfb6, 11cc71b75f, e42b7ca214, 48d9a67aaa, 7763212297,
c2bc2f009b, 6e0a387205, 1740df534b, 0ab954e1af, 5488dfb992, 09355701f6
@@ -1,5 +1,5 @@
[bumpversion]
current_version = 1.8.0a1
current_version = 1.9.0a1
parse = (?P<major>[\d]+) # major version number
\.(?P<minor>[\d]+) # minor version number
\.(?P<patch>[\d]+) # patch version number

@@ -35,13 +35,3 @@ first_value = 1
[bumpversion:file:core/setup.py]

[bumpversion:file:core/dbt/version.py]

[bumpversion:file:plugins/postgres/setup.py]

[bumpversion:file:plugins/postgres/dbt/adapters/postgres/__version__.py]

[bumpversion:file:docker/Dockerfile]

[bumpversion:file:tests/adapter/setup.py]

[bumpversion:file:tests/adapter/dbt/tests/adapter/__version__.py]
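As a side note on the `parse` setting above: bumpversion uses that regex to split a version string into named parts before bumping one of them. A minimal Python illustration of the same idea, using a simplified form of the pattern shown above (the sample version string is made up):

```python
import re

# Simplified version of the bumpversion `parse` pattern above.
VERSION_PATTERN = re.compile(r"(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)")

def split_version(version: str) -> dict:
    """Split a version string like '1.9.0' into major/minor/patch parts."""
    match = VERSION_PATTERN.match(version)
    if match is None:
        raise ValueError(f"unparseable version: {version!r}")
    return {name: int(value) for name, value in match.groupdict().items()}

print(split_version("1.9.0"))  # {'major': 1, 'minor': 9, 'patch': 0}
```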
@@ -1,6 +1,6 @@
# dbt Core Changelog

- This file provides a full account of all changes to `dbt-core` and `dbt-postgres`
- This file provides a full account of all changes to `dbt-core`
- Changes are listed under the (pre)release in which they first appear. Subsequent releases include changes from previous releases.
- "Breaking changes" listed under a version may require action from end users or external maintainers when upgrading to that version.
- Do not edit this file directly. This file is auto-generated using [changie](https://github.com/miniscruff/changie). For details on how to document a change, see [the contributing guide](https://github.com/dbt-labs/dbt-core/blob/main/CONTRIBUTING.md#adding-changelog-entry)

@@ -1,6 +0,0 @@
kind: Dependencies
body: Begin using DSI 0.4.x
time: 2023-10-31T13:19:54.750009-07:00
custom:
Author: QMalcolm peterallenwebb
PR: "8892"

@@ -1,6 +0,0 @@
kind: Dependencies
body: Update typing-extensions version to >=4.4
time: 2023-11-06T13:00:51.062386-08:00
custom:
Author: tlento
PR: "9012"

@@ -1,6 +0,0 @@
kind: Docs
body: fix get_custom_database docstring
time: 2023-11-06T12:31:57.525711Z
custom:
Author: LeoTheGriff
Issue: "9003"

@@ -1,6 +0,0 @@
kind: Features
body: Add drop_schema_named macro
time: 2023-10-17T14:36:20.612289-07:00
custom:
Author: colin-rogers-dbt
Issue: "8025"

@@ -1,6 +0,0 @@
kind: Features
body: Add exports to SavedQuery spec
time: 2023-10-31T13:20:22.448158-07:00
custom:
Author: QMalcolm peterallenwebb
Issue: "8892"

@@ -1,6 +0,0 @@
kind: Features
body: Support setting export configs hierarchically via saved query and project configs
time: 2023-11-10T15:42:55.042317-08:00
custom:
Author: QMalcolm
Issue: "8956"
6 .changes/unreleased/Features-20240506-175642.yaml Normal file
@@ -0,0 +1,6 @@
kind: Features
body: serialize inferred primary key
time: 2024-05-06T17:56:42.757673-05:00
custom:
Author: dave-connors-3
Issue: "9824"

6 .changes/unreleased/Features-20240507-162717.yaml Normal file
@@ -0,0 +1,6 @@
kind: Features
body: 'Add unit_test: selection method'
time: 2024-05-07T16:27:17.047585-04:00
custom:
Author: michelleark
Issue: "10053"

6 .changes/unreleased/Fixes-20230601-204157.yaml Normal file
@@ -0,0 +1,6 @@
kind: Fixes
body: Remove unused check_new method
time: 2023-06-01T20:41:57.556342+02:00
custom:
Author: kevinneville
Issue: "7586"

@@ -1,6 +0,0 @@
kind: Fixes
body: For packages installed with tarball method, fetch metadata to resolve nested dependencies
time: 2023-10-13T13:09:43.188308-04:00
custom:
Author: adamlopez
Issue: "8621"

@@ -1,6 +0,0 @@
kind: Fixes
body: Fix partial parsing not working for semantic model change
time: 2023-10-16T16:39:53.05058-07:00
custom:
Author: ChenyuLInx
Issue: "8859"

@@ -1,6 +0,0 @@
kind: Fixes
body: Handle unknown `type_code` for model contracts
time: 2023-10-24T11:01:51.980781-06:00
custom:
Author: dbeatty10
Issue: 8877 8353

@@ -1,6 +0,0 @@
kind: Fixes
body: Add back contract enforcement for temporary tables on postgres
time: 2023-10-24T14:55:04.051683-05:00
custom:
Author: emmyoop
Issue: "8857"

@@ -1,6 +0,0 @@
kind: Fixes
body: Rework get_catalog implementation to retain previous adapter interface semantics
time: 2023-10-24T15:54:00.628086-04:00
custom:
Author: peterallenwebb
Issue: "8846"

@@ -1,6 +0,0 @@
kind: Fixes
body: Add version to fqn when version==0
time: 2023-10-26T00:25:36.259356-05:00
custom:
Author: aranke
Issue: "8836"

@@ -1,6 +0,0 @@
kind: Fixes
body: Fix cased comparison in catalog-retrieval function.
time: 2023-10-30T09:37:34.258612-04:00
custom:
Author: peterallenwebb
Issue: "8939"

@@ -1,6 +0,0 @@
kind: Fixes
body: Catalog queries now assign the correct type to materialized views
time: 2023-10-31T00:53:45.486203-04:00
custom:
Author: mikealfare
Issue: "8864"

@@ -1,6 +0,0 @@
kind: Fixes
body: Fix compilation exception running empty seed file and support new Integer agate data_type
time: 2023-10-31T14:48:37.774871-04:00
custom:
Author: gshank
Issue: "8895"

@@ -1,6 +0,0 @@
kind: Fixes
body: Make relation filtering None-tolerant for maximal flexibility across adapters.
time: 2023-11-01T15:58:24.552054-04:00
custom:
Author: peterallenwebb
Issue: "8974"

@@ -1,7 +0,0 @@
kind: Fixes
body: Update run_results.json from previous versions of dbt to support deferral and
rerun from failure
time: 2023-11-06T15:59:33.677915-05:00
custom:
Author: jtcohen6 peterallenwebb
Issue: "9010"

@@ -1,6 +0,0 @@
kind: Fixes
body: Fix git repository with subdirectory for Deps
time: 2023-11-07T09:23:58.214271-08:00
custom:
Author: ChenyuLInx
Issue: "9000"

@@ -1,7 +0,0 @@
kind: Fixes
body: Use MANIFEST.in to recursively include all jinja templates; fixes issue where
some templates were not included in the distribution
time: 2023-11-07T09:41:30.121733-05:00
custom:
Author: mikealfare
Issue: "9016"

@@ -1,6 +0,0 @@
kind: Fixes
body: Fix formatting of tarball information in packages-lock.yml
time: 2023-11-13T11:49:56.437007-08:00
custom:
Author: ChenyuLInx QMalcolm
Issue: "9062"

@@ -1,6 +0,0 @@
kind: Fixes
body: 'deps: Lock git packages to commit SHA during resolution'
time: 2023-11-27T15:43:10.122069+01:00
custom:
Author: jtcohen6
Issue: "9050"

@@ -1,6 +0,0 @@
kind: Fixes
body: 'deps: Use PackageRenderer to read package-lock.json'
time: 2023-11-27T15:43:47.842423+01:00
custom:
Author: jtcohen6
Issue: "9127"

@@ -1,6 +0,0 @@
kind: Fixes
body: 'Get sources working again in dbt docs generate'
time: 2023-11-28T15:52:25.738256Z
custom:
Author: aranke
Issue: "9119"

7 .changes/unreleased/Fixes-20240508-151127.yaml Normal file
@@ -0,0 +1,7 @@
kind: Fixes
body: 'Restore previous behavior for --favor-state: only favor defer_relation if not
selected in current command"'
time: 2024-05-08T15:11:27.510912+02:00
custom:
Author: jtcohen6
Issue: "10107"

6 .changes/unreleased/Fixes-20240509-091411.yaml Normal file
@@ -0,0 +1,6 @@
kind: Fixes
body: Unit test fixture (csv) returns null for empty value
time: 2024-05-09T09:14:11.772709-04:00
custom:
Author: michelleark
Issue: "9881"
@@ -1,6 +0,0 @@
kind: Under the Hood
body: Add a no-op runner for Saved Qeury
time: 2023-10-27T14:00:48.4755-07:00
custom:
Author: ChenyuLInx
Issue: "8893"

@@ -1,7 +0,0 @@
kind: Under the Hood
body: Move CatalogRelationTypes test case to the shared test suite to be reused by
adapter maintainers
time: 2023-11-03T19:52:22.694394-04:00
custom:
Author: mikealfare
Issue: "8952"

@@ -1,6 +0,0 @@
kind: Under the Hood
body: Treat SystemExit as an interrupt if raised during node execution.
time: 2023-11-06T08:04:22.022179-05:00
custom:
Author: benmosher
Issue: n/a

@@ -1,6 +0,0 @@
kind: Under the Hood
body: Removing unused 'documentable'
time: 2023-11-06T10:57:30.694056-08:00
custom:
Author: QMalcolm
Issue: "8871"

@@ -1,6 +0,0 @@
kind: Under the Hood
body: Cache dbt plugin modules to improve integration test performance
time: 2023-11-07T19:15:46.170151-05:00
custom:
Author: peterallenwebb
Issue: "9029"

@@ -1,7 +0,0 @@
kind: Under the Hood
body: Fix test_current_timestamp_matches_utc test; allow for MacOS runner system clock
variance
time: 2023-11-11T17:53:50.098843-05:00
custom:
Author: mikealfare
Issue: "9057"

@@ -1,6 +0,0 @@
kind: Under the Hood
body: Clean up unused adaptor folders
time: 2023-11-20T13:47:35.923794-08:00
custom:
Author: ChenyuLInx
Issue: "9123"

6 .changes/unreleased/Under the Hood-20240502-154430.yaml Normal file
@@ -0,0 +1,6 @@
kind: Under the Hood
body: Clear error message for Private package in dbt-core
time: 2024-05-02T15:44:30.713097-07:00
custom:
Author: ChenyuLInx
Issue: "10083"

6 .changes/unreleased/Under the Hood-20240506-145511.yaml Normal file
@@ -0,0 +1,6 @@
kind: Under the Hood
body: Enable use of context in serialization
time: 2024-05-06T14:55:11.1812-04:00
custom:
Author: gshank
Issue: "10093"
@@ -31,43 +31,7 @@ kinds:
- {{.Body}} ({{ range $index, $element := $IssueList }}{{if $index}}, {{end}}{{$element}}{{end}})
- label: Under the Hood
- label: Dependencies
changeFormat: |-
{{- $PRList := list }}
{{- $changes := splitList " " $.Custom.PR }}
{{- range $pullrequest := $changes }}
{{- $changeLink := "[#nbr](https://github.com/dbt-labs/dbt-core/pull/nbr)" | replace "nbr" $pullrequest }}
{{- $PRList = append $PRList $changeLink }}
{{- end -}}
- {{.Body}} ({{ range $index, $element := $PRList }}{{if $index}}, {{end}}{{$element}}{{end}})
skipGlobalChoices: true
additionalChoices:
- key: Author
label: GitHub Username(s) (separated by a single space if multiple)
type: string
minLength: 3
- key: PR
label: GitHub Pull Request Number (separated by a single space if multiple)
type: string
minLength: 1
- label: Security
changeFormat: |-
{{- $PRList := list }}
{{- $changes := splitList " " $.Custom.PR }}
{{- range $pullrequest := $changes }}
{{- $changeLink := "[#nbr](https://github.com/dbt-labs/dbt-core/pull/nbr)" | replace "nbr" $pullrequest }}
{{- $PRList = append $PRList $changeLink }}
{{- end -}}
- {{.Body}} ({{ range $index, $element := $PRList }}{{if $index}}, {{end}}{{$element}}{{end}})
skipGlobalChoices: true
additionalChoices:
- key: Author
label: GitHub Username(s) (separated by a single space if multiple)
type: string
minLength: 3
- key: PR
label: GitHub Pull Request Number (separated by a single space if multiple)
type: string
minLength: 1

newlines:
afterChangelogHeader: 1

@@ -106,18 +70,10 @@ footerFormat: |
{{- $changeList := splitList " " $change.Custom.Author }}
{{- $IssueList := list }}
{{- $changeLink := $change.Kind }}
{{- if or (eq $change.Kind "Dependencies") (eq $change.Kind "Security") }}
{{- $changes := splitList " " $change.Custom.PR }}
{{- range $issueNbr := $changes }}
{{- $changeLink := "[#nbr](https://github.com/dbt-labs/dbt-core/pull/nbr)" | replace "nbr" $issueNbr }}
{{- $IssueList = append $IssueList $changeLink }}
{{- end -}}
{{- else }}
{{- $changes := splitList " " $change.Custom.Issue }}
{{- range $issueNbr := $changes }}
{{- $changeLink := "[#nbr](https://github.com/dbt-labs/dbt-core/issues/nbr)" | replace "nbr" $issueNbr }}
{{- $IssueList = append $IssueList $changeLink }}
{{- end -}}
{{- $changes := splitList " " $change.Custom.Issue }}
{{- range $issueNbr := $changes }}
{{- $changeLink := "[#nbr](https://github.com/dbt-labs/dbt-core/issues/nbr)" | replace "nbr" $issueNbr }}
{{- $IssueList = append $IssueList $changeLink }}
{{- end }}
{{- /* check if this contributor has other changes associated with them already */}}
{{- if hasKey $contributorDict $author }}
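For context, the `changeFormat` templates removed above turn each space-separated PR number in `Custom.PR` into a Markdown pull-request link appended to the change body. A rough Python sketch of that substitution, for illustration only (the function name and sample values are hypothetical):

```python
def format_change(body: str, pr_numbers: str) -> str:
    """Mimic the changie changeFormat template: one pull-request link per number."""
    links = [
        f"[#{nbr}](https://github.com/dbt-labs/dbt-core/pull/{nbr})"
        for nbr in pr_numbers.split(" ")
    ]
    return f"- {body} ({', '.join(links)})"

# Example with made-up values:
print(format_change("Begin using DSI 0.4.x", "8892"))
# - Begin using DSI 0.4.x ([#8892](https://github.com/dbt-labs/dbt-core/pull/8892))
```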
2 .flake8
@@ -10,3 +10,5 @@ ignore =
E741
E501 # long line checking is done in black
exclude = test/
per-file-ignores =
*/__init__.py: F401

2 .gitattributes vendored
@@ -1,4 +1,4 @@
core/dbt/include/index.html binary
core/dbt/task/docs/index.html binary
tests/functional/artifacts/data/state/*/manifest.json binary
core/dbt/docs/build/html/searchindex.js binary
core/dbt/docs/build/html/index.html binary
25 .github/CODEOWNERS vendored
@@ -13,31 +13,6 @@
# the core team as a whole will be assigned
* @dbt-labs/core-team

### ADAPTERS

# Adapter interface ("base" + "sql" adapter defaults, cache)
/core/dbt/adapters @dbt-labs/core-adapters

# Global project (default macros + materializations), starter project
/core/dbt/include @dbt-labs/core-adapters

# Postgres plugin
/plugins/ @dbt-labs/core-adapters
/plugins/postgres/setup.py @dbt-labs/core-adapters

# Functional tests for adapter plugins
/tests/adapter @dbt-labs/core-adapters

### TESTS

# Overlapping ownership for vast majority of unit + functional tests

# Perf regression testing framework
# This excludes the test project files itself since those aren't specific
# framework changes (excluded by not setting an owner next to it- no owner)
/performance @nathaniel-may
/performance/projects

### ARTIFACTS

/schemas/dbt @dbt-labs/cloud-artifacts

13 .github/ISSUE_TEMPLATE/implementation-ticket.yml vendored
@@ -30,6 +30,16 @@ body:
What is the definition of done for this ticket? Include any relevant edge cases and/or test cases
validations:
required: true
- type: textarea
attributes:
label: Suggested Tests
description: |
Provide scenarios to test. Link to existing similar tests if appropriate.
placeholder: |
1. Test with no version specified in the schema file and use selection logic on a versioned model for a specific version. Expect pass.
2. Test with a version specified in the schema file that is no valid. Expect ParsingError.
validations:
required: true
- type: textarea
attributes:
label: Impact to Other Teams

@@ -52,7 +62,6 @@ body:
attributes:
label: Context
description: |
Provide the "why", motivation, and alternative approaches considered -- linking to previous refinement issues, spikes, Notion docs as appropriate
validations:
Provide the "why", motivation, and alternative approaches considered -- linking to previous refinement issues, spikes and documentation as appropriate
validations:
required: false
3 .github/_README.md vendored
@@ -47,7 +47,8 @@ ___

### How to re-run jobs

- Some actions cannot be rerun in the GitHub UI. Namely the snyk checks and the cla check. Snyk checks are rerun by closing and reopening the PR. You can retrigger the cla check by commenting on the PR with `@cla-bot check`
- From the UI you can rerun from failure
- You can retrigger the cla check by commenting on the PR with `@cla-bot check`

___
21 .github/actions/latest-wrangler/action.yml vendored
@@ -1,20 +1,21 @@
name: "Github package 'latest' tag wrangler for containers"
description: "Determines wether or not a given dbt container should be given a bare 'latest' tag (I.E. dbt-core:latest)"
name: "GitHub package `latest` tag wrangler for containers"
description: "Determines if the published image should include `latest` tags"

inputs:
package_name:
description: "Package to check (I.E. dbt-core, dbt-redshift, etc)"
description: "Package being published (i.e. `dbt-core`, `dbt-redshift`, etc.)"
required: true
new_version:
description: "Semver of the container being built (I.E. 1.0.4)"
description: "SemVer of the package being published (i.e. 1.7.2, 1.8.0a1, etc.)"
required: true
gh_token:
description: "Auth token for github (must have view packages scope)"
github_token:
description: "Auth token for GitHub (must have view packages scope)"
required: true

outputs:
latest:
description: "Wether or not built container should be tagged latest (bool)"
minor_latest:
description: "Wether or not built container should be tagged minor.latest (bool)"
tags:
description: "A list of tags to associate with this version"

runs:
using: "docker"
image: "Dockerfile"
133 .github/actions/latest-wrangler/main.py vendored
@@ -1,98 +1,71 @@
import os
import sys
from packaging.version import Version, parse
import requests
from distutils.util import strtobool
from typing import Union
from packaging.version import parse, Version
import sys
from typing import List

if __name__ == "__main__":

# get inputs
package = os.environ["INPUT_PACKAGE"]
new_version = parse(os.environ["INPUT_NEW_VERSION"])
gh_token = os.environ["INPUT_GH_TOKEN"]
halt_on_missing = strtobool(os.environ.get("INPUT_HALT_ON_MISSING", "False"))
def main():
package_name: str = os.environ["INPUT_PACKAGE_NAME"]
new_version: Version = parse(os.environ["INPUT_NEW_VERSION"])
github_token: str = os.environ["INPUT_GITHUB_TOKEN"]

# get package metadata from github
package_request = requests.get(
f"https://api.github.com/orgs/dbt-labs/packages/container/{package}/versions",
auth=("", gh_token),
)
package_meta = package_request.json()
response = _package_metadata(package_name, github_token)
published_versions = _published_versions(response)
new_version_tags = _new_version_tags(new_version, published_versions)
_register_tags(new_version_tags, package_name)

# Log info if we don't get a 200
if package_request.status_code != 200:
print(f"Call to GH API failed: {package_request.status_code} {package_meta['message']}")

# Make an early exit if there is no matching package in github
if package_request.status_code == 404:
if halt_on_missing:
sys.exit(1)
# everything is the latest if the package doesn't exist
github_output = os.environ.get("GITHUB_OUTPUT")
with open(github_output, "at", encoding="utf-8") as gh_output:
gh_output.write("latest=True")
gh_output.write("minor_latest=True")
sys.exit(0)
def _package_metadata(package_name: str, github_token: str) -> requests.Response:
url = f"https://api.github.com/orgs/dbt-labs/packages/container/{package_name}/versions"
return requests.get(url, auth=("", github_token))

# TODO: verify package meta is "correct"
# https://github.com/dbt-labs/dbt-core/issues/4640

# map versions and tags
version_tag_map = {
version["id"]: version["metadata"]["container"]["tags"] for version in package_meta
}
def _published_versions(response: requests.Response) -> List[Version]:
package_metadata = response.json()
return [
parse(tag)
for version in package_metadata
for tag in version["metadata"]["container"]["tags"]
if "latest" not in tag
]

# is pre-release
pre_rel = True if any(x in str(new_version) for x in ["a", "b", "rc"]) else False

# semver of current latest
for version, tags in version_tag_map.items():
if "latest" in tags:
# N.B. This seems counterintuitive, but we expect any version tagged
# 'latest' to have exactly three associated tags:
# latest, major.minor.latest, and major.minor.patch.
# Subtracting everything that contains the string 'latest' gets us
# the major.minor.patch which is what's needed for comparison.
current_latest = parse([tag for tag in tags if "latest" not in tag][0])
else:
current_latest = False
def _new_version_tags(new_version: Version, published_versions: List[Version]) -> List[str]:
# the package version is always a tag
tags = [str(new_version)]

# semver of current_minor_latest
for version, tags in version_tag_map.items():
if f"{new_version.major}.{new_version.minor}.latest" in tags:
# Similar to above, only now we expect exactly two tags:
# major.minor.patch and major.minor.latest
current_minor_latest = parse([tag for tag in tags if "latest" not in tag][0])
else:
current_minor_latest = False
# pre-releases don't get tagged with `latest`
if new_version.is_prerelease:
return tags

def is_latest(
pre_rel: bool, new_version: Version, remote_latest: Union[bool, Version]
) -> bool:
"""Determine if a given contaier should be tagged 'latest' based on:
- it's pre-release status
- it's version
- the version of a previously identified container tagged 'latest'
if new_version > max(published_versions):
tags.append("latest")

:param pre_rel: Wether or not the version of the new container is a pre-release
:param new_version: The version of the new container
:param remote_latest: The version of the previously identified container that's
already tagged latest or False
"""
# is a pre-release = not latest
if pre_rel:
return False
# + no latest tag found = is latest
if not remote_latest:
return True
# + if remote version is lower than current = is latest, else not latest
return True if remote_latest <= new_version else False
published_patches = [
version
for version in published_versions
if version.major == new_version.major and version.minor == new_version.minor
]
if new_version > max(published_patches):
tags.append(f"{new_version.major}.{new_version.minor}.latest")

latest = is_latest(pre_rel, new_version, current_latest)
minor_latest = is_latest(pre_rel, new_version, current_minor_latest)
return tags

def _register_tags(tags: List[str], package_name: str) -> None:
fully_qualified_tags = ",".join([f"ghcr.io/dbt-labs/{package_name}:{tag}" for tag in tags])
github_output = os.environ.get("GITHUB_OUTPUT")
with open(github_output, "at", encoding="utf-8") as gh_output:
gh_output.write(f"latest={latest}")
gh_output.write(f"minor_latest={minor_latest}")
gh_output.write(f"fully_qualified_tags={fully_qualified_tags}")

def _validate_response(response: requests.Response) -> None:
message = response["message"]
if response.status_code != 200:
print(f"Call to GitHub API failed: {response.status_code} - {message}")
sys.exit(1)

if __name__ == "__main__":
main()
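The rewritten `_new_version_tags` above decides which Docker tags a build receives: the exact version is always tagged, pre-releases get nothing more, `latest` is added only when the new version is newer than everything already published, and `major.minor.latest` when it is the newest patch of its minor series. A self-contained sketch of that rule with a made-up publish history (this is an illustration of the logic, not the action code itself):

```python
from typing import List
from packaging.version import Version, parse

def new_version_tags(new_version: Version, published: List[Version]) -> List[str]:
    """Sketch of the tagging rule described above."""
    tags = [str(new_version)]          # the exact version is always a tag
    if new_version.is_prerelease:      # pre-releases never get `latest` tags
        return tags
    if new_version > max(published):   # newer than everything published -> bare `latest`
        tags.append("latest")
    same_minor = [
        v for v in published
        if (v.major, v.minor) == (new_version.major, new_version.minor)
    ]
    if not same_minor or new_version > max(same_minor):
        tags.append(f"{new_version.major}.{new_version.minor}.latest")
    return tags

published = [parse(v) for v in ["1.7.0", "1.7.1", "1.8.0"]]   # made-up history
print(new_version_tags(parse("1.7.2"), published))   # ['1.7.2', '1.7.latest']
print(new_version_tags(parse("1.9.0a1"), published)) # ['1.9.0a1']
```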
5 .github/dependabot.yml vendored
@@ -11,11 +11,6 @@ updates:
schedule:
interval: "daily"
rebase-strategy: "disabled"
- package-ecosystem: "pip"
directory: "/plugins/postgres"
schedule:
interval: "daily"
rebase-strategy: "disabled"

# docker dependencies
- package-ecosystem: "docker"

2 .github/workflows/backport.yml vendored
@@ -35,6 +35,6 @@ jobs:
github.event.pull_request.merged
&& contains(github.event.label.name, 'backport')
steps:
- uses: tibdex/backport@v2.0.3
- uses: tibdex/backport@v2.0.4
with:
github_token: ${{ secrets.GITHUB_TOKEN }}

4 .github/workflows/bot-changelog.yml vendored
@@ -41,8 +41,6 @@ jobs:
include:
- label: "dependencies"
changie_kind: "Dependencies"
- label: "snyk"
changie_kind: "Security"
runs-on: ubuntu-latest

steps:

@@ -58,4 +56,4 @@ jobs:
commit_message: "Add automated changelog yaml from template for bot PR"
changie_kind: ${{ matrix.changie_kind }}
label: ${{ matrix.label }}
custom_changelog_string: "custom:\n  Author: ${{ github.event.pull_request.user.login }}\n  PR: ${{ github.event.pull_request.number }}"
custom_changelog_string: "custom:\n  Author: ${{ github.event.pull_request.user.login }}\n  Issue: ${{ github.event.pull_request.number }}"

2 .github/workflows/changelog-existence.yml vendored
@@ -19,6 +19,8 @@ name: Check Changelog Entry
on:
pull_request_target:
types: [opened, reopened, labeled, unlabeled, synchronize]
paths-ignore: ['.changes/**', '.github/**', 'tests/**', '**.md', '**.yml']

workflow_dispatch:

defaults:
41 .github/workflows/check-artifact-changes.yml vendored Normal file
@@ -0,0 +1,41 @@
name: Check Artifact Changes

on:
pull_request:
types: [ opened, reopened, labeled, unlabeled, synchronize ]
paths-ignore: [ '.changes/**', '.github/**', 'tests/**', '**.md', '**.yml' ]

workflow_dispatch:

jobs:
check-artifact-changes:
runs-on: ubuntu-latest
if: ${{ !contains(github.event.pull_request.labels.*.name, 'artifact_minor_upgrade') }}
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0

- name: Check for changes in core/dbt/artifacts
# https://github.com/marketplace/actions/paths-changes-filter
uses: dorny/paths-filter@v3
id: check_artifact_changes
with:
filters: |
artifacts_changed:
- 'core/dbt/artifacts/**'
list-files: shell

- name: Fail CI if artifacts have changed
if: steps.check_artifact_changes.outputs.artifacts_changed == 'true'
run: |
echo "CI failure: Artifact changes checked in core/dbt/artifacts directory."
echo "Files changed: ${{ steps.check_artifact_changes.outputs.artifacts_changed_files }}"
echo "To bypass this check, confirm that the change is not breaking (https://github.com/dbt-labs/dbt-core/blob/main/core/dbt/artifacts/README.md#breaking-changes) and add the 'artifact_minor_upgrade' label to the PR."
exit 1

- name: CI check passed
if: steps.check_artifact_changes.outputs.artifacts_changed == 'false'
run: |
echo "No prohibited artifact changes found in core/dbt/artifacts. CI check passed."

39 .github/workflows/community-label.yml vendored Normal file
@@ -0,0 +1,39 @@
# **what?**
# Label a PR with a `community` label when a PR is opened by a user outside core/adapters

# **why?**
# To streamline triage and ensure that community contributions are recognized and prioritized

# **when?**
# When a PR is opened, not in draft or moved from draft to ready for review

name: Label community PRs

on:
# have to use pull_request_target since community PRs come from forks
pull_request_target:
types: [opened, ready_for_review]

defaults:
run:
shell: bash

permissions:
pull-requests: write # labels PRs
contents: read # reads team membership

jobs:
open_issues:
# If this PR already has the community label, no need to relabel it
# If this PR is opened and not draft, determine if it needs to be labeled
# if the PR is converted out of draft, determine if it needs to be labeled
if: |
(!contains(github.event.pull_request.labels.*.name, 'community') &&
(github.event.action == 'opened' && github.event.pull_request.draft == false ) ||
github.event.action == 'ready_for_review' )
uses: dbt-labs/actions/.github/workflows/label-community.yml@main
with:
github_team: 'core-group'
label: 'community'
secrets: inherit
26 .github/workflows/docs-issue.yml vendored
@@ -1,19 +1,18 @@
# **what?**
# Open an issue in docs.getdbt.com when a PR is labeled `user docs`
# Open an issue in docs.getdbt.com when an issue is labeled `user docs` and closed as completed

# **why?**
# To reduce barriers for keeping docs up to date

# **when?**
# When a PR is labeled `user docs` and is merged. Runs on pull_request_target to run off the workflow already merged,
# not the workflow that existed on the PR branch. This allows old PRs to get comments.
# When an issue is labeled `user docs` and is closed as completed. Can be labeled before or after the issue is closed.

name: Open issues in docs.getdbt.com repo when a PR is labeled
run-name: "Open an issue in docs.getdbt.com for PR #${{ github.event.pull_request.number }}"
name: Open issues in docs.getdbt.com repo when an issue is labeled
run-name: "Open an issue in docs.getdbt.com for issue #${{ github.event.issue.number }}"

on:
pull_request_target:
issues:
types: [labeled, closed]

defaults:

@@ -21,23 +20,22 @@ defaults:
shell: bash

permissions:
issues: write # opens new issues
pull-requests: write # comments on PRs
issues: write # comments on issues

jobs:
open_issues:
# we only want to run this when the PR has been merged or the label in the labeled event is `user docs`. Otherwise it runs the
# we only want to run this when the issue is closed as completed and the label `user docs` has been assigned.
# If this logic does not exist in this workflow, it runs the
# risk of duplicaton of issues being created due to merge and label both triggering this workflow to run and neither having
# generating the comment before the other runs. This lives here instead of the shared workflow because this is where we
# decide if it should run or not.
if: |
(github.event.pull_request.merged == true) &&
((github.event.action == 'closed' && contains( github.event.pull_request.labels.*.name, 'user docs')) ||
(github.event.action == 'labeled' && github.event.label.name == 'user docs'))
(github.event.issue.state == 'closed' &&
github.event.issue.state_reason == 'completed' &&
contains( github.event.issue.labels.*.name, 'user docs'))
uses: dbt-labs/actions/.github/workflows/open-issue-in-repo.yml@main
with:
issue_repository: "dbt-labs/docs.getdbt.com"
issue_title: "Docs Changes Needed from ${{ github.event.repository.name }} PR #${{ github.event.pull_request.number }}"
issue_title: "Docs Changes Needed from ${{ github.event.repository.name }} Issue #${{ github.event.issue.number }}"
issue_body: "At a minimum, update body to include a link to the page on docs.getdbt.com requiring updates and what part(s) of the page you would like to see updated."
secrets: inherit
26 .github/workflows/jira-creation.yml vendored
@@ -1,26 +0,0 @@
# **what?**
# Mirrors issues into Jira. Includes the information: title,
# GitHub Issue ID and URL

# **why?**
# Jira is our tool for tracking and we need to see these issues in there

# **when?**
# On issue creation or when an issue is labeled `Jira`

name: Jira Issue Creation

on:
issues:
types: [opened, labeled]

permissions:
issues: write

jobs:
call-creation-action:
uses: dbt-labs/actions/.github/workflows/jira-creation-actions.yml@main
secrets:
JIRA_BASE_URL: ${{ secrets.JIRA_BASE_URL }}
JIRA_USER_EMAIL: ${{ secrets.JIRA_USER_EMAIL }}
JIRA_API_TOKEN: ${{ secrets.JIRA_API_TOKEN }}

26 .github/workflows/jira-label.yml vendored
@@ -1,26 +0,0 @@
# **what?**
# Calls mirroring Jira label Action. Includes adding a new label
# to an existing issue or removing a label as well

# **why?**
# Jira is our tool for tracking and we need to see these labels in there

# **when?**
# On labels being added or removed from issues

name: Jira Label Mirroring

on:
issues:
types: [labeled, unlabeled]

permissions:
issues: read

jobs:
call-label-action:
uses: dbt-labs/actions/.github/workflows/jira-label-actions.yml@main
secrets:
JIRA_BASE_URL: ${{ secrets.JIRA_BASE_URL }}
JIRA_USER_EMAIL: ${{ secrets.JIRA_USER_EMAIL }}
JIRA_API_TOKEN: ${{ secrets.JIRA_API_TOKEN }}

27 .github/workflows/jira-transition.yml vendored
@@ -1,27 +0,0 @@
# **what?**
# Transition a Jira issue to a new state
# Only supports these GitHub Issue transitions:
# closed, deleted, reopened

# **why?**
# Jira needs to be kept up-to-date

# **when?**
# On issue closing, deletion, reopened

name: Jira Issue Transition

on:
issues:
types: [closed, deleted, reopened]

# no special access is needed
permissions: read-all

jobs:
call-transition-action:
uses: dbt-labs/actions/.github/workflows/jira-transition-actions.yml@main
secrets:
JIRA_BASE_URL: ${{ secrets.JIRA_BASE_URL }}
JIRA_USER_EMAIL: ${{ secrets.JIRA_USER_EMAIL }}
JIRA_API_TOKEN: ${{ secrets.JIRA_API_TOKEN }}
32 .github/workflows/main.yml vendored
@@ -47,10 +47,10 @@ jobs:

steps:
- name: Check out the repository
uses: actions/checkout@v3
uses: actions/checkout@v4

- name: Set up Python
uses: actions/setup-python@v4
uses: actions/setup-python@v5
with:
python-version: '3.8'

@@ -74,17 +74,17 @@ jobs:
strategy:
fail-fast: false
matrix:
python-version: ["3.8", "3.9", "3.10", "3.11"]
python-version: [ "3.8", "3.9", "3.10", "3.11", "3.12" ]

env:
TOXENV: "unit"

steps:
- name: Check out the repository
uses: actions/checkout@v3
uses: actions/checkout@v4

- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v4
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}

@@ -107,7 +107,7 @@ jobs:

- name: Upload Unit Test Coverage to Codecov
if: ${{ matrix.python-version == '3.11' }}
uses: codecov/codecov-action@v3
uses: codecov/codecov-action@v4
with:
token: ${{ secrets.CODECOV_TOKEN }}
flags: unit

@@ -135,7 +135,7 @@ jobs:
- name: generate include
id: generate-include
run: |
INCLUDE=('"python-version":"3.8","os":"windows-latest"' '"python-version":"3.8","os":"macos-latest"' )
INCLUDE=('"python-version":"3.8","os":"windows-latest"' '"python-version":"3.8","os":"macos-12"' )
INCLUDE_GROUPS="["
for include in ${INCLUDE[@]}; do
for group in $(seq 1 ${{ env.PYTHON_INTEGRATION_TEST_WORKERS }}); do

@@ -157,7 +157,7 @@ jobs:
strategy:
fail-fast: false
matrix:
python-version: ["3.8", "3.9", "3.10", "3.11"]
python-version: [ "3.8", "3.9", "3.10", "3.11", "3.12" ]
os: [ubuntu-20.04]
split-group: ${{ fromJson(needs.integration-metadata.outputs.split-groups) }}
include: ${{ fromJson(needs.integration-metadata.outputs.include) }}

@@ -175,10 +175,10 @@ jobs:

steps:
- name: Check out the repository
uses: actions/checkout@v3
uses: actions/checkout@v4

- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v4
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}

@@ -213,15 +213,15 @@ jobs:
CURRENT_DATE=$(date +'%Y-%m-%dT%H_%M_%S') # no colons allowed for artifacts
echo "date=$CURRENT_DATE" >> $GITHUB_OUTPUT

- uses: actions/upload-artifact@v3
- uses: actions/upload-artifact@v4
if: always()
with:
name: logs_${{ matrix.python-version }}_${{ matrix.os }}_${{ steps.date.outputs.date }}
name: logs_${{ matrix.python-version }}_${{ matrix.os }}_${{ matrix.split-group }}_${{ steps.date.outputs.date }}
path: ./logs

- name: Upload Integration Test Coverage to Codecov
if: ${{ matrix.python-version == '3.11' }}
uses: codecov/codecov-action@v3
uses: codecov/codecov-action@v4
with:
token: ${{ secrets.CODECOV_TOKEN }}
flags: integration

@@ -250,10 +250,10 @@ jobs:

steps:
- name: Check out the repository
uses: actions/checkout@v3
uses: actions/checkout@v4

- name: Set up Python
uses: actions/setup-python@v4
uses: actions/setup-python@v5
with:
python-version: '3.8'

@@ -288,7 +288,7 @@ jobs:
- name: Install source distributions
# ignore dbt-1.0.0, which intentionally raises an error when installed from source
run: |
find ./dist/dbt-[a-z]*.gz -maxdepth 1 -type f | xargs python -m pip install --force-reinstall --find-links=dist/
find ./dist/*.gz -maxdepth 1 -type f | xargs python -m pip install --force-reinstall --find-links=dist/

- name: Check source distributions
run: |
20 .github/workflows/model_performance.yml vendored
@@ -48,7 +48,7 @@ jobs:
# explicitly checkout the performance runner from main regardless of which
# version we are modeling.
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
ref: main

@@ -87,12 +87,12 @@ jobs:
# explicitly checkout the performance runner from main regardless of which
# version we are modeling.
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
ref: main

# attempts to access a previously cached runner
- uses: actions/cache@v3
- uses: actions/cache@v4
id: cache
with:
path: ${{ env.RUNNER_CACHE_PATH }}

@@ -148,7 +148,7 @@ jobs:
echo "release_branch: ${{ needs.set-variables.outputs.release_branch }}"

- name: Setup Python
uses: actions/setup-python@v4
uses: actions/setup-python@v5
with:
python-version: "3.8"

@@ -160,13 +160,13 @@ jobs:

# explicitly checkout main to get the latest project definitions
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
ref: main

# this was built in the previous job so it will be there.
- name: Fetch Runner
uses: actions/cache@v3
uses: actions/cache@v4
id: cache
with:
path: ${{ env.RUNNER_CACHE_PATH }}

@@ -195,7 +195,7 @@ jobs:
- name: '[DEBUG] ls baseline directory after run'
run: ls -R performance/baselines/

- uses: actions/upload-artifact@v3
- uses: actions/upload-artifact@v4
with:
name: baseline
path: performance/baselines/${{ needs.set-variables.outputs.release_id }}/

@@ -225,7 +225,7 @@ jobs:
echo "release_branch: ${{ needs.set-variables.outputs.release_branch }}"

- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
ref: ${{ matrix.base-branch }}

@@ -235,7 +235,7 @@ jobs:
git push origin ${{ matrix.target-branch }}
git branch --set-upstream-to=origin/${{ matrix.target-branch }} ${{ matrix.target-branch }}

- uses: actions/download-artifact@v3
- uses: actions/download-artifact@v4
with:
name: baseline
path: performance/baselines/${{ needs.set-variables.outputs.release_id }}

@@ -253,7 +253,7 @@ jobs:
push: 'origin origin/${{ matrix.target-branch }}'

- name: Create Pull Request
uses: peter-evans/create-pull-request@v5
uses: peter-evans/create-pull-request@v6
with:
author: 'Github Build Bot <buildbot@fishtownanalytics.com>'
base: ${{ matrix.base-branch }}
16 .github/workflows/nightly-release.yml vendored
@@ -20,6 +20,7 @@ on:

permissions:
contents: write # this is the permission that allows creating a new release
packages: write # this is the permission that allows Docker release

defaults:
run:

@@ -33,22 +34,15 @@ jobs:
runs-on: ubuntu-latest

outputs:
commit_sha: ${{ steps.resolve-commit-sha.outputs.release_commit }}
version_number: ${{ steps.nightly-release-version.outputs.number }}
release_branch: ${{ steps.release-branch.outputs.name }}

steps:
- name: "Checkout ${{ github.repository }} Branch ${{ env.RELEASE_BRANCH }}"
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
ref: ${{ env.RELEASE_BRANCH }}

- name: "Resolve Commit To Release"
id: resolve-commit-sha
run: |
commit_sha=$(git rev-parse HEAD)
echo "release_commit=$commit_sha" >> $GITHUB_OUTPUT

- name: "Get Current Version Number"
id: version-number-sources
run: |

@@ -88,7 +82,6 @@ jobs:
steps:
- name: "[DEBUG] Log Outputs"
run: |
echo commit_sha : ${{ needs.aggregate-release-data.outputs.commit_sha }}
echo version_number: ${{ needs.aggregate-release-data.outputs.version_number }}
echo release_branch: ${{ needs.aggregate-release-data.outputs.release_branch }}

@@ -97,13 +90,8 @@ jobs:

uses: ./.github/workflows/release.yml
with:
sha: ${{ needs.aggregate-release-data.outputs.commit_sha }}
target_branch: ${{ needs.aggregate-release-data.outputs.release_branch }}
version_number: ${{ needs.aggregate-release-data.outputs.version_number }}
build_script_path: "scripts/build-dist.sh"
env_setup_script_path: "scripts/env-setup.sh"
s3_bucket_name: "core-team-artifacts"
package_test_command: "dbt --version"
test_run: true
nightly_release: true
secrets: inherit
118 .github/workflows/release-docker.yml vendored
@@ -1,118 +0,0 @@
# **what?**
# This workflow will generate a series of docker images for dbt and push them to the github container registry

# **why?**
# Docker images for dbt are used in a number of important places throughout the dbt ecosystem. This is how we keep those images up-to-date.

# **when?**
# This is triggered manually

# **next steps**
# - build this into the release workflow (or conversly, break out the different release methods into their own workflow files)

name: Docker release

permissions:
packages: write

on:
workflow_dispatch:
inputs:
package:
description: The package to release. _One_ of [dbt-core, dbt-redshift, dbt-bigquery, dbt-snowflake, dbt-spark, dbt-postgres]
required: true
version_number:
description: The release version number (i.e. 1.0.0b1). Do not include `latest` tags or a leading `v`!
required: true

jobs:
get_version_meta:
name: Get version meta
runs-on: ubuntu-latest
outputs:
major: ${{ steps.version.outputs.major }}
minor: ${{ steps.version.outputs.minor }}
patch: ${{ steps.version.outputs.patch }}
latest: ${{ steps.latest.outputs.latest }}
minor_latest: ${{ steps.latest.outputs.minor_latest }}
steps:
- uses: actions/checkout@v3
- name: Split version
id: version
run: |
IFS="." read -r MAJOR MINOR PATCH <<< ${{ github.event.inputs.version_number }}
echo "major=$MAJOR" >> $GITHUB_OUTPUT
echo "minor=$MINOR" >> $GITHUB_OUTPUT
echo "patch=$PATCH" >> $GITHUB_OUTPUT

- name: Is pkg 'latest'
id: latest
uses: ./.github/actions/latest-wrangler
with:
package: ${{ github.event.inputs.package }}
new_version: ${{ github.event.inputs.version_number }}
gh_token: ${{ secrets.GITHUB_TOKEN }}
halt_on_missing: False

setup_image_builder:
name: Set up docker image builder
runs-on: ubuntu-latest
needs: [get_version_meta]
steps:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2

build_and_push:
name: Build images and push to GHCR
runs-on: ubuntu-latest
needs: [setup_image_builder, get_version_meta]
steps:
- name: Get docker build arg
id: build_arg
run: |
BUILD_ARG_NAME=$(echo ${{ github.event.inputs.package }} | sed 's/\-/_/g')
BUILD_ARG_VALUE=$(echo ${{ github.event.inputs.package }} | sed 's/postgres/core/g')
echo "build_arg_name=$BUILD_ARG_NAME" >> $GITHUB_OUTPUT
echo "build_arg_value=$BUILD_ARG_VALUE" >> $GITHUB_OUTPUT

- name: Log in to the GHCR
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}

- name: Build and push MAJOR.MINOR.PATCH tag
uses: docker/build-push-action@v5
with:
file: docker/Dockerfile
push: True
target: ${{ github.event.inputs.package }}
build-args: |
${{ steps.build_arg.outputs.build_arg_name }}_ref=${{ steps.build_arg.outputs.build_arg_value }}@v${{ github.event.inputs.version_number }}
tags: |
ghcr.io/dbt-labs/${{ github.event.inputs.package }}:${{ github.event.inputs.version_number }}

- name: Build and push MINOR.latest tag
uses: docker/build-push-action@v5
if: ${{ needs.get_version_meta.outputs.minor_latest == 'True' }}
with:
file: docker/Dockerfile
push: True
target: ${{ github.event.inputs.package }}
build-args: |
${{ steps.build_arg.outputs.build_arg_name }}_ref=${{ steps.build_arg.outputs.build_arg_value }}@v${{ github.event.inputs.version_number }}
tags: |
ghcr.io/dbt-labs/${{ github.event.inputs.package }}:${{ needs.get_version_meta.outputs.major }}.${{ needs.get_version_meta.outputs.minor }}.latest

- name: Build and push latest tag
uses: docker/build-push-action@v5
if: ${{ needs.get_version_meta.outputs.latest == 'True' }}
with:
file: docker/Dockerfile
push: True
target: ${{ github.event.inputs.package }}
build-args: |
${{ steps.build_arg.outputs.build_arg_name }}_ref=${{ steps.build_arg.outputs.build_arg_value }}@v${{ github.event.inputs.version_number }}
tags: |
ghcr.io/dbt-labs/${{ github.event.inputs.package }}:latest
148
.github/workflows/release.yml
vendored
148
.github/workflows/release.yml
vendored
@@ -7,6 +7,7 @@
# - run unit and integration tests against given commit;
# - build and package that SHA;
# - release it to GitHub and PyPI with that specific build;
# - release it to Docker
#
# **why?**
# Ensure an automated and tested release process
@@ -14,15 +15,12 @@
# **when?**
# This workflow can be run manually on demand or can be called by other workflows

name: Release to GitHub and PyPI
name: "Release to GitHub, PyPI & Docker"
run-name: "Release ${{ inputs.version_number }} to GitHub, PyPI & Docker"

on:
  workflow_dispatch:
    inputs:
      sha:
        description: "The last commit sha in the release"
        type: string
        required: true
      target_branch:
        description: "The branch to release from"
        type: string
@@ -31,26 +29,6 @@ on:
        description: "The release version number (i.e. 1.0.0b1)"
        type: string
        required: true
      build_script_path:
        description: "Build script path"
        type: string
        default: "scripts/build-dist.sh"
        required: true
      env_setup_script_path:
        description: "Environment setup script path"
        type: string
        default: "scripts/env-setup.sh"
        required: false
      s3_bucket_name:
        description: "AWS S3 bucket name"
        type: string
        default: "core-team-artifacts"
        required: true
      package_test_command:
        description: "Package test command"
        type: string
        default: "dbt --version"
        required: true
      test_run:
        description: "Test run (Publish release as draft)"
        type: boolean
@@ -61,12 +39,13 @@ on:
        type: boolean
        default: false
        required: false
      only_docker:
        description: "Only release Docker image, skip GitHub & PyPI"
        type: boolean
        default: false
        required: false
  workflow_call:
    inputs:
      sha:
        description: "The last commit sha in the release"
        type: string
        required: true
      target_branch:
        description: "The branch to release from"
        type: string
@@ -75,26 +54,6 @@ on:
        description: "The release version number (i.e. 1.0.0b1)"
        type: string
        required: true
      build_script_path:
        description: "Build script path"
        type: string
        default: "scripts/build-dist.sh"
        required: true
      env_setup_script_path:
        description: "Environment setup script path"
        type: string
        default: "scripts/env-setup.sh"
        required: false
      s3_bucket_name:
        description: "AWS S3 bucket name"
        type: string
        default: "core-team-artifacts"
        required: true
      package_test_command:
        description: "Package test command"
        type: string
        default: "dbt --version"
        required: true
      test_run:
        description: "Test run (Publish release as draft)"
        type: boolean
@@ -114,32 +73,47 @@ defaults:
    shell: bash

jobs:
  log-inputs:
  job-setup:
    name: Log Inputs
    runs-on: ubuntu-latest
    outputs:
      starting_sha: ${{ steps.set_sha.outputs.starting_sha }}
    steps:
      - name: "[DEBUG] Print Variables"
        run: |
          echo The last commit sha in the release: ${{ inputs.sha }}
          echo Inputs
          echo The branch to release from: ${{ inputs.target_branch }}
          echo The release version number: ${{ inputs.version_number }}
          echo Build script path: ${{ inputs.build_script_path }}
          echo Environment setup script path: ${{ inputs.env_setup_script_path }}
          echo AWS S3 bucket name: ${{ inputs.s3_bucket_name }}
          echo Package test command: ${{ inputs.package_test_command }}
          echo Test run: ${{ inputs.test_run }}
          echo Nightly release: ${{ inputs.nightly_release }}
          echo Only Docker: ${{ inputs.only_docker }}

      - name: "Checkout target branch"
        uses: actions/checkout@v4
        with:
          ref: ${{ inputs.target_branch }}

      # release-prep.yml really shouldn't take in the sha but since core + all adapters
      # depend on it now this workaround lets us not input it manually with risk of error.
      # The changes always get merged into the head so we can't use a specific commit for
      # releases anyways.
      - name: "Capture sha"
        id: set_sha
        run: |
          echo "starting_sha=$(git rev-parse HEAD)" >> $GITHUB_OUTPUT

  bump-version-generate-changelog:
    name: Bump package version, Generate changelog
    needs: [job-setup]
    if: ${{ !inputs.only_docker }}

    uses: dbt-labs/dbt-release/.github/workflows/release-prep.yml@main

    with:
      sha: ${{ inputs.sha }}
      sha: ${{ needs.job-setup.outputs.starting_sha }}
      version_number: ${{ inputs.version_number }}
      target_branch: ${{ inputs.target_branch }}
      env_setup_script_path: ${{ inputs.env_setup_script_path }}
      env_setup_script_path: "scripts/env-setup.sh"
      test_run: ${{ inputs.test_run }}
      nightly_release: ${{ inputs.nightly_release }}

@@ -147,7 +121,7 @@ jobs:

  log-outputs-bump-version-generate-changelog:
    name: "[Log output] Bump package version, Generate changelog"
    if: ${{ !failure() && !cancelled() }}
    if: ${{ !failure() && !cancelled() && !inputs.only_docker }}

    needs: [bump-version-generate-changelog]

@@ -161,8 +135,8 @@ jobs:

  build-test-package:
    name: Build, Test, Package
    if: ${{ !failure() && !cancelled() }}
    needs: [bump-version-generate-changelog]
    if: ${{ !failure() && !cancelled() && !inputs.only_docker }}
    needs: [job-setup, bump-version-generate-changelog]

    uses: dbt-labs/dbt-release/.github/workflows/build.yml@main

@@ -170,9 +144,9 @@
      sha: ${{ needs.bump-version-generate-changelog.outputs.final_sha }}
      version_number: ${{ inputs.version_number }}
      changelog_path: ${{ needs.bump-version-generate-changelog.outputs.changelog_path }}
      build_script_path: ${{ inputs.build_script_path }}
      s3_bucket_name: ${{ inputs.s3_bucket_name }}
      package_test_command: ${{ inputs.package_test_command }}
      build_script_path: "scripts/build-dist.sh"
      s3_bucket_name: "core-team-artifacts"
      package_test_command: "dbt --version"
      test_run: ${{ inputs.test_run }}
      nightly_release: ${{ inputs.nightly_release }}

@@ -182,7 +156,7 @@ jobs:

  github-release:
    name: GitHub Release
    if: ${{ !failure() && !cancelled() }}
    if: ${{ !failure() && !cancelled() && !inputs.only_docker }}

    needs: [bump-version-generate-changelog, build-test-package]

@@ -209,6 +183,51 @@ jobs:
      PYPI_API_TOKEN: ${{ secrets.PYPI_API_TOKEN }}
      TEST_PYPI_API_TOKEN: ${{ secrets.TEST_PYPI_API_TOKEN }}

  determine-docker-package:
    # dbt-postgres exists within dbt-core for versions 1.7 and earlier but is a separate package for 1.8 and later.
    # determine if we need to release dbt-core or both dbt-core and dbt-postgres
    name: Determine Docker Package
    if: ${{ !failure() && !cancelled() }}
    runs-on: ubuntu-latest
    needs: [pypi-release]
    outputs:
      matrix: ${{ steps.determine-docker-package.outputs.matrix }}
    steps:
      - name: "Audit Version And Parse Into Parts"
        id: semver
        uses: dbt-labs/actions/parse-semver@v1.1.0
        with:
          version: ${{ inputs.version_number }}

      - name: "Determine Packages to Release"
        id: determine-docker-package
        run: |
          if [ ${{ steps.semver.outputs.minor }} -ge 8 ]; then
            json_output={\"package\":[\"dbt-core\"]}
          else
            json_output={\"package\":[\"dbt-core\",\"dbt-postgres\"]}
          fi
          echo "matrix=$json_output" >> $GITHUB_OUTPUT

  docker-release:
    name: "Docker Release for ${{ matrix.package }}"
    needs: [determine-docker-package]
    # We cannot release to docker on a test run because it uses the tag in GitHub as
    # what we need to release but draft releases don't actually tag the commit so it
    # finds nothing to release
    if: ${{ !failure() && !cancelled() && (!inputs.test_run || inputs.only_docker) }}
    strategy:
      matrix: ${{fromJson(needs.determine-docker-package.outputs.matrix)}}

    permissions:
      packages: write

    uses: dbt-labs/dbt-release/.github/workflows/release-docker.yml@main
    with:
      package: ${{ matrix.package }}
      version_number: ${{ inputs.version_number }}
      test_run: ${{ inputs.test_run }}

  slack-notification:
    name: Slack Notification
    if: ${{ failure() && (!inputs.test_run || inputs.nightly_release) }}
@@ -219,6 +238,7 @@ jobs:
      build-test-package,
      github-release,
      pypi-release,
      docker-release,
    ]

    uses: dbt-labs/dbt-release/.github/workflows/slack-post-notification.yml@main
58  .github/workflows/schema-check.yml  vendored
@@ -13,20 +13,18 @@
name: Artifact Schema Check

on:
  pull_request:
    types: [ opened, reopened, labeled, unlabeled, synchronize ]
    paths-ignore: [ '.changes/**', '.github/**', 'tests/**', '**.md', '**.yml' ]

  workflow_dispatch:
  pull_request: #TODO: remove before merging
  push:
    branches:
      - "develop"
      - "*.latest"
      - "releases/*"

# no special access is needed
permissions: read-all

env:
  LATEST_SCHEMA_PATH: ${{ github.workspace }}/new_schemas
  SCHEMA_DIFF_ARTIFACT: ${{ github.workspace }}//schema_schanges.txt
  SCHEMA_DIFF_ARTIFACT: ${{ github.workspace }}/schema_changes.txt
  DBT_REPO_DIRECTORY: ${{ github.workspace }}/dbt
  SCHEMA_REPO_DIRECTORY: ${{ github.workspace }}/schemas.getdbt.com

@@ -37,24 +35,41 @@ jobs:

    steps:
      - name: Set up Python
        uses: actions/setup-python@v4
        uses: actions/setup-python@v5
        with:
          python-version: 3.8

      - name: Checkout dbt repo
        uses: actions/checkout@v3
        uses: actions/checkout@v4
        with:
          path: ${{ env.DBT_REPO_DIRECTORY }}

      - name: Check for changes in core/dbt/artifacts
        # https://github.com/marketplace/actions/paths-changes-filter
        uses: dorny/paths-filter@v3
        id: check_artifact_changes
        with:
          filters: |
            artifacts_changed:
              - 'core/dbt/artifacts/**'
          list-files: shell
          working-directory: ${{ env.DBT_REPO_DIRECTORY }}

      - name: Succeed if no artifacts have changed
        if: steps.check_artifact_changes.outputs.artifacts_changed == 'false'
        run: |
          echo "No artifact changes found in core/dbt/artifacts. CI check passed."

      - name: Checkout schemas.getdbt.com repo
        uses: actions/checkout@v3
        if: steps.check_artifact_changes.outputs.artifacts_changed == 'true'
        uses: actions/checkout@v4
        with:
          repository: dbt-labs/schemas.getdbt.com
          ref: 'main'
          ssh-key: ${{ secrets.SCHEMA_SSH_PRIVATE_KEY }}
          path: ${{ env.SCHEMA_REPO_DIRECTORY }}

      - name: Generate current schema
        if: steps.check_artifact_changes.outputs.artifacts_changed == 'true'
        run: |
          cd ${{ env.DBT_REPO_DIRECTORY }}
          python3 -m venv env
@@ -65,26 +80,17 @@ jobs:

      # Copy generated schema files into the schemas.getdbt.com repo
      # Do a git diff to find any changes
      # Ignore any date or version changes though
      # Ignore any lines with date-like (yyyy-mm-dd) or version-like (x.y.z) changes
      - name: Compare schemas
        if: steps.check_artifact_changes.outputs.artifacts_changed == 'true'
        run: |
          cp -r ${{ env.LATEST_SCHEMA_PATH }}/dbt ${{ env.SCHEMA_REPO_DIRECTORY }}
          cd ${{ env.SCHEMA_REPO_DIRECTORY }}
          diff_results=$(git diff -I='*[0-9]{4}-(0[1-9]|1[0-2])-(0[1-9]|[1-2][0-9]|3[0-1])T' \
            -I='*[0-9]{1}.[0-9]{2}.[0-9]{1}(rc[0-9]|b[0-9]| )' --compact-summary)
          if [[ $(echo diff_results) ]]; then
            echo $diff_results
            echo "Schema changes detected!"
            git diff -I='*[0-9]{4}-(0[1-9]|1[0-2])-(0[1-9]|[1-2][0-9]|3[0-1])T' \
              -I='*[0-9]{1}.[0-9]{2}.[0-9]{1}(rc[0-9]|b[0-9]| )' > ${{ env.SCHEMA_DIFF_ARTIFACT }}
            exit 1
          else
            echo "No schema changes detected"
          fi
          git diff -I='*[0-9]{4}-[0-9]{2}-[0-9]{2}' -I='*[0-9]+\.[0-9]+\.[0-9]+' --exit-code > ${{ env.SCHEMA_DIFF_ARTIFACT }}

      - name: Upload schema diff
        uses: actions/upload-artifact@v3
        if: ${{ failure() }}
        uses: actions/upload-artifact@v4
        if: ${{ failure() && steps.check_artifact_changes.outputs.artifacts_changed == 'true' }}
        with:
          name: 'schema_schanges.txt'
          name: 'schema_changes.txt'
          path: '${{ env.SCHEMA_DIFF_ARTIFACT }}'

@@ -69,12 +69,12 @@ jobs:

    steps:
      - name: checkout dev
        uses: actions/checkout@v3
        uses: actions/checkout@v4
        with:
          persist-credentials: false

      - name: Setup Python
        uses: actions/setup-python@v4
        uses: actions/setup-python@v5
        with:
          python-version: "3.8"
8  .github/workflows/test-repeater.yml  vendored
@@ -36,7 +36,7 @@ on:
        type: choice
        options:
          - 'ubuntu-latest'
          - 'macos-latest'
          - 'macos-12'
          - 'windows-latest'
      num_runs_per_batch:
        description: 'Max number of times to run the test per batch. We always run 10 batches.'
@@ -83,12 +83,12 @@ jobs:

    steps:
      - name: "Checkout code"
        uses: actions/checkout@v3
        uses: actions/checkout@v4
        with:
          ref: ${{ inputs.branch }}

      - name: "Setup Python"
        uses: actions/setup-python@v4
        uses: actions/setup-python@v5
        with:
          python-version: "${{ inputs.python_version }}"

@@ -101,7 +101,7 @@ jobs:

      # mac and windows don't use make due to limitations with docker with those runners in GitHub
      - name: "Set up postgres (macos)"
        if: inputs.os == 'macos-latest'
        if: inputs.os == 'macos-12'
        uses: ./.github/actions/setup-postgres-macos

      - name: "Set up postgres (windows)"
1  .github/workflows/test/.actrc  vendored
@@ -1 +0,0 @@
-P ubuntu-latest=ghcr.io/catthehacker/ubuntu:act-latest
1  .github/workflows/test/.gitignore  vendored
@@ -1 +0,0 @@
.secrets
1  .github/workflows/test/.secrets.EXAMPLE  vendored
@@ -1 +0,0 @@
GITHUB_TOKEN=GH_PERSONAL_ACCESS_TOKEN_GOES_HERE
@@ -1,6 +0,0 @@
{
  "inputs": {
    "version_number": "1.0.1",
    "package": "dbt-postgres"
  }
}
28  .github/workflows/version-bump.yml  vendored
@@ -1,28 +0,0 @@
# **what?**
# This workflow will take the new version number to bump to. With that
# it will run versionbump to update the version number everywhere in the
# code base and then run changie to create the corresponding changelog.
# A PR will be created with the changes that can be reviewed before committing.

# **why?**
# This is to aid in releasing dbt and making sure we have updated
# the version in all places and generated the changelog.

# **when?**
# This is triggered manually

name: Version Bump

on:
  workflow_dispatch:
    inputs:
      version_number:
        description: 'The version number to bump to (ex. 1.2.0, 1.3.0b1)'
        required: true

jobs:
  version_bump_and_changie:
    uses: dbt-labs/actions/.github/workflows/version-bump.yml@main
    with:
      version_number: ${{ inputs.version_number }}
    secrets: inherit # ok since what we are calling is internally maintained
4  .isort.cfg  Normal file
@@ -0,0 +1,4 @@
[settings]
profile=black
extend_skip_glob=.github/*,third-party-stubs/*,scripts/*
known_first_party=dbt,dbt_adapters,dbt_common,dbt_extractor,dbt_semantic_interface
@@ -1,7 +1,7 @@
# Configuration for pre-commit hooks (see https://pre-commit.com/).
# Eventually the hooks described here will be run as tests before merging each PR.

exclude: ^(core/dbt/docs/build/|core/dbt/events/types_pb2.py)
exclude: ^(core/dbt/docs/build/|core/dbt/common/events/types_pb2.py|core/dbt/events/core_types_pb2.py|core/dbt/adapters/events/adapter_types_pb2.py)

# Force all unspecified python hooks to run python 3.8
default_language_version:
@@ -19,6 +19,10 @@ repos:
        exclude_types:
          - "markdown"
      - id: check-case-conflict
  - repo: https://github.com/pycqa/isort
    rev: 5.12.0
    hooks:
      - id: isort
  - repo: https://github.com/psf/black
    rev: 22.3.0
    hooks:
@@ -26,12 +26,13 @@ Legacy tests are found in the 'test' directory:

The "tasks" map to top-level dbt commands. So `dbt run` => task.run.RunTask, etc. Some are more like abstract base classes (GraphRunnableTask, for example) but all the concrete types outside of task should map to tasks. Currently one executes at a time. The tasks kick off their “Runners” and those do execute in parallel. The parallelism is managed via a thread pool, in GraphRunnableTask.

core/dbt/include/index.html
core/dbt/task/docs/index.html
This is the docs website code. It comes from the dbt-docs repository, and is generated when a release is packaged.

## Adapters

dbt uses an adapter-plugin pattern to extend support to different databases, warehouses, query engines, etc. For testing and development purposes, the dbt-postgres plugin lives alongside the dbt-core codebase, in the [`plugins`](plugins) subdirectory. Like other adapter plugins, it is a self-contained codebase and package that builds on top of dbt-core.
dbt uses an adapter-plugin pattern to extend support to different databases, warehouses, query engines, etc.
Note: dbt-postgres used to exist in dbt-core but is now in [its own repo](https://github.com/dbt-labs/dbt-postgres)

Each adapter is a mix of python, Jinja2, and SQL. The adapter code also makes heavy use of Jinja2 to wrap modular chunks of SQL functionality, define default implementations, and allow plugins to override it.
@@ -1,6 +1,6 @@
# dbt Core Changelog

- This file provides a full account of all changes to `dbt-core` and `dbt-postgres`
- This file provides a full account of all changes to `dbt-core`
- Changes are listed under the (pre)release in which they first appear. Subsequent releases include changes from previous releases.
- "Breaking changes" listed under a version may require action from end users or external maintainers when upgrading to that version.
- Do not edit this file directly. This file is auto-generated using [changie](https://github.com/miniscruff/changie). For details on how to document a change, see [the contributing guide](https://github.com/dbt-labs/dbt-core/blob/main/CONTRIBUTING.md#adding-changelog-entry)
@@ -10,6 +10,7 @@
For information on prior major and minor releases, see their changelogs:

* [1.8](https://github.com/dbt-labs/dbt-core/blob/1.8.latest/CHANGELOG.md)
* [1.7](https://github.com/dbt-labs/dbt-core/blob/1.7.latest/CHANGELOG.md)
* [1.6](https://github.com/dbt-labs/dbt-core/blob/1.6.latest/CHANGELOG.md)
* [1.5](https://github.com/dbt-labs/dbt-core/blob/1.5.latest/CHANGELOG.md)
@@ -10,6 +10,7 @@
6. [Debugging](#debugging)
7. [Adding or modifying a changelog entry](#adding-or-modifying-a-changelog-entry)
8. [Submitting a Pull Request](#submitting-a-pull-request)
9. [Troubleshooting Tips](#troubleshooting-tips)

## About this document

@@ -21,10 +22,10 @@ If you get stuck, we're happy to help! Drop us a line in the `#dbt-core-developm

### Notes

- **Adapters:** Is your issue or proposed code change related to a specific [database adapter](https://docs.getdbt.com/docs/available-adapters)? If so, please open issues, PRs, and discussions in that adapter's repository instead. The sole exception is Postgres; the `dbt-postgres` plugin lives in this repository (`dbt-core`).
- **Adapters:** Is your issue or proposed code change related to a specific [database adapter](https://docs.getdbt.com/docs/available-adapters)? If so, please open issues, PRs, and discussions in that adapter's repository instead.
- **CLA:** Please note that anyone contributing code to `dbt-core` must sign the [Contributor License Agreement](https://docs.getdbt.com/docs/contributor-license-agreements). If you are unable to sign the CLA, the `dbt-core` maintainers will unfortunately be unable to merge any of your Pull Requests. We welcome you to participate in discussions, open issues, and comment on existing ones.
- **Branches:** All pull requests from community contributors should target the `main` branch (default). If the change is needed as a patch for a minor version of dbt that has already been released (or is already a release candidate), a maintainer will backport the changes in your PR to the relevant "latest" release branch (`1.0.latest`, `1.1.latest`, ...). If an issue fix applies to a release branch, that fix should be first committed to the development branch and then to the release branch (rarely release-branch fixes may not apply to `main`).
- **Releases**: Before releasing a new minor version of Core, we prepare a series of alphas and release candidates to allow users (especially employees of dbt Labs!) to test the new version in live environments. This is an important quality assurance step, as it exposes the new code to a wide variety of complicated deployments and can surface bugs before official release. Releases are accessible via pip, homebrew, and dbt Cloud.
- **Releases**: Before releasing a new minor version of Core, we prepare a series of alphas and release candidates to allow users (especially employees of dbt Labs!) to test the new version in live environments. This is an important quality assurance step, as it exposes the new code to a wide variety of complicated deployments and can surface bugs before official release. Releases are accessible via our [supported installation methods](https://docs.getdbt.com/docs/core/installation-overview#install-dbt-core).

## Getting the code

@@ -44,9 +45,7 @@ If you are not a member of the `dbt-labs` GitHub organization, you can contribut

### dbt Labs contributors

If you are a member of the `dbt-labs` GitHub organization, you will have push access to the `dbt-core` repo. Rather than forking `dbt-core` to make your changes, just clone the repository, check out a new branch, and push directly to that branch. Branch names should be fixed by `CT-XXX/` where:
* CT stands for 'core team'
* XXX stands for a JIRA ticket number
If you are a member of the `dbt-labs` GitHub organization, you will have push access to the `dbt-core` repo. Rather than forking `dbt-core` to make your changes, just clone the repository, check out a new branch, and push directly to that branch.

## Setting up an environment

@@ -171,9 +170,9 @@ Finally, you can also run a specific test or group of tests using [`pytest`](htt

```sh
# run all unit tests in a file
python3 -m pytest tests/unit/test_graph.py
python3 -m pytest tests/unit/test_base_column.py
# run a specific unit test
python3 -m pytest tests/unit/test_graph.py::GraphTest::test__dependency_list
python3 -m pytest tests/unit/test_base_column.py::TestNumericType::test__numeric_type
# run specific Postgres functional tests
python3 -m pytest tests/functional/sources
```
@@ -221,10 +220,12 @@ You don't need to worry about which `dbt-core` version your change will go into.

## Submitting a Pull Request

Code can be merged into the current development branch `main` by opening a pull request. A `dbt-core` maintainer will review your PR. They may suggest code revision for style or clarity, or request that you add unit or integration test(s). These are good things! We believe that, with a little bit of help, anyone can contribute high-quality code.
Code can be merged into the current development branch `main` by opening a pull request. If the proposal looks like it's on the right track, then a `dbt-core` maintainer will triage the PR and label it as `ready_for_review`. From this point, two code reviewers will be assigned with the aim of responding to any updates to the PR within about one week. They may suggest code revision for style or clarity, or request that you add unit or integration test(s). These are good things! We believe that, with a little bit of help, anyone can contribute high-quality code. Once merged, your contribution will be available for the next release of `dbt-core`.

Automated tests run via GitHub Actions. If you're a first-time contributor, all tests (including code checks and unit tests) will require a maintainer to approve. Changes in the `dbt-core` repository trigger integration tests against Postgres. dbt Labs also provides CI environments in which to test changes to other adapters, triggered by PRs in those adapters' repositories, as well as periodic maintenance checks of each adapter in concert with the latest `dbt-core` code changes.

Once all tests are passing and your PR has been approved, a `dbt-core` maintainer will merge your changes into the active development branch. And that's it! Happy developing :tada:

## Troubleshooting Tips

Sometimes, the content license agreement auto-check bot doesn't find a user's entry in its roster. If you need to force a rerun, add `@cla-bot check` in a comment on the pull request.
19  Makefile
@@ -30,17 +30,22 @@ CI_FLAGS =\
.PHONY: dev_req
dev_req: ## Installs dbt-* packages in develop mode along with only development dependencies.
	@\
	pip install -r dev-requirements.txt
	pip install -r editable-requirements.txt
	pip install -r dev-requirements.txt -r editable-requirements.txt

.PHONY: dev
dev: dev_req ## Installs dbt-* packages in develop mode along with development dependencies and pre-commit.
	@\
	pre-commit install

.PHONY: proto_types
proto_types: ## generates google protobuf python file from types.proto
	protoc -I=./core/dbt/events --python_out=./core/dbt/events ./core/dbt/events/types.proto
.PHONY: dev-uninstall
dev-uninstall: ## Uninstall all packages in venv except for build tools
	@\
	pip freeze | grep -v "^-e" | cut -d "@" -f1 | xargs pip uninstall -y; \
	pip uninstall -y dbt-core

.PHONY: core_proto_types
core_proto_types: ## generates google protobuf python file from core_types.proto
	protoc -I=./core/dbt/events --python_out=./core/dbt/events ./core/dbt/events/core_types.proto

.PHONY: mypy
mypy: .env ## Runs mypy against staged changes for static type checking.
@@ -77,12 +82,12 @@ test: .env ## Runs unit tests with py and code checks against staged changes.
	$(DOCKER_CMD) pre-commit run mypy-check --hook-stage manual | grep -v "INFO"

.PHONY: integration
integration: .env ## Runs postgres integration tests with py-integration
integration: .env ## Runs core integration tests using postgres with py-integration
	@\
	$(CI_FLAGS) $(DOCKER_CMD) tox -e py-integration -- -nauto

.PHONY: integration-fail-fast
integration-fail-fast: .env ## Runs postgres integration tests with py-integration in "fail fast" mode.
integration-fail-fast: .env ## Runs core integration tests using postgres with py-integration in "fail fast" mode.
	@\
	$(DOCKER_CMD) tox -e py-integration -- -x -nauto
@@ -21,7 +21,7 @@ These select statements, or "models", form a dbt project. Models frequently buil

## Getting started

- [Install dbt](https://docs.getdbt.com/docs/get-started/installation)
- [Install dbt Core](https://docs.getdbt.com/docs/get-started/installation) or explore the [dbt Cloud CLI](https://docs.getdbt.com/docs/cloud/cloud-cli-installation), a command-line interface powered by [dbt Cloud](https://docs.getdbt.com/docs/cloud/about-cloud/dbt-cloud-features) that enhances collaboration.
- Read the [introduction](https://docs.getdbt.com/docs/introduction/) and [viewpoint](https://docs.getdbt.com/docs/about/viewpoint/)

## Join the dbt Community
@@ -31,7 +31,7 @@ These select statements, or "models", form a dbt project. Models frequently buil

## Reporting bugs and contributing code

- Want to report a bug or request a feature? Let us know on [Slack](http://community.getdbt.com/), or open [an issue](https://github.com/dbt-labs/dbt-core/issues/new)
- Want to report a bug or request a feature? Let us know and open [an issue](https://github.com/dbt-labs/dbt-core/issues/new/choose)
- Want to help us build dbt? Check out the [Contributing Guide](https://github.com/dbt-labs/dbt-core/blob/HEAD/CONTRIBUTING.md)

## Code of Conduct
1  SECURITY.md  Normal file
@@ -0,0 +1 @@
[About dbt Core versions](https://docs.getdbt.com/docs/dbt-versions/core)
@@ -1,2 +1,3 @@
recursive-include dbt/include *.py *.sql *.yml *.html *.md .gitkeep .gitignore
include dbt/py.typed
recursive-include dbt/task/docs *.html
@@ -1,30 +0,0 @@
# Adapters README

The Adapters module is responsible for defining database connection methods, caching information from databases, how relations are defined, and the two major connection types we have - base and sql.

# Directories

## `base`

Defines the base implementation Adapters can use to build out full functionality.

## `sql`

Defines a sql implementation for adapters that initially inherits the above base implementation and comes with some premade methods and macros that can be overwritten as needed per adapter. (most common type of adapter.)

# Files

## `cache.py`

Cached information from the database.

## `factory.py`
Defines how we generate adapter objects

## `protocol.py`

Defines various interfaces for various adapter objects. Helps mypy correctly resolve methods.

## `reference_keys.py`

Configures naming scheme for cache elements to be universal.
@@ -1,7 +0,0 @@
# N.B.
# This will add to the package’s __path__ all subdirectories of directories on sys.path named after the package which effectively combines both modules into a single namespace (dbt.adapters)
# The matching statement is in plugins/postgres/dbt/adapters/__init__.py

from pkgutil import extend_path

__path__ = extend_path(__path__, __name__)
@@ -1,10 +0,0 @@

## Base adapters

### impl.py

The class `SQLAdapter` in [base/imply.py](https://github.com/dbt-labs/dbt-core/blob/main/core/dbt/adapters/base/impl.py) is a (mostly) abstract object that adapter objects inherit from. The base class scaffolds out methods that every adapter project usually should implement for smooth communication between dbt and database.

Some target databases require more or fewer methods--it all depends on what the warehouse's featureset is.

Look into the class for function-level comments.
@@ -1,19 +0,0 @@
# these are all just exports, #noqa them so flake8 will be happy

# TODO: Should we still include this in the `adapters` namespace?
from dbt.contracts.connection import Credentials # noqa: F401
from dbt.adapters.base.meta import available # noqa: F401
from dbt.adapters.base.connections import BaseConnectionManager # noqa: F401
from dbt.adapters.base.relation import ( # noqa: F401
    BaseRelation,
    RelationType,
    SchemaSearchMap,
)
from dbt.adapters.base.column import Column # noqa: F401
from dbt.adapters.base.impl import ( # noqa: F401
    AdapterConfig,
    BaseAdapter,
    PythonJobHelper,
    ConstraintSupport,
)
from dbt.adapters.base.plugin import AdapterPlugin # noqa: F401
@@ -1,161 +0,0 @@
from dataclasses import dataclass
import re
from typing import Dict, ClassVar, Any, Optional

from dbt.exceptions import DbtRuntimeError


@dataclass
class Column:
    # Note: This is automatically used by contract code
    # No-op conversions (INTEGER => INT) have been removed.
    # Any adapter that wants to take advantage of "translate_type"
    # should create a ClassVar with the appropriate conversions.
    TYPE_LABELS: ClassVar[Dict[str, str]] = {
        "STRING": "TEXT",
    }
    column: str
    dtype: str
    char_size: Optional[int] = None
    numeric_precision: Optional[Any] = None
    numeric_scale: Optional[Any] = None

    @classmethod
    def translate_type(cls, dtype: str) -> str:
        return cls.TYPE_LABELS.get(dtype.upper(), dtype)

    @classmethod
    def create(cls, name, label_or_dtype: str) -> "Column":
        column_type = cls.translate_type(label_or_dtype)
        return cls(name, column_type)

    @property
    def name(self) -> str:
        return self.column

    @property
    def quoted(self) -> str:
        return '"{}"'.format(self.column)

    @property
    def data_type(self) -> str:
        if self.is_string():
            return self.string_type(self.string_size())
        elif self.is_numeric():
            return self.numeric_type(self.dtype, self.numeric_precision, self.numeric_scale)
        else:
            return self.dtype

    def is_string(self) -> bool:
        return self.dtype.lower() in ["text", "character varying", "character", "varchar"]

    def is_number(self):
        return any([self.is_integer(), self.is_numeric(), self.is_float()])

    def is_float(self):
        return self.dtype.lower() in [
            # floats
            "real",
            "float4",
            "float",
            "double precision",
            "float8",
            "double",
        ]

    def is_integer(self) -> bool:
        return self.dtype.lower() in [
            # real types
            "smallint",
            "integer",
            "bigint",
            "smallserial",
            "serial",
            "bigserial",
            # aliases
            "int2",
            "int4",
            "int8",
            "serial2",
            "serial4",
            "serial8",
        ]

    def is_numeric(self) -> bool:
        return self.dtype.lower() in ["numeric", "decimal"]

    def string_size(self) -> int:
        if not self.is_string():
            raise DbtRuntimeError("Called string_size() on non-string field!")

        if self.dtype == "text" or self.char_size is None:
            # char_size should never be None. Handle it reasonably just in case
            return 256
        else:
            return int(self.char_size)

    def can_expand_to(self, other_column: "Column") -> bool:
        """returns True if this column can be expanded to the size of the
        other column"""
        if not self.is_string() or not other_column.is_string():
            return False

        return other_column.string_size() > self.string_size()

    def literal(self, value: Any) -> str:
        return "{}::{}".format(value, self.data_type)

    @classmethod
    def string_type(cls, size: int) -> str:
        return "character varying({})".format(size)

    @classmethod
    def numeric_type(cls, dtype: str, precision: Any, scale: Any) -> str:
        # This could be decimal(...), numeric(...), number(...)
        # Just use whatever was fed in here -- don't try to get too clever
        if precision is None or scale is None:
            return dtype
        else:
            return "{}({},{})".format(dtype, precision, scale)

    def __repr__(self) -> str:
        return "<Column {} ({})>".format(self.name, self.data_type)

    @classmethod
    def from_description(cls, name: str, raw_data_type: str) -> "Column":
        match = re.match(r"([^(]+)(\([^)]+\))?", raw_data_type)
        if match is None:
            raise DbtRuntimeError(f'Could not interpret data type "{raw_data_type}"')
        data_type, size_info = match.groups()
        char_size = None
        numeric_precision = None
        numeric_scale = None
        if size_info is not None:
            # strip out the parentheses
            size_info = size_info[1:-1]
            parts = size_info.split(",")
            if len(parts) == 1:
                try:
                    char_size = int(parts[0])
                except ValueError:
                    raise DbtRuntimeError(
                        f'Could not interpret data_type "{raw_data_type}": '
                        f'could not convert "{parts[0]}" to an integer'
                    )
            elif len(parts) == 2:
                try:
                    numeric_precision = int(parts[0])
                except ValueError:
                    raise DbtRuntimeError(
                        f'Could not interpret data_type "{raw_data_type}": '
                        f'could not convert "{parts[0]}" to an integer'
                    )
                try:
                    numeric_scale = int(parts[1])
                except ValueError:
                    raise DbtRuntimeError(
                        f'Could not interpret data_type "{raw_data_type}": '
                        f'could not convert "{parts[1]}" to an integer'
                    )

        return cls(name, data_type, char_size, numeric_precision, numeric_scale)
@@ -1,435 +0,0 @@
import abc
import os
from time import sleep
import sys
import traceback

# multiprocessing.RLock is a function returning this type
from multiprocessing.synchronize import RLock
from threading import get_ident
from typing import (
    Any,
    Dict,
    Tuple,
    Hashable,
    Optional,
    ContextManager,
    List,
    Type,
    Union,
    Iterable,
    Callable,
)

import agate

import dbt.exceptions
from dbt.contracts.connection import (
    Connection,
    Identifier,
    ConnectionState,
    AdapterRequiredConfig,
    LazyHandle,
    AdapterResponse,
)
from dbt.contracts.graph.manifest import Manifest
from dbt.adapters.base.query_headers import (
    MacroQueryStringSetter,
)
from dbt.events import AdapterLogger
from dbt.events.functions import fire_event
from dbt.events.types import (
    NewConnection,
    ConnectionReused,
    ConnectionLeftOpenInCleanup,
    ConnectionLeftOpen,
    ConnectionClosedInCleanup,
    ConnectionClosed,
    Rollback,
    RollbackFailed,
)
from dbt.events.contextvars import get_node_info
from dbt import flags
from dbt.utils import cast_to_str

SleepTime = Union[int, float] # As taken by time.sleep.
AdapterHandle = Any # Adapter connection handle objects can be any class.


class BaseConnectionManager(metaclass=abc.ABCMeta):
    """Methods to implement:
    - exception_handler
    - cancel_open
    - open
    - begin
    - commit
    - clear_transaction
    - execute

    You must also set the 'TYPE' class attribute with a class-unique constant
    string.
    """

    TYPE: str = NotImplemented

    def __init__(self, profile: AdapterRequiredConfig) -> None:
        self.profile = profile
        self.thread_connections: Dict[Hashable, Connection] = {}
        self.lock: RLock = flags.MP_CONTEXT.RLock()
        self.query_header: Optional[MacroQueryStringSetter] = None

    def set_query_header(self, manifest: Manifest) -> None:
        self.query_header = MacroQueryStringSetter(self.profile, manifest)

    @staticmethod
    def get_thread_identifier() -> Hashable:
        # note that get_ident() may be re-used, but we should never experience
        # that within a single process
        return (os.getpid(), get_ident())

    def get_thread_connection(self) -> Connection:
        key = self.get_thread_identifier()
        with self.lock:
            if key not in self.thread_connections:
                raise dbt.exceptions.InvalidConnectionError(key, list(self.thread_connections))
            return self.thread_connections[key]

    def set_thread_connection(self, conn: Connection) -> None:
        key = self.get_thread_identifier()
        if key in self.thread_connections:
            raise dbt.exceptions.DbtInternalError(
                "In set_thread_connection, existing connection exists for {}"
            )
        self.thread_connections[key] = conn

    def get_if_exists(self) -> Optional[Connection]:
        key = self.get_thread_identifier()
        with self.lock:
            return self.thread_connections.get(key)

    def clear_thread_connection(self) -> None:
        key = self.get_thread_identifier()
        with self.lock:
            if key in self.thread_connections:
                del self.thread_connections[key]

    def clear_transaction(self) -> None:
        """Clear any existing transactions."""
        conn = self.get_thread_connection()
        if conn is not None:
            if conn.transaction_open:
                self._rollback(conn)
            self.begin()
            self.commit()

    def rollback_if_open(self) -> None:
        conn = self.get_if_exists()
        if conn is not None and conn.handle and conn.transaction_open:
            self._rollback(conn)

    @abc.abstractmethod
    def exception_handler(self, sql: str) -> ContextManager:
        """Create a context manager that handles exceptions caused by database
        interactions.

        :param str sql: The SQL string that the block inside the context
            manager is executing.
        :return: A context manager that handles exceptions raised by the
            underlying database.
        """
        raise dbt.exceptions.NotImplementedError(
            "`exception_handler` is not implemented for this adapter!"
        )

    def set_connection_name(self, name: Optional[str] = None) -> Connection:
        """Called by 'acquire_connection' in BaseAdapter, which is called by
        'connection_named', called by 'connection_for(node)'.
        Creates a connection for this thread if one doesn't already
        exist, and will rename an existing connection."""

        conn_name: str = "master" if name is None else name

        # Get a connection for this thread
        conn = self.get_if_exists()

        if conn and conn.name == conn_name and conn.state == "open":
            # Found a connection and nothing to do, so just return it
            return conn

        if conn is None:
            # Create a new connection
            conn = Connection(
                type=Identifier(self.TYPE),
                name=conn_name,
                state=ConnectionState.INIT,
                transaction_open=False,
                handle=None,
                credentials=self.profile.credentials,
            )
            conn.handle = LazyHandle(self.open)
            # Add the connection to thread_connections for this thread
            self.set_thread_connection(conn)
            fire_event(
                NewConnection(conn_name=conn_name, conn_type=self.TYPE, node_info=get_node_info())
            )
        else:  # existing connection either wasn't open or didn't have the right name
            if conn.state != "open":
                conn.handle = LazyHandle(self.open)
            if conn.name != conn_name:
                orig_conn_name: str = conn.name or ""
                conn.name = conn_name
                fire_event(ConnectionReused(orig_conn_name=orig_conn_name, conn_name=conn_name))

        return conn

    @classmethod
    def retry_connection(
        cls,
        connection: Connection,
        connect: Callable[[], AdapterHandle],
        logger: AdapterLogger,
        retryable_exceptions: Iterable[Type[Exception]],
        retry_limit: int = 1,
        retry_timeout: Union[Callable[[int], SleepTime], SleepTime] = 1,
        _attempts: int = 0,
    ) -> Connection:
        """Given a Connection, set its handle by calling connect.

        The calls to connect will be retried up to retry_limit times to deal with transient
        connection errors. By default, one retry will be attempted if retryable_exceptions is set.

        :param Connection connection: An instance of a Connection that needs a handle to be set,
            usually when attempting to open it.
        :param connect: A callable that returns the appropiate connection handle for a
            given adapter. This callable will be retried retry_limit times if a subclass of any
            Exception in retryable_exceptions is raised by connect.
        :type connect: Callable[[], AdapterHandle]
        :param AdapterLogger logger: A logger to emit messages on retry attempts or errors. When
            handling expected errors, we call debug, and call warning on unexpected errors or when
            all retry attempts have been exhausted.
        :param retryable_exceptions: An iterable of exception classes that if raised by
            connect should trigger a retry.
        :type retryable_exceptions: Iterable[Type[Exception]]
        :param int retry_limit: How many times to retry the call to connect. If this limit
            is exceeded before a successful call, a FailedToConnectError will be raised.
            Must be non-negative.
        :param retry_timeout: Time to wait between attempts to connect. Can also take a
            Callable that takes the number of attempts so far, beginning at 0, and returns an int
            or float to be passed to time.sleep.
        :type retry_timeout: Union[Callable[[int], SleepTime], SleepTime] = 1
        :param int _attempts: Parameter used to keep track of the number of attempts in calling the
            connect function across recursive calls. Passed as an argument to retry_timeout if it
            is a Callable. This parameter should not be set by the initial caller.
        :raises dbt.exceptions.FailedToConnectError: Upon exhausting all retry attempts without
            successfully acquiring a handle.
        :return: The given connection with its appropriate state and handle attributes set
            depending on whether we successfully acquired a handle or not.
        """
        timeout = retry_timeout(_attempts) if callable(retry_timeout) else retry_timeout
        if timeout < 0:
            raise dbt.exceptions.FailedToConnectError(
                "retry_timeout cannot be negative or return a negative time."
            )

        if retry_limit < 0 or retry_limit > sys.getrecursionlimit():
            # This guard is not perfect others may add to the recursion limit (e.g. built-ins).
            connection.handle = None
            connection.state = ConnectionState.FAIL
            raise dbt.exceptions.FailedToConnectError("retry_limit cannot be negative")

        try:
            connection.handle = connect()
            connection.state = ConnectionState.OPEN
            return connection

        except tuple(retryable_exceptions) as e:
            if retry_limit <= 0:
                connection.handle = None
                connection.state = ConnectionState.FAIL
                raise dbt.exceptions.FailedToConnectError(str(e))

            logger.debug(
                f"Got a retryable error when attempting to open a {cls.TYPE} connection.\n"
                f"{retry_limit} attempts remaining. Retrying in {timeout} seconds.\n"
                f"Error:\n{e}"
            )

            sleep(timeout)
            return cls.retry_connection(
                connection=connection,
                connect=connect,
                logger=logger,
                retry_limit=retry_limit - 1,
                retry_timeout=retry_timeout,
                retryable_exceptions=retryable_exceptions,
                _attempts=_attempts + 1,
            )

        except Exception as e:
            connection.handle = None
            connection.state = ConnectionState.FAIL
            raise dbt.exceptions.FailedToConnectError(str(e))

    @abc.abstractmethod
    def cancel_open(self) -> Optional[List[str]]:
        """Cancel all open connections on the adapter. (passable)"""
        raise dbt.exceptions.NotImplementedError(
            "`cancel_open` is not implemented for this adapter!"
        )

    @classmethod
    @abc.abstractmethod
    def open(cls, connection: Connection) -> Connection:
        """Open the given connection on the adapter and return it.

        This may mutate the given connection (in particular, its state and its
        handle).

        This should be thread-safe, or hold the lock if necessary. The given
        connection should not be in either in_use or available.
        """
        raise dbt.exceptions.NotImplementedError("`open` is not implemented for this adapter!")

    def release(self) -> None:
        with self.lock:
            conn = self.get_if_exists()
            if conn is None:
                return

            try:
                # always close the connection. close() calls _rollback() if there
                # is an open transaction
                self.close(conn)
            except Exception:
                # if rollback or close failed, remove our busted connection
                self.clear_thread_connection()
                raise

    def cleanup_all(self) -> None:
        with self.lock:
            for connection in self.thread_connections.values():
                if connection.state not in {"closed", "init"}:
                    fire_event(ConnectionLeftOpenInCleanup(conn_name=cast_to_str(connection.name)))
                else:
                    fire_event(ConnectionClosedInCleanup(conn_name=cast_to_str(connection.name)))
                self.close(connection)

            # garbage collect these connections
            self.thread_connections.clear()

    @abc.abstractmethod
    def begin(self) -> None:
        """Begin a transaction. (passable)"""
        raise dbt.exceptions.NotImplementedError("`begin` is not implemented for this adapter!")

    @abc.abstractmethod
    def commit(self) -> None:
        """Commit a transaction. (passable)"""
        raise dbt.exceptions.NotImplementedError("`commit` is not implemented for this adapter!")

    @classmethod
    def _rollback_handle(cls, connection: Connection) -> None:
        """Perform the actual rollback operation."""
        try:
            connection.handle.rollback()
        except Exception:
            fire_event(
                RollbackFailed(
                    conn_name=cast_to_str(connection.name),
                    exc_info=traceback.format_exc(),
                    node_info=get_node_info(),
                )
            )

    @classmethod
    def _close_handle(cls, connection: Connection) -> None:
        """Perform the actual close operation."""
        # On windows, sometimes connection handles don't have a close() attr.
        if hasattr(connection.handle, "close"):
            fire_event(
                ConnectionClosed(conn_name=cast_to_str(connection.name), node_info=get_node_info())
            )
            connection.handle.close()
        else:
            fire_event(
                ConnectionLeftOpen(
                    conn_name=cast_to_str(connection.name), node_info=get_node_info()
                )
            )

    @classmethod
    def _rollback(cls, connection: Connection) -> None:
        """Roll back the given connection."""
        if connection.transaction_open is False:
            raise dbt.exceptions.DbtInternalError(
                f"Tried to rollback transaction on connection "
                f'"{connection.name}", but it does not have one open!'
            )

        fire_event(Rollback(conn_name=cast_to_str(connection.name), node_info=get_node_info()))
        cls._rollback_handle(connection)

        connection.transaction_open = False

    @classmethod
    def close(cls, connection: Connection) -> Connection:
        # if the connection is in closed or init, there's nothing to do
        if connection.state in {ConnectionState.CLOSED, ConnectionState.INIT}:
            return connection

        if connection.transaction_open and connection.handle:
            fire_event(Rollback(conn_name=cast_to_str(connection.name), node_info=get_node_info()))
            cls._rollback_handle(connection)
        connection.transaction_open = False

        cls._close_handle(connection)
        connection.state = ConnectionState.CLOSED

        return connection

    def commit_if_has_connection(self) -> None:
        """If the named connection exists, commit the current transaction."""
        connection = self.get_if_exists()
        if connection:
            self.commit()

    def _add_query_comment(self, sql: str) -> str:
        if self.query_header is None:
            return sql
        return self.query_header.add(sql)

    @abc.abstractmethod
    def execute(
        self, sql: str, auto_begin: bool = False, fetch: bool = False, limit: Optional[int] = None
    ) -> Tuple[AdapterResponse, agate.Table]:
        """Execute the given SQL.

        :param str sql: The sql to execute.
        :param bool auto_begin: If set, and dbt is not currently inside a
            transaction, automatically begin one.
        :param bool fetch: If set, fetch results.
        :param int limit: If set, limits the result set
        :return: A tuple of the query status and results (empty if fetch=False).
        :rtype: Tuple[AdapterResponse, agate.Table]
        """
        raise dbt.exceptions.NotImplementedError("`execute` is not implemented for this adapter!")

    def add_select_query(self, sql: str) -> Tuple[Connection, Any]:
        """
        This was added here because base.impl.BaseAdapter.get_column_schema_from_query expects it to be here.
        That method wouldn't work unless the adapter used sql.impl.SQLAdapter, sql.connections.SQLConnectionManager
        or defined this method on <Adapter>ConnectionManager before passing it in to <Adapter>Adapter.

        See https://github.com/dbt-labs/dbt-core/issues/8396 for more information.
        """
        raise dbt.exceptions.NotImplementedError(
            "`add_select_query` is not implemented for this adapter!"
        )

    @classmethod
    def data_type_code_to_name(cls, type_code: Union[int, str]) -> str:
        """Get the string representation of the data type from the type_code."""
        # https://peps.python.org/pep-0249/#type-objects
        raise dbt.exceptions.NotImplementedError(
            "`data_type_code_to_name` is not implemented for this adapter!"
        )
File diff suppressed because it is too large
@@ -1,128 +0,0 @@
import abc
from functools import wraps
from typing import Callable, Optional, Any, FrozenSet, Dict, Set

from dbt.deprecations import warn, renamed_method


Decorator = Callable[[Any], Callable]


class _Available:
    def __call__(self, func: Callable) -> Callable:
        func._is_available_ = True  # type: ignore
        return func

    def parse(self, parse_replacement: Callable) -> Decorator:
        """A decorator factory to indicate that a method on the adapter will be
        exposed to the database wrapper, and will be stubbed out at parse time
        with the given function.

        @available.parse()
        def my_method(self, a, b):
            if something:
                return None
            return big_expensive_db_query()

        @available.parse(lambda *args, **kwargs: {})
        def my_other_method(self, a, b):
            x = {}
            x.update(big_expensive_db_query())
            return x
        """

        def inner(func):
            func._parse_replacement_ = parse_replacement
            return self(func)

        return inner

    def deprecated(
        self, supported_name: str, parse_replacement: Optional[Callable] = None
    ) -> Decorator:
        """A decorator that marks a function as available, but also prints a
        deprecation warning. Use like

        @available.deprecated('my_new_method')
        def my_old_method(self, arg):
            args = compatibility_shim(arg)
            return self.my_new_method(*args)

        @available.deprecated('my_new_slow_method', lambda *a, **k: (0, ''))
        def my_old_slow_method(self, arg):
            args = compatibility_shim(arg)
            return self.my_new_slow_method(*args)

        To make `adapter.my_old_method` available but also print out a warning
        on use directing users to `my_new_method`.

        The optional parse_replacement, if provided, will provide a parse-time
        replacement for the actual method (see `available.parse`).
        """

        def wrapper(func):
            func_name = func.__name__
            renamed_method(func_name, supported_name)

            @wraps(func)
            def inner(*args, **kwargs):
                warn("adapter:{}".format(func_name))
                return func(*args, **kwargs)

            if parse_replacement:
                available_function = self.parse(parse_replacement)
            else:
                available_function = self
            return available_function(inner)

        return wrapper

    def parse_none(self, func: Callable) -> Callable:
        wrapper = self.parse(lambda *a, **k: None)
        return wrapper(func)

    def parse_list(self, func: Callable) -> Callable:
        wrapper = self.parse(lambda *a, **k: [])
        return wrapper(func)


available = _Available()


class AdapterMeta(abc.ABCMeta):
    _available_: FrozenSet[str]
    _parse_replacements_: Dict[str, Callable]

    def __new__(mcls, name, bases, namespace, **kwargs) -> "AdapterMeta":
        # mypy does not like the `**kwargs`. But `ABCMeta` itself takes
        # `**kwargs` in its argspec here (and passes them to `type.__new__`).
        # I'm not sure there is any benefit to it after poking around a bit,
        # but having it doesn't hurt on the python side (and omitting it could
        # hurt for obscure metaclass reasons, for all I know)
        cls = abc.ABCMeta.__new__(mcls, name, bases, namespace, **kwargs)  # type: ignore

        # this is very much inspired by ABCMeta's own implementation

        # dict mapping the method name to whether the model name should be
        # injected into the arguments. All methods in here are exposed to the
        # context.
        available: Set[str] = set()
        replacements: Dict[str, Any] = {}

        # collect base class data first
        for base in bases:
            available.update(getattr(base, "_available_", set()))
            replacements.update(getattr(base, "_parse_replacements_", set()))

        # override with local data if it exists
        for name, value in namespace.items():
            if getattr(value, "_is_available_", False):
                available.add(name)
            parse_replacement = getattr(value, "_parse_replacement_", None)
            if parse_replacement is not None:
                replacements[name] = parse_replacement

        cls._available_ = frozenset(available)
        # should this be a namedtuple so it will be immutable like _available_?
        cls._parse_replacements_ = replacements
        return cls
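
A minimal sketch of how these pieces fit together: methods decorated with `available` (or `available.parse`) on any class built by `AdapterMeta` are collected into `_available_` and `_parse_replacements_`. The class and method names below are hypothetical.

class ExampleAdapter(metaclass=AdapterMeta):
    @available
    def drop_relation(self, relation):
        """Collected into _available_ and exposed to the database wrapper."""

    @available.parse(lambda *args, **kwargs: [])
    def list_schemas(self, database):
        """Also collected, with a parse-time replacement that returns []."""


ExampleAdapter._available_           # frozenset({'drop_relation', 'list_schemas'})
ExampleAdapter._parse_replacements_  # {'list_schemas': <the lambda above>}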
@@ -1,42 +0,0 @@
from typing import List, Optional, Type

from dbt.adapters.base import Credentials
from dbt.exceptions import CompilationError
from dbt.adapters.protocol import AdapterProtocol


def project_name_from_path(include_path: str) -> str:
    # avoid an import cycle
    from dbt.config.project import PartialProject

    partial = PartialProject.from_project_root(include_path)
    if partial.project_name is None:
        raise CompilationError(f"Invalid project at {include_path}: name not set!")
    return partial.project_name


class AdapterPlugin:
    """Defines the basic requirements for a dbt adapter plugin.

    :param include_path: The path to this adapter plugin's root
    :param dependencies: A list of adapter names that this adapter depends
        upon.
    """

    def __init__(
        self,
        adapter: Type[AdapterProtocol],
        credentials: Type[Credentials],
        include_path: str,
        dependencies: Optional[List[str]] = None,
    ) -> None:

        self.adapter: Type[AdapterProtocol] = adapter
        self.credentials: Type[Credentials] = credentials
        self.include_path: str = include_path
        self.project_name: str = project_name_from_path(include_path)
        self.dependencies: List[str]
        if dependencies is None:
            self.dependencies = []
        else:
            self.dependencies = dependencies
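
For illustration, an adapter package typically exposes a module-level `Plugin` instance built from this class (the factory code later in this diff loads `mod.Plugin`); the adapter, credentials, and include-path names below are hypothetical:

from dbt.adapters.myadapter.connections import MyAdapterCredentials  # hypothetical
from dbt.adapters.myadapter.impl import MyAdapterAdapter  # hypothetical
from dbt.include import myadapter  # hypothetical bundled dbt project

Plugin = AdapterPlugin(
    adapter=MyAdapterAdapter,
    credentials=MyAdapterCredentials,
    include_path=myadapter.PACKAGE_PATH,
    dependencies=["postgres"],  # optional: other adapters this one builds on
)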
@@ -1,102 +0,0 @@
from threading import local
from typing import Optional, Callable, Dict, Any

from dbt.clients.jinja import QueryStringGenerator

from dbt.context.manifest import generate_query_header_context
from dbt.contracts.connection import AdapterRequiredConfig, QueryComment
from dbt.contracts.graph.nodes import ResultNode
from dbt.contracts.graph.manifest import Manifest
from dbt.exceptions import DbtRuntimeError


class NodeWrapper:
    def __init__(self, node) -> None:
        self._inner_node = node

    def __getattr__(self, name):
        return getattr(self._inner_node, name, "")


class _QueryComment(local):
    """A thread-local class storing thread-specific state information for
    connection management, namely:
    - the current thread's query comment.
    - a source_name indicating what set the current thread's query comment
    """

    def __init__(self, initial) -> None:
        self.query_comment: Optional[str] = initial
        self.append: bool = False

    def add(self, sql: str) -> str:
        if not self.query_comment:
            return sql

        if self.append:
            # replace last ';' with '<comment>;'
            sql = sql.rstrip()
            if sql[-1] == ";":
                sql = sql[:-1]
                return "{}\n/* {} */;".format(sql, self.query_comment.strip())

            return "{}\n/* {} */".format(sql, self.query_comment.strip())

        return "/* {} */\n{}".format(self.query_comment.strip(), sql)

    def set(self, comment: Optional[str], append: bool):
        if isinstance(comment, str) and "*/" in comment:
            # tell the user "no" so they don't hurt themselves by writing
            # garbage
            raise DbtRuntimeError(f'query comment contains illegal value "*/": {comment}')
        self.query_comment = comment
        self.append = append


QueryStringFunc = Callable[[str, Optional[NodeWrapper]], str]


class MacroQueryStringSetter:
    def __init__(self, config: AdapterRequiredConfig, manifest: Manifest) -> None:
        self.manifest = manifest
        self.config = config

        comment_macro = self._get_comment_macro()
        self.generator: QueryStringFunc = lambda name, model: ""
        # if the comment value was None or the empty string, just skip it
        if comment_macro:
            assert isinstance(comment_macro, str)
            macro = "\n".join(
                (
                    "{%- macro query_comment_macro(connection_name, node) -%}",
                    comment_macro,
                    "{% endmacro %}",
                )
            )
            ctx = self._get_context()
            self.generator = QueryStringGenerator(macro, ctx)
        self.comment = _QueryComment(None)
        self.reset()

    def _get_comment_macro(self) -> Optional[str]:
        return self.config.query_comment.comment

    def _get_context(self) -> Dict[str, Any]:
        return generate_query_header_context(self.config, self.manifest)

    def add(self, sql: str) -> str:
        return self.comment.add(sql)

    def reset(self):
        self.set("master", None)

    def set(self, name: str, node: Optional[ResultNode]):
        wrapped: Optional[NodeWrapper] = None
        if node is not None:
            wrapped = NodeWrapper(node)
        comment_str = self.generator(name, wrapped)

        append = False
        if isinstance(self.config.query_comment, QueryComment):
            append = self.config.query_comment.append
        self.comment.set(comment_str, append)
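
A short sketch of the comment placement implemented by `_QueryComment.add`; the comment text and SQL are illustrative:

qc = _QueryComment(None)

qc.set("run by dbt", append=False)
qc.add("select 1")
# -> "/* run by dbt */\nselect 1"

qc.set("run by dbt", append=True)
qc.add("select 1;")
# -> "select 1\n/* run by dbt */;"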
@@ -1,485 +0,0 @@
|
||||
from collections.abc import Hashable
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Optional, TypeVar, Any, Type, Dict, Iterator, Tuple, Set, Union, FrozenSet
|
||||
|
||||
from dbt.contracts.graph.nodes import SourceDefinition, ManifestNode, ResultNode, ParsedNode
|
||||
from dbt.contracts.relation import (
|
||||
RelationType,
|
||||
ComponentName,
|
||||
HasQuoting,
|
||||
FakeAPIObject,
|
||||
Policy,
|
||||
Path,
|
||||
)
|
||||
from dbt.exceptions import (
|
||||
ApproximateMatchError,
|
||||
DbtInternalError,
|
||||
MultipleDatabasesNotAllowedError,
|
||||
)
|
||||
from dbt.node_types import NodeType
|
||||
from dbt.utils import filter_null_values, deep_merge, classproperty
|
||||
|
||||
import dbt.exceptions
|
||||
|
||||
|
||||
Self = TypeVar("Self", bound="BaseRelation")
|
||||
SerializableIterable = Union[Tuple, FrozenSet]
|
||||
|
||||
|
||||
@dataclass(frozen=True, eq=False, repr=False)
|
||||
class BaseRelation(FakeAPIObject, Hashable):
|
||||
path: Path
|
||||
type: Optional[RelationType] = None
|
||||
quote_character: str = '"'
|
||||
# Python 3.11 requires that these use default_factory instead of simple default
|
||||
# ValueError: mutable default <class 'dbt.contracts.relation.Policy'> for field include_policy is not allowed: use default_factory
|
||||
include_policy: Policy = field(default_factory=lambda: Policy())
|
||||
quote_policy: Policy = field(default_factory=lambda: Policy())
|
||||
dbt_created: bool = False
|
||||
|
||||
# register relation types that can be renamed for the purpose of replacing relations using stages and backups
|
||||
# adding a relation type here also requires defining the associated rename macro
|
||||
# e.g. adding RelationType.View in dbt-postgres requires that you define:
|
||||
# include/postgres/macros/relations/view/rename.sql::postgres__get_rename_view_sql()
|
||||
renameable_relations: SerializableIterable = ()
|
||||
|
||||
# register relation types that are atomically replaceable, e.g. they have "create or replace" syntax
|
||||
# adding a relation type here also requires defining the associated replace macro
|
||||
# e.g. adding RelationType.View in dbt-postgres requires that you define:
|
||||
# include/postgres/macros/relations/view/replace.sql::postgres__get_replace_view_sql()
|
||||
replaceable_relations: SerializableIterable = ()
|
||||
|
||||
def _is_exactish_match(self, field: ComponentName, value: str) -> bool:
|
||||
if self.dbt_created and self.quote_policy.get_part(field) is False:
|
||||
return self.path.get_lowered_part(field) == value.lower()
|
||||
else:
|
||||
return self.path.get_part(field) == value
|
||||
|
||||
@classmethod
|
||||
def _get_field_named(cls, field_name):
|
||||
for f, _ in cls._get_fields():
|
||||
if f.name == field_name:
|
||||
return f
|
||||
# this should be unreachable
|
||||
raise ValueError(f"BaseRelation has no {field_name} field!")
|
||||
|
||||
def __eq__(self, other):
|
||||
if not isinstance(other, self.__class__):
|
||||
return False
|
||||
return self.to_dict(omit_none=True) == other.to_dict(omit_none=True)
|
||||
|
||||
@classmethod
|
||||
def get_default_quote_policy(cls) -> Policy:
|
||||
return cls._get_field_named("quote_policy").default_factory()
|
||||
|
||||
@classmethod
|
||||
def get_default_include_policy(cls) -> Policy:
|
||||
return cls._get_field_named("include_policy").default_factory()
|
||||
|
||||
def get(self, key, default=None):
|
||||
"""Override `.get` to return a metadata object so we don't break
|
||||
dbt_utils.
|
||||
"""
|
||||
if key == "metadata":
|
||||
return {"type": self.__class__.__name__}
|
||||
return super().get(key, default)
|
||||
|
||||
def matches(
|
||||
self,
|
||||
database: Optional[str] = None,
|
||||
schema: Optional[str] = None,
|
||||
identifier: Optional[str] = None,
|
||||
) -> bool:
|
||||
search = filter_null_values(
|
||||
{
|
||||
ComponentName.Database: database,
|
||||
ComponentName.Schema: schema,
|
||||
ComponentName.Identifier: identifier,
|
||||
}
|
||||
)
|
||||
|
||||
if not search:
|
||||
# nothing was passed in
|
||||
raise dbt.exceptions.DbtRuntimeError(
|
||||
"Tried to match relation, but no search path was passed!"
|
||||
)
|
||||
|
||||
exact_match = True
|
||||
approximate_match = True
|
||||
|
||||
for k, v in search.items():
|
||||
if not self._is_exactish_match(k, v):
|
||||
exact_match = False
|
||||
if str(self.path.get_lowered_part(k)).strip(self.quote_character) != v.lower().strip(
|
||||
self.quote_character
|
||||
):
|
||||
approximate_match = False # type: ignore[union-attr]
|
||||
|
||||
if approximate_match and not exact_match:
|
||||
target = self.create(database=database, schema=schema, identifier=identifier)
|
||||
raise ApproximateMatchError(target, self)
|
||||
|
||||
return exact_match
|
||||
|
||||
def replace_path(self, **kwargs):
|
||||
return self.replace(path=self.path.replace(**kwargs))
|
||||
|
||||
def quote(
|
||||
self: Self,
|
||||
database: Optional[bool] = None,
|
||||
schema: Optional[bool] = None,
|
||||
identifier: Optional[bool] = None,
|
||||
) -> Self:
|
||||
policy = filter_null_values(
|
||||
{
|
||||
ComponentName.Database: database,
|
||||
ComponentName.Schema: schema,
|
||||
ComponentName.Identifier: identifier,
|
||||
}
|
||||
)
|
||||
|
||||
new_quote_policy = self.quote_policy.replace_dict(policy)
|
||||
return self.replace(quote_policy=new_quote_policy)
|
||||
|
||||
def include(
|
||||
self: Self,
|
||||
database: Optional[bool] = None,
|
||||
schema: Optional[bool] = None,
|
||||
identifier: Optional[bool] = None,
|
||||
) -> Self:
|
||||
policy = filter_null_values(
|
||||
{
|
||||
ComponentName.Database: database,
|
||||
ComponentName.Schema: schema,
|
||||
ComponentName.Identifier: identifier,
|
||||
}
|
||||
)
|
||||
|
||||
new_include_policy = self.include_policy.replace_dict(policy)
|
||||
return self.replace(include_policy=new_include_policy)
|
||||
|
||||
def information_schema(self, view_name=None) -> "InformationSchema":
|
||||
# some of our data comes from jinja, where things can be `Undefined`.
|
||||
if not isinstance(view_name, str):
|
||||
view_name = None
|
||||
|
||||
# Kick the user-supplied schema out of the information schema relation
|
||||
# Instead address this as <database>.information_schema by default
|
||||
info_schema = InformationSchema.from_relation(self, view_name)
|
||||
return info_schema.incorporate(path={"schema": None})
|
||||
|
||||
def information_schema_only(self) -> "InformationSchema":
|
||||
return self.information_schema()
|
||||
|
||||
def without_identifier(self) -> "BaseRelation":
|
||||
"""Return a form of this relation that only has the database and schema
|
||||
set to included. To get the appropriately-quoted form of the schema out of
the result (for use as part of a query), use `.render()`. To get the
|
||||
raw database or schema name, use `.database` or `.schema`.
|
||||
|
||||
The hash of the returned object is the result of render().
|
||||
"""
|
||||
return self.include(identifier=False).replace_path(identifier=None)
|
||||
|
||||
def _render_iterator(self) -> Iterator[Tuple[Optional[ComponentName], Optional[str]]]:
|
||||
for key in ComponentName:
|
||||
path_part: Optional[str] = None
|
||||
if self.include_policy.get_part(key):
|
||||
path_part = self.path.get_part(key)
|
||||
if path_part is not None and self.quote_policy.get_part(key):
|
||||
path_part = self.quoted(path_part)
|
||||
yield key, path_part
|
||||
|
||||
def render(self) -> str:
|
||||
# if there is nothing set, this will return the empty string.
|
||||
return ".".join(part for _, part in self._render_iterator() if part is not None)
|
||||
|
||||
def quoted(self, identifier):
|
||||
return "{quote_char}{identifier}{quote_char}".format(
|
||||
quote_char=self.quote_character,
|
||||
identifier=identifier,
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def create_from_source(cls: Type[Self], source: SourceDefinition, **kwargs: Any) -> Self:
|
||||
source_quoting = source.quoting.to_dict(omit_none=True)
|
||||
source_quoting.pop("column", None)
|
||||
quote_policy = deep_merge(
|
||||
cls.get_default_quote_policy().to_dict(omit_none=True),
|
||||
source_quoting,
|
||||
kwargs.get("quote_policy", {}),
|
||||
)
|
||||
|
||||
return cls.create(
|
||||
database=source.database,
|
||||
schema=source.schema,
|
||||
identifier=source.identifier,
|
||||
quote_policy=quote_policy,
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def add_ephemeral_prefix(name: str):
|
||||
return f"__dbt__cte__{name}"
|
||||
|
||||
@classmethod
|
||||
def create_ephemeral_from_node(
|
||||
cls: Type[Self],
|
||||
config: HasQuoting,
|
||||
node: ManifestNode,
|
||||
) -> Self:
|
||||
# Note that ephemeral models are based on the name.
|
||||
identifier = cls.add_ephemeral_prefix(node.name)
|
||||
return cls.create(
|
||||
type=cls.CTE,
|
||||
identifier=identifier,
|
||||
).quote(identifier=False)
|
||||
|
||||
@classmethod
|
||||
def create_from_node(
|
||||
cls: Type[Self],
|
||||
config: HasQuoting,
|
||||
node,
|
||||
quote_policy: Optional[Dict[str, bool]] = None,
|
||||
**kwargs: Any,
|
||||
) -> Self:
|
||||
if quote_policy is None:
|
||||
quote_policy = {}
|
||||
|
||||
quote_policy = dbt.utils.merge(config.quoting, quote_policy)
|
||||
|
||||
return cls.create(
|
||||
database=node.database,
|
||||
schema=node.schema,
|
||||
identifier=node.alias,
|
||||
quote_policy=quote_policy,
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def create_from(
|
||||
cls: Type[Self],
|
||||
config: HasQuoting,
|
||||
node: ResultNode,
|
||||
**kwargs: Any,
|
||||
) -> Self:
|
||||
if node.resource_type == NodeType.Source:
|
||||
if not isinstance(node, SourceDefinition):
|
||||
raise DbtInternalError(
|
||||
"type mismatch, expected SourceDefinition but got {}".format(type(node))
|
||||
)
|
||||
return cls.create_from_source(node, **kwargs)
|
||||
else:
|
||||
# Can't use ManifestNode here because of parameterized generics
|
||||
if not isinstance(node, (ParsedNode)):
|
||||
raise DbtInternalError(
|
||||
f"type mismatch, expected ManifestNode but got {type(node)}"
|
||||
)
|
||||
return cls.create_from_node(config, node, **kwargs)
|
||||
|
||||
@classmethod
|
||||
def create(
|
||||
cls: Type[Self],
|
||||
database: Optional[str] = None,
|
||||
schema: Optional[str] = None,
|
||||
identifier: Optional[str] = None,
|
||||
type: Optional[RelationType] = None,
|
||||
**kwargs,
|
||||
) -> Self:
|
||||
kwargs.update(
|
||||
{
|
||||
"path": {
|
||||
"database": database,
|
||||
"schema": schema,
|
||||
"identifier": identifier,
|
||||
},
|
||||
"type": type,
|
||||
}
|
||||
)
|
||||
return cls.from_dict(kwargs)
|
||||
|
||||
@property
|
||||
def can_be_renamed(self) -> bool:
|
||||
return self.type in self.renameable_relations
|
||||
|
||||
@property
|
||||
def can_be_replaced(self) -> bool:
|
||||
return self.type in self.replaceable_relations
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return "<{} {}>".format(self.__class__.__name__, self.render())
|
||||
|
||||
def __hash__(self) -> int:
|
||||
return hash(self.render())
|
||||
|
||||
def __str__(self) -> str:
|
||||
return self.render()
|
||||
|
||||
@property
|
||||
def database(self) -> Optional[str]:
|
||||
return self.path.database
|
||||
|
||||
@property
|
||||
def schema(self) -> Optional[str]:
|
||||
return self.path.schema
|
||||
|
||||
@property
|
||||
def identifier(self) -> Optional[str]:
|
||||
return self.path.identifier
|
||||
|
||||
@property
|
||||
def table(self) -> Optional[str]:
|
||||
return self.path.identifier
|
||||
|
||||
# Here for compatibility with old Relation interface
|
||||
@property
|
||||
def name(self) -> Optional[str]:
|
||||
return self.identifier
|
||||
|
||||
@property
|
||||
def is_table(self) -> bool:
|
||||
return self.type == RelationType.Table
|
||||
|
||||
@property
|
||||
def is_cte(self) -> bool:
|
||||
return self.type == RelationType.CTE
|
||||
|
||||
@property
|
||||
def is_view(self) -> bool:
|
||||
return self.type == RelationType.View
|
||||
|
||||
@property
|
||||
def is_materialized_view(self) -> bool:
|
||||
return self.type == RelationType.MaterializedView
|
||||
|
||||
@classproperty
|
||||
def Table(cls) -> str:
|
||||
return str(RelationType.Table)
|
||||
|
||||
@classproperty
|
||||
def CTE(cls) -> str:
|
||||
return str(RelationType.CTE)
|
||||
|
||||
@classproperty
|
||||
def View(cls) -> str:
|
||||
return str(RelationType.View)
|
||||
|
||||
@classproperty
|
||||
def External(cls) -> str:
|
||||
return str(RelationType.External)
|
||||
|
||||
@classproperty
|
||||
def MaterializedView(cls) -> str:
|
||||
return str(RelationType.MaterializedView)
|
||||
|
||||
@classproperty
|
||||
def get_relation_type(cls) -> Type[RelationType]:
|
||||
return RelationType
|
||||
|
||||
|
||||
Info = TypeVar("Info", bound="InformationSchema")
|
||||
|
||||
|
||||
@dataclass(frozen=True, eq=False, repr=False)
|
||||
class InformationSchema(BaseRelation):
|
||||
information_schema_view: Optional[str] = None
|
||||
|
||||
def __post_init__(self):
|
||||
if not isinstance(self.information_schema_view, (type(None), str)):
|
||||
raise dbt.exceptions.CompilationError(
|
||||
"Got an invalid name: {}".format(self.information_schema_view)
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def get_path(cls, relation: BaseRelation, information_schema_view: Optional[str]) -> Path:
|
||||
return Path(
|
||||
database=relation.database,
|
||||
schema=relation.schema,
|
||||
identifier="INFORMATION_SCHEMA",
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def get_include_policy(
|
||||
cls,
|
||||
relation,
|
||||
information_schema_view: Optional[str],
|
||||
) -> Policy:
|
||||
return relation.include_policy.replace(
|
||||
database=relation.database is not None,
|
||||
schema=False,
|
||||
identifier=True,
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def get_quote_policy(
|
||||
cls,
|
||||
relation,
|
||||
information_schema_view: Optional[str],
|
||||
) -> Policy:
|
||||
return relation.quote_policy.replace(
|
||||
identifier=False,
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def from_relation(
|
||||
cls: Type[Info],
|
||||
relation: BaseRelation,
|
||||
information_schema_view: Optional[str],
|
||||
) -> Info:
|
||||
include_policy = cls.get_include_policy(relation, information_schema_view)
|
||||
quote_policy = cls.get_quote_policy(relation, information_schema_view)
|
||||
path = cls.get_path(relation, information_schema_view)
|
||||
return cls(
|
||||
type=RelationType.View,
|
||||
path=path,
|
||||
include_policy=include_policy,
|
||||
quote_policy=quote_policy,
|
||||
information_schema_view=information_schema_view,
|
||||
)
|
||||
|
||||
def _render_iterator(self):
|
||||
for k, v in super()._render_iterator():
|
||||
yield k, v
|
||||
yield None, self.information_schema_view
|
||||
|
||||
|
||||
class SchemaSearchMap(Dict[InformationSchema, Set[Optional[str]]]):
|
||||
"""A utility class to keep track of what information_schema tables to
|
||||
search for what schemas. The schema values are all lowercased to avoid
|
||||
duplication.
|
||||
"""
|
||||
|
||||
def add(self, relation: BaseRelation):
|
||||
key = relation.information_schema_only()
|
||||
if key not in self:
|
||||
self[key] = set()
|
||||
schema: Optional[str] = None
|
||||
if relation.schema is not None:
|
||||
schema = relation.schema.lower()
|
||||
self[key].add(schema)
|
||||
|
||||
def search(self) -> Iterator[Tuple[InformationSchema, Optional[str]]]:
|
||||
for information_schema, schemas in self.items():
|
||||
for schema in schemas:
|
||||
yield information_schema, schema
|
||||
|
||||
def flatten(self, allow_multiple_databases: bool = False) -> "SchemaSearchMap":
|
||||
new = self.__class__()
|
||||
|
||||
# make sure we don't have multiple databases if allow_multiple_databases is set to False
|
||||
if not allow_multiple_databases:
|
||||
seen = {r.database.lower() for r in self if r.database}
|
||||
if len(seen) > 1:
|
||||
raise MultipleDatabasesNotAllowedError(seen)
|
||||
|
||||
for information_schema_name, schema in self.search():
|
||||
path = {"database": information_schema_name.database, "schema": schema}
|
||||
new.add(
|
||||
information_schema_name.incorporate(
|
||||
path=path,
|
||||
quote_policy={"database": False},
|
||||
include_policy={"database": False},
|
||||
)
|
||||
)
|
||||
|
||||
return new
|
||||
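
A brief usage sketch of the relation API defined above; the names are illustrative, and rendered quoting follows the configured policies:

rel = BaseRelation.create(
    database="analytics", schema="staging", identifier="orders", type=RelationType.Table
)

rel.render()                       # dot-separated name, quoted per quote_policy
rel.without_identifier().render()  # only the database and schema components
rel.information_schema_only()      # an InformationSchema relation for the same database
rel.is_table                       # True, because type=RelationType.Table was passed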
@@ -1,520 +0,0 @@
|
||||
import threading
|
||||
from copy import deepcopy
|
||||
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple
|
||||
|
||||
from dbt.adapters.reference_keys import (
|
||||
_make_ref_key,
|
||||
_make_ref_key_dict,
|
||||
_ReferenceKey,
|
||||
)
|
||||
from dbt.exceptions import (
|
||||
DependentLinkNotCachedError,
|
||||
NewNameAlreadyInCacheError,
|
||||
NoneRelationFoundError,
|
||||
ReferencedLinkNotCachedError,
|
||||
TruncatedModelNameCausedCollisionError,
|
||||
)
|
||||
from dbt.events.functions import fire_event, fire_event_if
|
||||
from dbt.events.types import CacheAction, CacheDumpGraph
|
||||
from dbt.flags import get_flags
|
||||
from dbt.utils import lowercase
|
||||
|
||||
|
||||
def dot_separated(key: _ReferenceKey) -> str:
|
||||
"""Return the key in dot-separated string form.
|
||||
|
||||
:param _ReferenceKey key: The key to stringify.
|
||||
"""
|
||||
return ".".join(map(str, key))
|
||||
|
||||
|
||||
class _CachedRelation:
|
||||
"""Nothing about _CachedRelation is guaranteed to be thread-safe!
|
||||
|
||||
:attr str schema: The schema of this relation.
|
||||
:attr str identifier: The identifier of this relation.
|
||||
:attr Dict[_ReferenceKey, _CachedRelation] referenced_by: The relations
|
||||
that refer to this relation.
|
||||
:attr BaseRelation inner: The underlying dbt relation.
|
||||
"""
|
||||
|
||||
def __init__(self, inner) -> None:
|
||||
self.referenced_by: Dict[_ReferenceKey, _CachedRelation] = {}
|
||||
self.inner = inner
|
||||
|
||||
def __str__(self) -> str:
|
||||
return ("_CachedRelation(database={}, schema={}, identifier={}, inner={})").format(
|
||||
self.database, self.schema, self.identifier, self.inner
|
||||
)
|
||||
|
||||
@property
|
||||
def database(self) -> Optional[str]:
|
||||
return lowercase(self.inner.database)
|
||||
|
||||
@property
|
||||
def schema(self) -> Optional[str]:
|
||||
return lowercase(self.inner.schema)
|
||||
|
||||
@property
|
||||
def identifier(self) -> Optional[str]:
|
||||
return lowercase(self.inner.identifier)
|
||||
|
||||
def __copy__(self):
|
||||
new = self.__class__(self.inner)
|
||||
new.__dict__.update(self.__dict__)
|
||||
return new
|
||||
|
||||
def __deepcopy__(self, memo):
|
||||
new = self.__class__(self.inner.incorporate())
|
||||
new.__dict__.update(self.__dict__)
|
||||
new.referenced_by = deepcopy(self.referenced_by, memo)
|
||||
|
||||
def is_referenced_by(self, key):
|
||||
return key in self.referenced_by
|
||||
|
||||
def key(self):
|
||||
"""Get the _ReferenceKey that represents this relation
|
||||
|
||||
:return _ReferenceKey: A key for this relation.
|
||||
"""
|
||||
return _make_ref_key(self)
|
||||
|
||||
def add_reference(self, referrer: "_CachedRelation"):
|
||||
"""Add a reference from referrer to self, indicating that if this node
|
||||
were drop...cascaded, the referrer would be dropped as well.
|
||||
|
||||
:param _CachedRelation referrer: The node that refers to this node.
|
||||
"""
|
||||
self.referenced_by[referrer.key()] = referrer
|
||||
|
||||
def collect_consequences(self):
|
||||
"""Recursively collect a set of _ReferenceKeys that would
|
||||
consequentially get dropped if this were dropped via
|
||||
"drop ... cascade".
|
||||
|
||||
:return Set[_ReferenceKey]: All the relations that would be dropped
|
||||
"""
|
||||
consequences = {self.key()}
|
||||
for relation in self.referenced_by.values():
|
||||
consequences.update(relation.collect_consequences())
|
||||
return consequences
|
||||
|
||||
def release_references(self, keys):
|
||||
"""Non-recursively indicate that an iterable of _ReferenceKey no longer
|
||||
exist. Unknown keys are ignored.
|
||||
|
||||
:param Iterable[_ReferenceKey] keys: The keys to drop.
|
||||
"""
|
||||
keys = set(self.referenced_by) & set(keys)
|
||||
for key in keys:
|
||||
self.referenced_by.pop(key)
|
||||
|
||||
def rename(self, new_relation):
|
||||
"""Rename this cached relation to new_relation.
|
||||
Note that this will change the output of key(), all refs must be
|
||||
updated!
|
||||
|
||||
:param _CachedRelation new_relation: The new name to apply to the
|
||||
relation
|
||||
"""
|
||||
# Relations store this stuff inside their `path` dict. But they
|
||||
# also store a table_name, and usually use it in their .render(),
|
||||
# so we need to update that as well. It doesn't appear that
|
||||
# table_name is ever anything but the identifier (via .create())
|
||||
self.inner = self.inner.incorporate(
|
||||
path={
|
||||
"database": new_relation.inner.database,
|
||||
"schema": new_relation.inner.schema,
|
||||
"identifier": new_relation.inner.identifier,
|
||||
},
|
||||
)
|
||||
|
||||
def rename_key(self, old_key, new_key):
|
||||
"""Rename a reference that may or may not exist. Only handles the
|
||||
reference itself, so this is the other half of what `rename` does.
|
||||
|
||||
If old_key is not in referenced_by, this is a no-op.
|
||||
|
||||
:param _ReferenceKey old_key: The old key to be renamed.
|
||||
:param _ReferenceKey new_key: The new key to rename to.
|
||||
:raises InternalError: If the new key already exists.
|
||||
"""
|
||||
if new_key in self.referenced_by:
|
||||
raise NewNameAlreadyInCacheError(old_key, new_key)
|
||||
|
||||
if old_key not in self.referenced_by:
|
||||
return
|
||||
value = self.referenced_by.pop(old_key)
|
||||
self.referenced_by[new_key] = value
|
||||
|
||||
def dump_graph_entry(self):
|
||||
"""Return a key/value pair representing this key and its referents.
|
||||
|
||||
return List[str]: The dot-separated form of all referent keys.
|
||||
"""
|
||||
return [dot_separated(r) for r in self.referenced_by]
|
||||
|
||||
|
||||
class RelationsCache:
|
||||
"""A cache of the relations known to dbt. Keeps track of relationships
|
||||
declared between tables and handles renames/drops as a real database would.
|
||||
|
||||
:attr Dict[_ReferenceKey, _CachedRelation] relations: The known relations.
|
||||
:attr threading.RLock lock: The lock around relations, held during updates.
|
||||
The adapters also hold this lock while filling the cache.
|
||||
:attr Set[str] schemas: The set of known/cached schemas, all lowercased.
|
||||
"""
|
||||
|
||||
def __init__(self) -> None:
|
||||
self.relations: Dict[_ReferenceKey, _CachedRelation] = {}
|
||||
self.lock = threading.RLock()
|
||||
self.schemas: Set[Tuple[Optional[str], Optional[str]]] = set()
|
||||
|
||||
def add_schema(
|
||||
self,
|
||||
database: Optional[str],
|
||||
schema: Optional[str],
|
||||
) -> None:
|
||||
"""Add a schema to the set of known schemas (case-insensitive)
|
||||
|
||||
:param database: The database name to add.
|
||||
:param schema: The schema name to add.
|
||||
"""
|
||||
self.schemas.add((lowercase(database), lowercase(schema)))
|
||||
|
||||
def drop_schema(
|
||||
self,
|
||||
database: Optional[str],
|
||||
schema: Optional[str],
|
||||
) -> None:
|
||||
"""Drop the given schema and remove it from the set of known schemas.
|
||||
|
||||
Then remove all its contents (and their dependents, etc) as well.
|
||||
"""
|
||||
key = (lowercase(database), lowercase(schema))
|
||||
if key not in self.schemas:
|
||||
return
|
||||
|
||||
# avoid iterating over self.relations while removing things by
|
||||
# collecting the list first.
|
||||
|
||||
with self.lock:
|
||||
to_remove = self._list_relations_in_schema(database, schema)
|
||||
self._remove_all(to_remove)
|
||||
# handle a drop_schema race by using discard() over remove()
|
||||
self.schemas.discard(key)
|
||||
|
||||
def update_schemas(self, schemas: Iterable[Tuple[Optional[str], str]]):
|
||||
"""Add multiple schemas to the set of known schemas (case-insensitive)
|
||||
|
||||
:param schemas: An iterable of the schema names to add.
|
||||
"""
|
||||
self.schemas.update((lowercase(d), s.lower()) for (d, s) in schemas)
|
||||
|
||||
def __contains__(self, schema_id: Tuple[Optional[str], str]):
|
||||
"""A schema is 'in' the relations cache if it is in the set of cached
|
||||
schemas.
|
||||
|
||||
:param schema_id: The db name and schema name to look up.
|
||||
"""
|
||||
db, schema = schema_id
|
||||
return (lowercase(db), schema.lower()) in self.schemas
|
||||
|
||||
def dump_graph(self):
|
||||
"""Dump a key-only representation of the schema to a dictionary. Every
|
||||
known relation is a key with a value of a list of keys it is referenced
|
||||
by.
|
||||
"""
|
||||
# we have to hold the lock for the entire dump, if other threads modify
|
||||
# self.relations or any cache entry's referenced_by during iteration
|
||||
# it's a runtime error!
|
||||
with self.lock:
|
||||
return {dot_separated(k): str(v.dump_graph_entry()) for k, v in self.relations.items()}
|
||||
|
||||
def _setdefault(self, relation: _CachedRelation):
|
||||
"""Add a relation to the cache, or return it if it already exists.
|
||||
|
||||
:param _CachedRelation relation: The relation to set or get.
|
||||
:return _CachedRelation: The relation stored under the given relation's
|
||||
key
|
||||
"""
|
||||
self.add_schema(relation.database, relation.schema)
|
||||
key = relation.key()
|
||||
return self.relations.setdefault(key, relation)
|
||||
|
||||
def _add_link(self, referenced_key, dependent_key):
|
||||
"""Add a link between two relations to the database. Both the old and
|
||||
new entries must alraedy exist in the database.
|
||||
|
||||
:param _ReferenceKey referenced_key: The key identifying the referenced
|
||||
model (the one that if dropped will drop the dependent model).
|
||||
:param _ReferenceKey dependent_key: The key identifying the dependent
|
||||
model.
|
||||
:raises InternalError: If either entry does not exist.
|
||||
"""
|
||||
referenced = self.relations.get(referenced_key)
|
||||
if referenced is None:
|
||||
return
|
||||
if referenced is None:
|
||||
raise ReferencedLinkNotCachedError(referenced_key)
|
||||
|
||||
dependent = self.relations.get(dependent_key)
|
||||
if dependent is None:
|
||||
raise DependentLinkNotCachedError(dependent_key)
|
||||
|
||||
assert dependent is not None # we just raised!
|
||||
|
||||
referenced.add_reference(dependent)
|
||||
|
||||
# This is called in plugins/postgres/dbt/adapters/postgres/impl.py
|
||||
def add_link(self, referenced, dependent):
|
||||
"""Add a link between two relations to the database. If either relation
|
||||
does not exist, it will be added as an "external" relation.
|
||||
|
||||
The dependent model refers _to_ the referenced model. So, given
|
||||
arguments of (jake_test, bar, jake_test, foo):
|
||||
both values are in the schema jake_test and foo is a view that refers
|
||||
to bar, so "drop bar cascade" will drop foo and all of foo's
|
||||
dependents.
|
||||
|
||||
:param BaseRelation referenced: The referenced model.
|
||||
:param BaseRelation dependent: The dependent model.
|
||||
:raises InternalError: If either entry does not exist.
|
||||
"""
|
||||
ref_key = _make_ref_key(referenced)
|
||||
dep_key = _make_ref_key(dependent)
|
||||
if (ref_key.database, ref_key.schema) not in self:
|
||||
# if we have not cached the referenced schema at all, we must be
|
||||
# referring to a table outside our control. There's no need to make
|
||||
# a link - we will never drop the referenced relation during a run.
|
||||
fire_event(
|
||||
CacheAction(
|
||||
ref_key=ref_key._asdict(),
|
||||
ref_key_2=dep_key._asdict(),
|
||||
)
|
||||
)
|
||||
return
|
||||
if ref_key not in self.relations:
|
||||
# Insert a dummy "external" relation.
|
||||
referenced = referenced.replace(type=referenced.External)
|
||||
self.add(referenced)
|
||||
if dep_key not in self.relations:
|
||||
# Insert a dummy "external" relation.
|
||||
dependent = dependent.replace(type=referenced.External)
|
||||
self.add(dependent)
|
||||
fire_event(
|
||||
CacheAction(
|
||||
action="add_link",
|
||||
ref_key=dep_key._asdict(),
|
||||
ref_key_2=ref_key._asdict(),
|
||||
)
|
||||
)
|
||||
with self.lock:
|
||||
self._add_link(ref_key, dep_key)
|
||||
|
||||
def add(self, relation):
|
||||
"""Add the relation inner to the cache, under the schema schema and
|
||||
identifier identifier
|
||||
|
||||
:param BaseRelation relation: The underlying relation.
|
||||
"""
|
||||
flags = get_flags()
|
||||
cached = _CachedRelation(relation)
|
||||
fire_event_if(
|
||||
flags.LOG_CACHE_EVENTS,
|
||||
lambda: CacheDumpGraph(before_after="before", action="adding", dump=self.dump_graph()),
|
||||
)
|
||||
fire_event(CacheAction(action="add_relation", ref_key=_make_ref_key_dict(cached)))
|
||||
|
||||
with self.lock:
|
||||
self._setdefault(cached)
|
||||
fire_event_if(
|
||||
flags.LOG_CACHE_EVENTS,
|
||||
lambda: CacheDumpGraph(before_after="after", action="adding", dump=self.dump_graph()),
|
||||
)
|
||||
|
||||
def _remove_refs(self, keys):
|
||||
"""Removes all references to all entries in keys. This does not
|
||||
cascade!
|
||||
|
||||
:param Iterable[_ReferenceKey] keys: The keys to remove.
|
||||
"""
|
||||
# remove direct refs
|
||||
for key in keys:
|
||||
del self.relations[key]
|
||||
# then remove all entries from each child
|
||||
for cached in self.relations.values():
|
||||
cached.release_references(keys)
|
||||
|
||||
def drop(self, relation):
|
||||
"""Drop the named relation and cascade it appropriately to all
|
||||
dependent relations.
|
||||
|
||||
Because dbt proactively does many `drop relation if exist ... cascade`
|
||||
that are noops, nonexistent relation drops cause a debug log and no
|
||||
other actions.
|
||||
|
||||
:param str schema: The schema of the relation to drop.
|
||||
:param str identifier: The identifier of the relation to drop.
|
||||
"""
|
||||
dropped_key = _make_ref_key(relation)
|
||||
dropped_key_msg = _make_ref_key_dict(relation)
|
||||
fire_event(CacheAction(action="drop_relation", ref_key=dropped_key_msg))
|
||||
with self.lock:
|
||||
if dropped_key not in self.relations:
|
||||
fire_event(CacheAction(action="drop_missing_relation", ref_key=dropped_key_msg))
|
||||
return
|
||||
consequences = self.relations[dropped_key].collect_consequences()
|
||||
# convert from a list of _ReferenceKeys to a list of ReferenceKeyMsgs
|
||||
consequence_msgs = [key._asdict() for key in consequences]
|
||||
fire_event(
|
||||
CacheAction(
|
||||
action="drop_cascade", ref_key=dropped_key_msg, ref_list=consequence_msgs
|
||||
)
|
||||
)
|
||||
self._remove_refs(consequences)
|
||||
|
||||
def _rename_relation(self, old_key, new_relation):
|
||||
"""Rename a relation named old_key to new_key, updating references.
|
||||
Return whether or not there was a key to rename.
|
||||
|
||||
:param _ReferenceKey old_key: The existing key, to rename from.
|
||||
:param _CachedRelation new_key: The new relation, to rename to.
|
||||
"""
|
||||
# On the database level, a rename updates all values that were
|
||||
# previously referenced by old_name to be referenced by new_name.
|
||||
# basically, the name changes but some underlying ID moves. Kind of
|
||||
# like an object reference!
|
||||
relation = self.relations.pop(old_key)
|
||||
new_key = new_relation.key()
|
||||
|
||||
# relation has to rename its innards, so it needs the _CachedRelation.
|
||||
relation.rename(new_relation)
|
||||
# update all the relations that refer to it
|
||||
for cached in self.relations.values():
|
||||
if cached.is_referenced_by(old_key):
|
||||
fire_event(
|
||||
CacheAction(
|
||||
action="update_reference",
|
||||
ref_key=_make_ref_key_dict(old_key),
|
||||
ref_key_2=_make_ref_key_dict(new_key),
|
||||
ref_key_3=_make_ref_key_dict(cached.key()),
|
||||
)
|
||||
)
|
||||
|
||||
cached.rename_key(old_key, new_key)
|
||||
|
||||
self.relations[new_key] = relation
|
||||
# also fixup the schemas!
|
||||
self.add_schema(new_key.database, new_key.schema)
|
||||
|
||||
return True
|
||||
|
||||
def _check_rename_constraints(self, old_key, new_key):
|
||||
"""Check the rename constraints, and return whether or not the rename
|
||||
can proceed.
|
||||
|
||||
If the new key is already present, that is an error.
|
||||
If the old key is absent, we debug log and return False, assuming it's
|
||||
a temp table being renamed.
|
||||
|
||||
:param _ReferenceKey old_key: The existing key, to rename from.
|
||||
:param _ReferenceKey new_key: The new key, to rename to.
|
||||
:return bool: If the old relation exists for renaming.
|
||||
:raises InternalError: If the new key is already present.
|
||||
"""
|
||||
if new_key in self.relations:
|
||||
# Tell user when collision caused by model names truncated during
|
||||
# materialization.
|
||||
raise TruncatedModelNameCausedCollisionError(new_key, self.relations)
|
||||
|
||||
if old_key not in self.relations:
|
||||
fire_event(CacheAction(action="temporary_relation", ref_key=old_key._asdict()))
|
||||
return False
|
||||
return True
|
||||
|
||||
def rename(self, old, new):
|
||||
"""Rename the old schema/identifier to the new schema/identifier and
|
||||
update references.
|
||||
|
||||
If the new schema/identifier is already present, that is an error.
|
||||
If the schema/identifier key is absent, we only debug log and return,
|
||||
assuming it's a temp table being renamed.
|
||||
|
||||
:param BaseRelation old: The existing relation name information.
|
||||
:param BaseRelation new: The new relation name information.
|
||||
:raises InternalError: If the new key is already present.
|
||||
"""
|
||||
old_key = _make_ref_key(old)
|
||||
new_key = _make_ref_key(new)
|
||||
fire_event(
|
||||
CacheAction(
|
||||
action="rename_relation",
|
||||
ref_key=old_key._asdict(),
|
||||
ref_key_2=new_key._asdict(),
|
||||
)
|
||||
)
|
||||
flags = get_flags()
|
||||
fire_event_if(
|
||||
flags.LOG_CACHE_EVENTS,
|
||||
lambda: CacheDumpGraph(before_after="before", action="rename", dump=self.dump_graph()),
|
||||
)
|
||||
|
||||
with self.lock:
|
||||
if self._check_rename_constraints(old_key, new_key):
|
||||
self._rename_relation(old_key, _CachedRelation(new))
|
||||
else:
|
||||
self._setdefault(_CachedRelation(new))
|
||||
|
||||
fire_event_if(
|
||||
flags.LOG_CACHE_EVENTS,
|
||||
lambda: CacheDumpGraph(before_after="after", action="rename", dump=self.dump_graph()),
|
||||
)
|
||||
|
||||
def get_relations(self, database: Optional[str], schema: Optional[str]) -> List[Any]:
|
||||
"""Case-insensitively yield all relations matching the given schema.
|
||||
|
||||
:param str schema: The case-insensitive schema name to list from.
|
||||
:return List[BaseRelation]: The list of relations with the given
|
||||
schema
|
||||
"""
|
||||
database = lowercase(database)
|
||||
schema = lowercase(schema)
|
||||
with self.lock:
|
||||
results = [
|
||||
r.inner
|
||||
for r in self.relations.values()
|
||||
if (lowercase(r.schema) == schema and lowercase(r.database) == database)
|
||||
]
|
||||
|
||||
if None in results:
|
||||
raise NoneRelationFoundError()
|
||||
return results
|
||||
|
||||
def clear(self):
|
||||
"""Clear the cache"""
|
||||
with self.lock:
|
||||
self.relations.clear()
|
||||
self.schemas.clear()
|
||||
|
||||
def _list_relations_in_schema(
|
||||
self, database: Optional[str], schema: Optional[str]
|
||||
) -> List[_CachedRelation]:
|
||||
"""Get the relations in a schema. Callers should hold the lock."""
|
||||
key = (lowercase(database), lowercase(schema))
|
||||
|
||||
to_remove: List[_CachedRelation] = []
|
||||
for cachekey, relation in self.relations.items():
|
||||
if (cachekey.database, cachekey.schema) == key:
|
||||
to_remove.append(relation)
|
||||
return to_remove
|
||||
|
||||
def _remove_all(self, to_remove: List[_CachedRelation]):
|
||||
"""Remove all the listed relations. Ignore relations that have been
|
||||
cascaded out.
|
||||
"""
|
||||
for relation in to_remove:
|
||||
# it may have been cascaded out already
|
||||
drop_key = _make_ref_key(relation)
|
||||
if drop_key in self.relations:
|
||||
self.drop(drop_key)
|
||||
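
A sketch of how the cache is exercised during a run; `orders` and `orders_view` stand in for BaseRelation objects:

cache = RelationsCache()

cache.add(orders)
cache.add(orders_view)
cache.add_link(referenced=orders, dependent=orders_view)

# Dropping the referenced relation cascades through referenced_by,
# so orders_view is evicted from the cache as a consequence.
cache.drop(orders)

cache.get_relations("analytics", "staging")  # remaining relations in that schema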
@@ -1,52 +0,0 @@
from dataclasses import dataclass
from enum import Enum
from typing import Optional, DefaultDict, Mapping


class Capability(str, Enum):
    """Enumeration of optional adapter features which can be probed using BaseAdapter.capabilities()"""

    SchemaMetadataByRelations = "SchemaMetadataByRelations"
    """Indicates efficient support for retrieving schema metadata for a list of relations, rather than always retrieving
    all the relations in a schema."""

    TableLastModifiedMetadata = "TableLastModifiedMetadata"
    """Indicates support for determining the time of the last table modification by querying database metadata."""


class Support(str, Enum):
    Unknown = "Unknown"
    """The adapter has not declared whether this capability is a feature of the underlying DBMS."""

    Unsupported = "Unsupported"
    """This capability is not possible with the underlying DBMS, so the adapter does not implement related macros."""

    NotImplemented = "NotImplemented"
    """This capability is available in the underlying DBMS, but support has not yet been implemented in the adapter."""

    Versioned = "Versioned"
    """Some versions of the DBMS supported by the adapter support this capability and the adapter has implemented any
    macros needed to use it."""

    Full = "Full"
    """All versions of the DBMS supported by the adapter support this capability and the adapter has implemented any
    macros needed to use it."""


@dataclass
class CapabilitySupport:
    support: Support
    first_version: Optional[str] = None

    def __bool__(self):
        return self.support == Support.Versioned or self.support == Support.Full


class CapabilityDict(DefaultDict[Capability, CapabilitySupport]):
    def __init__(self, vals: Mapping[Capability, CapabilitySupport]):
        super().__init__(self._default)
        self.update(vals)

    @staticmethod
    def _default():
        return CapabilitySupport(support=Support.Unknown)
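
For illustration, an adapter might declare its capabilities like this; lookups for anything it does not list fall back to Support.Unknown, which is falsy:

capabilities = CapabilityDict(
    {Capability.SchemaMetadataByRelations: CapabilitySupport(support=Support.Full)}
)

bool(capabilities[Capability.SchemaMetadataByRelations])  # True: Full support
bool(capabilities[Capability.TableLastModifiedMetadata])  # False: defaults to Support.Unknown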
@@ -1,237 +0,0 @@
|
||||
import threading
|
||||
import traceback
|
||||
from contextlib import contextmanager
|
||||
from importlib import import_module
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional, Set, Type
|
||||
|
||||
from dbt.adapters.base.plugin import AdapterPlugin
|
||||
from dbt.adapters.protocol import AdapterConfig, AdapterProtocol, RelationProtocol
|
||||
from dbt.contracts.connection import AdapterRequiredConfig, Credentials
|
||||
from dbt.events.functions import fire_event
|
||||
from dbt.events.types import AdapterImportError, PluginLoadError, AdapterRegistered
|
||||
from dbt.exceptions import DbtInternalError, DbtRuntimeError
|
||||
from dbt.include.global_project import PACKAGE_PATH as GLOBAL_PROJECT_PATH
|
||||
from dbt.include.global_project import PROJECT_NAME as GLOBAL_PROJECT_NAME
|
||||
from dbt.semver import VersionSpecifier
|
||||
|
||||
Adapter = AdapterProtocol
|
||||
|
||||
|
||||
class AdapterContainer:
|
||||
def __init__(self) -> None:
|
||||
self.lock = threading.Lock()
|
||||
self.adapters: Dict[str, Adapter] = {}
|
||||
self.plugins: Dict[str, AdapterPlugin] = {}
|
||||
# map package names to their include paths
|
||||
self.packages: Dict[str, Path] = {
|
||||
GLOBAL_PROJECT_NAME: Path(GLOBAL_PROJECT_PATH),
|
||||
}
|
||||
|
||||
def get_plugin_by_name(self, name: str) -> AdapterPlugin:
|
||||
with self.lock:
|
||||
if name in self.plugins:
|
||||
return self.plugins[name]
|
||||
names = ", ".join(self.plugins.keys())
|
||||
|
||||
message = f"Invalid adapter type {name}! Must be one of {names}"
|
||||
raise DbtRuntimeError(message)
|
||||
|
||||
def get_adapter_class_by_name(self, name: str) -> Type[Adapter]:
|
||||
plugin = self.get_plugin_by_name(name)
|
||||
return plugin.adapter
|
||||
|
||||
def get_relation_class_by_name(self, name: str) -> Type[RelationProtocol]:
|
||||
adapter = self.get_adapter_class_by_name(name)
|
||||
return adapter.Relation
|
||||
|
||||
def get_config_class_by_name(self, name: str) -> Type[AdapterConfig]:
|
||||
adapter = self.get_adapter_class_by_name(name)
|
||||
return adapter.AdapterSpecificConfigs
|
||||
|
||||
def load_plugin(self, name: str) -> Type[Credentials]:
|
||||
# this doesn't need a lock: in the worst case we'll overwrite packages
|
||||
# and adapter_type entries with the same value, as they're all
|
||||
# singletons
|
||||
try:
|
||||
# mypy doesn't think modules have any attributes.
|
||||
mod: Any = import_module("." + name, "dbt.adapters")
|
||||
except ModuleNotFoundError as exc:
|
||||
# if we failed to import the target module in particular, inform
|
||||
# the user about it via a runtime error
|
||||
if exc.name == "dbt.adapters." + name:
|
||||
fire_event(AdapterImportError(exc=str(exc)))
|
||||
raise DbtRuntimeError(f"Could not find adapter type {name}!")
|
||||
# otherwise, the error had to have come from some underlying
|
||||
# library. Log the stack trace.
|
||||
|
||||
fire_event(PluginLoadError(exc_info=traceback.format_exc()))
|
||||
raise
|
||||
plugin: AdapterPlugin = mod.Plugin
|
||||
plugin_type = plugin.adapter.type()
|
||||
|
||||
if plugin_type != name:
|
||||
raise DbtRuntimeError(
|
||||
f"Expected to find adapter with type named {name}, got "
|
||||
f"adapter with type {plugin_type}"
|
||||
)
|
||||
|
||||
with self.lock:
|
||||
# things do hold the lock to iterate over it so we need it to add
|
||||
self.plugins[name] = plugin
|
||||
|
||||
self.packages[plugin.project_name] = Path(plugin.include_path)
|
||||
|
||||
for dep in plugin.dependencies:
|
||||
self.load_plugin(dep)
|
||||
|
||||
return plugin.credentials
|
||||
|
||||
def register_adapter(self, config: AdapterRequiredConfig) -> None:
|
||||
adapter_name = config.credentials.type
|
||||
adapter_type = self.get_adapter_class_by_name(adapter_name)
|
||||
adapter_version = import_module(f".{adapter_name}.__version__", "dbt.adapters").version
|
||||
adapter_version_specifier = VersionSpecifier.from_version_string(
|
||||
adapter_version
|
||||
).to_version_string()
|
||||
fire_event(
|
||||
AdapterRegistered(adapter_name=adapter_name, adapter_version=adapter_version_specifier)
|
||||
)
|
||||
with self.lock:
|
||||
if adapter_name in self.adapters:
|
||||
# this shouldn't really happen...
|
||||
return
|
||||
|
||||
adapter: Adapter = adapter_type(config) # type: ignore
|
||||
self.adapters[adapter_name] = adapter
|
||||
|
||||
def lookup_adapter(self, adapter_name: str) -> Adapter:
|
||||
return self.adapters[adapter_name]
|
||||
|
||||
def reset_adapters(self):
|
||||
"""Clear the adapters. This is useful for tests, which change configs."""
|
||||
with self.lock:
|
||||
for adapter in self.adapters.values():
|
||||
adapter.cleanup_connections()
|
||||
self.adapters.clear()
|
||||
|
||||
def cleanup_connections(self):
|
||||
"""Only clean up the adapter connections list without resetting the
|
||||
actual adapters.
|
||||
"""
|
||||
with self.lock:
|
||||
for adapter in self.adapters.values():
|
||||
adapter.cleanup_connections()
|
||||
|
||||
def get_adapter_plugins(self, name: Optional[str]) -> List[AdapterPlugin]:
|
||||
"""Iterate over the known adapter plugins. If a name is provided,
|
||||
iterate in dependency order over the named plugin and its dependencies.
|
||||
"""
|
||||
if name is None:
|
||||
return list(self.plugins.values())
|
||||
|
||||
plugins: List[AdapterPlugin] = []
|
||||
seen: Set[str] = set()
|
||||
plugin_names: List[str] = [name]
|
||||
while plugin_names:
|
||||
plugin_name = plugin_names[0]
|
||||
plugin_names = plugin_names[1:]
|
||||
try:
|
||||
plugin = self.plugins[plugin_name]
|
||||
except KeyError:
|
||||
raise DbtInternalError(f"No plugin found for {plugin_name}") from None
|
||||
plugins.append(plugin)
|
||||
seen.add(plugin_name)
|
||||
for dep in plugin.dependencies:
|
||||
if dep not in seen:
|
||||
plugin_names.append(dep)
|
||||
return plugins
|
||||
|
||||
def get_adapter_package_names(self, name: Optional[str]) -> List[str]:
|
||||
package_names: List[str] = [p.project_name for p in self.get_adapter_plugins(name)]
|
||||
package_names.append(GLOBAL_PROJECT_NAME)
|
||||
return package_names
|
||||
|
||||
def get_include_paths(self, name: Optional[str]) -> List[Path]:
|
||||
paths = []
|
||||
for package_name in self.get_adapter_package_names(name):
|
||||
try:
|
||||
path = self.packages[package_name]
|
||||
except KeyError:
|
||||
raise DbtInternalError(f"No internal package listing found for {package_name}")
|
||||
paths.append(path)
|
||||
return paths
|
||||
|
||||
def get_adapter_type_names(self, name: Optional[str]) -> List[str]:
|
||||
return [p.adapter.type() for p in self.get_adapter_plugins(name)]
|
||||
|
||||
def get_adapter_constraint_support(self, name: Optional[str]) -> List[str]:
|
||||
return self.lookup_adapter(name).CONSTRAINT_SUPPORT # type: ignore
|
||||
|
||||
|
||||
FACTORY: AdapterContainer = AdapterContainer()
|
||||
|
||||
|
||||
def register_adapter(config: AdapterRequiredConfig) -> None:
|
||||
FACTORY.register_adapter(config)
|
||||
|
||||
|
||||
def get_adapter(config: AdapterRequiredConfig):
|
||||
return FACTORY.lookup_adapter(config.credentials.type)
|
||||
|
||||
|
||||
def get_adapter_by_type(adapter_type):
|
||||
return FACTORY.lookup_adapter(adapter_type)
|
||||
|
||||
|
||||
def reset_adapters():
|
||||
"""Clear the adapters. This is useful for tests, which change configs."""
|
||||
FACTORY.reset_adapters()
|
||||
|
||||
|
||||
def cleanup_connections():
|
||||
"""Only clean up the adapter connections list without resetting the actual
|
||||
adapters.
|
||||
"""
|
||||
FACTORY.cleanup_connections()
|
||||
|
||||
|
||||
def get_adapter_class_by_name(name: str) -> Type[AdapterProtocol]:
|
||||
return FACTORY.get_adapter_class_by_name(name)
|
||||
|
||||
|
||||
def get_config_class_by_name(name: str) -> Type[AdapterConfig]:
|
||||
return FACTORY.get_config_class_by_name(name)
|
||||
|
||||
|
||||
def get_relation_class_by_name(name: str) -> Type[RelationProtocol]:
|
||||
return FACTORY.get_relation_class_by_name(name)
|
||||
|
||||
|
||||
def load_plugin(name: str) -> Type[Credentials]:
|
||||
return FACTORY.load_plugin(name)
|
||||
|
||||
|
||||
def get_include_paths(name: Optional[str]) -> List[Path]:
|
||||
return FACTORY.get_include_paths(name)
|
||||
|
||||
|
||||
def get_adapter_package_names(name: Optional[str]) -> List[str]:
|
||||
return FACTORY.get_adapter_package_names(name)
|
||||
|
||||
|
||||
def get_adapter_type_names(name: Optional[str]) -> List[str]:
|
||||
return FACTORY.get_adapter_type_names(name)
|
||||
|
||||
|
||||
def get_adapter_constraint_support(name: Optional[str]) -> List[str]:
|
||||
return FACTORY.get_adapter_constraint_support(name)
|
||||
|
||||
|
||||
@contextmanager
|
||||
def adapter_management():
|
||||
reset_adapters()
|
||||
try:
|
||||
yield
|
||||
finally:
|
||||
cleanup_connections()
|
||||
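
A sketch of typical use of the module-level factory helpers above; `runtime_config` stands in for an object satisfying AdapterRequiredConfig:

with adapter_management():
    register_adapter(runtime_config)
    adapter = get_adapter(runtime_config)
    response, table = adapter.execute("select 1", fetch=True)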
@@ -1,158 +0,0 @@
from dataclasses import dataclass
from typing import (
    Type,
    Hashable,
    Optional,
    ContextManager,
    List,
    Generic,
    TypeVar,
    Tuple,
    Dict,
    Any,
)
from typing_extensions import Protocol

import agate

from dbt.contracts.connection import Connection, AdapterRequiredConfig, AdapterResponse
from dbt.contracts.graph.nodes import ResultNode, ManifestNode
from dbt.contracts.graph.model_config import BaseConfig
from dbt.contracts.graph.manifest import Manifest
from dbt.contracts.relation import Policy, HasQuoting

from dbt.graph import Graph


@dataclass
class AdapterConfig(BaseConfig):
    pass


class ConnectionManagerProtocol(Protocol):
    TYPE: str


class ColumnProtocol(Protocol):
    pass


Self = TypeVar("Self", bound="RelationProtocol")


class RelationProtocol(Protocol):
    @classmethod
    def get_default_quote_policy(cls) -> Policy:
        ...

    @classmethod
    def create_from(cls: Type[Self], config: HasQuoting, node: ResultNode) -> Self:
        ...


class CompilerProtocol(Protocol):
    def compile(self, manifest: Manifest, write=True) -> Graph:
        ...

    def compile_node(
        self,
        node: ManifestNode,
        manifest: Manifest,
        extra_context: Optional[Dict[str, Any]] = None,
    ) -> ManifestNode:
        ...


AdapterConfig_T = TypeVar("AdapterConfig_T", bound=AdapterConfig)
ConnectionManager_T = TypeVar("ConnectionManager_T", bound=ConnectionManagerProtocol)
Relation_T = TypeVar("Relation_T", bound=RelationProtocol)
Column_T = TypeVar("Column_T", bound=ColumnProtocol)
Compiler_T = TypeVar("Compiler_T", bound=CompilerProtocol)


# TODO CT-211
class AdapterProtocol(  # type: ignore[misc]
    Protocol,
    Generic[
        AdapterConfig_T,
        ConnectionManager_T,
        Relation_T,
        Column_T,
        Compiler_T,
    ],
):
    # N.B. Technically these are ClassVars, but mypy doesn't support putting type vars in a
    # ClassVar due to the restrictiveness of PEP-526
    # See: https://github.com/python/mypy/issues/5144
    AdapterSpecificConfigs: Type[AdapterConfig_T]
    Column: Type[Column_T]
    Relation: Type[Relation_T]
    ConnectionManager: Type[ConnectionManager_T]
    connections: ConnectionManager_T

    def __init__(self, config: AdapterRequiredConfig) -> None:
        ...

    @classmethod
    def type(cls) -> str:
        pass

    def set_query_header(self, manifest: Manifest) -> None:
        ...

    @staticmethod
    def get_thread_identifier() -> Hashable:
        ...

    def get_thread_connection(self) -> Connection:
        ...

    def set_thread_connection(self, conn: Connection) -> None:
        ...

    def get_if_exists(self) -> Optional[Connection]:
        ...

    def clear_thread_connection(self) -> None:
        ...

    def clear_transaction(self) -> None:
        ...

    def exception_handler(self, sql: str) -> ContextManager:
        ...

    def set_connection_name(self, name: Optional[str] = None) -> Connection:
        ...

    def cancel_open(self) -> Optional[List[str]]:
        ...

    def open(cls, connection: Connection) -> Connection:
        ...

    def release(self) -> None:
        ...

    def cleanup_all(self) -> None:
        ...

    def begin(self) -> None:
        ...

    def commit(self) -> None:
        ...

    def close(cls, connection: Connection) -> Connection:
        ...

    def commit_if_has_connection(self) -> None:
        ...

    def execute(
        self, sql: str, auto_begin: bool = False, fetch: bool = False
    ) -> Tuple[AdapterResponse, agate.Table]:
        ...

    def get_compiler(self) -> Compiler_T:
        ...
@@ -1,37 +0,0 @@
# this module exists to resolve circular imports with the events module

from collections import namedtuple
from typing import Any, Optional


_ReferenceKey = namedtuple("_ReferenceKey", "database schema identifier")


def lowercase(value: Optional[str]) -> Optional[str]:
    if value is None:
        return None
    else:
        return value.lower()


# For backwards compatibility. New code should use _make_ref_key
def _make_key(relation: Any) -> _ReferenceKey:
    return _make_ref_key(relation)


def _make_ref_key(relation: Any) -> _ReferenceKey:
    """Make _ReferenceKeys with lowercase values for the cache so we don't have
    to keep track of quoting
    """
    # databases and schemas can both be None
    return _ReferenceKey(
        lowercase(relation.database), lowercase(relation.schema), lowercase(relation.identifier)
    )


def _make_ref_key_dict(relation: Any):
    return {
        "database": relation.database,
        "schema": relation.schema,
        "identifier": relation.identifier,
    }
@@ -1,25 +0,0 @@
# RelationConfig

This package serves as an initial abstraction for managing the inspection of existing relations and determining
changes on those relations. It arose from the materialized view work and currently supports only materialized
views for Postgres and Redshift, as well as dynamic tables for Snowflake. There are three main classes in this
package.

## RelationConfigBase

This is a very small class that only has a `from_dict()` method and a default `NotImplementedError()`. At some
point this could be replaced by a more robust framework, like `mashumaro` or `pydantic`.
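
For illustration only, a minimal sketch of a flat subclass built through `from_dict()`; the `ExampleViewConfig` class and its attributes are hypothetical, not part of this package:

```python
from dataclasses import dataclass
from typing import Optional

from dbt.adapters.relation_configs import RelationConfigBase


@dataclass(frozen=True)
class ExampleViewConfig(RelationConfigBase):
    """Hypothetical flat config: every attribute is a plain value."""

    view_name: str
    query: str
    schema_name: Optional[str] = None


# `from_dict()` drops null values before calling the constructor,
# so `schema_name` simply falls back to its default here.
config = ExampleViewConfig.from_dict(
    {"view_name": "my_view", "query": "select 1", "schema_name": None}
)
```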

## RelationConfigChange

This class inherits from `RelationConfigBase`; however, it can be thought of as a separate class. The subclassing
merely points to the idea that both classes would likely inherit from the same class in a `mashumaro` or
`pydantic` implementation. This class is much more restricted in its attributes: it should really only
ever need an `action` and a `context`. It can be thought of as analogous to a web request. You need to
know what you're doing (`action`: 'create' = GET, 'drop' = DELETE, etc.) and the information (`context`) needed
to make the change. In our scenarios, the context tends to be an instance of `RelationConfigBase` corresponding
to the new state.
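
A rough sketch of what a change class might look like; `ExampleIndexConfigChange` and its `requires_full_refresh` policy are assumptions for illustration, not part of this package:

```python
from dataclasses import dataclass

from dbt.adapters.relation_configs import (
    RelationConfigChange,
    RelationConfigChangeAction,
)


@dataclass(frozen=True, eq=True, unsafe_hash=True)
class ExampleIndexConfigChange(RelationConfigChange):
    """Hypothetical change; `action` and `context` are inherited fields."""

    @property
    def requires_full_refresh(self) -> bool:
        # assume an index can be swapped without rebuilding the relation
        return False


change = ExampleIndexConfigChange(
    action=RelationConfigChangeAction.create,
    context=None,  # normally the RelationConfigBase instance describing the new state
)
```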

## RelationConfigValidationMixin

This mixin provides optional validation mechanics that can be applied to either `RelationConfigBase` or
`RelationConfigChange` subclasses. A validation rule is a combination of a `validation_check`, something
that should evaluate to `True`, and an optional `validation_error`, an instance of `DbtRuntimeError`
that should be raised if the `validation_check` fails. While optional, it's recommended that
the `validation_error` be provided for greater transparency to the end user.
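
A hedged sketch of the mixin in use; `ExampleSortConfig` and its single rule are illustrative only:

```python
from dataclasses import dataclass
from typing import Set

from dbt.adapters.relation_configs import (
    RelationConfigBase,
    RelationConfigValidationMixin,
    RelationConfigValidationRule,
)
from dbt.exceptions import DbtRuntimeError


@dataclass(frozen=True)
class ExampleSortConfig(RelationConfigBase, RelationConfigValidationMixin):
    column: str = ""

    @property
    def validation_rules(self) -> Set[RelationConfigValidationRule]:
        # each rule pairs a boolean check with the error raised when it fails
        return {
            RelationConfigValidationRule(
                validation_check=self.column != "",
                validation_error=DbtRuntimeError("A sort column name must be provided."),
            ),
        }


ExampleSortConfig(column="created_at")  # validation runs in __post_init__
# ExampleSortConfig()                   # would raise DbtRuntimeError
```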
@@ -1,12 +0,0 @@
from dbt.adapters.relation_configs.config_base import (  # noqa: F401
    RelationConfigBase,
    RelationResults,
)
from dbt.adapters.relation_configs.config_change import (  # noqa: F401
    RelationConfigChangeAction,
    RelationConfigChange,
)
from dbt.adapters.relation_configs.config_validation import (  # noqa: F401
    RelationConfigValidationMixin,
    RelationConfigValidationRule,
)
@@ -1,44 +0,0 @@
from dataclasses import dataclass
from typing import Union, Dict

import agate
from dbt.utils import filter_null_values


"""
This is what relation metadata from the database looks like. It's a dictionary because there will be
multiple grains of data for a single object. For example, a materialized view in Postgres has base-level
information, like its name, but it can also have multiple indexes, which need a separate query. It might
look like this:

    {
        "base": agate.Row({"table_name": "table_abc", "query": "select * from table_def"})
        "indexes": agate.Table("rows": [
            agate.Row({"name": "index_a", "columns": ["column_a"], "type": "hash", "unique": False}),
            agate.Row({"name": "index_b", "columns": ["time_dim_a"], "type": "btree", "unique": False}),
        ])
    }
"""
RelationResults = Dict[str, Union[agate.Row, agate.Table]]


@dataclass(frozen=True)
class RelationConfigBase:
    @classmethod
    def from_dict(cls, kwargs_dict) -> "RelationConfigBase":
        """
        This assumes the subclass of `RelationConfigBase` is flat, in the sense that no attribute is
        itself another subclass of `RelationConfigBase`. If that's not the case, this should be overridden
        to manually manage that complexity.

        Args:
            kwargs_dict: the dict representation of this instance

        Returns: the `RelationConfigBase` representation associated with the provided dict
        """
        return cls(**filter_null_values(kwargs_dict))  # type: ignore

    @classmethod
    def _not_implemented_error(cls) -> NotImplementedError:
        return NotImplementedError(
            "This relation type has not been fully configured for this adapter."
        )
@@ -1,23 +0,0 @@
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Hashable

from dbt.adapters.relation_configs.config_base import RelationConfigBase
from dbt.dataclass_schema import StrEnum


class RelationConfigChangeAction(StrEnum):
    alter = "alter"
    create = "create"
    drop = "drop"


@dataclass(frozen=True, eq=True, unsafe_hash=True)
class RelationConfigChange(RelationConfigBase, ABC):
    action: RelationConfigChangeAction
    context: Hashable  # this is usually a RelationConfig, e.g. IndexConfig, but shouldn't be limited

    @property
    @abstractmethod
    def requires_full_refresh(self) -> bool:
        raise self._not_implemented_error()
@@ -1,57 +0,0 @@
from dataclasses import dataclass
from typing import Set, Optional

from dbt.exceptions import DbtRuntimeError


@dataclass(frozen=True, eq=True, unsafe_hash=True)
class RelationConfigValidationRule:
    validation_check: bool
    validation_error: Optional[DbtRuntimeError]

    @property
    def default_error(self):
        return DbtRuntimeError(
            "There was a validation error in preparing this relation config."
            "No additional context was provided by this adapter."
        )


@dataclass(frozen=True)
class RelationConfigValidationMixin:
    def __post_init__(self):
        self.run_validation_rules()

    @property
    def validation_rules(self) -> Set[RelationConfigValidationRule]:
        """
        A set of validation rules to run against the object upon creation.

        A validation rule is a combination of a validation check (bool) and an optional error message.

        This defaults to no validation rules if not implemented. It's recommended to override this with values,
        but that may not always be necessary.

        Returns: a set of validation rules
        """
        return set()

    def run_validation_rules(self):
        for validation_rule in self.validation_rules:
            try:
                assert validation_rule.validation_check
            except AssertionError:
                if validation_rule.validation_error:
                    raise validation_rule.validation_error
                else:
                    raise validation_rule.default_error
        self.run_child_validation_rules()

    def run_child_validation_rules(self):
        for attr_value in vars(self).values():
            if hasattr(attr_value, "validation_rules"):
                attr_value.run_validation_rules()
            if isinstance(attr_value, set):
                for member in attr_value:
                    if hasattr(member, "validation_rules"):
                        member.run_validation_rules()