mirror of
https://github.com/dbt-labs/dbt-core
synced 2025-12-17 19:31:34 +00:00
* initial hatch implementation
* cleanup docs
* replacing makefile
* cleanup hatch commands to match adapters
reorganize more to match adapters setup
script comment
dont pip install
fix test commands
* changelog
improve changelog
* CI fix
* fix for env
* use a standard version file
* remove odd license logic
* fix bumpversion
* remove sha input
* more cleanup
* fix legacy build path
* define version for pyproject.toml
* use hatch hook for license
* remove tox
* ensure tests are split
* remove temp file for testing
* explicitly match old version in pyproject.toml
* fix up testing
* get rid of bumpversion
* put dev_dependencies.txt in hatch
* setup.py is now dead
* set python version for local dev
* local dev fixes
* temp script to compare wheels
* parity with existing wheel builds
* Revert "temp script to compare wheels"
This reverts commit c31417a092.
* fix docker test file
165 lines
5.3 KiB
YAML
165 lines
5.3 KiB
YAML
# **what?**
# This workflow will test all test(s) at the input path given number of times to determine if it's flaky or not. You can test with any supported OS/Python combination.
# This is batched in 10 to allow more test iterations faster.

# **why?**
# Testing if a test is flaky and if a previously flaky test has been fixed. This allows easy testing on supported python versions and OS combinations.

# **when?**
# This is triggered manually from dbt-core.

name: Flaky Tester

on:
  workflow_dispatch:
    inputs:
      branch:
        description: "Branch to check out"
        type: string
        required: true
        default: "main"
      test_path:
        description: "Path to single test to run (ex: tests/functional/retry/test_retry.py::TestRetry::test_fail_fast)"
        type: string
        required: true
        default: "tests/functional/..."
      python_version:
        description: "Version of Python to Test Against"
        type: choice
        options:
          - "3.10"
          - "3.11"
      os:
        description: "OS to run test in"
        type: choice
        options:
          - "ubuntu-latest"
          - "macos-14"
          - "windows-latest"
      num_runs_per_batch:
        description: "Max number of times to run the test per batch. We always run 10 batches."
        type: number
        required: true
        # numeric default for a `type: number` input — must not be quoted
        default: 50

permissions: read-all

defaults:
  run:
    shell: bash

jobs:
  debug:
    runs-on: ${{ vars.UBUNTU_LATEST }}
    steps:
      - name: "[DEBUG] Output Inputs"
        run: |
          echo "Branch: ${{ inputs.branch }}"
          echo "test_path: ${{ inputs.test_path }}"
          echo "python_version: ${{ inputs.python_version }}"
          echo "os: ${{ inputs.os }}"
          echo "num_runs_per_batch: ${{ inputs.num_runs_per_batch }}"

  pytest:
    runs-on: ${{ inputs.os }}
    strategy:
      # run all batches, even if one fails. This informs how flaky the test may be.
      fail-fast: false
      # using a matrix to speed up the jobs since the matrix will run in parallel when runners are available
      matrix:
        batch: ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"]
    env:
      PYTEST_ADDOPTS: "-v --color=yes -n4 --csv integration_results.csv"
      DBT_TEST_USER_1: dbt_test_user_1
      DBT_TEST_USER_2: dbt_test_user_2
      DBT_TEST_USER_3: dbt_test_user_3
      # quoted so the env var is declared as the string "true", not a YAML boolean
      DD_CIVISIBILITY_AGENTLESS_ENABLED: "true"
      DD_API_KEY: ${{ secrets.DATADOG_API_KEY }}
      DD_SITE: datadoghq.com
      DD_ENV: ci
      DD_SERVICE: ${{ github.event.repository.name }}

    steps:
      - name: "Checkout code"
        uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # actions/checkout@v4
        with:
          ref: ${{ inputs.branch }}

      - name: "Setup Python"
        uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # actions/setup-python@v6
        with:
          python-version: "${{ inputs.python_version }}"

      - name: "Install hatch"
        run: python -m pip install --user --upgrade pip hatch

      - name: "Setup Dev Environment"
        run: |
          cd core
          hatch run setup

      - name: "Set up postgres (linux)"
        # compare contexts directly instead of interpolating `${{ vars.UBUNTU_LATEST }}`
        # into a quoted literal inside the if expression
        if: inputs.os == vars.UBUNTU_LATEST
        run: |
          cd core
          hatch run setup-db

      # mac and windows don't use make due to limitations with docker with those runners in GitHub
      - name: "Set up postgres (macos)"
        if: runner.os == 'macOS'
        uses: nick-fields/retry@ce71cc2ab81d554ebbe88c79ab5975992d79ba08 # nick-fields/retry@v3
        with:
          timeout_minutes: 10
          max_attempts: 3
          command: ./test/setup_db.sh

      - name: "Set up postgres (windows)"
        if: inputs.os == 'windows-latest'
        uses: ./.github/actions/setup-postgres-windows

      - name: "Test Command"
        id: command
        # exposes the command as steps.command.outputs.test_command for later use
        run: |
          test_command="python -m pytest ${{ inputs.test_path }}"
          echo "test_command=$test_command" >> $GITHUB_OUTPUT

      - name: "Run test ${{ inputs.num_runs_per_batch }} times"
        id: pytest
        run: |
          # `set +e` so a failing pytest iteration doesn't abort the batch loop
          set +e
          # initialize counters explicitly instead of relying on unset-var arithmetic
          success=0
          failure=0
          for ((i=1; i<=${{ inputs.num_runs_per_batch }}; i++))
          do
            echo "Running pytest iteration $i..."
            python -m pytest --ddtrace ${{ inputs.test_path }}
            exit_code=$?

            if [[ $exit_code -eq 0 ]]; then
              success=$((success + 1))
              echo "Iteration $i: Success"
            else
              failure=$((failure + 1))
              echo "Iteration $i: Failure"
            fi

            echo
            echo "==========================="
            echo "Successful runs: $success"
            echo "Failed runs: $failure"
            echo "==========================="
            echo
          done

          # export BOTH counters: the summary step reads steps.pytest.outputs.success,
          # which was previously never written
          echo "success=$success" >> $GITHUB_OUTPUT
          echo "failure=$failure" >> $GITHUB_OUTPUT

      - name: "Success and Failure Summary: ${{ inputs.os }}/Python ${{ inputs.python_version }}"
        run: |
          echo "Batch: ${{ matrix.batch }}"
          echo "Successful runs: ${{ steps.pytest.outputs.success }}"
          echo "Failed runs: ${{ steps.pytest.outputs.failure }}"

      - name: "Error for Failures"
        # a non-zero failure count is truthy; zero failures skips this step
        if: ${{ steps.pytest.outputs.failure }}
        run: |
          echo "Batch ${{ matrix.batch }} failed ${{ steps.pytest.outputs.failure }} of ${{ inputs.num_runs_per_batch }} tests"
          exit 1