mirror of
https://github.com/hatchet-dev/hatchet.git
synced 2026-01-06 00:40:10 -06:00
* fix: register durable steps and workflows separately * chore: initial copy of pooled listener * feat: initial generic impl * feat: use pooled listener for wf run listener * refactor: move listeners to subdir * feat: refactor durable event listener * fix: bug * feat: share single pooled workflow listener and event listener everywhere * cruft: rm hatchet fixture * fix: rebase issue * feat: remove asyncio api client in favor of sync one * chore: minor version * proposal: crazy hack idea to make the workflow run listener work * fix: sleeps and error handling * Revert "cruft: rm hatchet fixture" This reverts commit b75f625e6ccec095e8c4e294d6727db166796411. * fix: set timeout * fix: rm pytest-timeout * fix: rm retry * fix: use v1 by default * fix: try removing retry state * fix: try using async client? * fix: try running sequentially * debug: loop * debug: maybe it's this? * fix: lint * fix: re-remove unused fixtures * fix: lazily create clients in admin client * fix: default * fix: lazily initialize dispatcher client * fix: hint * fix: no. way. * feat: add back retries in ci * fix: clients + imports * fix: loop scope * debug: try running skipped tests in ci again * Revert "debug: try running skipped tests in ci again" This reverts commit 8d9e18150e5207ee6051d8df8a6fe2a7504c722e. * fix: rm duped code * refactor: rename everything as `to_proto` * refactor: removals of `namespace` being passed around * fix: task output stupidity * feat: add deprecation warning * fix: remove more unused code * feat: mix sync and async in dag example * fix: autouse * fix: more input types * feat: remove ability to pass in loop * fix: overload key gen
35 lines
1.1 KiB
Python
35 lines
1.1 KiB
Python
import asyncio
|
|
|
|
import pytest
|
|
|
|
from examples.on_failure.worker import on_failure_wf
|
|
from hatchet_sdk import Hatchet
|
|
from hatchet_sdk.clients.rest.models.v1_task_status import V1TaskStatus
|
|
|
|
|
|
@pytest.mark.asyncio(loop_scope="session")
async def test_run_timeout(hatchet: Hatchet) -> None:
    """Verify a failing workflow surfaces its step error and runs the on-failure task.

    Triggers ``on_failure_wf`` without waiting, expects awaiting the result to
    raise with step1's error message, then confirms the run ended with exactly
    one failed task (step1) and one completed task (the on_failure handler).
    """
    run = on_failure_wf.run_no_wait()

    # Awaiting the result must raise, since step1 is designed to fail.
    # pytest.raises replaces the old `assert False` inside a try/except
    # Exception, whose sentinel AssertionError was itself swallowed by the
    # broad except and re-checked against "step1 failed", giving a confusing
    # failure message (and `assert False` is stripped under `python -O`).
    with pytest.raises(Exception, match="step1 failed"):
        await run.aio_result()

    await asyncio.sleep(5)  # Wait for the on_failure job to finish

    details = await hatchet.runs.aio_get(run.workflow_run_id)

    # Exactly two tasks: the failed step plus the completed on_failure handler.
    assert len(details.tasks) == 2
    assert sum(t.status == V1TaskStatus.COMPLETED for t in details.tasks) == 1
    assert sum(t.status == V1TaskStatus.FAILED for t in details.tasks) == 1

    completed_task = next(
        t for t in details.tasks if t.status == V1TaskStatus.COMPLETED
    )
    failed_task = next(t for t in details.tasks if t.status == V1TaskStatus.FAILED)

    # Display names identify which task played which role in the run.
    assert "on_failure" in completed_task.display_name
    assert "step1" in failed_task.display_name