Mirror of https://github.com/hatchet-dev/hatchet.git (synced 2025-12-30 13:19:44 -06:00)
* fix: contextvars explicit copy
* feat: fix a ton of ruff errors
* fix: couple more ruff rules
* fix: ignore unhelpful rule
* fix: exception group in newer Python versions for improved handling
* fix: workflow docs
* feat: context docs
* feat: simple task counter
* feat: config for setting max tasks
* feat: graceful exit once worker exceeds max tasks
* fix: optional
* fix: docs
* fix: events docs + gen
* chore: gen
* fix: one more dangling task
* feat: add xdist in ci
* fix: CI
* fix: xdist fails me once again
* fix: fix + extend some tests
* fix: test cleanup
* fix: exception group
* fix: ugh
* feat: changelog
* Add Ruff linter callout to post
* refactor: clean up runner error handling
* feat: improved errors
* fix: lint
* feat: hacky serde impl
* fix: improve serde + formatting
* fix: logging
* fix: lint
* fix: unexpected errors
* fix: naming, ruff
* fix: rm cruft
* Fix: Attempt to fix namespacing issue in event waits (#1885)
* feat: add xdist in ci
* fix: attempt to fix namespacing issue in event waits
* fix: namespaced worker names
* fix: applied namespace to the wrong thing
* fix: rm hack
* drive by: namespacing improvement
* fix: delay
* fix: changelog
* fix: initial log work
* fix: more logging work
* fix: rm print cruft
* feat: use a queue to send logs
* fix: sentinel value to stop the loop
* fix: use the log sender everywhere
* fix: make streaming blocking, remove more thread pools
* feat: changelog
* fix: linting issues
* fix: broken test
* chore: bunch more generated stuff
* fix: changelog
* fix: one more
* fix: mypy
* chore: gen
* Feat: Streaming Improvements (#1886)
* Fix: Filter list improvements (#1899)
* fix: uuid validation
* fix: improve filter filtering
* fix: inner join
* fix: bug in workflow cached prop
* chore: bump
* fix: lint
* chore: changelog
* fix: separate filter queries
* feat: improve filter filtering
* fix: queries and the like
* feat: add xdist in ci
* feat: streaming test + gen
* feat: add index to stream event
* fix: rm langfuse dep
* fix: lf
* chore: gen
* feat: impl index for stream on context
* feat: tweak protos
* feat: extend test
* feat: send event index through queue
* feat: first pass + debug logging
* debug: fixes
* debug: more possible issues
* feat: generate new stream event protos
* feat: first pass at using an alternate exchange for replaying incoming stream events
* fix: exchange create timing
* fix: rm unused protos
* chore: gen
* feat: python cleanup
* fix: revert rabbit changes
* fix: unwind a bunch of cruft
* fix: optional index
* chore: gen python
* fix: event index nil handling
* feat: improve test
* fix: stream impl in sdk
* fix: make test faster
* chore: gen a ton more stuff
* fix: test
* fix: sorting helper
* fix: bug
* fix: one more ordering bug
* feat: add some tests for buffering logic
* feat: hangup test
* feat: test no buffering if no index sent
* fix: regular mutex
* fix: pr feedback
* fix: conflicts
72 lines
2.1 KiB
Python
import asyncio

import pytest

from examples.non_retryable.worker import (
    non_retryable_workflow,
    should_not_retry,
    should_not_retry_successful_task,
    should_retry_wrong_exception_type,
)
from hatchet_sdk import Hatchet
from hatchet_sdk.clients.rest.models.v1_task_event_type import V1TaskEventType
from hatchet_sdk.clients.rest.models.v1_workflow_run_details import V1WorkflowRunDetails
from hatchet_sdk.exceptions import FailedTaskRunExceptionGroup


def find_id(runs: V1WorkflowRunDetails, match: str) -> str:
    return next(t.metadata.id for t in runs.tasks if match in t.display_name)


@pytest.mark.asyncio(loop_scope="session")
async def test_no_retry(hatchet: Hatchet) -> None:
    ref = await non_retryable_workflow.aio_run_no_wait()

    with pytest.raises(FailedTaskRunExceptionGroup) as exc_info:
        await ref.aio_result()

    exception_group = exc_info.value

    assert len(exception_group.exceptions) == 2

    exc_text = [e.exc for e in exception_group.exceptions]

    non_retries = [
        e
        for e in exc_text
        if "This task should retry because it's not a NonRetryableException" in e
    ]

    other_errors = [e for e in exc_text if "This task should not retry" in e]

    assert len(non_retries) == 1
    assert len(other_errors) == 1

    await asyncio.sleep(3)

    runs = await hatchet.runs.aio_get(ref.workflow_run_id)
    task_to_id = {
        task: find_id(runs, task.name)
        for task in [
            should_not_retry_successful_task,
            should_retry_wrong_exception_type,
            should_not_retry,
        ]
    }

    retrying_events = [
        e for e in runs.task_events if e.event_type == V1TaskEventType.RETRYING
    ]

    """Only one task should be retried."""
    assert len(retrying_events) == 1

    """The task id of the retrying events should match the tasks that are retried"""
    assert retrying_events[0].task_id == task_to_id[should_retry_wrong_exception_type]

    """Three failed events should emit, one each for the two failing initial runs and one for the retry."""
    assert (
        len([e for e in runs.task_events if e.event_type == V1TaskEventType.FAILED])
        == 3
    )
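For reference, below is a minimal sketch of what the imported examples.non_retryable.worker module could look like. It is an assumption-laden illustration rather than the repository's actual file: the decorator-style API (hatchet.workflow, @workflow.task(retries=...), EmptyModel, Context, NonRetryableException, hatchet.worker) and the import paths are assumed from the v1 Python SDK, and only the failure-message strings are taken from the test's assertions.

# Hypothetical sketch of examples/non_retryable/worker.py; API usage and task
# bodies are assumptions, not copied from the repository.
from hatchet_sdk import Context, EmptyModel, Hatchet
from hatchet_sdk.exceptions import NonRetryableException

hatchet = Hatchet()

non_retryable_workflow = hatchet.workflow(name="non-retryable")


@non_retryable_workflow.task(retries=1)
def should_not_retry(input: EmptyModel, ctx: Context) -> None:
    # Raising NonRetryableException fails the task without consuming its retry.
    raise NonRetryableException("This task should not retry")


@non_retryable_workflow.task(retries=1)
def should_retry_wrong_exception_type(input: EmptyModel, ctx: Context) -> None:
    # Any other exception type is retried once, producing the single RETRYING
    # event and the extra FAILED event the test asserts on.
    raise TypeError(
        "This task should retry because it's not a NonRetryableException"
    )


@non_retryable_workflow.task(retries=1)
def should_not_retry_successful_task(input: EmptyModel, ctx: Context) -> None:
    # Succeeds on the first attempt, so it emits no RETRYING or FAILED events.
    pass


def main() -> None:
    worker = hatchet.worker("non-retryable-worker", workflows=[non_retryable_workflow])
    worker.start()


if __name__ == "__main__":
    main()

Under these assumptions, two of the three tasks fail (one retried once, one not retried) and the retried attempt fails again, which yields the two exception-group members, the single RETRYING event, and the three FAILED events that test_no_retry checks.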