[Docs, Python] Expand Cancellation + Conditional Workflow Docs, Fix cancellation in Python (#1471)

* feat: expand conditional docs

* feat: initial cancellation work + fixing some broken links

* feat: docs on cancellation

* python: fix cancellation

* python: cruft

* chore: version

* feat: python example

* fix: TS cancellation examples

* fix: lint

* feat: go example

* feat: half-baked ts conditional logic workflow

* feat: add ts example conditional workflow

* feat: go example

* feat: go example

* fix: cancellation test

* fix: thanks, copilot!

* Update frontend/docs/pages/home/conditional-workflows.mdx

Co-authored-by: Gabe Ruttner <gabriel.ruttner@gmail.com>

* fix: lint

* chore: lint

* fix: longer sleep

---------

Co-authored-by: Gabe Ruttner <gabriel.ruttner@gmail.com>
This commit is contained in:
Matt Kaye
2025-04-02 19:51:51 -04:00
committed by GitHub
parent 8a4810bfed
commit 77f81476bd
30 changed files with 670 additions and 62 deletions

View File

@@ -4,7 +4,6 @@ from examples.bulk_fanout.worker import ParentInput, bulk_parent_wf
from hatchet_sdk import Hatchet
# requires scope module or higher for shared event loop
@pytest.mark.asyncio()
async def test_run(hatchet: Hatchet) -> None:
result = await bulk_parent_wf.aio_run(input=ParentInput(n=12))

View File

@@ -1,11 +1,29 @@
import asyncio
import pytest
from examples.cancellation.worker import wf
from examples.cancellation.worker import cancellation_workflow
from hatchet_sdk import Hatchet
from hatchet_sdk.clients.rest.models.v1_task_status import V1TaskStatus
# requires scope module or higher for shared event loop
@pytest.mark.asyncio()
async def test_run(hatchet: Hatchet) -> None:
with pytest.raises(Exception, match="(Task exceeded timeout|TIMED_OUT)"):
await wf.aio_run()
async def test_cancellation(hatchet: Hatchet) -> None:
    """Verify that a cancelled workflow run eventually reaches CANCELLED.

    Starts the cancellation example workflow without waiting on it, then
    polls the run status for up to ~30 seconds until it leaves RUNNING,
    asserting that it was cancelled and produced no output.
    """
    ref = await cancellation_workflow.aio_run_no_wait()

    # Sleep for a long time up front since we only need cancellation to
    # happen _eventually_. (This was previously a bare string literal —
    # a no-op expression statement, not a comment.)
    await asyncio.sleep(10)

    for _ in range(30):
        run = await hatchet.runs.aio_get(ref.workflow_run_id)

        # Still running: back off for a second and poll again.
        if run.run.status == V1TaskStatus.RUNNING:
            await asyncio.sleep(1)
            continue

        # Run has settled — it must have been cancelled, with no output.
        assert run.run.status == V1TaskStatus.CANCELLED
        assert not run.run.output
        break
    else:
        # Raise instead of `assert False` so the failure survives `python -O`.
        raise AssertionError("Workflow run did not cancel in time")

View File

@@ -0,0 +1,9 @@
import time
from examples.cancellation.worker import cancellation_workflow, hatchet
# Fire off the cancellation example workflow without blocking on its result.
# NOTE(review): renamed `id` -> `ref` — `id` shadowed the builtin of the same name.
ref = cancellation_workflow.run_no_wait()

# Give the run a moment to actually start before cancelling it.
time.sleep(5)

# Cancel the in-flight run by its workflow run id.
hatchet.runs.cancel(ref.workflow_run_id)

View File

@@ -1,27 +1,49 @@
import asyncio
from datetime import timedelta
import time
from hatchet_sdk import Context, EmptyModel, Hatchet
hatchet = Hatchet(debug=True)
wf = hatchet.workflow(name="CancelWorkflow")
cancellation_workflow = hatchet.workflow(name="CancelWorkflow")
@wf.task(execution_timeout=timedelta(seconds=10), retries=1)
async def step1(input: EmptyModel, ctx: Context) -> None:
i = 0
while not ctx.exit_flag and i < 40:
print(f"Waiting for cancellation {i}")
await asyncio.sleep(1)
i += 1
# ❓ Self-cancelling task
@cancellation_workflow.task()
async def self_cancel(input: EmptyModel, ctx: Context) -> dict[str, str]:
    """Docs example: a task that cancels itself via ``ctx.aio_cancel``.

    If cancellation propagates as expected, the task is torn down during the
    trailing sleep and the error payload below is never returned.
    """
    # Let the task run briefly before triggering cancellation.
    await asyncio.sleep(2)

    # NOTE(review): presumably `exit_flag` is only set after cancellation has
    # been requested, so this print should not fire on a fresh run — confirm.
    if ctx.exit_flag:
        print("Cancelled")

    ## Cancel the task
    await ctx.aio_cancel()

    # Give the cancellation time to take effect; reaching the return below
    # means the task was NOT cancelled.
    await asyncio.sleep(10)

    return {"error": "Task should have been cancelled"}
# !!
# ❓ Checking exit flag
@cancellation_workflow.task()
def check_flag(input: EmptyModel, ctx: Context) -> dict[str, str]:
    """Docs example: a sync task that polls ``ctx.exit_flag`` for cancellation.

    Raises:
        ValueError: once the exit flag is set, i.e. the task was cancelled.
    """
    for i in range(3):
        time.sleep(1)

        # Note: Checking the status of the exit flag is mostly useful for cancelling
        # sync tasks without needing to forcibly kill the thread they're running on.
        if ctx.exit_flag:
            print("Task has been cancelled")
            raise ValueError("Task has been cancelled")

    # Only reached if cancellation never arrived within ~3 seconds.
    return {"error": "Task should have been cancelled"}
# !!
def main() -> None:
worker = hatchet.worker("cancellation-worker", slots=4, workflows=[wf])
worker = hatchet.worker("cancellation-worker", workflows=[cancellation_workflow])
worker.start()

View File

@@ -5,7 +5,6 @@ from hatchet_sdk import Hatchet
from hatchet_sdk.workflow_run import WorkflowRunRef
# requires scope module or higher for shared event loop
@pytest.mark.asyncio()
@pytest.mark.skip(reason="The timing for this test is not reliable")
async def test_run(hatchet: Hatchet) -> None:

View File

@@ -7,7 +7,6 @@ from hatchet_sdk import Hatchet
from hatchet_sdk.workflow_run import WorkflowRunRef
# requires scope module or higher for shared event loop
@pytest.mark.skip(reason="The timing for this test is not reliable")
@pytest.mark.asyncio()
async def test_run(hatchet: Hatchet) -> None:

View File

@@ -4,7 +4,6 @@ from examples.dag.worker import dag_workflow
from hatchet_sdk import Hatchet
# requires scope module or higher for shared event loop
@pytest.mark.asyncio()
async def test_run(hatchet: Hatchet) -> None:
result = await dag_workflow.aio_run()

View File

@@ -6,7 +6,6 @@
# worker = fixture_bg_worker(["poetry", "run", "manual_trigger"])
# # requires scope module or higher for shared event loop
# @pytest.mark.asyncio()
# # @pytest.mark.asyncio()
# async def test_run(hatchet: Hatchet):
# # TODO

View File

@@ -4,7 +4,6 @@ from hatchet_sdk.clients.events import BulkPushEventOptions, BulkPushEventWithMe
from hatchet_sdk.hatchet import Hatchet
# requires scope module or higher for shared event loop
@pytest.mark.asyncio()
async def test_event_push(hatchet: Hatchet) -> None:
e = hatchet.event.push("user:create", {"test": "test"})

View File

@@ -4,7 +4,6 @@ from examples.fanout.worker import ParentInput, parent_wf
from hatchet_sdk import Hatchet
# requires scope module or higher for shared event loop
@pytest.mark.asyncio()
async def test_run(hatchet: Hatchet) -> None:
result = await parent_wf.aio_run(ParentInput(n=2))

View File

@@ -4,7 +4,6 @@ from examples.logger.workflow import logging_workflow
from hatchet_sdk import Hatchet
# requires scope module or higher for shared event loop
@pytest.mark.asyncio()
async def test_run(hatchet: Hatchet) -> None:
result = await logging_workflow.aio_run()

View File

@@ -7,7 +7,6 @@ from hatchet_sdk import Hatchet, Worker
from hatchet_sdk.clients.rest.models.v1_task_status import V1TaskStatus
# requires scope module or higher for shared event loop
@pytest.mark.asyncio()
async def test_run_timeout(aiohatchet: Hatchet, worker: Worker) -> None:
run = on_failure_wf.run_no_wait()

View File

@@ -7,7 +7,6 @@ from examples.rate_limit.worker import rate_limit_workflow
from hatchet_sdk import Hatchet
# requires scope module or higher for shared event loop
@pytest.mark.skip(reason="The timing for this test is not reliable")
@pytest.mark.asyncio()
async def test_run(hatchet: Hatchet) -> None:

View File

@@ -4,7 +4,6 @@ from examples.timeout.worker import refresh_timeout_wf, timeout_wf
from hatchet_sdk import Hatchet
# requires scope module or higher for shared event loop
@pytest.mark.asyncio()
async def test_execution_timeout(hatchet: Hatchet) -> None:
run = timeout_wf.run_no_wait()

View File

@@ -1,6 +1,6 @@
from examples.affinity_workers.worker import affinity_worker_workflow
from examples.bulk_fanout.worker import bulk_child_wf, bulk_parent_wf
from examples.cancellation.worker import wf
from examples.cancellation.worker import cancellation_workflow
from examples.concurrency_limit.worker import concurrency_limit_workflow
from examples.concurrency_limit_rr.worker import concurrency_limit_rr_workflow
from examples.dag.worker import dag_workflow
@@ -40,7 +40,7 @@ def main() -> None:
timeout_wf,
refresh_timeout_wf,
task_condition_workflow,
wf,
cancellation_workflow,
sync_fanout_parent,
sync_fanout_child,
non_retryable_workflow,