diff --git a/examples/go/streaming/consumer/main.go b/examples/go/streaming/consumer/main.go index 0fb815d63..1fcb2887f 100644 --- a/examples/go/streaming/consumer/main.go +++ b/examples/go/streaming/consumer/main.go @@ -9,6 +9,7 @@ import ( v1 "github.com/hatchet-dev/hatchet/pkg/v1" ) +// > Consume func main() { hatchet, err := v1.NewHatchetClient() if err != nil { diff --git a/examples/go/streaming/server/main.go b/examples/go/streaming/server/main.go index 083ac70f2..d82959c5a 100644 --- a/examples/go/streaming/server/main.go +++ b/examples/go/streaming/server/main.go @@ -11,6 +11,7 @@ import ( v1 "github.com/hatchet-dev/hatchet/pkg/v1" ) +// > Server func main() { hatchet, err := v1.NewHatchetClient() if err != nil { diff --git a/examples/go/streaming/shared/task.go b/examples/go/streaming/shared/task.go index 56e37b994..b080638d2 100644 --- a/examples/go/streaming/shared/task.go +++ b/examples/go/streaming/shared/task.go @@ -16,6 +16,7 @@ type StreamTaskOutput struct { Message string `json:"message"` } +// > Streaming const annaKarenina = ` Happy families are all alike; every unhappy family is unhappy in its own way. diff --git a/examples/python/bulk_operations/test_bulk_replay.py b/examples/python/bulk_operations/test_bulk_replay.py index 62afc32bc..834758ae3 100644 --- a/examples/python/bulk_operations/test_bulk_replay.py +++ b/examples/python/bulk_operations/test_bulk_replay.py @@ -77,7 +77,7 @@ async def test_bulk_replay(hatchet: Hatchet) -> None: ) ) - await asyncio.sleep(5) + await asyncio.sleep(10) runs = await hatchet.runs.aio_list( workflow_ids=workflow_ids, diff --git a/examples/python/concurrency_limit/test_concurrency_limit.py b/examples/python/concurrency_limit/test_concurrency_limit.py deleted file mode 100644 index 6e820fad3..000000000 --- a/examples/python/concurrency_limit/test_concurrency_limit.py +++ /dev/null @@ -1,41 +0,0 @@ -import pytest - -from examples.concurrency_limit.worker import WorkflowInput, concurrency_limit_workflow -from hatchet_sdk.workflow_run import WorkflowRunRef - - -@pytest.mark.asyncio(loop_scope="session") -@pytest.mark.skip(reason="The timing for this test is not reliable") -async def test_run() -> None: - num_runs = 6 - runs: list[WorkflowRunRef] = [] - - # Start all runs - for i in range(1, num_runs + 1): - run = concurrency_limit_workflow.run_no_wait( - WorkflowInput(run=i, group_key=str(i)) - ) - runs.append(run) - - # Wait for all results - successful_runs = [] - cancelled_runs = [] - - # Process each run individually - for i, run in enumerate(runs, start=1): - try: - result = await run.aio_result() - successful_runs.append((i, result)) - except Exception as e: - if "CANCELLED_BY_CONCURRENCY_LIMIT" in str(e): - cancelled_runs.append((i, str(e))) - else: - raise # Re-raise if it's an unexpected error - - # Check that we have the correct number of successful and cancelled runs - assert ( - len(successful_runs) == 5 - ), f"Expected 5 successful runs, got {len(successful_runs)}" - assert ( - len(cancelled_runs) == 1 - ), f"Expected 1 cancelled run, got {len(cancelled_runs)}" diff --git a/examples/python/fanout/test_fanout.py b/examples/python/fanout/test_fanout.py index c12d8a7a3..9831c0a52 100644 --- a/examples/python/fanout/test_fanout.py +++ b/examples/python/fanout/test_fanout.py @@ -1,10 +1,50 @@ +import asyncio +from uuid import uuid4 + import pytest from examples.fanout.worker import ParentInput, parent_wf +from hatchet_sdk import Hatchet, TriggerWorkflowOptions @pytest.mark.asyncio(loop_scope="session") -async def test_run() -> 
None: - result = await parent_wf.aio_run(ParentInput(n=2)) +async def test_run(hatchet: Hatchet) -> None: + ref = await parent_wf.aio_run_no_wait( + ParentInput(n=2), + ) + + result = await ref.aio_result() assert len(result["spawn"]["results"]) == 2 + + +@pytest.mark.asyncio(loop_scope="session") +async def test_additional_metadata_propagation(hatchet: Hatchet) -> None: + test_run_id = uuid4().hex + + ref = await parent_wf.aio_run_no_wait( + ParentInput(n=2), + options=TriggerWorkflowOptions( + additional_metadata={"test_run_id": test_run_id} + ), + ) + + await ref.aio_result() + await asyncio.sleep(1) + + runs = await hatchet.runs.aio_list( + parent_task_external_id=ref.workflow_run_id, + additional_metadata={"test_run_id": test_run_id}, + ) + + assert runs.rows + + """Assert that the additional metadata is propagated to the child runs.""" + for run in runs.rows: + assert run.additional_metadata + assert run.additional_metadata["test_run_id"] == test_run_id + + assert run.children + for child in run.children: + assert child.additional_metadata + assert child.additional_metadata["test_run_id"] == test_run_id diff --git a/examples/python/fanout/worker.py b/examples/python/fanout/worker.py index 68c4f0e46..197f85ee9 100644 --- a/examples/python/fanout/worker.py +++ b/examples/python/fanout/worker.py @@ -34,7 +34,7 @@ async def spawn(input: ParentInput, ctx: Context) -> dict[str, Any]: ), ) for i in range(input.n) - ] + ], ) print(f"results {result}") @@ -46,13 +46,13 @@ async def spawn(input: ParentInput, ctx: Context) -> dict[str, Any]: # > FanoutChild @child_wf.task() -def process(input: ChildInput, ctx: Context) -> dict[str, str]: +async def process(input: ChildInput, ctx: Context) -> dict[str, str]: print(f"child process {input.a}") return {"status": input.a} @child_wf.task(parents=[process]) -def process2(input: ChildInput, ctx: Context) -> dict[str, str]: +async def process2(input: ChildInput, ctx: Context) -> dict[str, str]: process_output = ctx.task_output(process) a = process_output["status"] diff --git a/examples/python/fanout_sync/test_fanout_sync.py b/examples/python/fanout_sync/test_fanout_sync.py index d6dd76c80..2171cba38 100644 --- a/examples/python/fanout_sync/test_fanout_sync.py +++ b/examples/python/fanout_sync/test_fanout_sync.py @@ -1,4 +1,10 @@ +import asyncio +from uuid import uuid4 + +import pytest + from examples.fanout_sync.worker import ParentInput, sync_fanout_parent +from hatchet_sdk import Hatchet, TriggerWorkflowOptions def test_run() -> None: @@ -7,3 +13,37 @@ def test_run() -> None: result = sync_fanout_parent.run(ParentInput(n=N)) assert len(result["spawn"]["results"]) == N + + +@pytest.mark.asyncio(loop_scope="session") +async def test_additional_metadata_propagation_sync(hatchet: Hatchet) -> None: + test_run_id = uuid4().hex + + ref = await sync_fanout_parent.aio_run_no_wait( + ParentInput(n=2), + options=TriggerWorkflowOptions( + additional_metadata={"test_run_id": test_run_id} + ), + ) + + await ref.aio_result() + await asyncio.sleep(1) + + runs = await hatchet.runs.aio_list( + parent_task_external_id=ref.workflow_run_id, + additional_metadata={"test_run_id": test_run_id}, + ) + + print(runs.model_dump_json(indent=2)) + + assert runs.rows + + """Assert that the additional metadata is propagated to the child runs.""" + for run in runs.rows: + assert run.additional_metadata + assert run.additional_metadata["test_run_id"] == test_run_id + + assert run.children + for child in run.children: + assert child.additional_metadata + assert 
child.additional_metadata["test_run_id"] == test_run_id diff --git a/examples/python/fanout_sync/worker.py b/examples/python/fanout_sync/worker.py index 095403eae..5e7d68b12 100644 --- a/examples/python/fanout_sync/worker.py +++ b/examples/python/fanout_sync/worker.py @@ -47,6 +47,14 @@ def process(input: ChildInput, ctx: Context) -> dict[str, str]: return {"status": "success " + input.a} +@sync_fanout_child.task(parents=[process]) +def process2(input: ChildInput, ctx: Context) -> dict[str, str]: + process_output = ctx.task_output(process) + a = process_output["status"] + + return {"status2": a + "2"} + + def main() -> None: worker = hatchet.worker( "sync-fanout-worker", diff --git a/examples/python/quickstart/poetry.lock b/examples/python/quickstart/poetry.lock index 50eea70a2..0bb6a6dde 100644 --- a/examples/python/quickstart/poetry.lock +++ b/examples/python/quickstart/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.0.0 and should not be changed by hand. [[package]] name = "aiohappyeyeballs" @@ -114,7 +114,7 @@ propcache = ">=0.2.0" yarl = ">=1.17.0,<2.0" [package.extras] -speedups = ["Brotli ; platform_python_implementation == \"CPython\"", "aiodns (>=3.2.0) ; sys_platform == \"linux\" or sys_platform == \"darwin\"", "brotlicffi ; platform_python_implementation != \"CPython\""] +speedups = ["Brotli", "aiodns (>=3.2.0)", "brotlicffi"] [[package]] name = "aiohttp-retry" @@ -199,12 +199,12 @@ files = [ ] [package.extras] -benchmark = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] -cov = ["cloudpickle ; platform_python_implementation == \"CPython\"", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] -dev = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pre-commit-uv", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] +benchmark = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +cov = ["cloudpickle", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +dev = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pre-commit-uv", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier"] -tests = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= 
\"3.10\"", "pytest-xdist[psutil]"] -tests-mypy = ["mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\""] +tests = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"] [[package]] name = "cel-python" @@ -460,14 +460,14 @@ setuptools = "*" [[package]] name = "hatchet-sdk" -version = "1.15.3" +version = "1.0.0a1" description = "" optional = false python-versions = "<4.0,>=3.10" groups = ["main"] files = [ - {file = "hatchet_sdk-1.15.3-py3-none-any.whl", hash = "sha256:319f56642e8ed06cad56a323e34b2c638c814dd51bb839f9b895033fd79877fd"}, - {file = "hatchet_sdk-1.15.3.tar.gz", hash = "sha256:827b1894d34e0d35b6ef7a856cbe7a07bc5b719449103dbfc9e8a56ca340d628"}, + {file = "hatchet_sdk-1.0.0a1-py3-none-any.whl", hash = "sha256:bfc84358c8842cecd0d95b30645109733b7292dff0db1a776ca862785ee93d7f"}, + {file = "hatchet_sdk-1.0.0a1.tar.gz", hash = "sha256:f0272bbaac6faed75ff727826e9f7b1ac42ae597f9b590e14d392aada9c9692f"}, ] [package.dependencies] @@ -483,11 +483,13 @@ grpcio-tools = [ {version = ">=1.64.1,<1.68.dev0 || >=1.69.dev0", markers = "python_version < \"3.13\""}, {version = ">=1.69.0", markers = "python_version >= \"3.13\""}, ] +nest-asyncio = ">=1.6.0,<2.0.0" prometheus-client = ">=0.21.1,<0.22.0" -protobuf = ">=5.29.5,<6.0.0" +protobuf = ">=5.29.1,<6.0.0" pydantic = ">=2.6.3,<3.0.0" pydantic-settings = ">=2.7.1,<3.0.0" python-dateutil = ">=2.9.0.post0,<3.0.0" +pyyaml = ">=6.0.1,<7.0.0" tenacity = ">=8.4.1" urllib3 = ">=1.26.20" @@ -643,6 +645,18 @@ files = [ [package.dependencies] typing-extensions = {version = ">=4.1.0", markers = "python_version < \"3.11\""} +[[package]] +name = "nest-asyncio" +version = "1.6.0" +description = "Patch asyncio to allow nested event loops" +optional = false +python-versions = ">=3.5" +groups = ["main"] +files = [ + {file = "nest_asyncio-1.6.0-py3-none-any.whl", hash = "sha256:87af6efd6b5e897c81050477ef65c62e2b2f35d51703cae01aff2905b1852e1c"}, + {file = "nest_asyncio-1.6.0.tar.gz", hash = "sha256:6f172d5449aca15afd6c646851f4e31e02c598d553a667e38cafa997cfec55fe"}, +] + [[package]] name = "prometheus-client" version = "0.21.1" @@ -768,23 +782,23 @@ files = [ [[package]] name = "protobuf" -version = "5.29.5" +version = "5.29.4" description = "" optional = false python-versions = ">=3.8" groups = ["main"] files = [ - {file = "protobuf-5.29.5-cp310-abi3-win32.whl", hash = "sha256:3f1c6468a2cfd102ff4703976138844f78ebd1fb45f49011afc5139e9e283079"}, - {file = "protobuf-5.29.5-cp310-abi3-win_amd64.whl", hash = "sha256:3f76e3a3675b4a4d867b52e4a5f5b78a2ef9565549d4037e06cf7b0942b1d3fc"}, - {file = "protobuf-5.29.5-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:e38c5add5a311f2a6eb0340716ef9b039c1dfa428b28f25a7838ac329204a671"}, - {file = "protobuf-5.29.5-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:fa18533a299d7ab6c55a238bf8629311439995f2e7eca5caaff08663606e9015"}, - {file = "protobuf-5.29.5-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:63848923da3325e1bf7e9003d680ce6e14b07e55d0473253a690c3a8b8fd6e61"}, - {file = "protobuf-5.29.5-cp38-cp38-win32.whl", hash = "sha256:ef91363ad4faba7b25d844ef1ada59ff1604184c0bcd8b39b8a6bef15e1af238"}, - {file = "protobuf-5.29.5-cp38-cp38-win_amd64.whl", hash = "sha256:7318608d56b6402d2ea7704ff1e1e4597bee46d760e7e4dd42a3d45e24b87f2e"}, - {file = 
"protobuf-5.29.5-cp39-cp39-win32.whl", hash = "sha256:6f642dc9a61782fa72b90878af134c5afe1917c89a568cd3476d758d3c3a0736"}, - {file = "protobuf-5.29.5-cp39-cp39-win_amd64.whl", hash = "sha256:470f3af547ef17847a28e1f47200a1cbf0ba3ff57b7de50d22776607cd2ea353"}, - {file = "protobuf-5.29.5-py3-none-any.whl", hash = "sha256:6cf42630262c59b2d8de33954443d94b746c952b01434fc58a417fdbd2e84bd5"}, - {file = "protobuf-5.29.5.tar.gz", hash = "sha256:bc1463bafd4b0929216c35f437a8e28731a2b7fe3d98bb77a600efced5a15c84"}, + {file = "protobuf-5.29.4-cp310-abi3-win32.whl", hash = "sha256:13eb236f8eb9ec34e63fc8b1d6efd2777d062fa6aaa68268fb67cf77f6839ad7"}, + {file = "protobuf-5.29.4-cp310-abi3-win_amd64.whl", hash = "sha256:bcefcdf3976233f8a502d265eb65ea740c989bacc6c30a58290ed0e519eb4b8d"}, + {file = "protobuf-5.29.4-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:307ecba1d852ec237e9ba668e087326a67564ef83e45a0189a772ede9e854dd0"}, + {file = "protobuf-5.29.4-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:aec4962f9ea93c431d5714ed1be1c93f13e1a8618e70035ba2b0564d9e633f2e"}, + {file = "protobuf-5.29.4-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:d7d3f7d1d5a66ed4942d4fefb12ac4b14a29028b209d4bfb25c68ae172059922"}, + {file = "protobuf-5.29.4-cp38-cp38-win32.whl", hash = "sha256:1832f0515b62d12d8e6ffc078d7e9eb06969aa6dc13c13e1036e39d73bebc2de"}, + {file = "protobuf-5.29.4-cp38-cp38-win_amd64.whl", hash = "sha256:476cb7b14914c780605a8cf62e38c2a85f8caff2e28a6a0bad827ec7d6c85d68"}, + {file = "protobuf-5.29.4-cp39-cp39-win32.whl", hash = "sha256:fd32223020cb25a2cc100366f1dedc904e2d71d9322403224cdde5fdced0dabe"}, + {file = "protobuf-5.29.4-cp39-cp39-win_amd64.whl", hash = "sha256:678974e1e3a9b975b8bc2447fca458db5f93a2fb6b0c8db46b6675b5b5346812"}, + {file = "protobuf-5.29.4-py3-none-any.whl", hash = "sha256:3fde11b505e1597f71b875ef2fc52062b6a9740e5f7c8997ce878b6009145862"}, + {file = "protobuf-5.29.4.tar.gz", hash = "sha256:4f1dfcd7997b31ef8f53ec82781ff434a28bf71d9102ddde14d076adcfc78c99"}, ] [[package]] @@ -806,7 +820,7 @@ typing-extensions = ">=4.12.2" [package.extras] email = ["email-validator (>=2.0.0)"] -timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows\""] +timezone = ["tzdata"] [[package]] name = "pydantic-core" @@ -1048,13 +1062,13 @@ files = [ ] [package.extras] -check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\"", "ruff (>=0.8.0) ; sys_platform != \"cygwin\""] -core = ["importlib_metadata (>=6) ; python_version < \"3.10\"", "jaraco.functools (>=4)", "jaraco.text (>=3.7)", "more_itertools", "more_itertools (>=8.8)", "packaging (>=24.2)", "platformdirs (>=4.2.2)", "tomli (>=2.0.1) ; python_version < \"3.11\"", "wheel (>=0.43.0)"] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)", "ruff (>=0.8.0)"] +core = ["importlib_metadata (>=6)", "jaraco.functools (>=4)", "jaraco.text (>=3.7)", "more_itertools", "more_itertools (>=8.8)", "packaging (>=24.2)", "platformdirs (>=4.2.2)", "tomli (>=2.0.1)", "wheel (>=0.43.0)"] cover = ["pytest-cov"] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier", "towncrier (<24.7)"] enabler = ["pytest-enabler (>=2.2)"] -test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21) ; python_version 
>= \"3.9\" and sys_platform != \"cygwin\"", "jaraco.envs (>=2.2)", "jaraco.path (>=3.7.2)", "jaraco.test (>=5.5)", "packaging (>=24.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf ; sys_platform != \"cygwin\"", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"] -type = ["importlib_metadata (>=7.0.2) ; python_version < \"3.10\"", "jaraco.develop (>=7.21) ; sys_platform != \"cygwin\"", "mypy (==1.14.*)", "pytest-mypy"] +test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.7.2)", "jaraco.test (>=5.5)", "packaging (>=24.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"] +type = ["importlib_metadata (>=7.0.2)", "jaraco.develop (>=7.21)", "mypy (==1.14.*)", "pytest-mypy"] [[package]] name = "six" @@ -1133,7 +1147,7 @@ files = [ ] [package.extras] -brotli = ["brotli (>=1.0.9) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\""] +brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] h2 = ["h2 (>=4,<5)"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["zstandard (>=0.18.0)"] @@ -1238,4 +1252,4 @@ propcache = ">=0.2.0" [metadata] lock-version = "2.1" python-versions = "^3.10" -content-hash = "ef28d3c03e5aa738c01e4004c6ee5037701fac8f034f2e09648af31c6d22b13d" +content-hash = "74c12e499aa797ca5c8559af579f1212b0e4e3a77f068f9385db39d70ba304e0" diff --git a/examples/python/rate_limit/test_rate_limit.py b/examples/python/rate_limit/test_rate_limit.py deleted file mode 100644 index 18e7b0454..000000000 --- a/examples/python/rate_limit/test_rate_limit.py +++ /dev/null @@ -1,27 +0,0 @@ -import asyncio -import time - -import pytest - -from examples.rate_limit.worker import rate_limit_workflow - - -@pytest.mark.skip(reason="The timing for this test is not reliable") -@pytest.mark.asyncio(loop_scope="session") -async def test_run() -> None: - - run1 = rate_limit_workflow.run_no_wait() - run2 = rate_limit_workflow.run_no_wait() - run3 = rate_limit_workflow.run_no_wait() - - start_time = time.time() - - await asyncio.gather(run1.aio_result(), run2.aio_result(), run3.aio_result()) - - end_time = time.time() - - total_time = end_time - start_time - - assert ( - 1 <= total_time <= 5 - ), f"Expected runtime to be a bit more than 1 seconds, but it took {total_time:.2f} seconds" diff --git a/examples/python/return_exceptions/test_return_exceptions.py b/examples/python/return_exceptions/test_return_exceptions.py new file mode 100644 index 000000000..2dca8d679 --- /dev/null +++ b/examples/python/return_exceptions/test_return_exceptions.py @@ -0,0 +1,40 @@ +import asyncio + +import pytest + +from examples.return_exceptions.worker import Input, return_exceptions_task + + +@pytest.mark.asyncio(loop_scope="session") +async def test_return_exceptions_async() -> None: + results = await return_exceptions_task.aio_run_many( + [ + return_exceptions_task.create_bulk_run_item(input=Input(index=i)) + for i in range(10) + ], + return_exceptions=True, + ) + + for i, result in enumerate(results): + if i % 2 == 0: + assert isinstance(result, Exception) + assert f"error in task with index {i}" in str(result) + else: + assert result == {"message": 
"this is a successful task."} + + +def test_return_exceptions_sync() -> None: + results = return_exceptions_task.run_many( + [ + return_exceptions_task.create_bulk_run_item(input=Input(index=i)) + for i in range(10) + ], + return_exceptions=True, + ) + + for i, result in enumerate(results): + if i % 2 == 0: + assert isinstance(result, Exception) + assert f"error in task with index {i}" in str(result) + else: + assert result == {"message": "this is a successful task."} diff --git a/examples/python/return_exceptions/worker.py b/examples/python/return_exceptions/worker.py new file mode 100644 index 000000000..10f5db8fa --- /dev/null +++ b/examples/python/return_exceptions/worker.py @@ -0,0 +1,17 @@ +from pydantic import BaseModel + +from hatchet_sdk import Context, EmptyModel, Hatchet + +hatchet = Hatchet() + + +class Input(EmptyModel): + index: int + + +@hatchet.task(input_validator=Input) +async def return_exceptions_task(input: Input, ctx: Context) -> dict[str, str]: + if input.index % 2 == 0: + raise ValueError(f"error in task with index {input.index}") + + return {"message": "this is a successful task."} diff --git a/examples/python/timeout/worker.py b/examples/python/timeout/worker.py index 7442a3475..c35f25541 100644 --- a/examples/python/timeout/worker.py +++ b/examples/python/timeout/worker.py @@ -15,10 +15,10 @@ timeout_wf = hatchet.workflow( # > ExecutionTimeout # 👀 Specify an execution timeout on a task @timeout_wf.task( - execution_timeout=timedelta(seconds=4), schedule_timeout=timedelta(minutes=10) + execution_timeout=timedelta(seconds=5), schedule_timeout=timedelta(minutes=10) ) def timeout_task(input: EmptyModel, ctx: Context) -> dict[str, str]: - time.sleep(5) + time.sleep(30) return {"status": "success"} @@ -29,7 +29,6 @@ refresh_timeout_wf = hatchet.workflow(name="RefreshTimeoutWorkflow") # > RefreshTimeout @refresh_timeout_wf.task(execution_timeout=timedelta(seconds=4)) def refresh_task(input: EmptyModel, ctx: Context) -> dict[str, str]: - ctx.refresh_timeout(timedelta(seconds=10)) time.sleep(5) diff --git a/examples/python/unit_testing/test_unit.py b/examples/python/unit_testing/test_unit.py new file mode 100644 index 000000000..cebc84f17 --- /dev/null +++ b/examples/python/unit_testing/test_unit.py @@ -0,0 +1,96 @@ +import pytest + +from examples.unit_testing.workflows import ( + Lifespan, + UnitTestInput, + UnitTestOutput, + async_complex_workflow, + async_simple_workflow, + async_standalone, + durable_async_complex_workflow, + durable_async_simple_workflow, + durable_async_standalone, + durable_sync_complex_workflow, + durable_sync_simple_workflow, + durable_sync_standalone, + start, + sync_complex_workflow, + sync_simple_workflow, + sync_standalone, +) +from hatchet_sdk import Task + + +@pytest.mark.parametrize( + "func", + [ + sync_standalone, + durable_sync_standalone, + sync_simple_workflow, + durable_sync_simple_workflow, + sync_complex_workflow, + durable_sync_complex_workflow, + ], +) +def test_simple_unit_sync(func: Task[UnitTestInput, UnitTestOutput]) -> None: + input = UnitTestInput(key="test_key", number=42) + additional_metadata = {"meta_key": "meta_value"} + lifespan = Lifespan(mock_db_url="sqlite:///:memory:") + retry_count = 1 + + expected_output = UnitTestOutput( + key=input.key, + number=input.number, + additional_metadata=additional_metadata, + retry_count=retry_count, + mock_db_url=lifespan.mock_db_url, + ) + + assert ( + func.mock_run( + input=input, + additional_metadata=additional_metadata, + lifespan=lifespan, + retry_count=retry_count, + 
parent_outputs={start.name: expected_output.model_dump()}, + ) + == expected_output + ) + + +@pytest.mark.parametrize( + "func", + [ + async_standalone, + durable_async_standalone, + async_simple_workflow, + durable_async_simple_workflow, + async_complex_workflow, + durable_async_complex_workflow, + ], +) +@pytest.mark.asyncio(loop_scope="session") +async def test_simple_unit_async(func: Task[UnitTestInput, UnitTestOutput]) -> None: + input = UnitTestInput(key="test_key", number=42) + additional_metadata = {"meta_key": "meta_value"} + lifespan = Lifespan(mock_db_url="sqlite:///:memory:") + retry_count = 1 + + expected_output = UnitTestOutput( + key=input.key, + number=input.number, + additional_metadata=additional_metadata, + retry_count=retry_count, + mock_db_url=lifespan.mock_db_url, + ) + + assert ( + await func.aio_mock_run( + input=input, + additional_metadata=additional_metadata, + lifespan=lifespan, + retry_count=retry_count, + parent_outputs={start.name: expected_output.model_dump()}, + ) + == expected_output + ) diff --git a/examples/python/unit_testing/workflows.py b/examples/python/unit_testing/workflows.py new file mode 100644 index 000000000..ae42e61c9 --- /dev/null +++ b/examples/python/unit_testing/workflows.py @@ -0,0 +1,171 @@ +from typing import cast + +from pydantic import BaseModel + +from hatchet_sdk import Context, DurableContext, EmptyModel, Hatchet + + +class UnitTestInput(BaseModel): + key: str + number: int + + +class Lifespan(BaseModel): + mock_db_url: str + + +class UnitTestOutput(UnitTestInput, Lifespan): + additional_metadata: dict[str, str] + retry_count: int + + +hatchet = Hatchet() + + +@hatchet.task(input_validator=UnitTestInput) +def sync_standalone(input: UnitTestInput, ctx: Context) -> UnitTestOutput: + return UnitTestOutput( + key=input.key, + number=input.number, + additional_metadata=ctx.additional_metadata, + retry_count=ctx.retry_count, + mock_db_url=cast(Lifespan, ctx.lifespan).mock_db_url, + ) + + +@hatchet.task(input_validator=UnitTestInput) +async def async_standalone(input: UnitTestInput, ctx: Context) -> UnitTestOutput: + return UnitTestOutput( + key=input.key, + number=input.number, + additional_metadata=ctx.additional_metadata, + retry_count=ctx.retry_count, + mock_db_url=cast(Lifespan, ctx.lifespan).mock_db_url, + ) + + +@hatchet.durable_task(input_validator=UnitTestInput) +def durable_sync_standalone( + input: UnitTestInput, ctx: DurableContext +) -> UnitTestOutput: + return UnitTestOutput( + key=input.key, + number=input.number, + additional_metadata=ctx.additional_metadata, + retry_count=ctx.retry_count, + mock_db_url=cast(Lifespan, ctx.lifespan).mock_db_url, + ) + + +@hatchet.durable_task(input_validator=UnitTestInput) +async def durable_async_standalone( + input: UnitTestInput, ctx: DurableContext +) -> UnitTestOutput: + return UnitTestOutput( + key=input.key, + number=input.number, + additional_metadata=ctx.additional_metadata, + retry_count=ctx.retry_count, + mock_db_url=cast(Lifespan, ctx.lifespan).mock_db_url, + ) + + +simple_workflow = hatchet.workflow( + name="simple-unit-test-workflow", input_validator=UnitTestInput +) + + +@simple_workflow.task() +def sync_simple_workflow(input: UnitTestInput, ctx: Context) -> UnitTestOutput: + return UnitTestOutput( + key=input.key, + number=input.number, + additional_metadata=ctx.additional_metadata, + retry_count=ctx.retry_count, + mock_db_url=cast(Lifespan, ctx.lifespan).mock_db_url, + ) + + +@simple_workflow.task() +async def async_simple_workflow(input: UnitTestInput, ctx: Context) -> 
UnitTestOutput: + return UnitTestOutput( + key=input.key, + number=input.number, + additional_metadata=ctx.additional_metadata, + retry_count=ctx.retry_count, + mock_db_url=cast(Lifespan, ctx.lifespan).mock_db_url, + ) + + +@simple_workflow.durable_task() +def durable_sync_simple_workflow( + input: UnitTestInput, ctx: DurableContext +) -> UnitTestOutput: + return UnitTestOutput( + key=input.key, + number=input.number, + additional_metadata=ctx.additional_metadata, + retry_count=ctx.retry_count, + mock_db_url=cast(Lifespan, ctx.lifespan).mock_db_url, + ) + + +@simple_workflow.durable_task() +async def durable_async_simple_workflow( + input: UnitTestInput, ctx: DurableContext +) -> UnitTestOutput: + return UnitTestOutput( + key=input.key, + number=input.number, + additional_metadata=ctx.additional_metadata, + retry_count=ctx.retry_count, + mock_db_url=cast(Lifespan, ctx.lifespan).mock_db_url, + ) + + +complex_workflow = hatchet.workflow( + name="complex-unit-test-workflow", input_validator=UnitTestInput +) + + +@complex_workflow.task() +async def start(input: UnitTestInput, ctx: Context) -> UnitTestOutput: + return UnitTestOutput( + key=input.key, + number=input.number, + additional_metadata=ctx.additional_metadata, + retry_count=ctx.retry_count, + mock_db_url=cast(Lifespan, ctx.lifespan).mock_db_url, + ) + + +@complex_workflow.task( + parents=[start], +) +def sync_complex_workflow(input: UnitTestInput, ctx: Context) -> UnitTestOutput: + return ctx.task_output(start) + + +@complex_workflow.task( + parents=[start], +) +async def async_complex_workflow(input: UnitTestInput, ctx: Context) -> UnitTestOutput: + return ctx.task_output(start) + + +@complex_workflow.durable_task( + parents=[start], +) +def durable_sync_complex_workflow( + input: UnitTestInput, ctx: DurableContext +) -> UnitTestOutput: + return ctx.task_output(start) + + +@complex_workflow.durable_task( + parents=[start], +) +async def durable_async_complex_workflow( + input: UnitTestInput, ctx: DurableContext +) -> UnitTestOutput: + return ctx.task_output(start) diff --git a/examples/python/worker.py b/examples/python/worker.py index ce248513f..26f226598 100644 --- a/examples/python/worker.py +++ b/examples/python/worker.py @@ -23,6 +23,7 @@ from examples.lifespans.simple import lifespan, lifespan_task from examples.logger.workflow import logging_workflow from examples.non_retryable.worker import non_retryable_workflow from examples.on_failure.worker import on_failure_wf, on_failure_wf_with_details +from examples.return_exceptions.worker import return_exceptions_task from examples.simple.worker import simple, simple_durable from examples.timeout.worker import refresh_timeout_wf, timeout_wf from hatchet_sdk import Hatchet @@ -65,6 +66,7 @@ def main() -> None: bulk_replay_test_1, bulk_replay_test_2, bulk_replay_test_3, + return_exceptions_task, ], lifespan=lifespan, ) diff --git a/frontend/app/src/next/lib/docs/generated/snips/python/bulk_operations/test_bulk_replay.ts b/frontend/app/src/next/lib/docs/generated/snips/python/bulk_operations/test_bulk_replay.ts index e8f7f4dc0..fd6d67602 100644 --- a/frontend/app/src/next/lib/docs/generated/snips/python/bulk_operations/test_bulk_replay.ts +++ b/frontend/app/src/next/lib/docs/generated/snips/python/bulk_operations/test_bulk_replay.ts @@ -3,7 +3,7 @@ import { Snippet } from '@/next/lib/docs/generated/snips/types'; const snippet: Snippet = { language: 'python', content: - 'import asyncio\nfrom datetime import datetime, timedelta, timezone\nfrom uuid import uuid4\n\nimport pytest\n\nfrom 
examples.bulk_operations.worker import (\n bulk_replay_test_1,\n bulk_replay_test_2,\n bulk_replay_test_3,\n)\nfrom hatchet_sdk import BulkCancelReplayOpts, Hatchet, RunFilter, TriggerWorkflowOptions\nfrom hatchet_sdk.clients.rest.models.v1_task_status import V1TaskStatus\n\n\n@pytest.mark.asyncio(loop_scope="session")\nasync def test_bulk_replay(hatchet: Hatchet) -> None:\n test_run_id = str(uuid4())\n n = 100\n\n with pytest.raises(Exception):\n await bulk_replay_test_1.aio_run_many(\n [\n bulk_replay_test_1.create_bulk_run_item(\n options=TriggerWorkflowOptions(\n additional_metadata={\n "test_run_id": test_run_id,\n }\n )\n )\n for _ in range(n + 1)\n ]\n )\n\n with pytest.raises(Exception):\n await bulk_replay_test_2.aio_run_many(\n [\n bulk_replay_test_2.create_bulk_run_item(\n options=TriggerWorkflowOptions(\n additional_metadata={\n "test_run_id": test_run_id,\n }\n )\n )\n for _ in range((n // 2) - 1)\n ]\n )\n\n with pytest.raises(Exception):\n await bulk_replay_test_3.aio_run_many(\n [\n bulk_replay_test_3.create_bulk_run_item(\n options=TriggerWorkflowOptions(\n additional_metadata={\n "test_run_id": test_run_id,\n }\n )\n )\n for _ in range((n // 2) - 2)\n ]\n )\n\n workflow_ids = [\n bulk_replay_test_1.id,\n bulk_replay_test_2.id,\n bulk_replay_test_3.id,\n ]\n\n ## Should result in two batches of replays\n await hatchet.runs.aio_bulk_replay(\n opts=BulkCancelReplayOpts(\n filters=RunFilter(\n workflow_ids=workflow_ids,\n since=datetime.now(tz=timezone.utc) - timedelta(minutes=2),\n additional_metadata={"test_run_id": test_run_id},\n )\n )\n )\n\n await asyncio.sleep(5)\n\n runs = await hatchet.runs.aio_list(\n workflow_ids=workflow_ids,\n since=datetime.now(tz=timezone.utc) - timedelta(minutes=2),\n additional_metadata={"test_run_id": test_run_id},\n limit=1000,\n )\n\n assert len(runs.rows) == n + 1 + (n // 2 - 1) + (n // 2 - 2)\n\n for run in runs.rows:\n assert run.status == V1TaskStatus.COMPLETED\n assert run.retry_count == 1\n assert run.attempt == 2\n\n assert (\n len([r for r in runs.rows if r.workflow_id == bulk_replay_test_1.id]) == n + 1\n )\n assert (\n len([r for r in runs.rows if r.workflow_id == bulk_replay_test_2.id])\n == n // 2 - 1\n )\n assert (\n len([r for r in runs.rows if r.workflow_id == bulk_replay_test_3.id])\n == n // 2 - 2\n )\n', + 'import asyncio\nfrom datetime import datetime, timedelta, timezone\nfrom uuid import uuid4\n\nimport pytest\n\nfrom examples.bulk_operations.worker import (\n bulk_replay_test_1,\n bulk_replay_test_2,\n bulk_replay_test_3,\n)\nfrom hatchet_sdk import BulkCancelReplayOpts, Hatchet, RunFilter, TriggerWorkflowOptions\nfrom hatchet_sdk.clients.rest.models.v1_task_status import V1TaskStatus\n\n\n@pytest.mark.asyncio(loop_scope="session")\nasync def test_bulk_replay(hatchet: Hatchet) -> None:\n test_run_id = str(uuid4())\n n = 100\n\n with pytest.raises(Exception):\n await bulk_replay_test_1.aio_run_many(\n [\n bulk_replay_test_1.create_bulk_run_item(\n options=TriggerWorkflowOptions(\n additional_metadata={\n "test_run_id": test_run_id,\n }\n )\n )\n for _ in range(n + 1)\n ]\n )\n\n with pytest.raises(Exception):\n await bulk_replay_test_2.aio_run_many(\n [\n bulk_replay_test_2.create_bulk_run_item(\n options=TriggerWorkflowOptions(\n additional_metadata={\n "test_run_id": test_run_id,\n }\n )\n )\n for _ in range((n // 2) - 1)\n ]\n )\n\n with pytest.raises(Exception):\n await bulk_replay_test_3.aio_run_many(\n [\n bulk_replay_test_3.create_bulk_run_item(\n options=TriggerWorkflowOptions(\n additional_metadata={\n 
"test_run_id": test_run_id,\n }\n )\n )\n for _ in range((n // 2) - 2)\n ]\n )\n\n workflow_ids = [\n bulk_replay_test_1.id,\n bulk_replay_test_2.id,\n bulk_replay_test_3.id,\n ]\n\n ## Should result in two batches of replays\n await hatchet.runs.aio_bulk_replay(\n opts=BulkCancelReplayOpts(\n filters=RunFilter(\n workflow_ids=workflow_ids,\n since=datetime.now(tz=timezone.utc) - timedelta(minutes=2),\n additional_metadata={"test_run_id": test_run_id},\n )\n )\n )\n\n await asyncio.sleep(10)\n\n runs = await hatchet.runs.aio_list(\n workflow_ids=workflow_ids,\n since=datetime.now(tz=timezone.utc) - timedelta(minutes=2),\n additional_metadata={"test_run_id": test_run_id},\n limit=1000,\n )\n\n assert len(runs.rows) == n + 1 + (n // 2 - 1) + (n // 2 - 2)\n\n for run in runs.rows:\n assert run.status == V1TaskStatus.COMPLETED\n assert run.retry_count == 1\n assert run.attempt == 2\n\n assert (\n len([r for r in runs.rows if r.workflow_id == bulk_replay_test_1.id]) == n + 1\n )\n assert (\n len([r for r in runs.rows if r.workflow_id == bulk_replay_test_2.id])\n == n // 2 - 1\n )\n assert (\n len([r for r in runs.rows if r.workflow_id == bulk_replay_test_3.id])\n == n // 2 - 2\n )\n', source: 'out/python/bulk_operations/test_bulk_replay.py', blocks: {}, highlights: {}, diff --git a/frontend/app/src/next/lib/docs/generated/snips/python/concurrency_limit/index.ts b/frontend/app/src/next/lib/docs/generated/snips/python/concurrency_limit/index.ts index 7d8e0bc91..245a4ee68 100644 --- a/frontend/app/src/next/lib/docs/generated/snips/python/concurrency_limit/index.ts +++ b/frontend/app/src/next/lib/docs/generated/snips/python/concurrency_limit/index.ts @@ -1,7 +1,5 @@ -import test_concurrency_limit from './test_concurrency_limit'; import trigger from './trigger'; import worker from './worker'; -export { test_concurrency_limit }; export { trigger }; export { worker }; diff --git a/frontend/app/src/next/lib/docs/generated/snips/python/concurrency_limit/test_concurrency_limit.ts b/frontend/app/src/next/lib/docs/generated/snips/python/concurrency_limit/test_concurrency_limit.ts deleted file mode 100644 index 0314912fa..000000000 --- a/frontend/app/src/next/lib/docs/generated/snips/python/concurrency_limit/test_concurrency_limit.ts +++ /dev/null @@ -1,12 +0,0 @@ -import { Snippet } from '@/next/lib/docs/generated/snips/types'; - -const snippet: Snippet = { - language: 'python', - content: - 'import pytest\n\nfrom examples.concurrency_limit.worker import WorkflowInput, concurrency_limit_workflow\nfrom hatchet_sdk.workflow_run import WorkflowRunRef\n\n\n@pytest.mark.asyncio(loop_scope="session")\n@pytest.mark.skip(reason="The timing for this test is not reliable")\nasync def test_run() -> None:\n num_runs = 6\n runs: list[WorkflowRunRef] = []\n\n # Start all runs\n for i in range(1, num_runs + 1):\n run = concurrency_limit_workflow.run_no_wait(\n WorkflowInput(run=i, group_key=str(i))\n )\n runs.append(run)\n\n # Wait for all results\n successful_runs = []\n cancelled_runs = []\n\n # Process each run individually\n for i, run in enumerate(runs, start=1):\n try:\n result = await run.aio_result()\n successful_runs.append((i, result))\n except Exception as e:\n if "CANCELLED_BY_CONCURRENCY_LIMIT" in str(e):\n cancelled_runs.append((i, str(e)))\n else:\n raise # Re-raise if it\'s an unexpected error\n\n # Check that we have the correct number of successful and cancelled runs\n assert (\n len(successful_runs) == 5\n ), f"Expected 5 successful runs, got {len(successful_runs)}"\n assert (\n len(cancelled_runs) == 
1\n ), f"Expected 1 cancelled run, got {len(cancelled_runs)}"\n', - source: 'out/python/concurrency_limit/test_concurrency_limit.py', - blocks: {}, - highlights: {}, -}; - -export default snippet; diff --git a/frontend/app/src/next/lib/docs/generated/snips/python/fanout/test_fanout.ts b/frontend/app/src/next/lib/docs/generated/snips/python/fanout/test_fanout.ts index ff2175aef..a6afc8da4 100644 --- a/frontend/app/src/next/lib/docs/generated/snips/python/fanout/test_fanout.ts +++ b/frontend/app/src/next/lib/docs/generated/snips/python/fanout/test_fanout.ts @@ -3,7 +3,7 @@ import { Snippet } from '@/next/lib/docs/generated/snips/types'; const snippet: Snippet = { language: 'python', content: - 'import pytest\n\nfrom examples.fanout.worker import ParentInput, parent_wf\n\n\n@pytest.mark.asyncio(loop_scope="session")\nasync def test_run() -> None:\n result = await parent_wf.aio_run(ParentInput(n=2))\n\n assert len(result["spawn"]["results"]) == 2\n', + 'import asyncio\nfrom uuid import uuid4\n\nimport pytest\n\nfrom examples.fanout.worker import ParentInput, parent_wf\nfrom hatchet_sdk import Hatchet, TriggerWorkflowOptions\n\n\n@pytest.mark.asyncio(loop_scope="session")\nasync def test_run(hatchet: Hatchet) -> None:\n ref = await parent_wf.aio_run_no_wait(\n ParentInput(n=2),\n )\n\n result = await ref.aio_result()\n\n assert len(result["spawn"]["results"]) == 2\n\n\n@pytest.mark.asyncio(loop_scope="session")\nasync def test_additional_metadata_propagation(hatchet: Hatchet) -> None:\n test_run_id = uuid4().hex\n\n ref = await parent_wf.aio_run_no_wait(\n ParentInput(n=2),\n options=TriggerWorkflowOptions(\n additional_metadata={"test_run_id": test_run_id}\n ),\n )\n\n await ref.aio_result()\n await asyncio.sleep(1)\n\n runs = await hatchet.runs.aio_list(\n parent_task_external_id=ref.workflow_run_id,\n additional_metadata={"test_run_id": test_run_id},\n )\n\n assert runs.rows\n\n """Assert that the additional metadata is propagated to the child runs."""\n for run in runs.rows:\n assert run.additional_metadata\n assert run.additional_metadata["test_run_id"] == test_run_id\n\n assert run.children\n for child in run.children:\n assert child.additional_metadata\n assert child.additional_metadata["test_run_id"] == test_run_id\n', source: 'out/python/fanout/test_fanout.py', blocks: {}, highlights: {}, diff --git a/frontend/app/src/next/lib/docs/generated/snips/python/fanout/worker.ts b/frontend/app/src/next/lib/docs/generated/snips/python/fanout/worker.ts index c3f37caa3..60fabc068 100644 --- a/frontend/app/src/next/lib/docs/generated/snips/python/fanout/worker.ts +++ b/frontend/app/src/next/lib/docs/generated/snips/python/fanout/worker.ts @@ -3,7 +3,7 @@ import { Snippet } from '@/next/lib/docs/generated/snips/types'; const snippet: Snippet = { language: 'python', content: - 'from datetime import timedelta\nfrom typing import Any\n\nfrom pydantic import BaseModel\n\nfrom hatchet_sdk import Context, Hatchet, TriggerWorkflowOptions\n\nhatchet = Hatchet(debug=True)\n\n\n# > FanoutParent\nclass ParentInput(BaseModel):\n n: int = 100\n\n\nclass ChildInput(BaseModel):\n a: str\n\n\nparent_wf = hatchet.workflow(name="FanoutParent", input_validator=ParentInput)\nchild_wf = hatchet.workflow(name="FanoutChild", input_validator=ChildInput)\n\n\n@parent_wf.task(execution_timeout=timedelta(minutes=5))\nasync def spawn(input: ParentInput, ctx: Context) -> dict[str, Any]:\n print("spawning child")\n\n result = await child_wf.aio_run_many(\n [\n child_wf.create_bulk_run_item(\n input=ChildInput(a=str(i)),\n 
options=TriggerWorkflowOptions(\n additional_metadata={"hello": "earth"}, key=f"child{i}"\n ),\n )\n for i in range(input.n)\n ]\n )\n\n print(f"results {result}")\n\n return {"results": result}\n\n\n\n\n# > FanoutChild\n@child_wf.task()\ndef process(input: ChildInput, ctx: Context) -> dict[str, str]:\n print(f"child process {input.a}")\n return {"status": input.a}\n\n\n@child_wf.task(parents=[process])\ndef process2(input: ChildInput, ctx: Context) -> dict[str, str]:\n process_output = ctx.task_output(process)\n a = process_output["status"]\n\n return {"status2": a + "2"}\n\n\n\nchild_wf.create_bulk_run_item()\n\n\ndef main() -> None:\n worker = hatchet.worker("fanout-worker", slots=40, workflows=[parent_wf, child_wf])\n worker.start()\n\n\nif __name__ == "__main__":\n main()\n', + 'from datetime import timedelta\nfrom typing import Any\n\nfrom pydantic import BaseModel\n\nfrom hatchet_sdk import Context, Hatchet, TriggerWorkflowOptions\n\nhatchet = Hatchet(debug=True)\n\n\n# > FanoutParent\nclass ParentInput(BaseModel):\n n: int = 100\n\n\nclass ChildInput(BaseModel):\n a: str\n\n\nparent_wf = hatchet.workflow(name="FanoutParent", input_validator=ParentInput)\nchild_wf = hatchet.workflow(name="FanoutChild", input_validator=ChildInput)\n\n\n@parent_wf.task(execution_timeout=timedelta(minutes=5))\nasync def spawn(input: ParentInput, ctx: Context) -> dict[str, Any]:\n print("spawning child")\n\n result = await child_wf.aio_run_many(\n [\n child_wf.create_bulk_run_item(\n input=ChildInput(a=str(i)),\n options=TriggerWorkflowOptions(\n additional_metadata={"hello": "earth"}, key=f"child{i}"\n ),\n )\n for i in range(input.n)\n ],\n )\n\n print(f"results {result}")\n\n return {"results": result}\n\n\n\n\n# > FanoutChild\n@child_wf.task()\nasync def process(input: ChildInput, ctx: Context) -> dict[str, str]:\n print(f"child process {input.a}")\n return {"status": input.a}\n\n\n@child_wf.task(parents=[process])\nasync def process2(input: ChildInput, ctx: Context) -> dict[str, str]:\n process_output = ctx.task_output(process)\n a = process_output["status"]\n\n return {"status2": a + "2"}\n\n\n\nchild_wf.create_bulk_run_item()\n\n\ndef main() -> None:\n worker = hatchet.worker("fanout-worker", slots=40, workflows=[parent_wf, child_wf])\n worker.start()\n\n\nif __name__ == "__main__":\n main()\n', source: 'out/python/fanout/worker.py', blocks: { fanoutparent: { diff --git a/frontend/app/src/next/lib/docs/generated/snips/python/fanout_sync/test_fanout_sync.ts b/frontend/app/src/next/lib/docs/generated/snips/python/fanout_sync/test_fanout_sync.ts index fd585528d..38f6cbd79 100644 --- a/frontend/app/src/next/lib/docs/generated/snips/python/fanout_sync/test_fanout_sync.ts +++ b/frontend/app/src/next/lib/docs/generated/snips/python/fanout_sync/test_fanout_sync.ts @@ -3,7 +3,7 @@ import { Snippet } from '@/next/lib/docs/generated/snips/types'; const snippet: Snippet = { language: 'python', content: - 'from examples.fanout_sync.worker import ParentInput, sync_fanout_parent\n\n\ndef test_run() -> None:\n N = 2\n\n result = sync_fanout_parent.run(ParentInput(n=N))\n\n assert len(result["spawn"]["results"]) == N\n', + 'import asyncio\nfrom uuid import uuid4\n\nimport pytest\n\nfrom examples.fanout_sync.worker import ParentInput, sync_fanout_parent\nfrom hatchet_sdk import Hatchet, TriggerWorkflowOptions\n\n\ndef test_run() -> None:\n N = 2\n\n result = sync_fanout_parent.run(ParentInput(n=N))\n\n assert len(result["spawn"]["results"]) == N\n\n\n@pytest.mark.asyncio(loop_scope="session")\nasync def 
test_additional_metadata_propagation_sync(hatchet: Hatchet) -> None:\n test_run_id = uuid4().hex\n\n ref = await sync_fanout_parent.aio_run_no_wait(\n ParentInput(n=2),\n options=TriggerWorkflowOptions(\n additional_metadata={"test_run_id": test_run_id}\n ),\n )\n\n await ref.aio_result()\n await asyncio.sleep(1)\n\n runs = await hatchet.runs.aio_list(\n parent_task_external_id=ref.workflow_run_id,\n additional_metadata={"test_run_id": test_run_id},\n )\n\n print(runs.model_dump_json(indent=2))\n\n assert runs.rows\n\n """Assert that the additional metadata is propagated to the child runs."""\n for run in runs.rows:\n assert run.additional_metadata\n assert run.additional_metadata["test_run_id"] == test_run_id\n\n assert run.children\n for child in run.children:\n assert child.additional_metadata\n assert child.additional_metadata["test_run_id"] == test_run_id\n', source: 'out/python/fanout_sync/test_fanout_sync.py', blocks: {}, highlights: {}, diff --git a/frontend/app/src/next/lib/docs/generated/snips/python/fanout_sync/worker.ts b/frontend/app/src/next/lib/docs/generated/snips/python/fanout_sync/worker.ts index 31bfe62c5..f6e78da69 100644 --- a/frontend/app/src/next/lib/docs/generated/snips/python/fanout_sync/worker.ts +++ b/frontend/app/src/next/lib/docs/generated/snips/python/fanout_sync/worker.ts @@ -3,7 +3,7 @@ import { Snippet } from '@/next/lib/docs/generated/snips/types'; const snippet: Snippet = { language: 'python', content: - 'from datetime import timedelta\nfrom typing import Any\n\nfrom pydantic import BaseModel\n\nfrom hatchet_sdk import Context, Hatchet, TriggerWorkflowOptions\n\nhatchet = Hatchet(debug=True)\n\n\nclass ParentInput(BaseModel):\n n: int = 5\n\n\nclass ChildInput(BaseModel):\n a: str\n\n\nsync_fanout_parent = hatchet.workflow(\n name="SyncFanoutParent", input_validator=ParentInput\n)\nsync_fanout_child = hatchet.workflow(name="SyncFanoutChild", input_validator=ChildInput)\n\n\n@sync_fanout_parent.task(execution_timeout=timedelta(minutes=5))\ndef spawn(input: ParentInput, ctx: Context) -> dict[str, list[dict[str, Any]]]:\n print("spawning child")\n\n results = sync_fanout_child.run_many(\n [\n sync_fanout_child.create_bulk_run_item(\n input=ChildInput(a=str(i)),\n key=f"child{i}",\n options=TriggerWorkflowOptions(additional_metadata={"hello": "earth"}),\n )\n for i in range(input.n)\n ],\n )\n\n print(f"results {results}")\n\n return {"results": results}\n\n\n@sync_fanout_child.task()\ndef process(input: ChildInput, ctx: Context) -> dict[str, str]:\n return {"status": "success " + input.a}\n\n\ndef main() -> None:\n worker = hatchet.worker(\n "sync-fanout-worker",\n slots=40,\n workflows=[sync_fanout_parent, sync_fanout_child],\n )\n worker.start()\n\n\nif __name__ == "__main__":\n main()\n', + 'from datetime import timedelta\nfrom typing import Any\n\nfrom pydantic import BaseModel\n\nfrom hatchet_sdk import Context, Hatchet, TriggerWorkflowOptions\n\nhatchet = Hatchet(debug=True)\n\n\nclass ParentInput(BaseModel):\n n: int = 5\n\n\nclass ChildInput(BaseModel):\n a: str\n\n\nsync_fanout_parent = hatchet.workflow(\n name="SyncFanoutParent", input_validator=ParentInput\n)\nsync_fanout_child = hatchet.workflow(name="SyncFanoutChild", input_validator=ChildInput)\n\n\n@sync_fanout_parent.task(execution_timeout=timedelta(minutes=5))\ndef spawn(input: ParentInput, ctx: Context) -> dict[str, list[dict[str, Any]]]:\n print("spawning child")\n\n results = sync_fanout_child.run_many(\n [\n sync_fanout_child.create_bulk_run_item(\n input=ChildInput(a=str(i)),\n 
key=f"child{i}",\n options=TriggerWorkflowOptions(additional_metadata={"hello": "earth"}),\n )\n for i in range(input.n)\n ],\n )\n\n print(f"results {results}")\n\n return {"results": results}\n\n\n@sync_fanout_child.task()\ndef process(input: ChildInput, ctx: Context) -> dict[str, str]:\n return {"status": "success " + input.a}\n\n\n@sync_fanout_child.task(parents=[process])\ndef process2(input: ChildInput, ctx: Context) -> dict[str, str]:\n process_output = ctx.task_output(process)\n a = process_output["status"]\n\n return {"status2": a + "2"}\n\n\ndef main() -> None:\n worker = hatchet.worker(\n "sync-fanout-worker",\n slots=40,\n workflows=[sync_fanout_parent, sync_fanout_child],\n )\n worker.start()\n\n\nif __name__ == "__main__":\n main()\n', source: 'out/python/fanout_sync/worker.py', blocks: {}, highlights: {}, diff --git a/frontend/app/src/next/lib/docs/generated/snips/python/index.ts b/frontend/app/src/next/lib/docs/generated/snips/python/index.ts index 65cc48fbe..b9af0e479 100644 --- a/frontend/app/src/next/lib/docs/generated/snips/python/index.ts +++ b/frontend/app/src/next/lib/docs/generated/snips/python/index.ts @@ -36,11 +36,13 @@ import * as priority from './priority'; import * as quickstart from './quickstart'; import * as rate_limit from './rate_limit'; import * as retries from './retries'; +import * as return_exceptions from './return_exceptions'; import * as scheduled from './scheduled'; import * as simple from './simple'; import * as sticky_workers from './sticky_workers'; import * as streaming from './streaming'; import * as timeout from './timeout'; +import * as unit_testing from './unit_testing'; import * as worker_existing_loop from './worker_existing_loop'; import * as workflow_registration from './workflow_registration'; @@ -82,10 +84,12 @@ export { priority }; export { quickstart }; export { rate_limit }; export { retries }; +export { return_exceptions }; export { scheduled }; export { simple }; export { sticky_workers }; export { streaming }; export { timeout }; +export { unit_testing }; export { worker_existing_loop }; export { workflow_registration }; diff --git a/frontend/app/src/next/lib/docs/generated/snips/python/rate_limit/index.ts b/frontend/app/src/next/lib/docs/generated/snips/python/rate_limit/index.ts index 23d4c4e03..a0208eeb6 100644 --- a/frontend/app/src/next/lib/docs/generated/snips/python/rate_limit/index.ts +++ b/frontend/app/src/next/lib/docs/generated/snips/python/rate_limit/index.ts @@ -1,9 +1,7 @@ import dynamic from './dynamic'; -import test_rate_limit from './test_rate_limit'; import trigger from './trigger'; import worker from './worker'; export { dynamic }; -export { test_rate_limit }; export { trigger }; export { worker }; diff --git a/frontend/app/src/next/lib/docs/generated/snips/python/rate_limit/test_rate_limit.ts b/frontend/app/src/next/lib/docs/generated/snips/python/rate_limit/test_rate_limit.ts deleted file mode 100644 index 732493d40..000000000 --- a/frontend/app/src/next/lib/docs/generated/snips/python/rate_limit/test_rate_limit.ts +++ /dev/null @@ -1,12 +0,0 @@ -import { Snippet } from '@/next/lib/docs/generated/snips/types'; - -const snippet: Snippet = { - language: 'python', - content: - 'import asyncio\nimport time\n\nimport pytest\n\nfrom examples.rate_limit.worker import rate_limit_workflow\n\n\n@pytest.mark.skip(reason="The timing for this test is not reliable")\n@pytest.mark.asyncio(loop_scope="session")\nasync def test_run() -> None:\n\n run1 = rate_limit_workflow.run_no_wait()\n run2 = 
rate_limit_workflow.run_no_wait()\n run3 = rate_limit_workflow.run_no_wait()\n\n start_time = time.time()\n\n await asyncio.gather(run1.aio_result(), run2.aio_result(), run3.aio_result())\n\n end_time = time.time()\n\n total_time = end_time - start_time\n\n assert (\n 1 <= total_time <= 5\n ), f"Expected runtime to be a bit more than 1 seconds, but it took {total_time:.2f} seconds"\n', - source: 'out/python/rate_limit/test_rate_limit.py', - blocks: {}, - highlights: {}, -}; - -export default snippet; diff --git a/frontend/app/src/next/lib/docs/generated/snips/python/return_exceptions/index.ts b/frontend/app/src/next/lib/docs/generated/snips/python/return_exceptions/index.ts new file mode 100644 index 000000000..0d75b755c --- /dev/null +++ b/frontend/app/src/next/lib/docs/generated/snips/python/return_exceptions/index.ts @@ -0,0 +1,5 @@ +import test_return_exceptions from './test_return_exceptions'; +import worker from './worker'; + +export { test_return_exceptions }; +export { worker }; diff --git a/frontend/app/src/next/lib/docs/generated/snips/python/return_exceptions/test_return_exceptions.ts b/frontend/app/src/next/lib/docs/generated/snips/python/return_exceptions/test_return_exceptions.ts new file mode 100644 index 000000000..1aabaff71 --- /dev/null +++ b/frontend/app/src/next/lib/docs/generated/snips/python/return_exceptions/test_return_exceptions.ts @@ -0,0 +1,12 @@ +import { Snippet } from '@/next/lib/docs/generated/snips/types'; + +const snippet: Snippet = { + language: 'python', + content: + 'import asyncio\n\nimport pytest\n\nfrom examples.return_exceptions.worker import Input, return_exceptions_task\n\n\n@pytest.mark.asyncio(loop_scope="session")\nasync def test_return_exceptions_async() -> None:\n results = await return_exceptions_task.aio_run_many(\n [\n return_exceptions_task.create_bulk_run_item(input=Input(index=i))\n for i in range(10)\n ],\n return_exceptions=True,\n )\n\n for i, result in enumerate(results):\n if i % 2 == 0:\n assert isinstance(result, Exception)\n assert f"error in task with index {i}" in str(result)\n else:\n assert result == {"message": "this is a successful task."}\n\n\ndef test_return_exceptions_sync() -> None:\n results = return_exceptions_task.run_many(\n [\n return_exceptions_task.create_bulk_run_item(input=Input(index=i))\n for i in range(10)\n ],\n return_exceptions=True,\n )\n\n for i, result in enumerate(results):\n if i % 2 == 0:\n assert isinstance(result, Exception)\n assert f"error in task with index {i}" in str(result)\n else:\n assert result == {"message": "this is a successful task."}\n', + source: 'out/python/return_exceptions/test_return_exceptions.py', + blocks: {}, + highlights: {}, +}; + +export default snippet; diff --git a/frontend/app/src/next/lib/docs/generated/snips/python/return_exceptions/worker.ts b/frontend/app/src/next/lib/docs/generated/snips/python/return_exceptions/worker.ts new file mode 100644 index 000000000..ca09ca5e2 --- /dev/null +++ b/frontend/app/src/next/lib/docs/generated/snips/python/return_exceptions/worker.ts @@ -0,0 +1,12 @@ +import { Snippet } from '@/next/lib/docs/generated/snips/types'; + +const snippet: Snippet = { + language: 'python', + content: + 'from pydantic import BaseModel\n\nfrom hatchet_sdk import Context, EmptyModel, Hatchet\n\nhatchet = Hatchet()\n\n\nclass Input(EmptyModel):\n index: int\n\n\n@hatchet.task(input_validator=Input)\nasync def return_exceptions_task(input: Input, ctx: Context) -> dict[str, str]:\n if input.index % 2 == 0:\n raise ValueError(f"error in task with index 
{input.index}")\n\n return {"message": "this is a successful task."}\n', + source: 'out/python/return_exceptions/worker.py', + blocks: {}, + highlights: {}, +}; + +export default snippet; diff --git a/frontend/app/src/next/lib/docs/generated/snips/python/timeout/worker.ts b/frontend/app/src/next/lib/docs/generated/snips/python/timeout/worker.ts index 0ae2aa116..81943e7e5 100644 --- a/frontend/app/src/next/lib/docs/generated/snips/python/timeout/worker.ts +++ b/frontend/app/src/next/lib/docs/generated/snips/python/timeout/worker.ts @@ -3,7 +3,7 @@ import { Snippet } from '@/next/lib/docs/generated/snips/types'; const snippet: Snippet = { language: 'python', content: - 'import time\nfrom datetime import timedelta\n\nfrom hatchet_sdk import Context, EmptyModel, Hatchet, TaskDefaults\n\nhatchet = Hatchet(debug=True)\n\n# > ScheduleTimeout\ntimeout_wf = hatchet.workflow(\n name="TimeoutWorkflow",\n task_defaults=TaskDefaults(execution_timeout=timedelta(minutes=2)),\n)\n\n\n# > ExecutionTimeout\n# 👀 Specify an execution timeout on a task\n@timeout_wf.task(\n execution_timeout=timedelta(seconds=4), schedule_timeout=timedelta(minutes=10)\n)\ndef timeout_task(input: EmptyModel, ctx: Context) -> dict[str, str]:\n time.sleep(5)\n return {"status": "success"}\n\n\n\nrefresh_timeout_wf = hatchet.workflow(name="RefreshTimeoutWorkflow")\n\n\n# > RefreshTimeout\n@refresh_timeout_wf.task(execution_timeout=timedelta(seconds=4))\ndef refresh_task(input: EmptyModel, ctx: Context) -> dict[str, str]:\n\n ctx.refresh_timeout(timedelta(seconds=10))\n time.sleep(5)\n\n return {"status": "success"}\n\n\n\n\ndef main() -> None:\n worker = hatchet.worker(\n "timeout-worker", slots=4, workflows=[timeout_wf, refresh_timeout_wf]\n )\n\n worker.start()\n\n\nif __name__ == "__main__":\n main()\n', + 'import time\nfrom datetime import timedelta\n\nfrom hatchet_sdk import Context, EmptyModel, Hatchet, TaskDefaults\n\nhatchet = Hatchet(debug=True)\n\n# > ScheduleTimeout\ntimeout_wf = hatchet.workflow(\n name="TimeoutWorkflow",\n task_defaults=TaskDefaults(execution_timeout=timedelta(minutes=2)),\n)\n\n\n# > ExecutionTimeout\n# 👀 Specify an execution timeout on a task\n@timeout_wf.task(\n execution_timeout=timedelta(seconds=5), schedule_timeout=timedelta(minutes=10)\n)\ndef timeout_task(input: EmptyModel, ctx: Context) -> dict[str, str]:\n time.sleep(30)\n return {"status": "success"}\n\n\n\nrefresh_timeout_wf = hatchet.workflow(name="RefreshTimeoutWorkflow")\n\n\n# > RefreshTimeout\n@refresh_timeout_wf.task(execution_timeout=timedelta(seconds=4))\ndef refresh_task(input: EmptyModel, ctx: Context) -> dict[str, str]:\n ctx.refresh_timeout(timedelta(seconds=10))\n time.sleep(5)\n\n return {"status": "success"}\n\n\n\n\ndef main() -> None:\n worker = hatchet.worker(\n "timeout-worker", slots=4, workflows=[timeout_wf, refresh_timeout_wf]\n )\n\n worker.start()\n\n\nif __name__ == "__main__":\n main()\n', source: 'out/python/timeout/worker.py', blocks: { scheduletimeout: { @@ -16,7 +16,7 @@ const snippet: Snippet = { }, refreshtimeout: { start: 30, - stop: 38, + stop: 37, }, }, highlights: {}, diff --git a/frontend/app/src/next/lib/docs/generated/snips/python/unit_testing/index.ts b/frontend/app/src/next/lib/docs/generated/snips/python/unit_testing/index.ts new file mode 100644 index 000000000..7b1b8c337 --- /dev/null +++ b/frontend/app/src/next/lib/docs/generated/snips/python/unit_testing/index.ts @@ -0,0 +1,5 @@ +import test_unit from './test_unit'; +import workflows from './workflows'; + +export { test_unit }; +export { 
workflows }; diff --git a/frontend/app/src/next/lib/docs/generated/snips/python/unit_testing/test_unit.ts b/frontend/app/src/next/lib/docs/generated/snips/python/unit_testing/test_unit.ts new file mode 100644 index 000000000..d7a34cd47 --- /dev/null +++ b/frontend/app/src/next/lib/docs/generated/snips/python/unit_testing/test_unit.ts @@ -0,0 +1,12 @@ +import { Snippet } from '@/next/lib/docs/generated/snips/types'; + +const snippet: Snippet = { + language: 'python', + content: + 'import pytest\n\nfrom examples.unit_testing.workflows import (\n Lifespan,\n UnitTestInput,\n UnitTestOutput,\n async_complex_workflow,\n async_simple_workflow,\n async_standalone,\n durable_async_complex_workflow,\n durable_async_simple_workflow,\n durable_async_standalone,\n durable_sync_complex_workflow,\n durable_sync_simple_workflow,\n durable_sync_standalone,\n start,\n sync_complex_workflow,\n sync_simple_workflow,\n sync_standalone,\n)\nfrom hatchet_sdk import Task\n\n\n@pytest.mark.parametrize(\n "func",\n [\n sync_standalone,\n durable_sync_standalone,\n sync_simple_workflow,\n durable_sync_simple_workflow,\n sync_complex_workflow,\n durable_sync_complex_workflow,\n ],\n)\ndef test_simple_unit_sync(func: Task[UnitTestInput, UnitTestOutput]) -> None:\n input = UnitTestInput(key="test_key", number=42)\n additional_metadata = {"meta_key": "meta_value"}\n lifespan = Lifespan(mock_db_url="sqlite:///:memory:")\n retry_count = 1\n\n expected_output = UnitTestOutput(\n key=input.key,\n number=input.number,\n additional_metadata=additional_metadata,\n retry_count=retry_count,\n mock_db_url=lifespan.mock_db_url,\n )\n\n assert (\n func.mock_run(\n input=input,\n additional_metadata=additional_metadata,\n lifespan=lifespan,\n retry_count=retry_count,\n parent_outputs={start.name: expected_output.model_dump()},\n )\n == expected_output\n )\n\n\n@pytest.mark.parametrize(\n "func",\n [\n async_standalone,\n durable_async_standalone,\n async_simple_workflow,\n durable_async_simple_workflow,\n async_complex_workflow,\n durable_async_complex_workflow,\n ],\n)\n@pytest.mark.asyncio(loop_scope="session")\nasync def test_simple_unit_async(func: Task[UnitTestInput, UnitTestOutput]) -> None:\n input = UnitTestInput(key="test_key", number=42)\n additional_metadata = {"meta_key": "meta_value"}\n lifespan = Lifespan(mock_db_url="sqlite:///:memory:")\n retry_count = 1\n\n expected_output = UnitTestOutput(\n key=input.key,\n number=input.number,\n additional_metadata=additional_metadata,\n retry_count=retry_count,\n mock_db_url=lifespan.mock_db_url,\n )\n\n assert (\n await func.aio_mock_run(\n input=input,\n additional_metadata=additional_metadata,\n lifespan=lifespan,\n retry_count=retry_count,\n parent_outputs={start.name: expected_output.model_dump()},\n )\n == expected_output\n )\n', + source: 'out/python/unit_testing/test_unit.py', + blocks: {}, + highlights: {}, +}; + +export default snippet; diff --git a/frontend/app/src/next/lib/docs/generated/snips/python/unit_testing/workflows.ts b/frontend/app/src/next/lib/docs/generated/snips/python/unit_testing/workflows.ts new file mode 100644 index 000000000..6819ecfc0 --- /dev/null +++ b/frontend/app/src/next/lib/docs/generated/snips/python/unit_testing/workflows.ts @@ -0,0 +1,12 @@ +import { Snippet } from '@/next/lib/docs/generated/snips/types'; + +const snippet: Snippet = { + language: 'python', + content: + 'from typing import cast\n\nfrom pydantic import BaseModel\n\nfrom hatchet_sdk import Context, DurableContext, EmptyModel, Hatchet\n\n\nclass UnitTestInput(BaseModel):\n 
key: str\n number: int\n\n\nclass Lifespan(BaseModel):\n mock_db_url: str\n\n\nclass UnitTestOutput(UnitTestInput, Lifespan):\n additional_metadata: dict[str, str]\n retry_count: int\n\n\nhatchet = Hatchet()\n\n\n@hatchet.task(input_validator=UnitTestInput)\ndef sync_standalone(input: UnitTestInput, ctx: Context) -> UnitTestOutput:\n return UnitTestOutput(\n key=input.key,\n number=input.number,\n additional_metadata=ctx.additional_metadata,\n retry_count=ctx.retry_count,\n mock_db_url=cast(Lifespan, ctx.lifespan).mock_db_url,\n )\n\n\n@hatchet.task(input_validator=UnitTestInput)\nasync def async_standalone(input: UnitTestInput, ctx: Context) -> UnitTestOutput:\n return UnitTestOutput(\n key=input.key,\n number=input.number,\n additional_metadata=ctx.additional_metadata,\n retry_count=ctx.retry_count,\n mock_db_url=cast(Lifespan, ctx.lifespan).mock_db_url,\n )\n\n\n@hatchet.durable_task(input_validator=UnitTestInput)\ndef durable_sync_standalone(\n input: UnitTestInput, ctx: DurableContext\n) -> UnitTestOutput:\n return UnitTestOutput(\n key=input.key,\n number=input.number,\n additional_metadata=ctx.additional_metadata,\n retry_count=ctx.retry_count,\n mock_db_url=cast(Lifespan, ctx.lifespan).mock_db_url,\n )\n\n\n@hatchet.durable_task(input_validator=UnitTestInput)\nasync def durable_async_standalone(\n input: UnitTestInput, ctx: DurableContext\n) -> UnitTestOutput:\n return UnitTestOutput(\n key=input.key,\n number=input.number,\n additional_metadata=ctx.additional_metadata,\n retry_count=ctx.retry_count,\n mock_db_url=cast(Lifespan, ctx.lifespan).mock_db_url,\n )\n\n\nsimple_workflow = hatchet.workflow(\n name="simple-unit-test-workflow", input_validator=UnitTestInput\n)\n\n\n@simple_workflow.task()\ndef sync_simple_workflow(input: UnitTestInput, ctx: Context) -> UnitTestOutput:\n return UnitTestOutput(\n key=input.key,\n number=input.number,\n additional_metadata=ctx.additional_metadata,\n retry_count=ctx.retry_count,\n mock_db_url=cast(Lifespan, ctx.lifespan).mock_db_url,\n )\n\n\n@simple_workflow.task()\nasync def async_simple_workflow(input: UnitTestInput, ctx: Context) -> UnitTestOutput:\n return UnitTestOutput(\n key=input.key,\n number=input.number,\n additional_metadata=ctx.additional_metadata,\n retry_count=ctx.retry_count,\n mock_db_url=cast(Lifespan, ctx.lifespan).mock_db_url,\n )\n\n\n@simple_workflow.durable_task()\ndef durable_sync_simple_workflow(\n input: UnitTestInput, ctx: DurableContext\n) -> UnitTestOutput:\n return UnitTestOutput(\n key=input.key,\n number=input.number,\n additional_metadata=ctx.additional_metadata,\n retry_count=ctx.retry_count,\n mock_db_url=cast(Lifespan, ctx.lifespan).mock_db_url,\n )\n\n\n@simple_workflow.durable_task()\nasync def durable_async_simple_workflow(\n input: UnitTestInput, ctx: DurableContext\n) -> UnitTestOutput:\n return UnitTestOutput(\n key=input.key,\n number=input.number,\n additional_metadata=ctx.additional_metadata,\n retry_count=ctx.retry_count,\n mock_db_url=cast(Lifespan, ctx.lifespan).mock_db_url,\n )\n\n\ncomplex_workflow = hatchet.workflow(\n name="complex-unit-test-workflow", input_validator=UnitTestInput\n)\n\n\n@complex_workflow.task()\nasync def start(input: UnitTestInput, ctx: Context) -> UnitTestOutput:\n return UnitTestOutput(\n key=input.key,\n number=input.number,\n additional_metadata=ctx.additional_metadata,\n retry_count=ctx.retry_count,\n mock_db_url=cast(Lifespan, ctx.lifespan).mock_db_url,\n )\n\n\n@complex_workflow.task(\n parents=[start],\n)\ndef sync_complex_workflow(input: UnitTestInput, ctx: 
Context) -> UnitTestOutput:\n return ctx.task_output(start)\n\n\n@complex_workflow.task(\n parents=[start],\n)\nasync def async_complex_workflow(input: UnitTestInput, ctx: Context) -> UnitTestOutput:\n return ctx.task_output(start)\n\n\n@complex_workflow.durable_task(\n parents=[start],\n)\ndef durable_sync_complex_workflow(\n input: UnitTestInput, ctx: DurableContext\n) -> UnitTestOutput:\n return ctx.task_output(start)\n\n\n@complex_workflow.durable_task(\n parents=[start],\n)\nasync def durable_async_complex_workflow(\n input: UnitTestInput, ctx: DurableContext\n) -> UnitTestOutput:\n return ctx.task_output(start)\n', + source: 'out/python/unit_testing/workflows.py', + blocks: {}, + highlights: {}, +}; + +export default snippet; diff --git a/frontend/app/src/next/lib/docs/generated/snips/python/worker.ts b/frontend/app/src/next/lib/docs/generated/snips/python/worker.ts index c7fc3ecc1..0b074eee1 100644 --- a/frontend/app/src/next/lib/docs/generated/snips/python/worker.ts +++ b/frontend/app/src/next/lib/docs/generated/snips/python/worker.ts @@ -3,7 +3,7 @@ import { Snippet } from '@/next/lib/docs/generated/snips/types'; const snippet: Snippet = { language: 'python', content: - 'from examples.affinity_workers.worker import affinity_worker_workflow\nfrom examples.bulk_fanout.worker import bulk_child_wf, bulk_parent_wf\nfrom examples.bulk_operations.worker import (\n bulk_replay_test_1,\n bulk_replay_test_2,\n bulk_replay_test_3,\n)\nfrom examples.cancellation.worker import cancellation_workflow\nfrom examples.concurrency_limit.worker import concurrency_limit_workflow\nfrom examples.concurrency_limit_rr.worker import concurrency_limit_rr_workflow\nfrom examples.concurrency_multiple_keys.worker import concurrency_multiple_keys_workflow\nfrom examples.concurrency_workflow_level.worker import (\n concurrency_workflow_level_workflow,\n)\nfrom examples.conditions.worker import task_condition_workflow\nfrom examples.dag.worker import dag_workflow\nfrom examples.dedupe.worker import dedupe_child_wf, dedupe_parent_wf\nfrom examples.durable.worker import durable_workflow\nfrom examples.events.worker import event_workflow\nfrom examples.fanout.worker import child_wf, parent_wf\nfrom examples.fanout_sync.worker import sync_fanout_child, sync_fanout_parent\nfrom examples.lifespans.simple import lifespan, lifespan_task\nfrom examples.logger.workflow import logging_workflow\nfrom examples.non_retryable.worker import non_retryable_workflow\nfrom examples.on_failure.worker import on_failure_wf, on_failure_wf_with_details\nfrom examples.simple.worker import simple, simple_durable\nfrom examples.timeout.worker import refresh_timeout_wf, timeout_wf\nfrom hatchet_sdk import Hatchet\n\nhatchet = Hatchet(debug=True)\n\n\ndef main() -> None:\n worker = hatchet.worker(\n "e2e-test-worker",\n slots=100,\n workflows=[\n affinity_worker_workflow,\n bulk_child_wf,\n bulk_parent_wf,\n concurrency_limit_workflow,\n concurrency_limit_rr_workflow,\n concurrency_multiple_keys_workflow,\n dag_workflow,\n dedupe_child_wf,\n dedupe_parent_wf,\n durable_workflow,\n child_wf,\n event_workflow,\n parent_wf,\n on_failure_wf,\n on_failure_wf_with_details,\n logging_workflow,\n timeout_wf,\n refresh_timeout_wf,\n task_condition_workflow,\n cancellation_workflow,\n sync_fanout_parent,\n sync_fanout_child,\n non_retryable_workflow,\n concurrency_workflow_level_workflow,\n lifespan_task,\n simple,\n simple_durable,\n bulk_replay_test_1,\n bulk_replay_test_2,\n bulk_replay_test_3,\n ],\n lifespan=lifespan,\n )\n\n 
worker.start()\n\n\nif __name__ == "__main__":\n main()\n', + 'from examples.affinity_workers.worker import affinity_worker_workflow\nfrom examples.bulk_fanout.worker import bulk_child_wf, bulk_parent_wf\nfrom examples.bulk_operations.worker import (\n bulk_replay_test_1,\n bulk_replay_test_2,\n bulk_replay_test_3,\n)\nfrom examples.cancellation.worker import cancellation_workflow\nfrom examples.concurrency_limit.worker import concurrency_limit_workflow\nfrom examples.concurrency_limit_rr.worker import concurrency_limit_rr_workflow\nfrom examples.concurrency_multiple_keys.worker import concurrency_multiple_keys_workflow\nfrom examples.concurrency_workflow_level.worker import (\n concurrency_workflow_level_workflow,\n)\nfrom examples.conditions.worker import task_condition_workflow\nfrom examples.dag.worker import dag_workflow\nfrom examples.dedupe.worker import dedupe_child_wf, dedupe_parent_wf\nfrom examples.durable.worker import durable_workflow\nfrom examples.events.worker import event_workflow\nfrom examples.fanout.worker import child_wf, parent_wf\nfrom examples.fanout_sync.worker import sync_fanout_child, sync_fanout_parent\nfrom examples.lifespans.simple import lifespan, lifespan_task\nfrom examples.logger.workflow import logging_workflow\nfrom examples.non_retryable.worker import non_retryable_workflow\nfrom examples.on_failure.worker import on_failure_wf, on_failure_wf_with_details\nfrom examples.return_exceptions.worker import return_exceptions_task\nfrom examples.simple.worker import simple, simple_durable\nfrom examples.timeout.worker import refresh_timeout_wf, timeout_wf\nfrom hatchet_sdk import Hatchet\n\nhatchet = Hatchet(debug=True)\n\n\ndef main() -> None:\n worker = hatchet.worker(\n "e2e-test-worker",\n slots=100,\n workflows=[\n affinity_worker_workflow,\n bulk_child_wf,\n bulk_parent_wf,\n concurrency_limit_workflow,\n concurrency_limit_rr_workflow,\n concurrency_multiple_keys_workflow,\n dag_workflow,\n dedupe_child_wf,\n dedupe_parent_wf,\n durable_workflow,\n child_wf,\n event_workflow,\n parent_wf,\n on_failure_wf,\n on_failure_wf_with_details,\n logging_workflow,\n timeout_wf,\n refresh_timeout_wf,\n task_condition_workflow,\n cancellation_workflow,\n sync_fanout_parent,\n sync_fanout_child,\n non_retryable_workflow,\n concurrency_workflow_level_workflow,\n lifespan_task,\n simple,\n simple_durable,\n bulk_replay_test_1,\n bulk_replay_test_2,\n bulk_replay_test_3,\n return_exceptions_task,\n ],\n lifespan=lifespan,\n )\n\n worker.start()\n\n\nif __name__ == "__main__":\n main()\n', source: 'out/python/worker.py', blocks: {}, highlights: {}, diff --git a/frontend/docs/lib/generated/snips/python/bulk_operations/test_bulk_replay.ts b/frontend/docs/lib/generated/snips/python/bulk_operations/test_bulk_replay.ts index f025045f3..e471b9a0b 100644 --- a/frontend/docs/lib/generated/snips/python/bulk_operations/test_bulk_replay.ts +++ b/frontend/docs/lib/generated/snips/python/bulk_operations/test_bulk_replay.ts @@ -2,7 +2,7 @@ import { Snippet } from '@/lib/generated/snips/types'; const snippet: Snippet = { "language": "python", - "content": "import asyncio\nfrom datetime import datetime, timedelta, timezone\nfrom uuid import uuid4\n\nimport pytest\n\nfrom examples.bulk_operations.worker import (\n bulk_replay_test_1,\n bulk_replay_test_2,\n bulk_replay_test_3,\n)\nfrom hatchet_sdk import BulkCancelReplayOpts, Hatchet, RunFilter, TriggerWorkflowOptions\nfrom hatchet_sdk.clients.rest.models.v1_task_status import 
V1TaskStatus\n\n\n@pytest.mark.asyncio(loop_scope=\"session\")\nasync def test_bulk_replay(hatchet: Hatchet) -> None:\n test_run_id = str(uuid4())\n n = 100\n\n with pytest.raises(Exception):\n await bulk_replay_test_1.aio_run_many(\n [\n bulk_replay_test_1.create_bulk_run_item(\n options=TriggerWorkflowOptions(\n additional_metadata={\n \"test_run_id\": test_run_id,\n }\n )\n )\n for _ in range(n + 1)\n ]\n )\n\n with pytest.raises(Exception):\n await bulk_replay_test_2.aio_run_many(\n [\n bulk_replay_test_2.create_bulk_run_item(\n options=TriggerWorkflowOptions(\n additional_metadata={\n \"test_run_id\": test_run_id,\n }\n )\n )\n for _ in range((n // 2) - 1)\n ]\n )\n\n with pytest.raises(Exception):\n await bulk_replay_test_3.aio_run_many(\n [\n bulk_replay_test_3.create_bulk_run_item(\n options=TriggerWorkflowOptions(\n additional_metadata={\n \"test_run_id\": test_run_id,\n }\n )\n )\n for _ in range((n // 2) - 2)\n ]\n )\n\n workflow_ids = [\n bulk_replay_test_1.id,\n bulk_replay_test_2.id,\n bulk_replay_test_3.id,\n ]\n\n ## Should result in two batches of replays\n await hatchet.runs.aio_bulk_replay(\n opts=BulkCancelReplayOpts(\n filters=RunFilter(\n workflow_ids=workflow_ids,\n since=datetime.now(tz=timezone.utc) - timedelta(minutes=2),\n additional_metadata={\"test_run_id\": test_run_id},\n )\n )\n )\n\n await asyncio.sleep(5)\n\n runs = await hatchet.runs.aio_list(\n workflow_ids=workflow_ids,\n since=datetime.now(tz=timezone.utc) - timedelta(minutes=2),\n additional_metadata={\"test_run_id\": test_run_id},\n limit=1000,\n )\n\n assert len(runs.rows) == n + 1 + (n // 2 - 1) + (n // 2 - 2)\n\n for run in runs.rows:\n assert run.status == V1TaskStatus.COMPLETED\n assert run.retry_count == 1\n assert run.attempt == 2\n\n assert (\n len([r for r in runs.rows if r.workflow_id == bulk_replay_test_1.id]) == n + 1\n )\n assert (\n len([r for r in runs.rows if r.workflow_id == bulk_replay_test_2.id])\n == n // 2 - 1\n )\n assert (\n len([r for r in runs.rows if r.workflow_id == bulk_replay_test_3.id])\n == n // 2 - 2\n )\n", + "content": "import asyncio\nfrom datetime import datetime, timedelta, timezone\nfrom uuid import uuid4\n\nimport pytest\n\nfrom examples.bulk_operations.worker import (\n bulk_replay_test_1,\n bulk_replay_test_2,\n bulk_replay_test_3,\n)\nfrom hatchet_sdk import BulkCancelReplayOpts, Hatchet, RunFilter, TriggerWorkflowOptions\nfrom hatchet_sdk.clients.rest.models.v1_task_status import V1TaskStatus\n\n\n@pytest.mark.asyncio(loop_scope=\"session\")\nasync def test_bulk_replay(hatchet: Hatchet) -> None:\n test_run_id = str(uuid4())\n n = 100\n\n with pytest.raises(Exception):\n await bulk_replay_test_1.aio_run_many(\n [\n bulk_replay_test_1.create_bulk_run_item(\n options=TriggerWorkflowOptions(\n additional_metadata={\n \"test_run_id\": test_run_id,\n }\n )\n )\n for _ in range(n + 1)\n ]\n )\n\n with pytest.raises(Exception):\n await bulk_replay_test_2.aio_run_many(\n [\n bulk_replay_test_2.create_bulk_run_item(\n options=TriggerWorkflowOptions(\n additional_metadata={\n \"test_run_id\": test_run_id,\n }\n )\n )\n for _ in range((n // 2) - 1)\n ]\n )\n\n with pytest.raises(Exception):\n await bulk_replay_test_3.aio_run_many(\n [\n bulk_replay_test_3.create_bulk_run_item(\n options=TriggerWorkflowOptions(\n additional_metadata={\n \"test_run_id\": test_run_id,\n }\n )\n )\n for _ in range((n // 2) - 2)\n ]\n )\n\n workflow_ids = [\n bulk_replay_test_1.id,\n bulk_replay_test_2.id,\n bulk_replay_test_3.id,\n ]\n\n ## Should result in two batches of replays\n await 
hatchet.runs.aio_bulk_replay(\n opts=BulkCancelReplayOpts(\n filters=RunFilter(\n workflow_ids=workflow_ids,\n since=datetime.now(tz=timezone.utc) - timedelta(minutes=2),\n additional_metadata={\"test_run_id\": test_run_id},\n )\n )\n )\n\n await asyncio.sleep(10)\n\n runs = await hatchet.runs.aio_list(\n workflow_ids=workflow_ids,\n since=datetime.now(tz=timezone.utc) - timedelta(minutes=2),\n additional_metadata={\"test_run_id\": test_run_id},\n limit=1000,\n )\n\n assert len(runs.rows) == n + 1 + (n // 2 - 1) + (n // 2 - 2)\n\n for run in runs.rows:\n assert run.status == V1TaskStatus.COMPLETED\n assert run.retry_count == 1\n assert run.attempt == 2\n\n assert (\n len([r for r in runs.rows if r.workflow_id == bulk_replay_test_1.id]) == n + 1\n )\n assert (\n len([r for r in runs.rows if r.workflow_id == bulk_replay_test_2.id])\n == n // 2 - 1\n )\n assert (\n len([r for r in runs.rows if r.workflow_id == bulk_replay_test_3.id])\n == n // 2 - 2\n )\n", "source": "out/python/bulk_operations/test_bulk_replay.py", "blocks": {}, "highlights": {} diff --git a/frontend/docs/lib/generated/snips/python/concurrency_limit/index.ts b/frontend/docs/lib/generated/snips/python/concurrency_limit/index.ts index 4c00e94c1..c443f556e 100644 --- a/frontend/docs/lib/generated/snips/python/concurrency_limit/index.ts +++ b/frontend/docs/lib/generated/snips/python/concurrency_limit/index.ts @@ -1,7 +1,5 @@ -import test_concurrency_limit from './test_concurrency_limit'; import trigger from './trigger'; import worker from './worker'; -export { test_concurrency_limit } export { trigger } export { worker } diff --git a/frontend/docs/lib/generated/snips/python/concurrency_limit/test_concurrency_limit.ts b/frontend/docs/lib/generated/snips/python/concurrency_limit/test_concurrency_limit.ts deleted file mode 100644 index 98b6ba737..000000000 --- a/frontend/docs/lib/generated/snips/python/concurrency_limit/test_concurrency_limit.ts +++ /dev/null @@ -1,11 +0,0 @@ -import { Snippet } from '@/lib/generated/snips/types'; - -const snippet: Snippet = { - "language": "python", - "content": "import pytest\n\nfrom examples.concurrency_limit.worker import WorkflowInput, concurrency_limit_workflow\nfrom hatchet_sdk.workflow_run import WorkflowRunRef\n\n\n@pytest.mark.asyncio(loop_scope=\"session\")\n@pytest.mark.skip(reason=\"The timing for this test is not reliable\")\nasync def test_run() -> None:\n num_runs = 6\n runs: list[WorkflowRunRef] = []\n\n # Start all runs\n for i in range(1, num_runs + 1):\n run = concurrency_limit_workflow.run_no_wait(\n WorkflowInput(run=i, group_key=str(i))\n )\n runs.append(run)\n\n # Wait for all results\n successful_runs = []\n cancelled_runs = []\n\n # Process each run individually\n for i, run in enumerate(runs, start=1):\n try:\n result = await run.aio_result()\n successful_runs.append((i, result))\n except Exception as e:\n if \"CANCELLED_BY_CONCURRENCY_LIMIT\" in str(e):\n cancelled_runs.append((i, str(e)))\n else:\n raise # Re-raise if it's an unexpected error\n\n # Check that we have the correct number of successful and cancelled runs\n assert (\n len(successful_runs) == 5\n ), f\"Expected 5 successful runs, got {len(successful_runs)}\"\n assert (\n len(cancelled_runs) == 1\n ), f\"Expected 1 cancelled run, got {len(cancelled_runs)}\"\n", - "source": "out/python/concurrency_limit/test_concurrency_limit.py", - "blocks": {}, - "highlights": {} -}; - -export default snippet; diff --git a/frontend/docs/lib/generated/snips/python/fanout/test_fanout.ts 
b/frontend/docs/lib/generated/snips/python/fanout/test_fanout.ts index 6fb0d17b1..fdefe522b 100644 --- a/frontend/docs/lib/generated/snips/python/fanout/test_fanout.ts +++ b/frontend/docs/lib/generated/snips/python/fanout/test_fanout.ts @@ -2,7 +2,7 @@ import { Snippet } from '@/lib/generated/snips/types'; const snippet: Snippet = { "language": "python", - "content": "import pytest\n\nfrom examples.fanout.worker import ParentInput, parent_wf\n\n\n@pytest.mark.asyncio(loop_scope=\"session\")\nasync def test_run() -> None:\n result = await parent_wf.aio_run(ParentInput(n=2))\n\n assert len(result[\"spawn\"][\"results\"]) == 2\n", + "content": "import asyncio\nfrom uuid import uuid4\n\nimport pytest\n\nfrom examples.fanout.worker import ParentInput, parent_wf\nfrom hatchet_sdk import Hatchet, TriggerWorkflowOptions\n\n\n@pytest.mark.asyncio(loop_scope=\"session\")\nasync def test_run(hatchet: Hatchet) -> None:\n ref = await parent_wf.aio_run_no_wait(\n ParentInput(n=2),\n )\n\n result = await ref.aio_result()\n\n assert len(result[\"spawn\"][\"results\"]) == 2\n\n\n@pytest.mark.asyncio(loop_scope=\"session\")\nasync def test_additional_metadata_propagation(hatchet: Hatchet) -> None:\n test_run_id = uuid4().hex\n\n ref = await parent_wf.aio_run_no_wait(\n ParentInput(n=2),\n options=TriggerWorkflowOptions(\n additional_metadata={\"test_run_id\": test_run_id}\n ),\n )\n\n await ref.aio_result()\n await asyncio.sleep(1)\n\n runs = await hatchet.runs.aio_list(\n parent_task_external_id=ref.workflow_run_id,\n additional_metadata={\"test_run_id\": test_run_id},\n )\n\n assert runs.rows\n\n \"\"\"Assert that the additional metadata is propagated to the child runs.\"\"\"\n for run in runs.rows:\n assert run.additional_metadata\n assert run.additional_metadata[\"test_run_id\"] == test_run_id\n\n assert run.children\n for child in run.children:\n assert child.additional_metadata\n assert child.additional_metadata[\"test_run_id\"] == test_run_id\n", "source": "out/python/fanout/test_fanout.py", "blocks": {}, "highlights": {} diff --git a/frontend/docs/lib/generated/snips/python/fanout/worker.ts b/frontend/docs/lib/generated/snips/python/fanout/worker.ts index 263068b33..54ec596ef 100644 --- a/frontend/docs/lib/generated/snips/python/fanout/worker.ts +++ b/frontend/docs/lib/generated/snips/python/fanout/worker.ts @@ -2,7 +2,7 @@ import { Snippet } from '@/lib/generated/snips/types'; const snippet: Snippet = { "language": "python", - "content": "from datetime import timedelta\nfrom typing import Any\n\nfrom pydantic import BaseModel\n\nfrom hatchet_sdk import Context, Hatchet, TriggerWorkflowOptions\n\nhatchet = Hatchet(debug=True)\n\n\n# > FanoutParent\nclass ParentInput(BaseModel):\n n: int = 100\n\n\nclass ChildInput(BaseModel):\n a: str\n\n\nparent_wf = hatchet.workflow(name=\"FanoutParent\", input_validator=ParentInput)\nchild_wf = hatchet.workflow(name=\"FanoutChild\", input_validator=ChildInput)\n\n\n@parent_wf.task(execution_timeout=timedelta(minutes=5))\nasync def spawn(input: ParentInput, ctx: Context) -> dict[str, Any]:\n print(\"spawning child\")\n\n result = await child_wf.aio_run_many(\n [\n child_wf.create_bulk_run_item(\n input=ChildInput(a=str(i)),\n options=TriggerWorkflowOptions(\n additional_metadata={\"hello\": \"earth\"}, key=f\"child{i}\"\n ),\n )\n for i in range(input.n)\n ]\n )\n\n print(f\"results {result}\")\n\n return {\"results\": result}\n\n\n\n\n# > FanoutChild\n@child_wf.task()\ndef process(input: ChildInput, ctx: Context) -> dict[str, str]:\n print(f\"child process 
{input.a}\")\n return {\"status\": input.a}\n\n\n@child_wf.task(parents=[process])\ndef process2(input: ChildInput, ctx: Context) -> dict[str, str]:\n process_output = ctx.task_output(process)\n a = process_output[\"status\"]\n\n return {\"status2\": a + \"2\"}\n\n\n\nchild_wf.create_bulk_run_item()\n\n\ndef main() -> None:\n worker = hatchet.worker(\"fanout-worker\", slots=40, workflows=[parent_wf, child_wf])\n worker.start()\n\n\nif __name__ == \"__main__\":\n main()\n", + "content": "from datetime import timedelta\nfrom typing import Any\n\nfrom pydantic import BaseModel\n\nfrom hatchet_sdk import Context, Hatchet, TriggerWorkflowOptions\n\nhatchet = Hatchet(debug=True)\n\n\n# > FanoutParent\nclass ParentInput(BaseModel):\n n: int = 100\n\n\nclass ChildInput(BaseModel):\n a: str\n\n\nparent_wf = hatchet.workflow(name=\"FanoutParent\", input_validator=ParentInput)\nchild_wf = hatchet.workflow(name=\"FanoutChild\", input_validator=ChildInput)\n\n\n@parent_wf.task(execution_timeout=timedelta(minutes=5))\nasync def spawn(input: ParentInput, ctx: Context) -> dict[str, Any]:\n print(\"spawning child\")\n\n result = await child_wf.aio_run_many(\n [\n child_wf.create_bulk_run_item(\n input=ChildInput(a=str(i)),\n options=TriggerWorkflowOptions(\n additional_metadata={\"hello\": \"earth\"}, key=f\"child{i}\"\n ),\n )\n for i in range(input.n)\n ],\n )\n\n print(f\"results {result}\")\n\n return {\"results\": result}\n\n\n\n\n# > FanoutChild\n@child_wf.task()\nasync def process(input: ChildInput, ctx: Context) -> dict[str, str]:\n print(f\"child process {input.a}\")\n return {\"status\": input.a}\n\n\n@child_wf.task(parents=[process])\nasync def process2(input: ChildInput, ctx: Context) -> dict[str, str]:\n process_output = ctx.task_output(process)\n a = process_output[\"status\"]\n\n return {\"status2\": a + \"2\"}\n\n\n\nchild_wf.create_bulk_run_item()\n\n\ndef main() -> None:\n worker = hatchet.worker(\"fanout-worker\", slots=40, workflows=[parent_wf, child_wf])\n worker.start()\n\n\nif __name__ == \"__main__\":\n main()\n", "source": "out/python/fanout/worker.py", "blocks": { "fanoutparent": { diff --git a/frontend/docs/lib/generated/snips/python/fanout_sync/test_fanout_sync.ts b/frontend/docs/lib/generated/snips/python/fanout_sync/test_fanout_sync.ts index f3c25ac13..aa3832f60 100644 --- a/frontend/docs/lib/generated/snips/python/fanout_sync/test_fanout_sync.ts +++ b/frontend/docs/lib/generated/snips/python/fanout_sync/test_fanout_sync.ts @@ -2,7 +2,7 @@ import { Snippet } from '@/lib/generated/snips/types'; const snippet: Snippet = { "language": "python", - "content": "from examples.fanout_sync.worker import ParentInput, sync_fanout_parent\n\n\ndef test_run() -> None:\n N = 2\n\n result = sync_fanout_parent.run(ParentInput(n=N))\n\n assert len(result[\"spawn\"][\"results\"]) == N\n", + "content": "import asyncio\nfrom uuid import uuid4\n\nimport pytest\n\nfrom examples.fanout_sync.worker import ParentInput, sync_fanout_parent\nfrom hatchet_sdk import Hatchet, TriggerWorkflowOptions\n\n\ndef test_run() -> None:\n N = 2\n\n result = sync_fanout_parent.run(ParentInput(n=N))\n\n assert len(result[\"spawn\"][\"results\"]) == N\n\n\n@pytest.mark.asyncio(loop_scope=\"session\")\nasync def test_additional_metadata_propagation_sync(hatchet: Hatchet) -> None:\n test_run_id = uuid4().hex\n\n ref = await sync_fanout_parent.aio_run_no_wait(\n ParentInput(n=2),\n options=TriggerWorkflowOptions(\n additional_metadata={\"test_run_id\": test_run_id}\n ),\n )\n\n await ref.aio_result()\n await 
asyncio.sleep(1)\n\n runs = await hatchet.runs.aio_list(\n parent_task_external_id=ref.workflow_run_id,\n additional_metadata={\"test_run_id\": test_run_id},\n )\n\n print(runs.model_dump_json(indent=2))\n\n assert runs.rows\n\n \"\"\"Assert that the additional metadata is propagated to the child runs.\"\"\"\n for run in runs.rows:\n assert run.additional_metadata\n assert run.additional_metadata[\"test_run_id\"] == test_run_id\n\n assert run.children\n for child in run.children:\n assert child.additional_metadata\n assert child.additional_metadata[\"test_run_id\"] == test_run_id\n", "source": "out/python/fanout_sync/test_fanout_sync.py", "blocks": {}, "highlights": {} diff --git a/frontend/docs/lib/generated/snips/python/fanout_sync/worker.ts b/frontend/docs/lib/generated/snips/python/fanout_sync/worker.ts index a2a4597f4..b5b3f6f75 100644 --- a/frontend/docs/lib/generated/snips/python/fanout_sync/worker.ts +++ b/frontend/docs/lib/generated/snips/python/fanout_sync/worker.ts @@ -2,7 +2,7 @@ import { Snippet } from '@/lib/generated/snips/types'; const snippet: Snippet = { "language": "python", - "content": "from datetime import timedelta\nfrom typing import Any\n\nfrom pydantic import BaseModel\n\nfrom hatchet_sdk import Context, Hatchet, TriggerWorkflowOptions\n\nhatchet = Hatchet(debug=True)\n\n\nclass ParentInput(BaseModel):\n n: int = 5\n\n\nclass ChildInput(BaseModel):\n a: str\n\n\nsync_fanout_parent = hatchet.workflow(\n name=\"SyncFanoutParent\", input_validator=ParentInput\n)\nsync_fanout_child = hatchet.workflow(name=\"SyncFanoutChild\", input_validator=ChildInput)\n\n\n@sync_fanout_parent.task(execution_timeout=timedelta(minutes=5))\ndef spawn(input: ParentInput, ctx: Context) -> dict[str, list[dict[str, Any]]]:\n print(\"spawning child\")\n\n results = sync_fanout_child.run_many(\n [\n sync_fanout_child.create_bulk_run_item(\n input=ChildInput(a=str(i)),\n key=f\"child{i}\",\n options=TriggerWorkflowOptions(additional_metadata={\"hello\": \"earth\"}),\n )\n for i in range(input.n)\n ],\n )\n\n print(f\"results {results}\")\n\n return {\"results\": results}\n\n\n@sync_fanout_child.task()\ndef process(input: ChildInput, ctx: Context) -> dict[str, str]:\n return {\"status\": \"success \" + input.a}\n\n\ndef main() -> None:\n worker = hatchet.worker(\n \"sync-fanout-worker\",\n slots=40,\n workflows=[sync_fanout_parent, sync_fanout_child],\n )\n worker.start()\n\n\nif __name__ == \"__main__\":\n main()\n", + "content": "from datetime import timedelta\nfrom typing import Any\n\nfrom pydantic import BaseModel\n\nfrom hatchet_sdk import Context, Hatchet, TriggerWorkflowOptions\n\nhatchet = Hatchet(debug=True)\n\n\nclass ParentInput(BaseModel):\n n: int = 5\n\n\nclass ChildInput(BaseModel):\n a: str\n\n\nsync_fanout_parent = hatchet.workflow(\n name=\"SyncFanoutParent\", input_validator=ParentInput\n)\nsync_fanout_child = hatchet.workflow(name=\"SyncFanoutChild\", input_validator=ChildInput)\n\n\n@sync_fanout_parent.task(execution_timeout=timedelta(minutes=5))\ndef spawn(input: ParentInput, ctx: Context) -> dict[str, list[dict[str, Any]]]:\n print(\"spawning child\")\n\n results = sync_fanout_child.run_many(\n [\n sync_fanout_child.create_bulk_run_item(\n input=ChildInput(a=str(i)),\n key=f\"child{i}\",\n options=TriggerWorkflowOptions(additional_metadata={\"hello\": \"earth\"}),\n )\n for i in range(input.n)\n ],\n )\n\n print(f\"results {results}\")\n\n return {\"results\": results}\n\n\n@sync_fanout_child.task()\ndef process(input: ChildInput, ctx: Context) -> dict[str, str]:\n 
return {\"status\": \"success \" + input.a}\n\n\n@sync_fanout_child.task(parents=[process])\ndef process2(input: ChildInput, ctx: Context) -> dict[str, str]:\n process_output = ctx.task_output(process)\n a = process_output[\"status\"]\n\n return {\"status2\": a + \"2\"}\n\n\ndef main() -> None:\n worker = hatchet.worker(\n \"sync-fanout-worker\",\n slots=40,\n workflows=[sync_fanout_parent, sync_fanout_child],\n )\n worker.start()\n\n\nif __name__ == \"__main__\":\n main()\n", "source": "out/python/fanout_sync/worker.py", "blocks": {}, "highlights": {} diff --git a/frontend/docs/lib/generated/snips/python/index.ts b/frontend/docs/lib/generated/snips/python/index.ts index 8762d1d8a..c31d5dc1a 100644 --- a/frontend/docs/lib/generated/snips/python/index.ts +++ b/frontend/docs/lib/generated/snips/python/index.ts @@ -36,11 +36,13 @@ import * as priority from './priority'; import * as quickstart from './quickstart'; import * as rate_limit from './rate_limit'; import * as retries from './retries'; +import * as return_exceptions from './return_exceptions'; import * as scheduled from './scheduled'; import * as simple from './simple'; import * as sticky_workers from './sticky_workers'; import * as streaming from './streaming'; import * as timeout from './timeout'; +import * as unit_testing from './unit_testing'; import * as worker_existing_loop from './worker_existing_loop'; import * as workflow_registration from './workflow_registration'; @@ -82,10 +84,12 @@ export { priority }; export { quickstart }; export { rate_limit }; export { retries }; +export { return_exceptions }; export { scheduled }; export { simple }; export { sticky_workers }; export { streaming }; export { timeout }; +export { unit_testing }; export { worker_existing_loop }; export { workflow_registration }; diff --git a/frontend/docs/lib/generated/snips/python/rate_limit/index.ts b/frontend/docs/lib/generated/snips/python/rate_limit/index.ts index 759db08bd..19ada7351 100644 --- a/frontend/docs/lib/generated/snips/python/rate_limit/index.ts +++ b/frontend/docs/lib/generated/snips/python/rate_limit/index.ts @@ -1,9 +1,7 @@ import dynamic from './dynamic'; -import test_rate_limit from './test_rate_limit'; import trigger from './trigger'; import worker from './worker'; export { dynamic } -export { test_rate_limit } export { trigger } export { worker } diff --git a/frontend/docs/lib/generated/snips/python/rate_limit/test_rate_limit.ts b/frontend/docs/lib/generated/snips/python/rate_limit/test_rate_limit.ts deleted file mode 100644 index de5895bb9..000000000 --- a/frontend/docs/lib/generated/snips/python/rate_limit/test_rate_limit.ts +++ /dev/null @@ -1,11 +0,0 @@ -import { Snippet } from '@/lib/generated/snips/types'; - -const snippet: Snippet = { - "language": "python", - "content": "import asyncio\nimport time\n\nimport pytest\n\nfrom examples.rate_limit.worker import rate_limit_workflow\n\n\n@pytest.mark.skip(reason=\"The timing for this test is not reliable\")\n@pytest.mark.asyncio(loop_scope=\"session\")\nasync def test_run() -> None:\n\n run1 = rate_limit_workflow.run_no_wait()\n run2 = rate_limit_workflow.run_no_wait()\n run3 = rate_limit_workflow.run_no_wait()\n\n start_time = time.time()\n\n await asyncio.gather(run1.aio_result(), run2.aio_result(), run3.aio_result())\n\n end_time = time.time()\n\n total_time = end_time - start_time\n\n assert (\n 1 <= total_time <= 5\n ), f\"Expected runtime to be a bit more than 1 seconds, but it took {total_time:.2f} seconds\"\n", - "source": "out/python/rate_limit/test_rate_limit.py", - 
"blocks": {}, - "highlights": {} -}; - -export default snippet; diff --git a/frontend/docs/lib/generated/snips/python/return_exceptions/index.ts b/frontend/docs/lib/generated/snips/python/return_exceptions/index.ts new file mode 100644 index 000000000..be3bd6a32 --- /dev/null +++ b/frontend/docs/lib/generated/snips/python/return_exceptions/index.ts @@ -0,0 +1,5 @@ +import test_return_exceptions from './test_return_exceptions'; +import worker from './worker'; + +export { test_return_exceptions } +export { worker } diff --git a/frontend/docs/lib/generated/snips/python/return_exceptions/test_return_exceptions.ts b/frontend/docs/lib/generated/snips/python/return_exceptions/test_return_exceptions.ts new file mode 100644 index 000000000..592103ae2 --- /dev/null +++ b/frontend/docs/lib/generated/snips/python/return_exceptions/test_return_exceptions.ts @@ -0,0 +1,11 @@ +import { Snippet } from '@/lib/generated/snips/types'; + +const snippet: Snippet = { + "language": "python", + "content": "import asyncio\n\nimport pytest\n\nfrom examples.return_exceptions.worker import Input, return_exceptions_task\n\n\n@pytest.mark.asyncio(loop_scope=\"session\")\nasync def test_return_exceptions_async() -> None:\n results = await return_exceptions_task.aio_run_many(\n [\n return_exceptions_task.create_bulk_run_item(input=Input(index=i))\n for i in range(10)\n ],\n return_exceptions=True,\n )\n\n for i, result in enumerate(results):\n if i % 2 == 0:\n assert isinstance(result, Exception)\n assert f\"error in task with index {i}\" in str(result)\n else:\n assert result == {\"message\": \"this is a successful task.\"}\n\n\ndef test_return_exceptions_sync() -> None:\n results = return_exceptions_task.run_many(\n [\n return_exceptions_task.create_bulk_run_item(input=Input(index=i))\n for i in range(10)\n ],\n return_exceptions=True,\n )\n\n for i, result in enumerate(results):\n if i % 2 == 0:\n assert isinstance(result, Exception)\n assert f\"error in task with index {i}\" in str(result)\n else:\n assert result == {\"message\": \"this is a successful task.\"}\n", + "source": "out/python/return_exceptions/test_return_exceptions.py", + "blocks": {}, + "highlights": {} +}; + +export default snippet; diff --git a/frontend/docs/lib/generated/snips/python/return_exceptions/worker.ts b/frontend/docs/lib/generated/snips/python/return_exceptions/worker.ts new file mode 100644 index 000000000..70612f9d5 --- /dev/null +++ b/frontend/docs/lib/generated/snips/python/return_exceptions/worker.ts @@ -0,0 +1,11 @@ +import { Snippet } from '@/lib/generated/snips/types'; + +const snippet: Snippet = { + "language": "python", + "content": "from pydantic import BaseModel\n\nfrom hatchet_sdk import Context, EmptyModel, Hatchet\n\nhatchet = Hatchet()\n\n\nclass Input(EmptyModel):\n index: int\n\n\n@hatchet.task(input_validator=Input)\nasync def return_exceptions_task(input: Input, ctx: Context) -> dict[str, str]:\n if input.index % 2 == 0:\n raise ValueError(f\"error in task with index {input.index}\")\n\n return {\"message\": \"this is a successful task.\"}\n", + "source": "out/python/return_exceptions/worker.py", + "blocks": {}, + "highlights": {} +}; + +export default snippet; diff --git a/frontend/docs/lib/generated/snips/python/timeout/worker.ts b/frontend/docs/lib/generated/snips/python/timeout/worker.ts index d570caecf..832c82fbc 100644 --- a/frontend/docs/lib/generated/snips/python/timeout/worker.ts +++ b/frontend/docs/lib/generated/snips/python/timeout/worker.ts @@ -2,7 +2,7 @@ import { Snippet } from 
'@/lib/generated/snips/types'; const snippet: Snippet = { "language": "python", - "content": "import time\nfrom datetime import timedelta\n\nfrom hatchet_sdk import Context, EmptyModel, Hatchet, TaskDefaults\n\nhatchet = Hatchet(debug=True)\n\n# > ScheduleTimeout\ntimeout_wf = hatchet.workflow(\n name=\"TimeoutWorkflow\",\n task_defaults=TaskDefaults(execution_timeout=timedelta(minutes=2)),\n)\n\n\n# > ExecutionTimeout\n# 👀 Specify an execution timeout on a task\n@timeout_wf.task(\n execution_timeout=timedelta(seconds=4), schedule_timeout=timedelta(minutes=10)\n)\ndef timeout_task(input: EmptyModel, ctx: Context) -> dict[str, str]:\n time.sleep(5)\n return {\"status\": \"success\"}\n\n\n\nrefresh_timeout_wf = hatchet.workflow(name=\"RefreshTimeoutWorkflow\")\n\n\n# > RefreshTimeout\n@refresh_timeout_wf.task(execution_timeout=timedelta(seconds=4))\ndef refresh_task(input: EmptyModel, ctx: Context) -> dict[str, str]:\n\n ctx.refresh_timeout(timedelta(seconds=10))\n time.sleep(5)\n\n return {\"status\": \"success\"}\n\n\n\n\ndef main() -> None:\n worker = hatchet.worker(\n \"timeout-worker\", slots=4, workflows=[timeout_wf, refresh_timeout_wf]\n )\n\n worker.start()\n\n\nif __name__ == \"__main__\":\n main()\n", + "content": "import time\nfrom datetime import timedelta\n\nfrom hatchet_sdk import Context, EmptyModel, Hatchet, TaskDefaults\n\nhatchet = Hatchet(debug=True)\n\n# > ScheduleTimeout\ntimeout_wf = hatchet.workflow(\n name=\"TimeoutWorkflow\",\n task_defaults=TaskDefaults(execution_timeout=timedelta(minutes=2)),\n)\n\n\n# > ExecutionTimeout\n# 👀 Specify an execution timeout on a task\n@timeout_wf.task(\n execution_timeout=timedelta(seconds=5), schedule_timeout=timedelta(minutes=10)\n)\ndef timeout_task(input: EmptyModel, ctx: Context) -> dict[str, str]:\n time.sleep(30)\n return {\"status\": \"success\"}\n\n\n\nrefresh_timeout_wf = hatchet.workflow(name=\"RefreshTimeoutWorkflow\")\n\n\n# > RefreshTimeout\n@refresh_timeout_wf.task(execution_timeout=timedelta(seconds=4))\ndef refresh_task(input: EmptyModel, ctx: Context) -> dict[str, str]:\n ctx.refresh_timeout(timedelta(seconds=10))\n time.sleep(5)\n\n return {\"status\": \"success\"}\n\n\n\n\ndef main() -> None:\n worker = hatchet.worker(\n \"timeout-worker\", slots=4, workflows=[timeout_wf, refresh_timeout_wf]\n )\n\n worker.start()\n\n\nif __name__ == \"__main__\":\n main()\n", "source": "out/python/timeout/worker.py", "blocks": { "scheduletimeout": { @@ -15,7 +15,7 @@ const snippet: Snippet = { }, "refreshtimeout": { "start": 30, - "stop": 38 + "stop": 37 } }, "highlights": {} diff --git a/frontend/docs/lib/generated/snips/python/unit_testing/index.ts b/frontend/docs/lib/generated/snips/python/unit_testing/index.ts new file mode 100644 index 000000000..f497eecd5 --- /dev/null +++ b/frontend/docs/lib/generated/snips/python/unit_testing/index.ts @@ -0,0 +1,5 @@ +import test_unit from './test_unit'; +import workflows from './workflows'; + +export { test_unit } +export { workflows } diff --git a/frontend/docs/lib/generated/snips/python/unit_testing/test_unit.ts b/frontend/docs/lib/generated/snips/python/unit_testing/test_unit.ts new file mode 100644 index 000000000..99eb7594b --- /dev/null +++ b/frontend/docs/lib/generated/snips/python/unit_testing/test_unit.ts @@ -0,0 +1,11 @@ +import { Snippet } from '@/lib/generated/snips/types'; + +const snippet: Snippet = { + "language": "python", + "content": "import pytest\n\nfrom examples.unit_testing.workflows import (\n Lifespan,\n UnitTestInput,\n UnitTestOutput,\n async_complex_workflow,\n 
async_simple_workflow,\n async_standalone,\n durable_async_complex_workflow,\n durable_async_simple_workflow,\n durable_async_standalone,\n durable_sync_complex_workflow,\n durable_sync_simple_workflow,\n durable_sync_standalone,\n start,\n sync_complex_workflow,\n sync_simple_workflow,\n sync_standalone,\n)\nfrom hatchet_sdk import Task\n\n\n@pytest.mark.parametrize(\n \"func\",\n [\n sync_standalone,\n durable_sync_standalone,\n sync_simple_workflow,\n durable_sync_simple_workflow,\n sync_complex_workflow,\n durable_sync_complex_workflow,\n ],\n)\ndef test_simple_unit_sync(func: Task[UnitTestInput, UnitTestOutput]) -> None:\n input = UnitTestInput(key=\"test_key\", number=42)\n additional_metadata = {\"meta_key\": \"meta_value\"}\n lifespan = Lifespan(mock_db_url=\"sqlite:///:memory:\")\n retry_count = 1\n\n expected_output = UnitTestOutput(\n key=input.key,\n number=input.number,\n additional_metadata=additional_metadata,\n retry_count=retry_count,\n mock_db_url=lifespan.mock_db_url,\n )\n\n assert (\n func.mock_run(\n input=input,\n additional_metadata=additional_metadata,\n lifespan=lifespan,\n retry_count=retry_count,\n parent_outputs={start.name: expected_output.model_dump()},\n )\n == expected_output\n )\n\n\n@pytest.mark.parametrize(\n \"func\",\n [\n async_standalone,\n durable_async_standalone,\n async_simple_workflow,\n durable_async_simple_workflow,\n async_complex_workflow,\n durable_async_complex_workflow,\n ],\n)\n@pytest.mark.asyncio(loop_scope=\"session\")\nasync def test_simple_unit_async(func: Task[UnitTestInput, UnitTestOutput]) -> None:\n input = UnitTestInput(key=\"test_key\", number=42)\n additional_metadata = {\"meta_key\": \"meta_value\"}\n lifespan = Lifespan(mock_db_url=\"sqlite:///:memory:\")\n retry_count = 1\n\n expected_output = UnitTestOutput(\n key=input.key,\n number=input.number,\n additional_metadata=additional_metadata,\n retry_count=retry_count,\n mock_db_url=lifespan.mock_db_url,\n )\n\n assert (\n await func.aio_mock_run(\n input=input,\n additional_metadata=additional_metadata,\n lifespan=lifespan,\n retry_count=retry_count,\n parent_outputs={start.name: expected_output.model_dump()},\n )\n == expected_output\n )\n", + "source": "out/python/unit_testing/test_unit.py", + "blocks": {}, + "highlights": {} +}; + +export default snippet; diff --git a/frontend/docs/lib/generated/snips/python/unit_testing/workflows.ts b/frontend/docs/lib/generated/snips/python/unit_testing/workflows.ts new file mode 100644 index 000000000..be793957e --- /dev/null +++ b/frontend/docs/lib/generated/snips/python/unit_testing/workflows.ts @@ -0,0 +1,11 @@ +import { Snippet } from '@/lib/generated/snips/types'; + +const snippet: Snippet = { + "language": "python", + "content": "from typing import cast\n\nfrom pydantic import BaseModel\n\nfrom hatchet_sdk import Context, DurableContext, EmptyModel, Hatchet\n\n\nclass UnitTestInput(BaseModel):\n key: str\n number: int\n\n\nclass Lifespan(BaseModel):\n mock_db_url: str\n\n\nclass UnitTestOutput(UnitTestInput, Lifespan):\n additional_metadata: dict[str, str]\n retry_count: int\n\n\nhatchet = Hatchet()\n\n\n@hatchet.task(input_validator=UnitTestInput)\ndef sync_standalone(input: UnitTestInput, ctx: Context) -> UnitTestOutput:\n return UnitTestOutput(\n key=input.key,\n number=input.number,\n additional_metadata=ctx.additional_metadata,\n retry_count=ctx.retry_count,\n mock_db_url=cast(Lifespan, ctx.lifespan).mock_db_url,\n )\n\n\n@hatchet.task(input_validator=UnitTestInput)\nasync def async_standalone(input: UnitTestInput, ctx: 
Context) -> UnitTestOutput:\n return UnitTestOutput(\n key=input.key,\n number=input.number,\n additional_metadata=ctx.additional_metadata,\n retry_count=ctx.retry_count,\n mock_db_url=cast(Lifespan, ctx.lifespan).mock_db_url,\n )\n\n\n@hatchet.durable_task(input_validator=UnitTestInput)\ndef durable_sync_standalone(\n input: UnitTestInput, ctx: DurableContext\n) -> UnitTestOutput:\n return UnitTestOutput(\n key=input.key,\n number=input.number,\n additional_metadata=ctx.additional_metadata,\n retry_count=ctx.retry_count,\n mock_db_url=cast(Lifespan, ctx.lifespan).mock_db_url,\n )\n\n\n@hatchet.durable_task(input_validator=UnitTestInput)\nasync def durable_async_standalone(\n input: UnitTestInput, ctx: DurableContext\n) -> UnitTestOutput:\n return UnitTestOutput(\n key=input.key,\n number=input.number,\n additional_metadata=ctx.additional_metadata,\n retry_count=ctx.retry_count,\n mock_db_url=cast(Lifespan, ctx.lifespan).mock_db_url,\n )\n\n\nsimple_workflow = hatchet.workflow(\n name=\"simple-unit-test-workflow\", input_validator=UnitTestInput\n)\n\n\n@simple_workflow.task()\ndef sync_simple_workflow(input: UnitTestInput, ctx: Context) -> UnitTestOutput:\n return UnitTestOutput(\n key=input.key,\n number=input.number,\n additional_metadata=ctx.additional_metadata,\n retry_count=ctx.retry_count,\n mock_db_url=cast(Lifespan, ctx.lifespan).mock_db_url,\n )\n\n\n@simple_workflow.task()\nasync def async_simple_workflow(input: UnitTestInput, ctx: Context) -> UnitTestOutput:\n return UnitTestOutput(\n key=input.key,\n number=input.number,\n additional_metadata=ctx.additional_metadata,\n retry_count=ctx.retry_count,\n mock_db_url=cast(Lifespan, ctx.lifespan).mock_db_url,\n )\n\n\n@simple_workflow.durable_task()\ndef durable_sync_simple_workflow(\n input: UnitTestInput, ctx: DurableContext\n) -> UnitTestOutput:\n return UnitTestOutput(\n key=input.key,\n number=input.number,\n additional_metadata=ctx.additional_metadata,\n retry_count=ctx.retry_count,\n mock_db_url=cast(Lifespan, ctx.lifespan).mock_db_url,\n )\n\n\n@simple_workflow.durable_task()\nasync def durable_async_simple_workflow(\n input: UnitTestInput, ctx: DurableContext\n) -> UnitTestOutput:\n return UnitTestOutput(\n key=input.key,\n number=input.number,\n additional_metadata=ctx.additional_metadata,\n retry_count=ctx.retry_count,\n mock_db_url=cast(Lifespan, ctx.lifespan).mock_db_url,\n )\n\n\ncomplex_workflow = hatchet.workflow(\n name=\"complex-unit-test-workflow\", input_validator=UnitTestInput\n)\n\n\n@complex_workflow.task()\nasync def start(input: UnitTestInput, ctx: Context) -> UnitTestOutput:\n return UnitTestOutput(\n key=input.key,\n number=input.number,\n additional_metadata=ctx.additional_metadata,\n retry_count=ctx.retry_count,\n mock_db_url=cast(Lifespan, ctx.lifespan).mock_db_url,\n )\n\n\n@complex_workflow.task(\n parents=[start],\n)\ndef sync_complex_workflow(input: UnitTestInput, ctx: Context) -> UnitTestOutput:\n return ctx.task_output(start)\n\n\n@complex_workflow.task(\n parents=[start],\n)\nasync def async_complex_workflow(input: UnitTestInput, ctx: Context) -> UnitTestOutput:\n return ctx.task_output(start)\n\n\n@complex_workflow.durable_task(\n parents=[start],\n)\ndef durable_sync_complex_workflow(\n input: UnitTestInput, ctx: DurableContext\n) -> UnitTestOutput:\n return ctx.task_output(start)\n\n\n@complex_workflow.durable_task(\n parents=[start],\n)\nasync def durable_async_complex_workflow(\n input: UnitTestInput, ctx: DurableContext\n) -> UnitTestOutput:\n return ctx.task_output(start)\n", + "source": 
"out/python/unit_testing/workflows.py", + "blocks": {}, + "highlights": {} +}; + +export default snippet; diff --git a/frontend/docs/lib/generated/snips/python/worker.ts b/frontend/docs/lib/generated/snips/python/worker.ts index 83ac45536..350789b96 100644 --- a/frontend/docs/lib/generated/snips/python/worker.ts +++ b/frontend/docs/lib/generated/snips/python/worker.ts @@ -2,7 +2,7 @@ import { Snippet } from '@/lib/generated/snips/types'; const snippet: Snippet = { "language": "python", - "content": "from examples.affinity_workers.worker import affinity_worker_workflow\nfrom examples.bulk_fanout.worker import bulk_child_wf, bulk_parent_wf\nfrom examples.bulk_operations.worker import (\n bulk_replay_test_1,\n bulk_replay_test_2,\n bulk_replay_test_3,\n)\nfrom examples.cancellation.worker import cancellation_workflow\nfrom examples.concurrency_limit.worker import concurrency_limit_workflow\nfrom examples.concurrency_limit_rr.worker import concurrency_limit_rr_workflow\nfrom examples.concurrency_multiple_keys.worker import concurrency_multiple_keys_workflow\nfrom examples.concurrency_workflow_level.worker import (\n concurrency_workflow_level_workflow,\n)\nfrom examples.conditions.worker import task_condition_workflow\nfrom examples.dag.worker import dag_workflow\nfrom examples.dedupe.worker import dedupe_child_wf, dedupe_parent_wf\nfrom examples.durable.worker import durable_workflow\nfrom examples.events.worker import event_workflow\nfrom examples.fanout.worker import child_wf, parent_wf\nfrom examples.fanout_sync.worker import sync_fanout_child, sync_fanout_parent\nfrom examples.lifespans.simple import lifespan, lifespan_task\nfrom examples.logger.workflow import logging_workflow\nfrom examples.non_retryable.worker import non_retryable_workflow\nfrom examples.on_failure.worker import on_failure_wf, on_failure_wf_with_details\nfrom examples.simple.worker import simple, simple_durable\nfrom examples.timeout.worker import refresh_timeout_wf, timeout_wf\nfrom hatchet_sdk import Hatchet\n\nhatchet = Hatchet(debug=True)\n\n\ndef main() -> None:\n worker = hatchet.worker(\n \"e2e-test-worker\",\n slots=100,\n workflows=[\n affinity_worker_workflow,\n bulk_child_wf,\n bulk_parent_wf,\n concurrency_limit_workflow,\n concurrency_limit_rr_workflow,\n concurrency_multiple_keys_workflow,\n dag_workflow,\n dedupe_child_wf,\n dedupe_parent_wf,\n durable_workflow,\n child_wf,\n event_workflow,\n parent_wf,\n on_failure_wf,\n on_failure_wf_with_details,\n logging_workflow,\n timeout_wf,\n refresh_timeout_wf,\n task_condition_workflow,\n cancellation_workflow,\n sync_fanout_parent,\n sync_fanout_child,\n non_retryable_workflow,\n concurrency_workflow_level_workflow,\n lifespan_task,\n simple,\n simple_durable,\n bulk_replay_test_1,\n bulk_replay_test_2,\n bulk_replay_test_3,\n ],\n lifespan=lifespan,\n )\n\n worker.start()\n\n\nif __name__ == \"__main__\":\n main()\n", + "content": "from examples.affinity_workers.worker import affinity_worker_workflow\nfrom examples.bulk_fanout.worker import bulk_child_wf, bulk_parent_wf\nfrom examples.bulk_operations.worker import (\n bulk_replay_test_1,\n bulk_replay_test_2,\n bulk_replay_test_3,\n)\nfrom examples.cancellation.worker import cancellation_workflow\nfrom examples.concurrency_limit.worker import concurrency_limit_workflow\nfrom examples.concurrency_limit_rr.worker import concurrency_limit_rr_workflow\nfrom examples.concurrency_multiple_keys.worker import concurrency_multiple_keys_workflow\nfrom examples.concurrency_workflow_level.worker import (\n 
concurrency_workflow_level_workflow,\n)\nfrom examples.conditions.worker import task_condition_workflow\nfrom examples.dag.worker import dag_workflow\nfrom examples.dedupe.worker import dedupe_child_wf, dedupe_parent_wf\nfrom examples.durable.worker import durable_workflow\nfrom examples.events.worker import event_workflow\nfrom examples.fanout.worker import child_wf, parent_wf\nfrom examples.fanout_sync.worker import sync_fanout_child, sync_fanout_parent\nfrom examples.lifespans.simple import lifespan, lifespan_task\nfrom examples.logger.workflow import logging_workflow\nfrom examples.non_retryable.worker import non_retryable_workflow\nfrom examples.on_failure.worker import on_failure_wf, on_failure_wf_with_details\nfrom examples.return_exceptions.worker import return_exceptions_task\nfrom examples.simple.worker import simple, simple_durable\nfrom examples.timeout.worker import refresh_timeout_wf, timeout_wf\nfrom hatchet_sdk import Hatchet\n\nhatchet = Hatchet(debug=True)\n\n\ndef main() -> None:\n worker = hatchet.worker(\n \"e2e-test-worker\",\n slots=100,\n workflows=[\n affinity_worker_workflow,\n bulk_child_wf,\n bulk_parent_wf,\n concurrency_limit_workflow,\n concurrency_limit_rr_workflow,\n concurrency_multiple_keys_workflow,\n dag_workflow,\n dedupe_child_wf,\n dedupe_parent_wf,\n durable_workflow,\n child_wf,\n event_workflow,\n parent_wf,\n on_failure_wf,\n on_failure_wf_with_details,\n logging_workflow,\n timeout_wf,\n refresh_timeout_wf,\n task_condition_workflow,\n cancellation_workflow,\n sync_fanout_parent,\n sync_fanout_child,\n non_retryable_workflow,\n concurrency_workflow_level_workflow,\n lifespan_task,\n simple,\n simple_durable,\n bulk_replay_test_1,\n bulk_replay_test_2,\n bulk_replay_test_3,\n return_exceptions_task,\n ],\n lifespan=lifespan,\n )\n\n worker.start()\n\n\nif __name__ == \"__main__\":\n main()\n", "source": "out/python/worker.py", "blocks": {}, "highlights": {} diff --git a/frontend/docs/pages/sdks/python/feature-clients/runs.mdx b/frontend/docs/pages/sdks/python/feature-clients/runs.mdx index 7de5e6db6..350c4cb44 100644 --- a/frontend/docs/pages/sdks/python/feature-clients/runs.mdx +++ b/frontend/docs/pages/sdks/python/feature-clients/runs.mdx @@ -6,27 +6,29 @@ The runs client is a client for interacting with task and workflow runs within H Methods: -| Name | Description | -| ----------------- | -------------------------------------------------------------------- | -| `get` | Get workflow run details for a given workflow run ID. | -| `aio_get` | Get workflow run details for a given workflow run ID. | -| `get_status` | Get workflow run status for a given workflow run ID. | -| `aio_get_status` | Get workflow run status for a given workflow run ID. | -| `list` | List task runs according to a set of filters. | -| `aio_list` | List task runs according to a set of filters. | -| `create` | Trigger a new workflow run. | -| `aio_create` | Trigger a new workflow run. | -| `replay` | Replay a task or workflow run. | -| `aio_replay` | Replay a task or workflow run. | -| `bulk_replay` | Replay task or workflow runs in bulk, according to a set of filters. | -| `aio_bulk_replay` | Replay task or workflow runs in bulk, according to a set of filters. | -| `cancel` | Cancel a task or workflow run. | -| `aio_cancel` | Cancel a task or workflow run. | -| `bulk_cancel` | Cancel task or workflow runs in bulk, according to a set of filters. | -| `aio_bulk_cancel` | Cancel task or workflow runs in bulk, according to a set of filters. 
| -| `get_result` | Get the result of a workflow run by its external ID. | -| `aio_get_result` | Get the result of a workflow run by its external ID. | -| `get_run_ref` | Get a reference to a workflow run. | +| Name | Description | +| ------------------ | -------------------------------------------------------------------- | +| `get` | Get workflow run details for a given workflow run ID. | +| `aio_get` | Get workflow run details for a given workflow run ID. | +| `get_status` | Get workflow run status for a given workflow run ID. | +| `aio_get_status` | Get workflow run status for a given workflow run ID. | +| `list` | List task runs according to a set of filters. | +| `aio_list` | List task runs according to a set of filters. | +| `create` | Trigger a new workflow run. | +| `aio_create` | Trigger a new workflow run. | +| `replay` | Replay a task or workflow run. | +| `aio_replay` | Replay a task or workflow run. | +| `bulk_replay` | Replay task or workflow runs in bulk, according to a set of filters. | +| `aio_bulk_replay` | Replay task or workflow runs in bulk, according to a set of filters. | +| `cancel` | Cancel a task or workflow run. | +| `aio_cancel` | Cancel a task or workflow run. | +| `bulk_cancel` | Cancel task or workflow runs in bulk, according to a set of filters. | +| `aio_bulk_cancel` | Cancel task or workflow runs in bulk, according to a set of filters. | +| `get_result` | Get the result of a workflow run by its external ID. | +| `aio_get_result` | Get the result of a workflow run by its external ID. | +| `get_run_ref` | Get a reference to a workflow run. | +| `get_task_run` | Get task run details for a given task run ID. | +| `aio_get_task_run` | Get task run details for a given task run ID. | ### Functions @@ -363,3 +365,35 @@ Returns: | Type | Description | | ---------------- | ------------------------------------------ | | `WorkflowRunRef` | A reference to the specified workflow run. | + +#### `get_task_run` + +Get task run details for a given task run ID. + +Parameters: + +| Name | Type | Description | Default | +| ------------- | ----- | ----------------------------------------------- | ---------- | +| `task_run_id` | `str` | The ID of the task run to retrieve details for. | _required_ | + +Returns: + +| Type | Description | +| --------------- | ----------------------------------------------- | +| `V1TaskSummary` | Task run details for the specified task run ID. | + +#### `aio_get_task_run` + +Get task run details for a given task run ID. + +Parameters: + +| Name | Type | Description | Default | +| ------------- | ----- | ----------------------------------------------- | ---------- | +| `task_run_id` | `str` | The ID of the task run to retrieve details for. | _required_ | + +Returns: + +| Type | Description | +| --------------- | ----------------------------------------------- | +| `V1TaskSummary` | Task run details for the specified task run ID. | diff --git a/frontend/docs/pages/sdks/python/runnables.mdx b/frontend/docs/pages/sdks/python/runnables.mdx index 137c6b35f..8342e5f4a 100644 --- a/frontend/docs/pages/sdks/python/runnables.mdx +++ b/frontend/docs/pages/sdks/python/runnables.mdx @@ -2,7 +2,7 @@ `Runnables` in the Hatchet SDK are things that can be run, namely tasks and workflows. The two main types of runnables you'll encounter are: -- `Workflow`, which lets you define tasks and call all of the run, schedule, etc. methods +- `Workflow`, which lets you define tasks and call all of the run, schedule, etc. methods. 
- `Standalone`, which is a single task that's returned by `hatchet.task` and can be run, scheduled, etc. ## Workflow @@ -42,7 +42,7 @@ Workflows support various execution patterns including: Tasks within workflows can be defined with `@workflow.task()` or `@workflow.durable_task()` decorators and can be arranged into complex dependency patterns. -Methods: +### Methods | Name | Description | | ---------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------ | @@ -169,7 +169,7 @@ Parameters: | `rate_limits` | `list[RateLimit] \| None` | A list of rate limit configurations for the on-failure task. | `None` | | `backoff_factor` | `float \| None` | The backoff factor for controlling exponential backoff in retries. | `None` | | `backoff_max_seconds` | `int \| None` | The maximum number of seconds to allow retries with exponential backoff to continue. | `None` | -| `concurrency` | `list[ConcurrencyExpression] \| None` | A list of concurrency expressions for the on-success task. | `None` | +| `concurrency` | `list[ConcurrencyExpression] \| None` | A list of concurrency expressions for the on-failure task. | `None` | Returns: @@ -188,7 +188,7 @@ Parameters: | `name` | `str \| None` | The name of the on-success task. If not specified, defaults to the name of the function being wrapped by the `on_success_task` decorator. | `None` | | `schedule_timeout` | `Duration` | The maximum time to wait for the task to be scheduled. The run will be canceled if the task does not begin within this time. | `timedelta(minutes=5)` | | `execution_timeout` | `Duration` | The maximum time to wait for the task to complete. The run will be canceled if the task does not complete within this time. | `timedelta(seconds=60)` | -| `retries` | `int` | The number of times to retry the on-success task before failing | `0` | +| `retries` | `int` | The number of times to retry the on-success task before failing. | `0` | | `rate_limits` | `list[RateLimit] \| None` | A list of rate limit configurations for the on-success task. | `None` | | `backoff_factor` | `float \| None` | The backoff factor for controlling exponential backoff in retries. | `None` | | `backoff_max_seconds` | `int \| None` | The maximum number of seconds to allow retries with exponential backoff to continue. | `None` | @@ -196,9 +196,9 @@ Parameters: Returns: -| Type | Description | -| ------------------------------------------------------------------------------------------------- | ---------------------------------------- | -| `Callable[[Callable[[TWorkflowInput, Context], R \| CoroutineLike[R]]], Task[TWorkflowInput, R]]` | A decorator which creates a Task object. | +| Type | Description | +| ------------------------------------------------------------------------------------------------- | ------------------------------------------ | +| `Callable[[Callable[[TWorkflowInput, Context], R \| CoroutineLike[R]]], Task[TWorkflowInput, R]]` | A decorator which creates a `Task` object. | #### `run` @@ -278,15 +278,16 @@ Run a workflow in bulk and wait for all runs to complete. This method triggers m Parameters: -| Name | Type | Description | Default | -| ----------- | -------------------------------- | ----------------------------------------------------------------------------------------------- | ---------- | -| `workflows` | `list[WorkflowRunTriggerConfig]` | A list of `WorkflowRunTriggerConfig` objects, each representing a workflow run to be triggered. 
| _required_ | +| Name                | Type                             | Description                                                                                       | Default    | +| ------------------- | -------------------------------- | ----------------------------------------------------------------------------------------------- | ---------- | +| `workflows`         | `list[WorkflowRunTriggerConfig]` | A list of `WorkflowRunTriggerConfig` objects, each representing a workflow run to be triggered.   | _required_ | +| `return_exceptions` | `bool`                           | If `True`, exceptions will be returned as part of the results instead of raising them.           | `False`    |  Returns:  -| Type                   | Description                              | -| ---------------------- | ---------------------------------------- | -| `list[dict[str, Any]]` | A list of results for each workflow run. | +| Type                                                             | Description                              | +| --------------------------------------------------------------- | ---------------------------------------- | +| `list[dict[str, Any]] \| list[dict[str, Any] \| BaseException]` | A list of results for each workflow run. |  #### `aio_run_many`  @@ -294,15 +295,16 @@ Run a workflow in bulk and wait for all runs to complete. This method triggers m  Parameters:  -| Name        | Type                             | Description                                                                                       | Default    | -| ----------- | -------------------------------- | ----------------------------------------------------------------------------------------------- | ---------- | -| `workflows` | `list[WorkflowRunTriggerConfig]` | A list of `WorkflowRunTriggerConfig` objects, each representing a workflow run to be triggered.   | _required_ | +| Name                | Type                             | Description                                                                                       | Default    | +| ------------------- | -------------------------------- | ----------------------------------------------------------------------------------------------- | ---------- | +| `workflows`         | `list[WorkflowRunTriggerConfig]` | A list of `WorkflowRunTriggerConfig` objects, each representing a workflow run to be triggered.   | _required_ | +| `return_exceptions` | `bool`                           | If `True`, exceptions will be returned as part of the results instead of raising them.           | `False`    |  Returns:  -| Type                   | Description                              | -| ---------------------- | ---------------------------------------- | -| `list[dict[str, Any]]` | A list of results for each workflow run. | +| Type                                                             | Description                              | +| --------------------------------------------------------------- | ---------------------------------------- | +| `list[dict[str, Any]] \| list[dict[str, Any] \| BaseException]` | A list of results for each workflow run. |  #### `run_many_no_wait`  @@ -520,11 +522,76 @@ Returns:  | ---------- | ------------------- | | `V1Filter` | The created filter. |  +## Task + +Bases: `Generic[TWorkflowInput, R]` + +### Methods + +| Name           | Description                                                                            | +| -------------- | -------------------------------------------------------------------------------------- | +| `mock_run`     | Mimic the execution of a task. This method is intended to be used to unit test tasks.  | +| `aio_mock_run` | Mimic the execution of a task. This method is intended to be used to unit test tasks.  | + +### Functions + +#### `mock_run` + +Mimic the execution of a task. This method is intended to be used to unit test tasks without needing to interact with the Hatchet engine. Use `mock_run` for sync tasks and `aio_mock_run` for async tasks. 
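+
+As an illustrative sketch (the `greet` task, its `GreetInput` model, and the test below are hypothetical, not part of the SDK), a sync task can be unit tested with `mock_run` without starting a worker or connecting to the engine:
+
+```python
+from pydantic import BaseModel
+
+from hatchet_sdk import Context, Hatchet
+
+hatchet = Hatchet()
+
+
+class GreetInput(BaseModel):
+    name: str
+
+
+@hatchet.task(input_validator=GreetInput)
+def greet(input: GreetInput, ctx: Context) -> dict[str, str]:
+    # A plain sync task, so `mock_run` (rather than `aio_mock_run`) applies.
+    return {"greeting": f"Hello, {input.name}!"}
+
+
+def test_greet() -> None:
+    # `mock_run` executes the task body directly; no engine round-trip happens.
+    result = greet.mock_run(input=GreetInput(name="world"))
+    assert result == {"greeting": "Hello, world!"}
+```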
+ +Parameters: + +| Name | Type | Description | Default | +| --------------------- | -------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- | +| `input` | `TWorkflowInput \| None` | The input to the task. | `None` | +| `additional_metadata` | `JSONSerializableMapping \| None` | Additional metadata to attach to the task. | `None` | +| `parent_outputs` | `dict[str, JSONSerializableMapping] \| None` | Outputs from parent tasks, if any. This is useful for mimicking DAG functionality. For instance, if you have a task `step_2` that has a `parent` which is `step_1`, you can pass `parent_outputs={"step_1": {"result": "Hello, world!"}}` to `step_2.mock_run()` to be able to access `ctx.task_output(step_1)` in `step_2`. | `None` | +| `retry_count` | `int` | The number of times the task has been retried. | `0` | +| `lifespan` | `Any` | The lifespan to be used in the task, which is useful if one was set on the worker. This will allow you to access `ctx.lifespan` inside of your task. | `None` | + +Returns: + +| Type | Description | +| ---- | ----------------------- | +| `R` | The output of the task. | + +Raises: + +| Type | Description | +| ----------- | -------------------------------------------------------------------------------------------------------------------------- | +| `TypeError` | If the task is an async function and `mock_run` is called, or if the task is a sync function and `aio_mock_run` is called. | + +#### `aio_mock_run` + +Mimic the execution of a task. This method is intended to be used to unit test tasks without needing to interact with the Hatchet engine. Use `mock_run` for sync tasks and `aio_mock_run` for async tasks. + +Parameters: + +| Name | Type | Description | Default | +| --------------------- | -------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- | +| `input` | `TWorkflowInput \| None` | The input to the task. | `None` | +| `additional_metadata` | `JSONSerializableMapping \| None` | Additional metadata to attach to the task. | `None` | +| `parent_outputs` | `dict[str, JSONSerializableMapping] \| None` | Outputs from parent tasks, if any. This is useful for mimicking DAG functionality. For instance, if you have a task `step_2` that has a `parent` which is `step_1`, you can pass `parent_outputs={"step_1": {"result": "Hello, world!"}}` to `step_2.mock_run()` to be able to access `ctx.task_output(step_1)` in `step_2`. | `None` | +| `retry_count` | `int` | The number of times the task has been retried. | `0` | +| `lifespan` | `Any` | The lifespan to be used in the task, which is useful if one was set on the worker. This will allow you to access `ctx.lifespan` inside of your task. | `None` | + +Returns: + +| Type | Description | +| ---- | ----------------------- | +| `R` | The output of the task. 
| + +Raises: + +| Type        | Description                                                                                                                  | +| ----------- | ---------------------------------------------------------------------------------------------------------------------------- | +| `TypeError` | If the task is an async function and `mock_run` is called, or if the task is a sync function and `aio_mock_run` is called.  | + ## Standalone  Bases: `BaseWorkflow[TWorkflowInput]`, `Generic[TWorkflowInput, R]`  -Methods: +### Methods  | Name | Description | | ---------------------- | ------------------------------------------------------------------------------------------------------------------------ | @@ -547,6 +614,11 @@ Methods: | `aio_create_filter` | Create a new filter. | | `delete` | Permanently delete the workflow. | | `aio_delete` | Permanently delete the workflow. | +| `get_run_ref` | Get a reference to a task run by its run ID. | +| `get_result` | Get the result of a task run by its run ID. | +| `aio_get_result` | Get the result of a task run by its run ID. | +| `mock_run` | Mimic the execution of a task. This method is intended to be used to unit test tasks. | +| `aio_mock_run` | Mimic the execution of a task. This method is intended to be used to unit test tasks. |  ### Functions  @@ -630,15 +702,16 @@ Run a workflow in bulk and wait for all runs to complete. This method triggers m  Parameters:  -| Name        | Type                             | Description                                                                                       | Default    | -| ----------- | -------------------------------- | ----------------------------------------------------------------------------------------------- | ---------- | -| `workflows` | `list[WorkflowRunTriggerConfig]` | A list of `WorkflowRunTriggerConfig` objects, each representing a workflow run to be triggered.   | _required_ | +| Name                | Type                             | Description                                                                                       | Default    | +| ------------------- | -------------------------------- | ----------------------------------------------------------------------------------------------- | ---------- | +| `workflows`         | `list[WorkflowRunTriggerConfig]` | A list of `WorkflowRunTriggerConfig` objects, each representing a workflow run to be triggered.   | _required_ | +| `return_exceptions` | `bool`                           | If `True`, exceptions will be returned as part of the results instead of raising them. 
| `False` | Returns: -| Type | Description | -| --------- | ---------------------------------------- | -| `list[R]` | A list of results for each workflow run. | +| Type | Description | +| ------------------------------------- | ---------------------------------------- | +| `list[R] \| list[R \| BaseException]` | A list of results for each workflow run. | #### `run_many_no_wait` @@ -883,3 +957,91 @@ Permanently delete the workflow. Permanently delete the workflow. **DANGEROUS: This will delete a workflow and all of its data** + +#### `get_run_ref` + +Get a reference to a task run by its run ID. + +Parameters: + +| Name | Type | Description | Default | +| -------- | ----- | ------------------------------------------- | ---------- | +| `run_id` | `str` | The ID of the run to get the reference for. | _required_ | + +Returns: + +| Type | Description | +| ------------------------------- | ----------------------------------------------------------------- | +| `TaskRunRef[TWorkflowInput, R]` | A `TaskRunRef` object representing the reference to the task run. | + +#### `get_result` + +Get the result of a task run by its run ID. + +Parameters: + +| Name | Type | Description | Default | +| -------- | ----- | ---------------------------------------- | ---------- | +| `run_id` | `str` | The ID of the run to get the result for. | _required_ | + +Returns: + +| Type | Description | +| ---- | --------------------------- | +| `R` | The result of the task run. | + +#### `aio_get_result` + +Get the result of a task run by its run ID. + +Parameters: + +| Name | Type | Description | Default | +| -------- | ----- | ---------------------------------------- | ---------- | +| `run_id` | `str` | The ID of the run to get the result for. | _required_ | + +Returns: + +| Type | Description | +| ---- | --------------------------- | +| `R` | The result of the task run. | + +#### `mock_run` + +Mimic the execution of a task. This method is intended to be used to unit test tasks without needing to interact with the Hatchet engine. Use `mock_run` for sync tasks and `aio_mock_run` for async tasks. + +Parameters: + +| Name | Type | Description | Default | +| --------------------- | -------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- | +| `input` | `TWorkflowInput \| None` | The input to the task. | `None` | +| `additional_metadata` | `JSONSerializableMapping \| None` | Additional metadata to attach to the task. | `None` | +| `parent_outputs` | `dict[str, JSONSerializableMapping] \| None` | Outputs from parent tasks, if any. This is useful for mimicking DAG functionality. For instance, if you have a task `step_2` that has a `parent` which is `step_1`, you can pass `parent_outputs={"step_1": {"result": "Hello, world!"}}` to `step_2.mock_run()` to be able to access `ctx.task_output(step_1)` in `step_2`. | `None` | +| `retry_count` | `int` | The number of times the task has been retried. | `0` | +| `lifespan` | `Any` | The lifespan to be used in the task, which is useful if one was set on the worker. This will allow you to access `ctx.lifespan` inside of your task. | `None` | + +Returns: + +| Type | Description | +| ---- | ----------------------- | +| `R` | The output of the task. 
| + +#### `aio_mock_run` + +Mimic the execution of a task. This method is intended to be used to unit test tasks without needing to interact with the Hatchet engine. Use `mock_run` for sync tasks and `aio_mock_run` for async tasks. + +Parameters: + +| Name                  | Type                                         | Description                                                                                                                                                                                                                                                                                                                    | Default | +| --------------------- | -------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- | +| `input`               | `TWorkflowInput \| None`                     | The input to the task.                                                                                                                                                                                                                                                                                                         | `None`  | +| `additional_metadata` | `JSONSerializableMapping \| None`            | Additional metadata to attach to the task.                                                                                                                                                                                                                                                                                     | `None`  | +| `parent_outputs`      | `dict[str, JSONSerializableMapping] \| None` | Outputs from parent tasks, if any. This is useful for mimicking DAG functionality. For instance, if you have a task `step_2` that has a `parent` which is `step_1`, you can pass `parent_outputs={"step_1": {"result": "Hello, world!"}}` to `step_2.mock_run()` to be able to access `ctx.task_output(step_1)` in `step_2`.   | `None`  | +| `retry_count`         | `int`                                        | The number of times the task has been retried.                                                                                                                                                                                                                                                                                 | `0`     | +| `lifespan`            | `Any`                                        | The lifespan to be used in the task, which is useful if one was set on the worker. This will allow you to access `ctx.lifespan` inside of your task.                                                                                                                                                                           | `None`  | + +Returns: + +| Type | Description             | +| ---- | ----------------------- | +| `R`  | The output of the task. | diff --git a/sdks/python/CHANGELOG.md b/sdks/python/CHANGELOG.md index e7224eb26..3443c8b6e 100644 --- a/sdks/python/CHANGELOG.md +++ b/sdks/python/CHANGELOG.md @@ -5,6 +5,19 @@ All notable changes to Hatchet's Python SDK will be documented in this changelog The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [1.16.0] - 2025-07-17 + +### Added + +- Adds new methods for unit testing tasks and standalones, called `mock_run` and `aio_mock_run`, which allow you to run tasks and standalones in a mocked environment without needing to start a worker or connect to the engine. +- Improves exception logs throughout the SDK to provide more context for what went wrong when an exception is thrown. +- Adds `get_run_ref`, `get_result`, and `aio_get_result` methods to the `Standalone` class, to allow for getting typed results of a run more easily. +- Adds `return_exceptions` option to the `run_many` and `aio_run_many` methods to be more similar to `asyncio.gather`. If `True`, exceptions will be returned as part of the results instead of raising them (see the sketch below). + +### Changed + +- Correctly propagates additional metadata through the various `run` methods to spawned children. 
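+
+A brief hedged sketch of the `return_exceptions` option (the `my_task` standalone and its `Input` model here are hypothetical): failed runs come back in the results list instead of raising, mirroring `asyncio.gather`:
+
+```python
+results = my_task.run_many(
+    [my_task.create_bulk_run_item(input=Input(index=i)) for i in range(3)],
+    return_exceptions=True,
+)
+
+for result in results:
+    # With return_exceptions=True, a failed run is a BaseException instance.
+    if isinstance(result, BaseException):
+        print(f"run failed: {result}")
+```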
+ ## [1.15.3] - 2025-07-14 ### Changed diff --git a/sdks/python/docs/feature-clients/runs.md b/sdks/python/docs/feature-clients/runs.md index fb7c7fc24..6e9c516e9 100644 --- a/sdks/python/docs/feature-clients/runs.md +++ b/sdks/python/docs/feature-clients/runs.md @@ -22,3 +22,5 @@ - get_result - aio_get_result - get_run_ref + - get_task_run + - aio_get_task_run diff --git a/sdks/python/docs/runnables.md b/sdks/python/docs/runnables.md index 9d8df1c13..08420fa91 100644 --- a/sdks/python/docs/runnables.md +++ b/sdks/python/docs/runnables.md @@ -36,6 +36,15 @@ - create_filter - aio_create_filter +## Task + +::: runnables.task.Task + options: + inherited_members: true + members: + - mock_run + - aio_mock_run + ## Standalone ::: runnables.workflow.Standalone @@ -61,3 +70,8 @@ - aio_create_filter - delete - aio_delete + - get_run_ref + - get_result + - aio_get_result + - mock_run + - aio_mock_run diff --git a/sdks/python/examples/bulk_operations/test_bulk_replay.py b/sdks/python/examples/bulk_operations/test_bulk_replay.py index 62afc32bc..834758ae3 100644 --- a/sdks/python/examples/bulk_operations/test_bulk_replay.py +++ b/sdks/python/examples/bulk_operations/test_bulk_replay.py @@ -77,7 +77,7 @@ async def test_bulk_replay(hatchet: Hatchet) -> None: ) ) - await asyncio.sleep(5) + await asyncio.sleep(10) runs = await hatchet.runs.aio_list( workflow_ids=workflow_ids, diff --git a/sdks/python/examples/concurrency_limit/test_concurrency_limit.py b/sdks/python/examples/concurrency_limit/test_concurrency_limit.py deleted file mode 100644 index 6e820fad3..000000000 --- a/sdks/python/examples/concurrency_limit/test_concurrency_limit.py +++ /dev/null @@ -1,41 +0,0 @@ -import pytest - -from examples.concurrency_limit.worker import WorkflowInput, concurrency_limit_workflow -from hatchet_sdk.workflow_run import WorkflowRunRef - - -@pytest.mark.asyncio(loop_scope="session") -@pytest.mark.skip(reason="The timing for this test is not reliable") -async def test_run() -> None: - num_runs = 6 - runs: list[WorkflowRunRef] = [] - - # Start all runs - for i in range(1, num_runs + 1): - run = concurrency_limit_workflow.run_no_wait( - WorkflowInput(run=i, group_key=str(i)) - ) - runs.append(run) - - # Wait for all results - successful_runs = [] - cancelled_runs = [] - - # Process each run individually - for i, run in enumerate(runs, start=1): - try: - result = await run.aio_result() - successful_runs.append((i, result)) - except Exception as e: - if "CANCELLED_BY_CONCURRENCY_LIMIT" in str(e): - cancelled_runs.append((i, str(e))) - else: - raise # Re-raise if it's an unexpected error - - # Check that we have the correct number of successful and cancelled runs - assert ( - len(successful_runs) == 5 - ), f"Expected 5 successful runs, got {len(successful_runs)}" - assert ( - len(cancelled_runs) == 1 - ), f"Expected 1 cancelled run, got {len(cancelled_runs)}" diff --git a/sdks/python/examples/fanout/test_fanout.py b/sdks/python/examples/fanout/test_fanout.py index c12d8a7a3..9831c0a52 100644 --- a/sdks/python/examples/fanout/test_fanout.py +++ b/sdks/python/examples/fanout/test_fanout.py @@ -1,10 +1,50 @@ +import asyncio +from uuid import uuid4 + import pytest from examples.fanout.worker import ParentInput, parent_wf +from hatchet_sdk import Hatchet, TriggerWorkflowOptions @pytest.mark.asyncio(loop_scope="session") -async def test_run() -> None: - result = await parent_wf.aio_run(ParentInput(n=2)) +async def test_run(hatchet: Hatchet) -> None: + ref = await parent_wf.aio_run_no_wait( + ParentInput(n=2), + ) + + result = 
await ref.aio_result() assert len(result["spawn"]["results"]) == 2 + + +@pytest.mark.asyncio(loop_scope="session") +async def test_additional_metadata_propagation(hatchet: Hatchet) -> None: + test_run_id = uuid4().hex + + ref = await parent_wf.aio_run_no_wait( + ParentInput(n=2), + options=TriggerWorkflowOptions( + additional_metadata={"test_run_id": test_run_id} + ), + ) + + await ref.aio_result() + await asyncio.sleep(1) + + runs = await hatchet.runs.aio_list( + parent_task_external_id=ref.workflow_run_id, + additional_metadata={"test_run_id": test_run_id}, + ) + + assert runs.rows + + """Assert that the additional metadata is propagated to the child runs.""" + for run in runs.rows: + assert run.additional_metadata + assert run.additional_metadata["test_run_id"] == test_run_id + + assert run.children + for child in run.children: + assert child.additional_metadata + assert child.additional_metadata["test_run_id"] == test_run_id diff --git a/sdks/python/examples/fanout/worker.py b/sdks/python/examples/fanout/worker.py index 006d2931e..cd313b3eb 100644 --- a/sdks/python/examples/fanout/worker.py +++ b/sdks/python/examples/fanout/worker.py @@ -34,7 +34,7 @@ async def spawn(input: ParentInput, ctx: Context) -> dict[str, Any]: ), ) for i in range(input.n) - ] + ], ) print(f"results {result}") @@ -47,13 +47,13 @@ async def spawn(input: ParentInput, ctx: Context) -> dict[str, Any]: # > FanoutChild @child_wf.task() -def process(input: ChildInput, ctx: Context) -> dict[str, str]: +async def process(input: ChildInput, ctx: Context) -> dict[str, str]: print(f"child process {input.a}") return {"status": input.a} @child_wf.task(parents=[process]) -def process2(input: ChildInput, ctx: Context) -> dict[str, str]: +async def process2(input: ChildInput, ctx: Context) -> dict[str, str]: process_output = ctx.task_output(process) a = process_output["status"] diff --git a/sdks/python/examples/fanout_sync/test_fanout_sync.py b/sdks/python/examples/fanout_sync/test_fanout_sync.py index d6dd76c80..ea93d080f 100644 --- a/sdks/python/examples/fanout_sync/test_fanout_sync.py +++ b/sdks/python/examples/fanout_sync/test_fanout_sync.py @@ -1,4 +1,10 @@ +import asyncio +from uuid import uuid4 + +import pytest + from examples.fanout_sync.worker import ParentInput, sync_fanout_parent +from hatchet_sdk import Hatchet, TriggerWorkflowOptions def test_run() -> None: @@ -7,3 +13,35 @@ def test_run() -> None: result = sync_fanout_parent.run(ParentInput(n=N)) assert len(result["spawn"]["results"]) == N + + +@pytest.mark.asyncio(loop_scope="session") +async def test_additional_metadata_propagation_sync(hatchet: Hatchet) -> None: + test_run_id = uuid4().hex + + ref = await sync_fanout_parent.aio_run_no_wait( + ParentInput(n=2), + options=TriggerWorkflowOptions( + additional_metadata={"test_run_id": test_run_id} + ), + ) + + await ref.aio_result() + await asyncio.sleep(1) + + runs = await hatchet.runs.aio_list( + parent_task_external_id=ref.workflow_run_id, + additional_metadata={"test_run_id": test_run_id}, + ) + + assert runs.rows + + """Assert that the additional metadata is propagated to the child runs.""" + for run in runs.rows: + assert run.additional_metadata + assert run.additional_metadata["test_run_id"] == test_run_id + + assert run.children + for child in run.children: + assert child.additional_metadata + assert child.additional_metadata["test_run_id"] == test_run_id diff --git a/sdks/python/examples/fanout_sync/worker.py b/sdks/python/examples/fanout_sync/worker.py index 095403eae..5e7d68b12 100644 --- 
a/sdks/python/examples/fanout_sync/worker.py +++ b/sdks/python/examples/fanout_sync/worker.py @@ -47,6 +47,14 @@ def process(input: ChildInput, ctx: Context) -> dict[str, str]: return {"status": "success " + input.a} +@sync_fanout_child.task(parents=[process]) +def process2(input: ChildInput, ctx: Context) -> dict[str, str]: + process_output = ctx.task_output(process) + a = process_output["status"] + + return {"status2": a + "2"} + + def main() -> None: worker = hatchet.worker( "sync-fanout-worker", diff --git a/sdks/python/examples/rate_limit/test_rate_limit.py b/sdks/python/examples/rate_limit/test_rate_limit.py deleted file mode 100644 index 18e7b0454..000000000 --- a/sdks/python/examples/rate_limit/test_rate_limit.py +++ /dev/null @@ -1,27 +0,0 @@ -import asyncio -import time - -import pytest - -from examples.rate_limit.worker import rate_limit_workflow - - -@pytest.mark.skip(reason="The timing for this test is not reliable") -@pytest.mark.asyncio(loop_scope="session") -async def test_run() -> None: - - run1 = rate_limit_workflow.run_no_wait() - run2 = rate_limit_workflow.run_no_wait() - run3 = rate_limit_workflow.run_no_wait() - - start_time = time.time() - - await asyncio.gather(run1.aio_result(), run2.aio_result(), run3.aio_result()) - - end_time = time.time() - - total_time = end_time - start_time - - assert ( - 1 <= total_time <= 5 - ), f"Expected runtime to be a bit more than 1 seconds, but it took {total_time:.2f} seconds" diff --git a/sdks/python/examples/return_exceptions/test_return_exceptions.py b/sdks/python/examples/return_exceptions/test_return_exceptions.py new file mode 100644 index 000000000..2dca8d679 --- /dev/null +++ b/sdks/python/examples/return_exceptions/test_return_exceptions.py @@ -0,0 +1,40 @@ +import asyncio + +import pytest + +from examples.return_exceptions.worker import Input, return_exceptions_task + + +@pytest.mark.asyncio(loop_scope="session") +async def test_return_exceptions_async() -> None: + results = await return_exceptions_task.aio_run_many( + [ + return_exceptions_task.create_bulk_run_item(input=Input(index=i)) + for i in range(10) + ], + return_exceptions=True, + ) + + for i, result in enumerate(results): + if i % 2 == 0: + assert isinstance(result, Exception) + assert f"error in task with index {i}" in str(result) + else: + assert result == {"message": "this is a successful task."} + + +def test_return_exceptions_sync() -> None: + results = return_exceptions_task.run_many( + [ + return_exceptions_task.create_bulk_run_item(input=Input(index=i)) + for i in range(10) + ], + return_exceptions=True, + ) + + for i, result in enumerate(results): + if i % 2 == 0: + assert isinstance(result, Exception) + assert f"error in task with index {i}" in str(result) + else: + assert result == {"message": "this is a successful task."} diff --git a/sdks/python/examples/return_exceptions/worker.py b/sdks/python/examples/return_exceptions/worker.py new file mode 100644 index 000000000..10f5db8fa --- /dev/null +++ b/sdks/python/examples/return_exceptions/worker.py @@ -0,0 +1,17 @@ +from pydantic import BaseModel + +from hatchet_sdk import Context, EmptyModel, Hatchet + +hatchet = Hatchet() + + +class Input(EmptyModel): + index: int + + +@hatchet.task(input_validator=Input) +async def return_exceptions_task(input: Input, ctx: Context) -> dict[str, str]: + if input.index % 2 == 0: + raise ValueError(f"error in task with index {input.index}") + + return {"message": "this is a successful task."} diff --git a/sdks/python/examples/timeout/worker.py 
b/sdks/python/examples/timeout/worker.py index bfddb1274..c3468c329 100644 --- a/sdks/python/examples/timeout/worker.py +++ b/sdks/python/examples/timeout/worker.py @@ -16,10 +16,10 @@ timeout_wf = hatchet.workflow( # > ExecutionTimeout # 👀 Specify an execution timeout on a task @timeout_wf.task( - execution_timeout=timedelta(seconds=4), schedule_timeout=timedelta(minutes=10) + execution_timeout=timedelta(seconds=5), schedule_timeout=timedelta(minutes=10) ) def timeout_task(input: EmptyModel, ctx: Context) -> dict[str, str]: - time.sleep(5) + time.sleep(30) return {"status": "success"} @@ -31,7 +31,6 @@ refresh_timeout_wf = hatchet.workflow(name="RefreshTimeoutWorkflow") # > RefreshTimeout @refresh_timeout_wf.task(execution_timeout=timedelta(seconds=4)) def refresh_task(input: EmptyModel, ctx: Context) -> dict[str, str]: - ctx.refresh_timeout(timedelta(seconds=10)) time.sleep(5) diff --git a/sdks/python/examples/unit_testing/test_unit.py b/sdks/python/examples/unit_testing/test_unit.py new file mode 100644 index 000000000..cebc84f17 --- /dev/null +++ b/sdks/python/examples/unit_testing/test_unit.py @@ -0,0 +1,96 @@ +import pytest + +from examples.unit_testing.workflows import ( + Lifespan, + UnitTestInput, + UnitTestOutput, + async_complex_workflow, + async_simple_workflow, + async_standalone, + durable_async_complex_workflow, + durable_async_simple_workflow, + durable_async_standalone, + durable_sync_complex_workflow, + durable_sync_simple_workflow, + durable_sync_standalone, + start, + sync_complex_workflow, + sync_simple_workflow, + sync_standalone, +) +from hatchet_sdk import Task + + +@pytest.mark.parametrize( + "func", + [ + sync_standalone, + durable_sync_standalone, + sync_simple_workflow, + durable_sync_simple_workflow, + sync_complex_workflow, + durable_sync_complex_workflow, + ], +) +def test_simple_unit_sync(func: Task[UnitTestInput, UnitTestOutput]) -> None: + input = UnitTestInput(key="test_key", number=42) + additional_metadata = {"meta_key": "meta_value"} + lifespan = Lifespan(mock_db_url="sqlite:///:memory:") + retry_count = 1 + + expected_output = UnitTestOutput( + key=input.key, + number=input.number, + additional_metadata=additional_metadata, + retry_count=retry_count, + mock_db_url=lifespan.mock_db_url, + ) + + assert ( + func.mock_run( + input=input, + additional_metadata=additional_metadata, + lifespan=lifespan, + retry_count=retry_count, + parent_outputs={start.name: expected_output.model_dump()}, + ) + == expected_output + ) + + +@pytest.mark.parametrize( + "func", + [ + async_standalone, + durable_async_standalone, + async_simple_workflow, + durable_async_simple_workflow, + async_complex_workflow, + durable_async_complex_workflow, + ], +) +@pytest.mark.asyncio(loop_scope="session") +async def test_simple_unit_async(func: Task[UnitTestInput, UnitTestOutput]) -> None: + input = UnitTestInput(key="test_key", number=42) + additional_metadata = {"meta_key": "meta_value"} + lifespan = Lifespan(mock_db_url="sqlite:///:memory:") + retry_count = 1 + + expected_output = UnitTestOutput( + key=input.key, + number=input.number, + additional_metadata=additional_metadata, + retry_count=retry_count, + mock_db_url=lifespan.mock_db_url, + ) + + assert ( + await func.aio_mock_run( + input=input, + additional_metadata=additional_metadata, + lifespan=lifespan, + retry_count=retry_count, + parent_outputs={start.name: expected_output.model_dump()}, + ) + == expected_output + ) diff --git a/sdks/python/examples/unit_testing/workflows.py b/sdks/python/examples/unit_testing/workflows.py 
new file mode 100644 index 000000000..ae42e61c9 --- /dev/null +++ b/sdks/python/examples/unit_testing/workflows.py @@ -0,0 +1,171 @@ +from typing import cast + +from pydantic import BaseModel + +from hatchet_sdk import Context, DurableContext, EmptyModel, Hatchet + + +class UnitTestInput(BaseModel): + key: str + number: int + + +class Lifespan(BaseModel): + mock_db_url: str + + +class UnitTestOutput(UnitTestInput, Lifespan): + additional_metadata: dict[str, str] + retry_count: int + + +hatchet = Hatchet() + + +@hatchet.task(input_validator=UnitTestInput) +def sync_standalone(input: UnitTestInput, ctx: Context) -> UnitTestOutput: + return UnitTestOutput( + key=input.key, + number=input.number, + additional_metadata=ctx.additional_metadata, + retry_count=ctx.retry_count, + mock_db_url=cast(Lifespan, ctx.lifespan).mock_db_url, + ) + + +@hatchet.task(input_validator=UnitTestInput) +async def async_standalone(input: UnitTestInput, ctx: Context) -> UnitTestOutput: + return UnitTestOutput( + key=input.key, + number=input.number, + additional_metadata=ctx.additional_metadata, + retry_count=ctx.retry_count, + mock_db_url=cast(Lifespan, ctx.lifespan).mock_db_url, + ) + + +@hatchet.durable_task(input_validator=UnitTestInput) +def durable_sync_standalone( + input: UnitTestInput, ctx: DurableContext +) -> UnitTestOutput: + return UnitTestOutput( + key=input.key, + number=input.number, + additional_metadata=ctx.additional_metadata, + retry_count=ctx.retry_count, + mock_db_url=cast(Lifespan, ctx.lifespan).mock_db_url, + ) + + +@hatchet.durable_task(input_validator=UnitTestInput) +async def durable_async_standalone( + input: UnitTestInput, ctx: DurableContext +) -> UnitTestOutput: + return UnitTestOutput( + key=input.key, + number=input.number, + additional_metadata=ctx.additional_metadata, + retry_count=ctx.retry_count, + mock_db_url=cast(Lifespan, ctx.lifespan).mock_db_url, + ) + + +simple_workflow = hatchet.workflow( + name="simple-unit-test-workflow", input_validator=UnitTestInput +) + + +@simple_workflow.task() +def sync_simple_workflow(input: UnitTestInput, ctx: Context) -> UnitTestOutput: + return UnitTestOutput( + key=input.key, + number=input.number, + additional_metadata=ctx.additional_metadata, + retry_count=ctx.retry_count, + mock_db_url=cast(Lifespan, ctx.lifespan).mock_db_url, + ) + + +@simple_workflow.task() +async def async_simple_workflow(input: UnitTestInput, ctx: Context) -> UnitTestOutput: + return UnitTestOutput( + key=input.key, + number=input.number, + additional_metadata=ctx.additional_metadata, + retry_count=ctx.retry_count, + mock_db_url=cast(Lifespan, ctx.lifespan).mock_db_url, + ) + + +@simple_workflow.durable_task() +def durable_sync_simple_workflow( + input: UnitTestInput, ctx: DurableContext +) -> UnitTestOutput: + return UnitTestOutput( + key=input.key, + number=input.number, + additional_metadata=ctx.additional_metadata, + retry_count=ctx.retry_count, + mock_db_url=cast(Lifespan, ctx.lifespan).mock_db_url, + ) + + +@simple_workflow.durable_task() +async def durable_async_simple_workflow( + input: UnitTestInput, ctx: DurableContext +) -> UnitTestOutput: + return UnitTestOutput( + key=input.key, + number=input.number, + additional_metadata=ctx.additional_metadata, + retry_count=ctx.retry_count, + mock_db_url=cast(Lifespan, ctx.lifespan).mock_db_url, + ) + + +complex_workflow = hatchet.workflow( + name="complex-unit-test-workflow", input_validator=UnitTestInput +) + + +@complex_workflow.task() +async def start(input: UnitTestInput, ctx: Context) -> UnitTestOutput: + return 
UnitTestOutput( + key=input.key, + number=input.number, + additional_metadata=ctx.additional_metadata, + retry_count=ctx.retry_count, + mock_db_url=cast(Lifespan, ctx.lifespan).mock_db_url, + ) + + +@complex_workflow.task( + parents=[start], +) +def sync_complex_workflow(input: UnitTestInput, ctx: Context) -> UnitTestOutput: + return ctx.task_output(start) + + +@complex_workflow.task( + parents=[start], +) +async def async_complex_workflow(input: UnitTestInput, ctx: Context) -> UnitTestOutput: + return ctx.task_output(start) + + +@complex_workflow.durable_task( + parents=[start], +) +def durable_sync_complex_workflow( + input: UnitTestInput, ctx: DurableContext +) -> UnitTestOutput: + return ctx.task_output(start) + + +@complex_workflow.durable_task( + parents=[start], +) +async def durable_async_complex_workflow( + input: UnitTestInput, ctx: DurableContext +) -> UnitTestOutput: + return ctx.task_output(start) diff --git a/sdks/python/examples/worker.py b/sdks/python/examples/worker.py index ce248513f..26f226598 100644 --- a/sdks/python/examples/worker.py +++ b/sdks/python/examples/worker.py @@ -23,6 +23,7 @@ from examples.lifespans.simple import lifespan, lifespan_task from examples.logger.workflow import logging_workflow from examples.non_retryable.worker import non_retryable_workflow from examples.on_failure.worker import on_failure_wf, on_failure_wf_with_details +from examples.return_exceptions.worker import return_exceptions_task from examples.simple.worker import simple, simple_durable from examples.timeout.worker import refresh_timeout_wf, timeout_wf from hatchet_sdk import Hatchet @@ -65,6 +66,7 @@ def main() -> None: bulk_replay_test_1, bulk_replay_test_2, bulk_replay_test_3, + return_exceptions_task, ], lifespan=lifespan, ) diff --git a/sdks/python/hatchet_sdk/clients/admin.py b/sdks/python/hatchet_sdk/clients/admin.py index dcc2e8606..3997e33cd 100644 --- a/sdks/python/hatchet_sdk/clients/admin.py +++ b/sdks/python/hatchet_sdk/clients/admin.py @@ -23,6 +23,7 @@ from hatchet_sdk.metadata import get_metadata from hatchet_sdk.rate_limit import RateLimitDuration from hatchet_sdk.runnables.contextvars import ( ctx_action_key, + ctx_additional_metadata, ctx_step_run_id, ctx_worker_id, ctx_workflow_run_id, @@ -278,6 +279,7 @@ class AdminClient: step_run_id = ctx_step_run_id.get() worker_id = ctx_worker_id.get() action_key = ctx_action_key.get() + additional_metadata = ctx_additional_metadata.get() or {} spawn_index = workflow_spawn_indices[action_key] if action_key else 0 ## Increment the spawn_index for the parent workflow @@ -296,7 +298,7 @@ class AdminClient: parent_step_run_id=options.parent_step_run_id or step_run_id, child_key=options.child_key, child_index=child_index, - additional_metadata=options.additional_metadata, + additional_metadata={**additional_metadata, **options.additional_metadata}, desired_worker_id=desired_worker_id, priority=options.priority, namespace=options.namespace, diff --git a/sdks/python/hatchet_sdk/clients/dispatcher/action_listener.py b/sdks/python/hatchet_sdk/clients/dispatcher/action_listener.py index 854d903c2..67a644b12 100644 --- a/sdks/python/hatchet_sdk/clients/dispatcher/action_listener.py +++ b/sdks/python/hatchet_sdk/clients/dispatcher/action_listener.py @@ -140,15 +140,15 @@ class ActionListener: # todo case on "recvmsg:Connection reset by peer" for updates? 
if self.missed_heartbeats >= 3: # we don't reraise the error here, as we don't want to stop the heartbeat thread - logger.error( - f"⛔️ failed heartbeat ({self.missed_heartbeats}): {e.details()}" + logger.exception( + f"⛔️ failed heartbeat ({self.missed_heartbeats})" ) elif self.missed_heartbeats > 1: logger.warning( f"failed to send heartbeat ({self.missed_heartbeats}): {e.details()}" ) else: - logger.error(f"failed to send heartbeat: {e}") + logger.exception("failed to send heartbeat") if self.interrupt is not None: self.interrupt.set() @@ -195,7 +195,7 @@ class ActionListener: if not t.done(): logger.warning( - "Interrupted read_with_interrupt task of action listener" + "interrupted read_with_interrupt task of action listener" ) t.cancel() @@ -206,7 +206,7 @@ class ActionListener: result = t.result() if isinstance(result, UnexpectedEOF): - logger.debug("Handling EOF in Action Listener") + logger.debug("handling EOF in Action Listener") self.retries = self.retries + 1 break @@ -222,8 +222,8 @@ class ActionListener: assigned_action.actionPayload ) ) - except (ValueError, json.JSONDecodeError) as e: - logger.error(f"Error decoding payload: {e}") + except (ValueError, json.JSONDecodeError): + logger.exception("error decoding payload") action_payload = ActionPayload() @@ -263,9 +263,9 @@ class ActionListener: # Handle different types of errors if e.code() == grpc.StatusCode.CANCELLED: # Context cancelled, unsubscribe and close - logger.debug("Context cancelled, closing listener") + logger.debug("context cancelled, closing listener") elif e.code() == grpc.StatusCode.DEADLINE_EXCEEDED: - logger.info("Deadline exceeded, retrying subscription") + logger.info("deadline exceeded, retrying subscription") elif ( self.listen_strategy == "v2" and e.code() == grpc.StatusCode.UNIMPLEMENTED @@ -277,10 +277,10 @@ class ActionListener: else: # TODO retry if e.code() == grpc.StatusCode.UNAVAILABLE: - logger.error(f"action listener error: {e.details()}") + logger.exception("action listener error") else: # Unknown error, report and break - logger.error(f"action listener error: {e}") + logger.exception("action listener error") self.retries = self.retries + 1 @@ -346,8 +346,8 @@ class ActionListener: try: self.unregister() - except Exception as e: - logger.error(f"failed to unregister: {e}") + except Exception: + logger.exception("failed to unregister") if self.interrupt: # type: ignore[truthy-bool] self.interrupt.set() diff --git a/sdks/python/hatchet_sdk/clients/event_ts.py b/sdks/python/hatchet_sdk/clients/event_ts.py index ab9a1aecd..68f95e9dd 100644 --- a/sdks/python/hatchet_sdk/clients/event_ts.py +++ b/sdks/python/hatchet_sdk/clients/event_ts.py @@ -66,7 +66,7 @@ async def read_with_interrupt( result = cast(TResponse, await listener.read()) if result is cygrpc.EOF: - logger.warning("Received EOF from engine") + logger.warning("received EOF from engine") return UnexpectedEOF() key = key_generator(result) if key_generator else "" diff --git a/sdks/python/hatchet_sdk/clients/listeners/pooled_listener.py b/sdks/python/hatchet_sdk/clients/listeners/pooled_listener.py index 69d3fa0eb..8a99d8fdc 100644 --- a/sdks/python/hatchet_sdk/clients/listeners/pooled_listener.py +++ b/sdks/python/hatchet_sdk/clients/listeners/pooled_listener.py @@ -103,7 +103,7 @@ class PooledListener(Generic[R, T, L], ABC): try: self.listener = await self._retry_subscribe() - logger.debug("Listener connected.") + logger.debug("listener connected.") # spawn an interrupter task if self.interrupter is not None and not 
self.interrupter.done(): @@ -125,7 +125,7 @@ class PooledListener(Generic[R, T, L], ABC): if not t.done(): logger.warning( - "Interrupted read_with_interrupt task of listener" + "interrupted read_with_interrupt task of listener" ) t.cancel() @@ -138,7 +138,7 @@ class PooledListener(Generic[R, T, L], ABC): if isinstance(event, UnexpectedEOF): logger.debug( - f"Handling EOF in Pooled Listener {self.__class__.__name__}" + f"handling EOF in Pooled Listener {self.__class__.__name__}" ) break @@ -153,7 +153,7 @@ class PooledListener(Generic[R, T, L], ABC): continue except Exception as e: - logger.error(f"Error in listener: {e}") + logger.exception("error in listener") self.listener = None diff --git a/sdks/python/hatchet_sdk/clients/rest/tenacity_utils.py b/sdks/python/hatchet_sdk/clients/rest/tenacity_utils.py index 4b687b0cb..3098d3481 100644 --- a/sdks/python/hatchet_sdk/clients/rest/tenacity_utils.py +++ b/sdks/python/hatchet_sdk/clients/rest/tenacity_utils.py @@ -23,7 +23,7 @@ def tenacity_retry(func: Callable[P, R]) -> Callable[P, R]: def tenacity_alert_retry(retry_state: tenacity.RetryCallState) -> None: """Called between tenacity retries.""" logger.debug( - f"Retrying {retry_state.fn}: attempt " + f"retrying {retry_state.fn}: attempt " f"{retry_state.attempt_number} ended with: {retry_state.outcome}", ) diff --git a/sdks/python/hatchet_sdk/clients/v1/api_client.py b/sdks/python/hatchet_sdk/clients/v1/api_client.py index e38e9a4cb..d660b4b3c 100644 --- a/sdks/python/hatchet_sdk/clients/v1/api_client.py +++ b/sdks/python/hatchet_sdk/clients/v1/api_client.py @@ -5,7 +5,7 @@ import tenacity from hatchet_sdk.clients.rest.api_client import ApiClient from hatchet_sdk.clients.rest.configuration import Configuration -from hatchet_sdk.clients.rest.exceptions import ServiceException +from hatchet_sdk.clients.rest.exceptions import NotFoundException, ServiceException from hatchet_sdk.config import ClientConfig from hatchet_sdk.logger import logger from hatchet_sdk.utils.typing import JSONSerializableMapping @@ -61,10 +61,10 @@ def retry(func: Callable[P, R]) -> Callable[P, R]: def _alert_on_retry(retry_state: tenacity.RetryCallState) -> None: logger.debug( - f"Retrying {retry_state.fn}: attempt " + f"retrying {retry_state.fn}: attempt " f"{retry_state.attempt_number} ended with: {retry_state.outcome}", ) def _should_retry(ex: BaseException) -> bool: - return isinstance(ex, ServiceException) + return isinstance(ex, ServiceException | NotFoundException) diff --git a/sdks/python/hatchet_sdk/context/context.py b/sdks/python/hatchet_sdk/context/context.py index eb516b7e7..6e62c6e45 100644 --- a/sdks/python/hatchet_sdk/context/context.py +++ b/sdks/python/hatchet_sdk/context/context.py @@ -236,8 +236,8 @@ class Context: step_run_id=self.step_run_id, index=ix, ) - except Exception as e: - logger.error(f"Error putting stream event: {e}") + except Exception: + logger.exception("error putting stream event") async def aio_put_stream(self, data: str | bytes) -> None: """ @@ -262,8 +262,8 @@ class Context: return self.dispatcher_client.refresh_timeout( step_run_id=self.step_run_id, increment_by=increment_by ) - except Exception as e: - logger.error(f"Error refreshing timeout: {e}") + except Exception: + logger.exception("error refreshing timeout") @property def retry_count(self) -> int: @@ -285,7 +285,7 @@ class Context: return self.retry_count + 1 @property - def additional_metadata(self) -> JSONSerializableMapping | None: + def additional_metadata(self) -> JSONSerializableMapping: """ The additional metadata 
sent with the current task run. @@ -350,7 +350,7 @@ class Context: if not errors: logger.error( - "No step run errors found. `context.task_run_errors` is intended to be run in an on-failure step, and will only work on engine versions more recent than v0.53.10" + "no step run errors found. `context.task_run_errors` is intended to be run in an on-failure step, and will only work on engine versions more recent than v0.53.10" ) return errors diff --git a/sdks/python/hatchet_sdk/features/runs.py b/sdks/python/hatchet_sdk/features/runs.py index ff4e44cca..499015fae 100644 --- a/sdks/python/hatchet_sdk/features/runs.py +++ b/sdks/python/hatchet_sdk/features/runs.py @@ -119,6 +119,26 @@ class RunsClient(BaseRestClient): def _ta(self, client: ApiClient) -> TaskApi: return TaskApi(client) + @retry + def get_task_run(self, task_run_id: str) -> V1TaskSummary: + """ + Get task run details for a given task run ID. + + :param task_run_id: The ID of the task run to retrieve details for. + :return: Task run details for the specified task run ID. + """ + with self.client() as client: + return self._ta(client).v1_task_get(task_run_id) + + async def aio_get_task_run(self, task_run_id: str) -> V1TaskSummary: + """ + Get task run details for a given task run ID. + + :param task_run_id: The ID of the task run to retrieve details for. + :return: Task run details for the specified task run ID. + """ + return await asyncio.to_thread(self.get_task_run, task_run_id) + @retry def get(self, workflow_run_id: str) -> V1WorkflowRunDetails: """ @@ -148,7 +168,7 @@ class RunsClient(BaseRestClient): :return: The task status """ with self.client() as client: - return self._wra(client).v1_workflow_run_get_status(str(workflow_run_id)) + return self._wra(client).v1_workflow_run_get_status(workflow_run_id) async def aio_get_status(self, workflow_run_id: str) -> V1TaskStatus: """ diff --git a/sdks/python/hatchet_sdk/opentelemetry/instrumentor.py b/sdks/python/hatchet_sdk/opentelemetry/instrumentor.py index 601b3e5ae..97393b538 100644 --- a/sdks/python/hatchet_sdk/opentelemetry/instrumentor.py +++ b/sdks/python/hatchet_sdk/opentelemetry/instrumentor.py @@ -64,7 +64,7 @@ OTEL_TRACEPARENT_KEY = "traceparent" def create_traceparent() -> str | None: logger.warning( - "As of SDK version 1.11.0, you no longer need to call `create_traceparent` manually. The traceparent will be automatically created by the instrumentor and injected into the metadata of actions and events when appropriate. This method will be removed in a future version.", + "as of SDK version 1.11.0, you no longer need to call `create_traceparent` manually. The traceparent will be automatically created by the instrumentor and injected into the metadata of actions and events when appropriate. This method will be removed in a future version.", ) return _create_traceparent() @@ -91,7 +91,7 @@ def parse_carrier_from_metadata( metadata: JSONSerializableMapping | None, ) -> Context | None: logger.warning( - "As of SDK version 1.11.0, you no longer need to call `parse_carrier_from_metadata` manually. This method will be removed in a future version.", + "as of SDK version 1.11.0, you no longer need to call `parse_carrier_from_metadata` manually. 
This method will be removed in a future version.", ) return _parse_carrier_from_metadata(metadata) @@ -133,7 +133,7 @@ def inject_traceparent_into_metadata( metadata: dict[str, str], traceparent: str | None = None ) -> dict[str, str]: logger.warning( - "As of SDK version 1.11.0, you no longer need to call `inject_traceparent_into_metadata` manually. The traceparent will automatically be injected by the instrumentor. This method will be removed in a future version.", + "as of SDK version 1.11.0, you no longer need to call `inject_traceparent_into_metadata` manually. The traceparent will automatically be injected by the instrumentor. This method will be removed in a future version.", ) return _inject_traceparent_into_metadata(metadata, traceparent) diff --git a/sdks/python/hatchet_sdk/runnables/contextvars.py b/sdks/python/hatchet_sdk/runnables/contextvars.py index 0b6f3e832..839cb81d0 100644 --- a/sdks/python/hatchet_sdk/runnables/contextvars.py +++ b/sdks/python/hatchet_sdk/runnables/contextvars.py @@ -4,6 +4,7 @@ from collections import Counter from contextvars import ContextVar from hatchet_sdk.runnables.action import ActionKey +from hatchet_sdk.utils.typing import JSONSerializableMapping ctx_workflow_run_id: ContextVar[str | None] = ContextVar( "ctx_workflow_run_id", default=None @@ -13,6 +14,9 @@ ctx_action_key: ContextVar[ActionKey | None] = ContextVar( ) ctx_step_run_id: ContextVar[str | None] = ContextVar("ctx_step_run_id", default=None) ctx_worker_id: ContextVar[str | None] = ContextVar("ctx_worker_id", default=None) +ctx_additional_metadata: ContextVar[JSONSerializableMapping | None] = ContextVar( + "ctx_additional_metadata", default=None +) workflow_spawn_indices = Counter[ActionKey]() spawn_index_lock = asyncio.Lock() diff --git a/sdks/python/hatchet_sdk/runnables/task.py b/sdks/python/hatchet_sdk/runnables/task.py index 39c802617..f7a844c66 100644 --- a/sdks/python/hatchet_sdk/runnables/task.py +++ b/sdks/python/hatchet_sdk/runnables/task.py @@ -11,6 +11,7 @@ from hatchet_sdk.conditions import ( flatten_conditions, ) from hatchet_sdk.context.context import Context, DurableContext +from hatchet_sdk.context.worker_context import WorkerContext from hatchet_sdk.contracts.v1.shared.condition_pb2 import TaskConditions from hatchet_sdk.contracts.v1.workflows_pb2 import ( CreateTaskOpts, @@ -19,6 +20,7 @@ from hatchet_sdk.contracts.v1.workflows_pb2 import ( ) from hatchet_sdk.runnables.types import ( ConcurrencyExpression, + EmptyModel, R, StepType, TWorkflowInput, @@ -30,9 +32,11 @@ from hatchet_sdk.utils.timedelta_to_expression import Duration, timedelta_to_exp from hatchet_sdk.utils.typing import ( AwaitableLike, CoroutineLike, + JSONSerializableMapping, TaskIOValidator, is_basemodel_subclass, ) +from hatchet_sdk.worker.runner.utils.capture_logs import AsyncLogSender if TYPE_CHECKING: from hatchet_sdk.runnables.workflow import Workflow @@ -186,3 +190,132 @@ class Task(Generic[TWorkflowInput, R]): sleep_conditions=sleep_conditions, user_event_conditions=user_events, ) + + def _create_mock_context( + self, + input: TWorkflowInput | None, + additional_metadata: JSONSerializableMapping | None = None, + parent_outputs: dict[str, JSONSerializableMapping] | None = None, + retry_count: int = 0, + lifespan_context: Any = None, + ) -> Context | DurableContext: + from hatchet_sdk.runnables.action import Action, ActionPayload, ActionType + + additional_metadata = additional_metadata or {} + parent_outputs = parent_outputs or {} + + if input is None: + input = cast(TWorkflowInput, EmptyModel()) 
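+        # Everything below fabricates the Action and Context that a worker would
+        # normally build from an engine-assigned action, substituting mock IDs so
+        # the task body can execute without any engine connection.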
+ + action_payload = ActionPayload(input=input.model_dump(), parents=parent_outputs) + + action = Action( + tenant_id=self.workflow.client.config.tenant_id, + worker_id="mock-worker-id", + workflow_run_id="mock-workflow-run-id", + get_group_key_run_id="mock-get-group-key-run-id", + job_id="mock-job-id", + job_name="mock-job-name", + job_run_id="mock-job-run-id", + step_id="mock-step-id", + step_run_id="mock-step-run-id", + action_id="mock:action", + action_payload=action_payload, + action_type=ActionType.START_STEP_RUN, + retry_count=retry_count, + additional_metadata=additional_metadata, + child_workflow_index=None, + child_workflow_key=None, + parent_workflow_run_id=None, + priority=1, + workflow_version_id="mock-workflow-version-id", + workflow_id="mock-workflow-id", + ) + + constructor = DurableContext if self.is_durable else Context + + return constructor( + action=action, + dispatcher_client=self.workflow.client._client.dispatcher, + admin_client=self.workflow.client._client.admin, + event_client=self.workflow.client._client.event, + durable_event_listener=None, + worker=WorkerContext( + labels={}, client=self.workflow.client._client.dispatcher + ), + runs_client=self.workflow.client._client.runs, + lifespan_context=lifespan_context, + log_sender=AsyncLogSender(self.workflow.client._client.event), + ) + + def mock_run( + self, + input: TWorkflowInput | None = None, + additional_metadata: JSONSerializableMapping | None = None, + parent_outputs: dict[str, JSONSerializableMapping] | None = None, + retry_count: int = 0, + lifespan: Any = None, + ) -> R: + """ + Mimic the execution of a task. This method is intended to be used to unit test + tasks without needing to interact with the Hatchet engine. Use `mock_run` for sync + tasks and `aio_mock_run` for async tasks. + + :param input: The input to the task. + :param additional_metadata: Additional metadata to attach to the task. + :param parent_outputs: Outputs from parent tasks, if any. This is useful for mimicking DAG functionality. For instance, if you have a task `step_2` that has a `parent` which is `step_1`, you can pass `parent_outputs={"step_1": {"result": "Hello, world!"}}` to `step_2.mock_run()` to be able to access `ctx.task_output(step_1)` in `step_2`. + :param retry_count: The number of times the task has been retried. + :param lifespan: The lifespan to be used in the task, which is useful if one was set on the worker. This will allow you to access `ctx.lifespan` inside of your task. + + :return: The output of the task. + :raises TypeError: If the task is an async function and `mock_run` is called, or if the task is a sync function and `aio_mock_run` is called. + """ + + if self.is_async_function: + raise TypeError( + f"{self.name} is not a sync function. Use `aio_mock_run` instead." + ) + + ctx = self._create_mock_context( + input, additional_metadata, parent_outputs, retry_count, lifespan + ) + + return self.call(ctx) + + async def aio_mock_run( + self, + input: TWorkflowInput | None = None, + additional_metadata: JSONSerializableMapping | None = None, + parent_outputs: dict[str, JSONSerializableMapping] | None = None, + retry_count: int = 0, + lifespan: Any = None, + ) -> R: + """ + Mimic the execution of a task. This method is intended to be used to unit test + tasks without needing to interact with the Hatchet engine. Use `mock_run` for sync + tasks and `aio_mock_run` for async tasks. + + :param input: The input to the task. + :param additional_metadata: Additional metadata to attach to the task. 
+ :param parent_outputs: Outputs from parent tasks, if any. This is useful for mimicking DAG functionality. For instance, if you have a task `step_2` that has a `parent` which is `step_1`, you can pass `parent_outputs={"step_1": {"result": "Hello, world!"}}` to `step_2.mock_run()` to be able to access `ctx.task_output(step_1)` in `step_2`. + :param retry_count: The number of times the task has been retried. + :param lifespan: The lifespan to be used in the task, which is useful if one was set on the worker. This will allow you to access `ctx.lifespan` inside of your task. + + :return: The output of the task. + :raises TypeError: If the task is an async function and `mock_run` is called, or if the task is a sync function and `aio_mock_run` is called. + """ + + if not self.is_async_function: + raise TypeError( + f"{self.name} is not an async function. Use `mock_run` instead." + ) + + ctx = self._create_mock_context( + input, + additional_metadata, + parent_outputs, + retry_count, + lifespan, + ) + + return await self.aio_call(ctx) diff --git a/sdks/python/hatchet_sdk/runnables/workflow.py b/sdks/python/hatchet_sdk/runnables/workflow.py index 2e095d179..587ccdd57 100644 --- a/sdks/python/hatchet_sdk/runnables/workflow.py +++ b/sdks/python/hatchet_sdk/runnables/workflow.py @@ -2,7 +2,16 @@ import asyncio from collections.abc import Callable from datetime import datetime, timedelta from functools import cached_property -from typing import TYPE_CHECKING, Any, Generic, TypeVar, cast, get_type_hints +from typing import ( + TYPE_CHECKING, + Any, + Generic, + Literal, + TypeVar, + cast, + get_type_hints, + overload, +) from google.protobuf import timestamp_pb2 from pydantic import BaseModel, model_validator @@ -651,39 +660,83 @@ class Workflow(BaseWorkflow[TWorkflowInput]): return await ref.aio_result() + def _get_result( + self, ref: WorkflowRunRef, return_exceptions: bool + ) -> dict[str, Any] | BaseException: + try: + return ref.result() + except Exception as e: + if return_exceptions: + return e + raise e + + @overload def run_many( self, workflows: list[WorkflowRunTriggerConfig], - ) -> list[dict[str, Any]]: + return_exceptions: Literal[True], + ) -> list[dict[str, Any] | BaseException]: ... + + @overload + def run_many( + self, + workflows: list[WorkflowRunTriggerConfig], + return_exceptions: Literal[False] = False, + ) -> list[dict[str, Any]]: ... + + def run_many( + self, + workflows: list[WorkflowRunTriggerConfig], + return_exceptions: bool = False, + ) -> list[dict[str, Any]] | list[dict[str, Any] | BaseException]: """ Run a workflow in bulk and wait for all runs to complete. This method triggers multiple workflow runs, blocks until all of them complete, and returns the final results. :param workflows: A list of `WorkflowRunTriggerConfig` objects, each representing a workflow run to be triggered. + :param return_exceptions: If `True`, exceptions will be returned as part of the results instead of raising them. :returns: A list of results for each workflow run. """ refs = self.client._client.admin.run_workflows( workflows=workflows, ) - return [ref.result() for ref in refs] + return [self._get_result(ref, return_exceptions) for ref in refs] + + @overload + async def aio_run_many( + self, + workflows: list[WorkflowRunTriggerConfig], + return_exceptions: Literal[True], + ) -> list[dict[str, Any] | BaseException]: ... + + @overload + async def aio_run_many( + self, + workflows: list[WorkflowRunTriggerConfig], + return_exceptions: Literal[False] = False, + ) -> list[dict[str, Any]]: ... 
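For reference, the `mock_run`/`aio_mock_run` helpers added to `Task` above enable engine-free unit tests along these lines. This is a minimal sketch: the workflow and task names are hypothetical, and it assumes a configured client (e.g. `HATCHET_CLIENT_TOKEN` in the environment), since the mock context still reads client config; the engine is never contacted.

```python
from hatchet_sdk import Context, EmptyModel, Hatchet

hatchet = Hatchet()  # config is read for the mock context; no engine calls are made

wf = hatchet.workflow(name="mock-run-example")  # hypothetical example workflow


@wf.task()
def step_1(input: EmptyModel, ctx: Context) -> dict[str, str]:
    return {"result": "Hello, world!"}


@wf.task(parents=[step_1])
def step_2(input: EmptyModel, ctx: Context) -> dict[str, str]:
    # reads the parent output exactly as it would in a real DAG run
    return {"echo": ctx.task_output(step_1)["result"]}


def test_step_2_without_engine() -> None:
    # parent_outputs mimics the step_1 -> step_2 edge, per the docstring above
    output = step_2.mock_run(parent_outputs={"step_1": {"result": "Hello, world!"}})
    assert output == {"echo": "Hello, world!"}
```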
async def aio_run_many( self, workflows: list[WorkflowRunTriggerConfig], - ) -> list[dict[str, Any]]: + return_exceptions: bool = False, + ) -> list[dict[str, Any]] | list[dict[str, Any] | BaseException]: """ Run a workflow in bulk and wait for all runs to complete. This method triggers multiple workflow runs, blocks until all of them complete, and returns the final results. :param workflows: A list of `WorkflowRunTriggerConfig` objects, each representing a workflow run to be triggered. + :param return_exceptions: If `True`, exceptions will be returned as part of the results instead of raising them. :returns: A list of results for each workflow run. """ refs = await self.client._client.admin.aio_run_workflows( workflows=workflows, ) - return await asyncio.gather(*[ref.aio_result() for ref in refs]) + return await asyncio.gather( + *[ref.aio_result() for ref in refs], return_exceptions=return_exceptions + ) def run_many_no_wait( self, @@ -946,7 +999,7 @@ class Workflow(BaseWorkflow[TWorkflowInput]): :param backoff_max_seconds: The maximum number of seconds to allow retries with exponential backoff to continue. - :param concurrency: A list of concurrency expressions for the on-success task. + :param concurrency: A list of concurrency expressions for the on-failure task. :returns: A decorator which creates a `Task` object. """ @@ -1137,7 +1190,18 @@ class Standalone(BaseWorkflow[TWorkflowInput], Generic[TWorkflowInput, R]): self.config = self._workflow.config - def _extract_result(self, result: dict[str, Any]) -> R: + @overload + def _extract_result(self, result: dict[str, Any]) -> R: ... + + @overload + def _extract_result(self, result: BaseException) -> BaseException: ... + + def _extract_result( + self, result: dict[str, Any] | BaseException + ) -> R | BaseException: + if isinstance(result, BaseException): + return result + output = result.get(self._task.name) if not self._output_validator: @@ -1217,30 +1281,72 @@ class Standalone(BaseWorkflow[TWorkflowInput], Generic[TWorkflowInput, R]): return TaskRunRef[TWorkflowInput, R](self, ref) - def run_many(self, workflows: list[WorkflowRunTriggerConfig]) -> list[R]: + @overload + def run_many( + self, + workflows: list[WorkflowRunTriggerConfig], + return_exceptions: Literal[True], + ) -> list[R | BaseException]: ... + + @overload + def run_many( + self, + workflows: list[WorkflowRunTriggerConfig], + return_exceptions: Literal[False] = False, + ) -> list[R]: ... + + def run_many( + self, workflows: list[WorkflowRunTriggerConfig], return_exceptions: bool = False + ) -> list[R] | list[R | BaseException]: """ Run a workflow in bulk and wait for all runs to complete. This method triggers multiple workflow runs, blocks until all of them complete, and returns the final results. :param workflows: A list of `WorkflowRunTriggerConfig` objects, each representing a workflow run to be triggered. + :param return_exceptions: If `True`, exceptions will be returned as part of the results instead of raising them. :returns: A list of results for each workflow run. """ return [ self._extract_result(result) - for result in self._workflow.run_many(workflows) + for result in self._workflow.run_many( + workflows, + ## hack: typing needs literal + True if return_exceptions else False, # noqa: SIM210 + ) ] - async def aio_run_many(self, workflows: list[WorkflowRunTriggerConfig]) -> list[R]: + @overload + async def aio_run_many( + self, + workflows: list[WorkflowRunTriggerConfig], + return_exceptions: Literal[True], + ) -> list[R | BaseException]: ... 
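Similarly, a sketch of how the new `return_exceptions` flag on the bulk-run paths is meant to be consumed. `MyInput` and `my_task` are hypothetical, this assumes the existing `create_bulk_run_item` helper for building `WorkflowRunTriggerConfig` objects, and a worker must be running for the runs to complete.

```python
from pydantic import BaseModel

from hatchet_sdk import Context, Hatchet

hatchet = Hatchet()


class MyInput(BaseModel):
    n: int


@hatchet.task(input_validator=MyInput)  # hypothetical standalone task
def my_task(input: MyInput, ctx: Context) -> dict[str, int]:
    if input.n % 2:
        raise ValueError(f"odd input: {input.n}")
    return {"n": input.n}


configs = [my_task.create_bulk_run_item(input=MyInput(n=i)) for i in range(4)]

# With return_exceptions=True the result is typed list[R | BaseException]:
# failed runs come back as exception objects, in order, rather than the
# first failure raising and aborting the whole batch.
results = my_task.run_many(configs, return_exceptions=True)

failed = [r for r in results if isinstance(r, BaseException)]
succeeded = [r for r in results if not isinstance(r, BaseException)]
assert len(failed) + len(succeeded) == len(configs)
```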
+ + @overload + async def aio_run_many( + self, + workflows: list[WorkflowRunTriggerConfig], + return_exceptions: Literal[False] = False, + ) -> list[R]: ... + + async def aio_run_many( + self, workflows: list[WorkflowRunTriggerConfig], return_exceptions: bool = False + ) -> list[R] | list[R | BaseException]: """ Run a workflow in bulk and wait for all runs to complete. This method triggers multiple workflow runs, blocks until all of them complete, and returns the final results. :param workflows: A list of `WorkflowRunTriggerConfig` objects, each representing a workflow run to be triggered. + :param return_exceptions: If `True`, exceptions will be returned as part of the results instead of raising them. :returns: A list of results for each workflow run. """ return [ self._extract_result(result) - for result in await self._workflow.aio_run_many(workflows) + for result in await self._workflow.aio_run_many( + workflows, + ## hack: typing needs literal + True if return_exceptions else False, # noqa: SIM210 + ) ] def run_many_no_wait( @@ -1273,3 +1379,104 @@ class Standalone(BaseWorkflow[TWorkflowInput], Generic[TWorkflowInput, R]): refs = await self._workflow.aio_run_many_no_wait(workflows) return [TaskRunRef[TWorkflowInput, R](self, ref) for ref in refs] + + def mock_run( + self, + input: TWorkflowInput | None = None, + additional_metadata: JSONSerializableMapping | None = None, + parent_outputs: dict[str, JSONSerializableMapping] | None = None, + retry_count: int = 0, + lifespan: Any = None, + ) -> R: + """ + Mimic the execution of a task. This method is intended to be used to unit test + tasks without needing to interact with the Hatchet engine. Use `mock_run` for sync + tasks and `aio_mock_run` for async tasks. + + :param input: The input to the task. + :param additional_metadata: Additional metadata to attach to the task. + :param parent_outputs: Outputs from parent tasks, if any. This is useful for mimicking DAG functionality. For instance, if you have a task `step_2` that has a `parent` which is `step_1`, you can pass `parent_outputs={"step_1": {"result": "Hello, world!"}}` to `step_2.mock_run()` to be able to access `ctx.task_output(step_1)` in `step_2`. + :param retry_count: The number of times the task has been retried. + :param lifespan: The lifespan to be used in the task, which is useful if one was set on the worker. This will allow you to access `ctx.lifespan` inside of your task. + + :return: The output of the task. + """ + + return self._task.mock_run( + input=input, + additional_metadata=additional_metadata, + parent_outputs=parent_outputs, + retry_count=retry_count, + lifespan=lifespan, + ) + + async def aio_mock_run( + self, + input: TWorkflowInput | None = None, + additional_metadata: JSONSerializableMapping | None = None, + parent_outputs: dict[str, JSONSerializableMapping] | None = None, + retry_count: int = 0, + lifespan: Any = None, + ) -> R: + """ + Mimic the execution of a task. This method is intended to be used to unit test + tasks without needing to interact with the Hatchet engine. Use `mock_run` for sync + tasks and `aio_mock_run` for async tasks. + + :param input: The input to the task. + :param additional_metadata: Additional metadata to attach to the task. + :param parent_outputs: Outputs from parent tasks, if any. This is useful for mimicking DAG functionality. 
For instance, if you have a task `step_2` that has a `parent` which is `step_1`, you can pass `parent_outputs={"step_1": {"result": "Hello, world!"}}` to `step_2.mock_run()` to be able to access `ctx.task_output(step_1)` in `step_2`. + :param retry_count: The number of times the task has been retried. + :param lifespan: The lifespan to be used in the task, which is useful if one was set on the worker. This will allow you to access `ctx.lifespan` inside of your task. + + :return: The output of the task. + """ + + return await self._task.aio_mock_run( + input=input, + additional_metadata=additional_metadata, + parent_outputs=parent_outputs, + retry_count=retry_count, + lifespan=lifespan, + ) + + @property + def is_async_function(self) -> bool: + """ + Check if the task is an async function. + + :returns: True if the task is an async function, False otherwise. + """ + return self._task.is_async_function + + def get_run_ref(self, run_id: str) -> TaskRunRef[TWorkflowInput, R]: + """ + Get a reference to a task run by its run ID. + + :param run_id: The ID of the run to get the reference for. + :returns: A `TaskRunRef` object representing the reference to the task run. + """ + wrr = self._workflow.client._client.runs.get_run_ref(run_id) + return TaskRunRef[TWorkflowInput, R](self, wrr) + + async def aio_get_result(self, run_id: str) -> R: + """ + Get the result of a task run by its run ID. + + :param run_id: The ID of the run to get the result for. + :returns: The result of the task run. + """ + run_ref = self.get_run_ref(run_id) + + return await run_ref.aio_result() + + def get_result(self, run_id: str) -> R: + """ + Get the result of a task run by its run ID. + + :param run_id: The ID of the run to get the result for. + :returns: The result of the task run. 
+ """ + run_ref = self.get_run_ref(run_id) + + return run_ref.result() diff --git a/sdks/python/hatchet_sdk/worker/action_listener_process.py b/sdks/python/hatchet_sdk/worker/action_listener_process.py index 9540a3032..9128819ec 100644 --- a/sdks/python/hatchet_sdk/worker/action_listener_process.py +++ b/sdks/python/hatchet_sdk/worker/action_listener_process.py @@ -132,8 +132,8 @@ class WorkerActionListenerProcess: ) logger.debug(f"acquired action listener: {self.listener.worker_id}") - except grpc.RpcError as rpc_error: - logger.error(f"could not start action listener: {rpc_error}") + except grpc.RpcError: + logger.exception("could not start action listener") return # Start both loops as background tasks @@ -168,7 +168,7 @@ class WorkerActionListenerProcess: count += 1 if count > 0: - logger.warning(f"{BLOCKED_THREAD_WARNING}: Waiting Steps {count}") + logger.warning(f"{BLOCKED_THREAD_WARNING} Waiting Steps {count}") await asyncio.sleep(1) async def send_event(self, event: ActionEvent, retry_attempt: int = 1) -> None: @@ -188,7 +188,7 @@ class WorkerActionListenerProcess: ) if diff > 0.1: logger.warning( - f"{BLOCKED_THREAD_WARNING}: time to start: {diff}s" + f"{BLOCKED_THREAD_WARNING} time to start: {diff}s" ) else: logger.debug(f"start time: {diff}") @@ -225,9 +225,9 @@ class WorkerActionListenerProcess: ) case _: logger.error("unknown action type for event send") - except Exception as e: - logger.error( - f"could not send action event ({retry_attempt}/{ACTION_EVENT_RETRY_COUNT}): {e}" + except Exception: + logger.exception( + f"could not send action event ({retry_attempt}/{ACTION_EVENT_RETRY_COUNT})" ) if retry_attempt <= ACTION_EVENT_RETRY_COUNT: await exp_backoff_sleep(retry_attempt, 1) @@ -291,11 +291,11 @@ class WorkerActionListenerProcess: ) try: self.action_queue.put(action) - except Exception as e: - logger.error(f"error putting action: {e}") + except Exception: + logger.exception("error putting action") - except Exception as e: - logger.error(f"error in action loop: {e}") + except Exception: + logger.exception("error in action loop") finally: logger.info("action loop closed") if not self.killing: diff --git a/sdks/python/hatchet_sdk/worker/runner/runner.py b/sdks/python/hatchet_sdk/worker/runner/runner.py index 9955748c5..0dbec9b59 100644 --- a/sdks/python/hatchet_sdk/worker/runner/runner.py +++ b/sdks/python/hatchet_sdk/worker/runner/runner.py @@ -40,6 +40,7 @@ from hatchet_sdk.logger import logger from hatchet_sdk.runnables.action import Action, ActionKey, ActionType from hatchet_sdk.runnables.contextvars import ( ctx_action_key, + ctx_additional_metadata, ctx_step_run_id, ctx_worker_id, ctx_workflow_run_id, @@ -54,6 +55,8 @@ from hatchet_sdk.worker.action_listener_process import ActionEvent from hatchet_sdk.worker.runner.utils.capture_logs import ( AsyncLogSender, ContextVarToCopy, + ContextVarToCopyDict, + ContextVarToCopyStr, copy_context_vars, ) @@ -295,6 +298,7 @@ class Runner: ctx_workflow_run_id.set(action.workflow_run_id) ctx_worker_id.set(action.worker_id) ctx_action_key.set(action.key) + ctx_additional_metadata.set(action.additional_metadata) try: if task.is_async_function: @@ -305,20 +309,34 @@ class Runner: copy_context_vars, [ ContextVarToCopy( - name="ctx_step_run_id", - value=action.step_run_id, + var=ContextVarToCopyStr( + name="ctx_step_run_id", + value=action.step_run_id, + ) ), ContextVarToCopy( - name="ctx_workflow_run_id", - value=action.workflow_run_id, + var=ContextVarToCopyStr( + name="ctx_workflow_run_id", + value=action.workflow_run_id, + ) ), 
ContextVarToCopy( - name="ctx_worker_id", - value=action.worker_id, + var=ContextVarToCopyStr( + name="ctx_worker_id", + value=action.worker_id, + ) ), ContextVarToCopy( - name="ctx_action_key", - value=action.key, + var=ContextVarToCopyStr( + name="ctx_action_key", + value=action.key, + ) + ), + ContextVarToCopy( + var=ContextVarToCopyDict( + name="ctx_additional_metadata", + value=action.additional_metadata, + ) ), ], self.thread_action_func, @@ -344,34 +362,34 @@ class Runner: "threads_daemon": sum(1 for t in self.thread_pool._threads if t.daemon), } - logger.warning("Thread pool detailed status %s", thread_pool_details) + logger.warning("thread pool detailed status %s", thread_pool_details) async def _start_monitoring(self) -> None: - logger.debug("Thread pool monitoring started") + logger.debug("thread pool monitoring started") try: while True: await self.log_thread_pool_status() for key in self.threads: if key not in self.tasks: - logger.debug(f"Potential zombie thread found for key {key}") + logger.debug(f"potential zombie thread found for key {key}") for key, task in self.tasks.items(): if task.done() and key in self.threads: logger.debug( - f"Task is done but thread still exists for key {key}" + f"task is done but thread still exists for key {key}" ) await asyncio.sleep(60) except asyncio.CancelledError: - logger.warning("Thread pool monitoring task cancelled") + logger.warning("thread pool monitoring task cancelled") except Exception as e: - logger.exception(f"Error in thread pool monitoring: {e}") + logger.exception(f"error in thread pool monitoring: {e}") def start_background_monitoring(self) -> None: loop = asyncio.get_event_loop() self.monitoring_task = loop.create_task(self._start_monitoring()) - logger.debug("Started thread pool monitoring background task") + logger.debug("started thread pool monitoring background task") def cleanup_run_id(self, key: ActionKey) -> None: if key in self.tasks: @@ -503,7 +521,7 @@ class Runner: ident = cast(int, thread.ident) - logger.info(f"Forcefully terminating thread {ident}") + logger.info(f"forcefully terminating thread {ident}") exc = ctypes.py_object(SystemExit) res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(ident), exc) @@ -516,13 +534,13 @@ class Runner: ctypes.pythonapi.PyThreadState_SetAsyncExc(thread.ident, 0) raise SystemError("PyThreadState_SetAsyncExc failed") - logger.info(f"Successfully terminated thread {ident}") + logger.info(f"successfully terminated thread {ident}") # Immediately add a new thread to the thread pool, because we've actually killed a worker # in the ThreadPoolExecutor self.thread_pool.submit(lambda: None) except Exception as e: - logger.exception(f"Failed to terminate thread: {e}") + logger.exception(f"failed to terminate thread: {e}") ## IMPORTANT: Keep this method's signature in sync with the wrapper in the OTel instrumentor async def handle_cancel_action(self, action: Action) -> None: @@ -546,7 +564,7 @@ class Runner: await asyncio.sleep(1) logger.warning( - f"Thread {self.threads[key].ident} with key {key} is still running after cancellation. This could cause the thread pool to get blocked and prevent new tasks from running." + f"thread {self.threads[key].ident} with key {key} is still running after cancellation. This could cause the thread pool to get blocked and prevent new tasks from running." 
) finally: self.cleanup_run_id(key) @@ -568,8 +586,8 @@ class Runner: try: serialized_output = json.dumps(output, default=str) - except Exception as e: - logger.error(f"Could not serialize output: {e}") + except Exception: + logger.exception("could not serialize output") serialized_output = str(output) if "\\u0000" in serialized_output: diff --git a/sdks/python/hatchet_sdk/worker/runner/utils/capture_logs.py b/sdks/python/hatchet_sdk/worker/runner/utils/capture_logs.py index 25d43205e..df249b250 100644 --- a/sdks/python/hatchet_sdk/worker/runner/utils/capture_logs.py +++ b/sdks/python/hatchet_sdk/worker/runner/utils/capture_logs.py @@ -5,29 +5,42 @@ from collections.abc import Awaitable, Callable from io import StringIO from typing import Literal, ParamSpec, TypeVar -from pydantic import BaseModel +from pydantic import BaseModel, Field from hatchet_sdk.clients.events import EventClient from hatchet_sdk.logger import logger from hatchet_sdk.runnables.contextvars import ( ctx_action_key, + ctx_additional_metadata, ctx_step_run_id, ctx_worker_id, ctx_workflow_run_id, ) -from hatchet_sdk.utils.typing import STOP_LOOP, STOP_LOOP_TYPE +from hatchet_sdk.utils.typing import STOP_LOOP, STOP_LOOP_TYPE, JSONSerializableMapping T = TypeVar("T") P = ParamSpec("P") -class ContextVarToCopy(BaseModel): +class ContextVarToCopyStr(BaseModel): name: Literal[ - "ctx_workflow_run_id", "ctx_step_run_id", "ctx_action_key", "ctx_worker_id" + "ctx_workflow_run_id", + "ctx_step_run_id", + "ctx_action_key", + "ctx_worker_id", ] value: str | None +class ContextVarToCopyDict(BaseModel): + name: Literal["ctx_additional_metadata"] + value: JSONSerializableMapping | None + + +class ContextVarToCopy(BaseModel): + var: ContextVarToCopyStr | ContextVarToCopyDict = Field(discriminator="name") + + def copy_context_vars( ctx_vars: list[ContextVarToCopy], func: Callable[P, T], @@ -35,16 +48,18 @@ def copy_context_vars( **kwargs: P.kwargs, ) -> T: for var in ctx_vars: - if var.name == "ctx_workflow_run_id": - ctx_workflow_run_id.set(var.value) - elif var.name == "ctx_step_run_id": - ctx_step_run_id.set(var.value) - elif var.name == "ctx_action_key": - ctx_action_key.set(var.value) - elif var.name == "ctx_worker_id": - ctx_worker_id.set(var.value) + if var.var.name == "ctx_workflow_run_id": + ctx_workflow_run_id.set(var.var.value) + elif var.var.name == "ctx_step_run_id": + ctx_step_run_id.set(var.var.value) + elif var.var.name == "ctx_action_key": + ctx_action_key.set(var.var.value) + elif var.var.name == "ctx_worker_id": + ctx_worker_id.set(var.var.value) + elif var.var.name == "ctx_additional_metadata": + ctx_additional_metadata.set(var.var.value or {}) else: - raise ValueError(f"Unknown context variable name: {var.name}") + raise ValueError(f"Unknown context variable name: {var.var.name}") return func(*args, **kwargs) @@ -73,13 +88,13 @@ class AsyncLogSender: step_run_id=record.step_run_id, ) except Exception: - logger.exception("Failed to send log to Hatchet") + logger.exception("failed to send log to Hatchet") def publish(self, record: LogRecord | STOP_LOOP_TYPE) -> None: try: self.q.put_nowait(record) except asyncio.QueueFull: - logger.warning("Log queue is full, dropping log message") + logger.warning("log queue is full, dropping log message") class CustomLogHandler(logging.StreamHandler): # type: ignore[type-arg] diff --git a/sdks/python/hatchet_sdk/worker/worker.py b/sdks/python/hatchet_sdk/worker/worker.py index 1d7b434de..a1952157d 100644 --- a/sdks/python/hatchet_sdk/worker/worker.py +++ 
b/sdks/python/hatchet_sdk/worker/worker.py @@ -143,9 +143,8 @@ class Worker: def register_workflow_from_opts(self, opts: CreateWorkflowVersionRequest) -> None: try: self.client.admin.put_workflow(opts) - except Exception as e: - logger.error(f"failed to register workflow: {opts.name}") - logger.error(e) + except Exception: + logger.exception(f"failed to register workflow: {opts.name}") sys.exit(1) def register_workflow(self, workflow: BaseWorkflow[Any]) -> None: @@ -156,9 +155,8 @@ class Worker: try: self.client.admin.put_workflow(workflow.to_proto()) - except Exception as e: - logger.error(f"failed to register workflow: {workflow.name}") - logger.error(e) + except Exception: + logger.exception(f"failed to register workflow: {workflow.name}") sys.exit(1) for step in workflow.tasks: @@ -189,7 +187,7 @@ class Worker: except RuntimeError: pass - logger.debug("Creating new event loop") + logger.debug("creating new event loop") self.loop = asyncio.new_event_loop() asyncio.set_event_loop(self.loop) @@ -226,9 +224,8 @@ class Worker: try: await runner.setup() await web.TCPSite(runner, "0.0.0.0", port).start() - except Exception as e: - logger.error("failed to start healthcheck server") - logger.error(str(e)) + except Exception: + logger.exception("failed to start healthcheck server") return logger.info(f"healthcheck server running on port {port}") @@ -371,8 +368,8 @@ class Worker: logger.debug(f"action listener starting on PID: {process.pid}") return process - except Exception as e: - logger.error(f"failed to start action listener: {e}") + except Exception: + logger.exception("failed to start action listener") sys.exit(1) async def _check_listener_health(self) -> None: @@ -404,8 +401,8 @@ class Worker: self._status = WorkerStatus.HEALTHY await asyncio.sleep(1) - except Exception as e: - logger.error(f"error checking listener health: {e}") + except Exception: + logger.exception("error checking listener health") def _setup_signal_handlers(self) -> None: signal.signal(signal.SIGTERM, self._handle_exit_signal) diff --git a/sdks/python/pyproject.toml b/sdks/python/pyproject.toml index c2570ce43..184590ec5 100644 --- a/sdks/python/pyproject.toml +++ b/sdks/python/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "hatchet-sdk" -version = "1.15.3" +version = "1.16.0" description = "" authors = ["Alexander Belanger "] readme = "README.md" @@ -176,6 +176,9 @@ select = [ "FIX", ## Performance-related rules "PERF", + + # Print statements + "T201", ] ignore = [ @@ -207,6 +210,7 @@ exclude = [ "hatchet_sdk/clients/rest/exceptions.py", "hatchet_sdk/clients/rest/rest.py", "hatchet_sdk/v0/*", + "apply_patches.py", "site/*", "tests/*", ]
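Finally, a minimal sketch, based only on the signatures in this diff, of how the discriminated-union `ContextVarToCopy` wrapper now carries both string-valued and mapping-valued context variables across the sync-task executor boundary:

```python
from hatchet_sdk.runnables.contextvars import (
    ctx_additional_metadata,
    ctx_workflow_run_id,
)
from hatchet_sdk.utils.typing import JSONSerializableMapping
from hatchet_sdk.worker.runner.utils.capture_logs import (
    ContextVarToCopy,
    ContextVarToCopyDict,
    ContextVarToCopyStr,
    copy_context_vars,
)


def read_back() -> tuple[str | None, JSONSerializableMapping | None]:
    # runs after copy_context_vars has set the vars in this context
    return ctx_workflow_run_id.get(), ctx_additional_metadata.get()


run_id, meta = copy_context_vars(
    [
        ContextVarToCopy(
            var=ContextVarToCopyStr(name="ctx_workflow_run_id", value="run-123")
        ),
        ContextVarToCopy(
            var=ContextVarToCopyDict(
                name="ctx_additional_metadata", value={"test_run_id": "abc"}
            )
        ),
    ],
    read_back,
)

assert run_id == "run-123"
assert meta == {"test_run_id": "abc"}
```

In the runner itself the same wrappers are built from the incoming `Action` and passed through `loop.run_in_executor`, so logs and child runs spawned from sync tasks see the correct run IDs and additional metadata.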