diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 41ce22308..58e6ff935 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -4,6 +4,7 @@ on: paths-ignore: - 'sdks/**' - 'frontend/docs/**' + - 'frontend/**/generated/**' - 'examples/**' jobs: diff --git a/api-contracts/dispatcher/dispatcher.proto b/api-contracts/dispatcher/dispatcher.proto index 44ce4a4af..12cc7d086 100644 --- a/api-contracts/dispatcher/dispatcher.proto +++ b/api-contracts/dispatcher/dispatcher.proto @@ -328,6 +328,8 @@ message WorkflowEvent { // (optional) the retry count of this step optional int32 retryCount = 9; + + optional int64 eventIndex = 10; } enum WorkflowRunEventType { diff --git a/api-contracts/events/events.proto b/api-contracts/events/events.proto index d13b92185..30d5939d2 100644 --- a/api-contracts/events/events.proto +++ b/api-contracts/events/events.proto @@ -78,6 +78,8 @@ message PutStreamEventRequest { // associated stream event metadata string metadata = 5; + + optional int64 eventIndex = 6; } message PutStreamEventResponse {} diff --git a/api-contracts/v1/dispatcher.proto b/api-contracts/v1/dispatcher.proto index 3e5d6c70f..6c92f90ad 100644 --- a/api-contracts/v1/dispatcher.proto +++ b/api-contracts/v1/dispatcher.proto @@ -10,8 +10,11 @@ service V1Dispatcher { rpc RegisterDurableEvent(RegisterDurableEventRequest) returns (RegisterDurableEventResponse) {} rpc ListenForDurableEvent(stream ListenForDurableEventRequest) returns (stream DurableEvent) {} + } + + message RegisterDurableEventRequest { string task_id = 1; // external uuid for the task run string signal_key = 2; // the signal key for the event diff --git a/examples/go/workflows/on-event.go b/examples/go/workflows/on-event.go index bea5aa68c..93d012f60 100644 --- a/examples/go/workflows/on-event.go +++ b/examples/go/workflows/on-event.go @@ -43,6 +43,7 @@ func Lower(hatchet v1.HatchetClient) workflow.WorkflowDeclaration[EventInput, Lo ) } + // > Accessing the filter payload func accessFilterPayload(ctx worker.HatchetContext, input EventInput) (*LowerTaskOutput, error) { fmt.Println(ctx.FilterPayload()) @@ -51,6 +52,7 @@ func accessFilterPayload(ctx worker.HatchetContext, input EventInput) (*LowerTas }, nil } + // > Declare with filter func LowerWithFilter(hatchet v1.HatchetClient) workflow.WorkflowDeclaration[EventInput, LowerTaskOutput] { return factory.NewTask( @@ -71,6 +73,7 @@ func LowerWithFilter(hatchet v1.HatchetClient) workflow.WorkflowDeclaration[Even ) } + func Upper(hatchet v1.HatchetClient) workflow.WorkflowDeclaration[EventInput, UpperTaskOutput] { return factory.NewTask( create.StandaloneTask{ diff --git a/examples/python/concurrency_limit_rr/worker.py b/examples/python/concurrency_limit_rr/worker.py index f07acc1b7..1da085583 100644 --- a/examples/python/concurrency_limit_rr/worker.py +++ b/examples/python/concurrency_limit_rr/worker.py @@ -33,7 +33,6 @@ def step1(input: WorkflowInput, ctx: Context) -> None: print("starting step1") time.sleep(2) print("finished step1") - pass def main() -> None: diff --git a/examples/python/dedupe/worker.py b/examples/python/dedupe/worker.py index 82e063e9a..68c99acbb 100644 --- a/examples/python/dedupe/worker.py +++ b/examples/python/dedupe/worker.py @@ -3,7 +3,7 @@ from datetime import timedelta from typing import Any from hatchet_sdk import Context, EmptyModel, Hatchet, TriggerWorkflowOptions -from hatchet_sdk.clients.admin import DedupeViolationErr +from hatchet_sdk.exceptions import DedupeViolationError hatchet = Hatchet(debug=True) @@ -20,15 
+20,13 @@ async def spawn(input: EmptyModel, ctx: Context) -> dict[str, list[Any]]: for i in range(2): try: results.append( - ( - dedupe_child_wf.aio_run( - options=TriggerWorkflowOptions( - additional_metadata={"dedupe": "test"}, key=f"child{i}" - ), - ) + dedupe_child_wf.aio_run( + options=TriggerWorkflowOptions( + additional_metadata={"dedupe": "test"}, key=f"child{i}" + ), ) ) - except DedupeViolationErr as e: + except DedupeViolationError as e: print(f"dedupe violation {e}") continue diff --git a/examples/python/durable/test_durable.py b/examples/python/durable/test_durable.py index 193add56a..4698df37a 100644 --- a/examples/python/durable/test_durable.py +++ b/examples/python/durable/test_durable.py @@ -1,5 +1,4 @@ import asyncio -import os import pytest @@ -7,10 +6,6 @@ from examples.durable.worker import EVENT_KEY, SLEEP_TIME, durable_workflow from hatchet_sdk import Hatchet -@pytest.mark.skipif( - os.getenv("CI", "false").lower() == "true", - reason="Skipped in CI because of unreliability", -) @pytest.mark.asyncio(loop_scope="session") async def test_durable(hatchet: Hatchet) -> None: ref = durable_workflow.run_no_wait() @@ -28,6 +23,12 @@ async def test_durable(hatchet: Hatchet) -> None: active_workers = [w for w in workers.rows if w.status == "ACTIVE"] assert len(active_workers) == 2 - assert any(w.name == "e2e-test-worker" for w in active_workers) - assert any(w.name.endswith("e2e-test-worker_durable") for w in active_workers) + assert any( + w.name == hatchet.config.apply_namespace("e2e-test-worker") + for w in active_workers + ) + assert any( + w.name == hatchet.config.apply_namespace("e2e-test-worker_durable") + for w in active_workers + ) assert result["durable_task"]["status"] == "success" diff --git a/examples/python/events/filter.py b/examples/python/events/filter.py index 6364e94de..2ea6ed344 100644 --- a/examples/python/events/filter.py +++ b/examples/python/events/filter.py @@ -30,7 +30,7 @@ hatchet.event.push( hatchet.event.push( event_key=EVENT_KEY, payload={ - "should_skip": True, + "should_skip": False, }, options=PushEventOptions( scope="foobarbaz", diff --git a/examples/python/events/test_event.py b/examples/python/events/test_event.py index aa6de1855..9abb531cb 100644 --- a/examples/python/events/test_event.py +++ b/examples/python/events/test_event.py @@ -1,8 +1,9 @@ import asyncio import json +from collections.abc import AsyncGenerator from contextlib import asynccontextmanager from datetime import datetime, timedelta, timezone -from typing import AsyncGenerator, cast +from typing import cast from uuid import uuid4 import pytest @@ -255,7 +256,9 @@ async def test_async_event_bulk_push(hatchet: Hatchet) -> None: namespace = "bulk-test" # Check that the returned events match the original events - for original_event, returned_event in zip(sorted_events, sorted_returned_events): + for original_event, returned_event in zip( + sorted_events, sorted_returned_events, strict=False + ): assert returned_event.key == namespace + original_event.key diff --git a/examples/python/events/worker.py b/examples/python/events/worker.py index 163e97002..458fabd9c 100644 --- a/examples/python/events/worker.py +++ b/examples/python/events/worker.py @@ -44,7 +44,7 @@ event_workflow_with_filter = hatchet.workflow( def task(input: EventWorkflowInput, ctx: Context) -> dict[str, str]: print("event received") - return dict(ctx.filter_payload) + return ctx.filter_payload # > Accessing the filter payload diff --git a/examples/python/lifespans/simple.py b/examples/python/lifespans/simple.py 
index 11dc57f62..f2ec49e3c 100644 --- a/examples/python/lifespans/simple.py +++ b/examples/python/lifespans/simple.py @@ -1,6 +1,7 @@ # > Lifespan -from typing import AsyncGenerator, cast +from collections.abc import AsyncGenerator +from typing import cast from pydantic import BaseModel diff --git a/examples/python/lifespans/worker.py b/examples/python/lifespans/worker.py index 953af19f4..3c340964c 100644 --- a/examples/python/lifespans/worker.py +++ b/examples/python/lifespans/worker.py @@ -1,4 +1,5 @@ -from typing import AsyncGenerator, cast +from collections.abc import AsyncGenerator +from typing import cast from uuid import UUID from psycopg_pool import ConnectionPool diff --git a/examples/python/logger/workflow.py b/examples/python/logger/workflow.py index 2d49548fd..390648417 100644 --- a/examples/python/logger/workflow.py +++ b/examples/python/logger/workflow.py @@ -16,7 +16,7 @@ logging_workflow = hatchet.workflow( @logging_workflow.task() def root_logger(input: EmptyModel, ctx: Context) -> dict[str, str]: for i in range(12): - logger.info("executed step1 - {}".format(i)) + logger.info(f"executed step1 - {i}") logger.info({"step1": "step1"}) time.sleep(0.1) @@ -31,7 +31,7 @@ def root_logger(input: EmptyModel, ctx: Context) -> dict[str, str]: @logging_workflow.task() def context_logger(input: EmptyModel, ctx: Context) -> dict[str, str]: for i in range(12): - ctx.log("executed step1 - {}".format(i)) + ctx.log(f"executed step1 - {i}") ctx.log({"step1": "step1"}) time.sleep(0.1) diff --git a/examples/python/migration_guides/mergent.py b/examples/python/migration_guides/mergent.py index 0ac13852e..905304eb1 100644 --- a/examples/python/migration_guides/mergent.py +++ b/examples/python/migration_guides/mergent.py @@ -1,5 +1,6 @@ +from collections.abc import Mapping from datetime import datetime, timedelta, timezone -from typing import Any, Dict, List, Mapping +from typing import Any import requests from pydantic import BaseModel @@ -10,13 +11,13 @@ from hatchet_sdk.context.context import Context from .hatchet_client import hatchet -async def process_image(image_url: str, filters: List[str]) -> Dict[str, Any]: +async def process_image(image_url: str, filters: list[str]) -> dict[str, Any]: # Do some image processing return {"url": image_url, "size": 100, "format": "png"} # > Before (Mergent) -async def process_image_task(request: Any) -> Dict[str, Any]: +async def process_image_task(request: Any) -> dict[str, Any]: image_url = request.json["image_url"] filters = request.json["filters"] try: @@ -32,12 +33,12 @@ async def process_image_task(request: Any) -> Dict[str, Any]: # > After (Hatchet) class ImageProcessInput(BaseModel): image_url: str - filters: List[str] + filters: list[str] class ImageProcessOutput(BaseModel): processed_url: str - metadata: Dict[str, Any] + metadata: dict[str, Any] @hatchet.task( diff --git a/examples/python/non_retryable/test_no_retry.py b/examples/python/non_retryable/test_no_retry.py index 82a58a72a..f1414392e 100644 --- a/examples/python/non_retryable/test_no_retry.py +++ b/examples/python/non_retryable/test_no_retry.py @@ -1,3 +1,5 @@ +import asyncio + import pytest from examples.non_retryable.worker import ( @@ -9,6 +11,7 @@ from examples.non_retryable.worker import ( from hatchet_sdk import Hatchet from hatchet_sdk.clients.rest.models.v1_task_event_type import V1TaskEventType from hatchet_sdk.clients.rest.models.v1_workflow_run_details import V1WorkflowRunDetails +from hatchet_sdk.exceptions import FailedTaskRunExceptionGroup def find_id(runs: 
V1WorkflowRunDetails, match: str) -> str: @@ -19,9 +22,28 @@ def find_id(runs: V1WorkflowRunDetails, match: str) -> str: async def test_no_retry(hatchet: Hatchet) -> None: ref = await non_retryable_workflow.aio_run_no_wait() - with pytest.raises(Exception, match="retry"): + with pytest.raises(FailedTaskRunExceptionGroup) as exc_info: await ref.aio_result() + exception_group = exc_info.value + + assert len(exception_group.exceptions) == 2 + + exc_text = [e.exc for e in exception_group.exceptions] + + non_retries = [ + e + for e in exc_text + if "This task should retry because it's not a NonRetryableException" in e + ] + + other_errors = [e for e in exc_text if "This task should not retry" in e] + + assert len(non_retries) == 1 + assert len(other_errors) == 1 + + await asyncio.sleep(3) + runs = await hatchet.runs.aio_get(ref.workflow_run_id) task_to_id = { task: find_id(runs, task.name) @@ -40,9 +62,7 @@ async def test_no_retry(hatchet: Hatchet) -> None: assert len(retrying_events) == 1 """The task id of the retrying events should match the tasks that are retried""" - assert {e.task_id for e in retrying_events} == { - task_to_id[should_retry_wrong_exception_type], - } + assert retrying_events[0].task_id == task_to_id[should_retry_wrong_exception_type] """Three failed events should emit, one each for the two failing initial runs and one for the retry.""" assert ( diff --git a/examples/python/opentelemetry_instrumentation/langfuse/client.py b/examples/python/opentelemetry_instrumentation/langfuse/client.py index 349d6868b..346240e9e 100644 --- a/examples/python/opentelemetry_instrumentation/langfuse/client.py +++ b/examples/python/opentelemetry_instrumentation/langfuse/client.py @@ -1,8 +1,8 @@ import base64 import os -from langfuse import Langfuse # type: ignore[import-untyped] -from langfuse.openai import AsyncOpenAI # type: ignore[import-untyped] +from langfuse import Langfuse # type: ignore +from langfuse.openai import AsyncOpenAI # type: ignore # > Configure Langfuse LANGFUSE_AUTH = base64.b64encode( diff --git a/examples/python/opentelemetry_instrumentation/langfuse/trigger.py b/examples/python/opentelemetry_instrumentation/langfuse/trigger.py index 48cbd5f4f..1f168916a 100644 --- a/examples/python/opentelemetry_instrumentation/langfuse/trigger.py +++ b/examples/python/opentelemetry_instrumentation/langfuse/trigger.py @@ -1,6 +1,6 @@ import asyncio -from langfuse import get_client # type: ignore[import-untyped] +from langfuse import get_client # type: ignore from opentelemetry.trace import StatusCode from examples.opentelemetry_instrumentation.langfuse.worker import langfuse_task diff --git a/examples/python/priority/test_priority.py b/examples/python/priority/test_priority.py index 113ac20ca..7a5cbe10a 100644 --- a/examples/python/priority/test_priority.py +++ b/examples/python/priority/test_priority.py @@ -1,8 +1,9 @@ import asyncio +from collections.abc import AsyncGenerator from datetime import datetime, timedelta, timezone from random import choice from subprocess import Popen -from typing import Any, AsyncGenerator, Literal +from typing import Any, Literal from uuid import uuid4 import pytest @@ -58,7 +59,7 @@ async def dummy_runs() -> None: await asyncio.sleep(3) - return None + return @pytest.mark.parametrize( diff --git a/examples/python/quickstart/poetry.lock b/examples/python/quickstart/poetry.lock index 7ecee5201..0bb6a6dde 100644 --- a/examples/python/quickstart/poetry.lock +++ b/examples/python/quickstart/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated 
by Poetry 2.1.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.0.0 and should not be changed by hand. [[package]] name = "aiohappyeyeballs" @@ -114,7 +114,7 @@ propcache = ">=0.2.0" yarl = ">=1.17.0,<2.0" [package.extras] -speedups = ["Brotli ; platform_python_implementation == \"CPython\"", "aiodns (>=3.2.0) ; sys_platform == \"linux\" or sys_platform == \"darwin\"", "brotlicffi ; platform_python_implementation != \"CPython\""] +speedups = ["Brotli", "aiodns (>=3.2.0)", "brotlicffi"] [[package]] name = "aiohttp-retry" @@ -199,12 +199,12 @@ files = [ ] [package.extras] -benchmark = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] -cov = ["cloudpickle ; platform_python_implementation == \"CPython\"", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] -dev = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pre-commit-uv", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] +benchmark = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +cov = ["cloudpickle", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +dev = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pre-commit-uv", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier"] -tests = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] -tests-mypy = ["mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\""] +tests = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"] [[package]] name = "cel-python" @@ -460,14 +460,14 @@ setuptools = "*" [[package]] name = "hatchet-sdk" -version = "1.12.2" +version = "1.0.0a1" description = "" optional = false python-versions = "<4.0,>=3.10" groups = ["main"] files = [ - {file = "hatchet_sdk-1.12.2-py3-none-any.whl", hash = "sha256:a2701fc9fe277935346bc1f974bce075afa61d6aae1e43ef01d3e9a06abd30ce"}, - {file = "hatchet_sdk-1.12.2.tar.gz", hash = "sha256:77a91539640d732523bff6135593d56171f5cb0185b3e86bbc561e27b18e9a32"}, + {file = 
"hatchet_sdk-1.0.0a1-py3-none-any.whl", hash = "sha256:bfc84358c8842cecd0d95b30645109733b7292dff0db1a776ca862785ee93d7f"}, + {file = "hatchet_sdk-1.0.0a1.tar.gz", hash = "sha256:f0272bbaac6faed75ff727826e9f7b1ac42ae597f9b590e14d392aada9c9692f"}, ] [package.dependencies] @@ -483,11 +483,13 @@ grpcio-tools = [ {version = ">=1.64.1,<1.68.dev0 || >=1.69.dev0", markers = "python_version < \"3.13\""}, {version = ">=1.69.0", markers = "python_version >= \"3.13\""}, ] +nest-asyncio = ">=1.6.0,<2.0.0" prometheus-client = ">=0.21.1,<0.22.0" -protobuf = ">=5.29.5,<6.0.0" +protobuf = ">=5.29.1,<6.0.0" pydantic = ">=2.6.3,<3.0.0" pydantic-settings = ">=2.7.1,<3.0.0" python-dateutil = ">=2.9.0.post0,<3.0.0" +pyyaml = ">=6.0.1,<7.0.0" tenacity = ">=8.4.1" urllib3 = ">=1.26.20" @@ -643,6 +645,18 @@ files = [ [package.dependencies] typing-extensions = {version = ">=4.1.0", markers = "python_version < \"3.11\""} +[[package]] +name = "nest-asyncio" +version = "1.6.0" +description = "Patch asyncio to allow nested event loops" +optional = false +python-versions = ">=3.5" +groups = ["main"] +files = [ + {file = "nest_asyncio-1.6.0-py3-none-any.whl", hash = "sha256:87af6efd6b5e897c81050477ef65c62e2b2f35d51703cae01aff2905b1852e1c"}, + {file = "nest_asyncio-1.6.0.tar.gz", hash = "sha256:6f172d5449aca15afd6c646851f4e31e02c598d553a667e38cafa997cfec55fe"}, +] + [[package]] name = "prometheus-client" version = "0.21.1" @@ -768,23 +782,23 @@ files = [ [[package]] name = "protobuf" -version = "5.29.5" +version = "5.29.4" description = "" optional = false python-versions = ">=3.8" groups = ["main"] files = [ - {file = "protobuf-5.29.5-cp310-abi3-win32.whl", hash = "sha256:3f1c6468a2cfd102ff4703976138844f78ebd1fb45f49011afc5139e9e283079"}, - {file = "protobuf-5.29.5-cp310-abi3-win_amd64.whl", hash = "sha256:3f76e3a3675b4a4d867b52e4a5f5b78a2ef9565549d4037e06cf7b0942b1d3fc"}, - {file = "protobuf-5.29.5-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:e38c5add5a311f2a6eb0340716ef9b039c1dfa428b28f25a7838ac329204a671"}, - {file = "protobuf-5.29.5-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:fa18533a299d7ab6c55a238bf8629311439995f2e7eca5caaff08663606e9015"}, - {file = "protobuf-5.29.5-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:63848923da3325e1bf7e9003d680ce6e14b07e55d0473253a690c3a8b8fd6e61"}, - {file = "protobuf-5.29.5-cp38-cp38-win32.whl", hash = "sha256:ef91363ad4faba7b25d844ef1ada59ff1604184c0bcd8b39b8a6bef15e1af238"}, - {file = "protobuf-5.29.5-cp38-cp38-win_amd64.whl", hash = "sha256:7318608d56b6402d2ea7704ff1e1e4597bee46d760e7e4dd42a3d45e24b87f2e"}, - {file = "protobuf-5.29.5-cp39-cp39-win32.whl", hash = "sha256:6f642dc9a61782fa72b90878af134c5afe1917c89a568cd3476d758d3c3a0736"}, - {file = "protobuf-5.29.5-cp39-cp39-win_amd64.whl", hash = "sha256:470f3af547ef17847a28e1f47200a1cbf0ba3ff57b7de50d22776607cd2ea353"}, - {file = "protobuf-5.29.5-py3-none-any.whl", hash = "sha256:6cf42630262c59b2d8de33954443d94b746c952b01434fc58a417fdbd2e84bd5"}, - {file = "protobuf-5.29.5.tar.gz", hash = "sha256:bc1463bafd4b0929216c35f437a8e28731a2b7fe3d98bb77a600efced5a15c84"}, + {file = "protobuf-5.29.4-cp310-abi3-win32.whl", hash = "sha256:13eb236f8eb9ec34e63fc8b1d6efd2777d062fa6aaa68268fb67cf77f6839ad7"}, + {file = "protobuf-5.29.4-cp310-abi3-win_amd64.whl", hash = "sha256:bcefcdf3976233f8a502d265eb65ea740c989bacc6c30a58290ed0e519eb4b8d"}, + {file = "protobuf-5.29.4-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:307ecba1d852ec237e9ba668e087326a67564ef83e45a0189a772ede9e854dd0"}, + {file = 
"protobuf-5.29.4-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:aec4962f9ea93c431d5714ed1be1c93f13e1a8618e70035ba2b0564d9e633f2e"}, + {file = "protobuf-5.29.4-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:d7d3f7d1d5a66ed4942d4fefb12ac4b14a29028b209d4bfb25c68ae172059922"}, + {file = "protobuf-5.29.4-cp38-cp38-win32.whl", hash = "sha256:1832f0515b62d12d8e6ffc078d7e9eb06969aa6dc13c13e1036e39d73bebc2de"}, + {file = "protobuf-5.29.4-cp38-cp38-win_amd64.whl", hash = "sha256:476cb7b14914c780605a8cf62e38c2a85f8caff2e28a6a0bad827ec7d6c85d68"}, + {file = "protobuf-5.29.4-cp39-cp39-win32.whl", hash = "sha256:fd32223020cb25a2cc100366f1dedc904e2d71d9322403224cdde5fdced0dabe"}, + {file = "protobuf-5.29.4-cp39-cp39-win_amd64.whl", hash = "sha256:678974e1e3a9b975b8bc2447fca458db5f93a2fb6b0c8db46b6675b5b5346812"}, + {file = "protobuf-5.29.4-py3-none-any.whl", hash = "sha256:3fde11b505e1597f71b875ef2fc52062b6a9740e5f7c8997ce878b6009145862"}, + {file = "protobuf-5.29.4.tar.gz", hash = "sha256:4f1dfcd7997b31ef8f53ec82781ff434a28bf71d9102ddde14d076adcfc78c99"}, ] [[package]] @@ -806,7 +820,7 @@ typing-extensions = ">=4.12.2" [package.extras] email = ["email-validator (>=2.0.0)"] -timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows\""] +timezone = ["tzdata"] [[package]] name = "pydantic-core" @@ -1048,13 +1062,13 @@ files = [ ] [package.extras] -check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\"", "ruff (>=0.8.0) ; sys_platform != \"cygwin\""] -core = ["importlib_metadata (>=6) ; python_version < \"3.10\"", "jaraco.functools (>=4)", "jaraco.text (>=3.7)", "more_itertools", "more_itertools (>=8.8)", "packaging (>=24.2)", "platformdirs (>=4.2.2)", "tomli (>=2.0.1) ; python_version < \"3.11\"", "wheel (>=0.43.0)"] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)", "ruff (>=0.8.0)"] +core = ["importlib_metadata (>=6)", "jaraco.functools (>=4)", "jaraco.text (>=3.7)", "more_itertools", "more_itertools (>=8.8)", "packaging (>=24.2)", "platformdirs (>=4.2.2)", "tomli (>=2.0.1)", "wheel (>=0.43.0)"] cover = ["pytest-cov"] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier", "towncrier (<24.7)"] enabler = ["pytest-enabler (>=2.2)"] -test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21) ; python_version >= \"3.9\" and sys_platform != \"cygwin\"", "jaraco.envs (>=2.2)", "jaraco.path (>=3.7.2)", "jaraco.test (>=5.5)", "packaging (>=24.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf ; sys_platform != \"cygwin\"", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"] -type = ["importlib_metadata (>=7.0.2) ; python_version < \"3.10\"", "jaraco.develop (>=7.21) ; sys_platform != \"cygwin\"", "mypy (==1.14.*)", "pytest-mypy"] +test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.7.2)", "jaraco.test (>=5.5)", "packaging (>=24.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", 
"tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"] +type = ["importlib_metadata (>=7.0.2)", "jaraco.develop (>=7.21)", "mypy (==1.14.*)", "pytest-mypy"] [[package]] name = "six" @@ -1133,7 +1147,7 @@ files = [ ] [package.extras] -brotli = ["brotli (>=1.0.9) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\""] +brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] h2 = ["h2 (>=4,<5)"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["zstandard (>=0.18.0)"] @@ -1238,4 +1252,4 @@ propcache = ">=0.2.0" [metadata] lock-version = "2.1" python-versions = "^3.10" -content-hash = "fe1fd90bab8fe4470ec4afbd5c1331962a821f6751a8f02bea567f9b9f44b815" +content-hash = "74c12e499aa797ca5c8559af579f1212b0e4e3a77f068f9385db39d70ba304e0" diff --git a/examples/python/streaming/async_stream.py b/examples/python/streaming/async_stream.py index 289b57c08..ab3d6ba02 100644 --- a/examples/python/streaming/async_stream.py +++ b/examples/python/streaming/async_stream.py @@ -1,19 +1,16 @@ import asyncio -from examples.streaming.worker import streaming_workflow +from examples.streaming.worker import stream_task +from hatchet_sdk.clients.listeners.run_event_listener import StepRunEventType async def main() -> None: - ref = await streaming_workflow.aio_run_no_wait() - await asyncio.sleep(1) + ref = await stream_task.aio_run_no_wait() - stream = ref.stream() - - async for chunk in stream: - print(chunk) + async for chunk in ref.stream(): + if chunk.type == StepRunEventType.STEP_RUN_EVENT_TYPE_STREAM: + print(chunk.payload, flush=True, end="") if __name__ == "__main__": - import asyncio - asyncio.run(main()) diff --git a/examples/python/streaming/sync_stream.py b/examples/python/streaming/sync_stream.py index 8566de7de..888777b8d 100644 --- a/examples/python/streaming/sync_stream.py +++ b/examples/python/streaming/sync_stream.py @@ -1,10 +1,10 @@ import time -from examples.streaming.worker import streaming_workflow +from examples.streaming.worker import stream_task def main() -> None: - ref = streaming_workflow.run_no_wait() + ref = stream_task.run_no_wait() time.sleep(1) stream = ref.stream() diff --git a/examples/python/streaming/test_streaming.py b/examples/python/streaming/test_streaming.py new file mode 100644 index 000000000..9b31d7aad --- /dev/null +++ b/examples/python/streaming/test_streaming.py @@ -0,0 +1,47 @@ +import asyncio +from datetime import datetime, timedelta, timezone +from subprocess import Popen +from typing import Any + +import pytest + +from examples.streaming.worker import chunks, stream_task +from hatchet_sdk import Hatchet +from hatchet_sdk.clients.listeners.run_event_listener import ( + StepRunEvent, + StepRunEventType, +) + + +@pytest.mark.parametrize( + "on_demand_worker", + [ + ( + ["poetry", "run", "python", "examples/streaming/worker.py", "--slots", "1"], + 8008, + ) + ], + indirect=True, +) +@pytest.mark.parametrize("execution_number", range(1)) +@pytest.mark.asyncio(loop_scope="session") +async def test_streaming_ordering_and_completeness( + execution_number: int, + hatchet: Hatchet, + on_demand_worker: Popen[Any], +) -> None: + ref = await stream_task.aio_run_no_wait() + + ix = 0 + anna_karenina = "" + + async for chunk in ref.stream(): + if chunk.type == StepRunEventType.STEP_RUN_EVENT_TYPE_STREAM: + assert chunks[ix] == chunk.payload + ix += 1 + anna_karenina += chunk.payload + + assert ix == len(chunks) + assert anna_karenina == "".join(chunks) + + await ref.aio_result() diff --git 
a/examples/python/streaming/worker.py b/examples/python/streaming/worker.py index 6389a24b1..8c2eaed90 100644 --- a/examples/python/streaming/worker.py +++ b/examples/python/streaming/worker.py @@ -1,23 +1,39 @@ import asyncio +from datetime import datetime, timedelta, timezone +from typing import Generator from hatchet_sdk import Context, EmptyModel, Hatchet -hatchet = Hatchet(debug=True) +hatchet = Hatchet(debug=False) # > Streaming -streaming_workflow = hatchet.workflow(name="StreamingWorkflow") +content = """ +Happy families are all alike; every unhappy family is unhappy in its own way. + +Everything was in confusion in the Oblonskys' house. The wife had discovered that the husband was carrying on an intrigue with a French girl, who had been a governess in their family, and she had announced to her husband that she could not go on living in the same house with him. This position of affairs had now lasted three days, and not only the husband and wife themselves, but all the members of their family and household, were painfully conscious of it. Every person in the house felt that there was so sense in their living together, and that the stray people brought together by chance in any inn had more in common with one another than they, the members of the family and household of the Oblonskys. The wife did not leave her own room, the husband had not been at home for three days. The children ran wild all over the house; the English governess quarreled with the housekeeper, and wrote to a friend asking her to look out for a new situation for her; the man-cook had walked off the day before just at dinner time; the kitchen-maid, and the coachman had given warning. +""" -@streaming_workflow.task() -async def step1(input: EmptyModel, ctx: Context) -> None: - for i in range(10): - await asyncio.sleep(1) - ctx.put_stream(f"Processing {i}") +def create_chunks(content: str, n: int) -> Generator[str, None, None]: + for i in range(0, len(content), n): + yield content[i : i + n] + + +chunks = list(create_chunks(content, 10)) + + +@hatchet.task() +async def stream_task(input: EmptyModel, ctx: Context) -> None: + await asyncio.sleep(2) + + for chunk in chunks: + ctx.put_stream(chunk) + await asyncio.sleep(0.05) def main() -> None: - worker = hatchet.worker("test-worker", workflows=[streaming_workflow]) + worker = hatchet.worker("test-worker", workflows=[stream_task]) worker.start() diff --git a/examples/python/timeout/test_timeout.py b/examples/python/timeout/test_timeout.py index 1942716f3..1f46e52a1 100644 --- a/examples/python/timeout/test_timeout.py +++ b/examples/python/timeout/test_timeout.py @@ -7,7 +7,10 @@ from examples.timeout.worker import refresh_timeout_wf, timeout_wf async def test_execution_timeout() -> None: run = timeout_wf.run_no_wait() - with pytest.raises(Exception, match="(Task exceeded timeout|TIMED_OUT)"): + with pytest.raises( + Exception, + match="(Task exceeded timeout|TIMED_OUT|Workflow run .* failed with multiple errors)", + ): await run.aio_result() diff --git a/examples/python/waits/test_waits.py b/examples/python/waits/test_waits.py index f0830fb57..2b91c5fb4 100644 --- a/examples/python/waits/test_waits.py +++ b/examples/python/waits/test_waits.py @@ -1,5 +1,4 @@ import asyncio -import os import pytest @@ -7,10 +6,6 @@ from examples.waits.worker import task_condition_workflow from hatchet_sdk import Hatchet -@pytest.mark.skipif( - os.getenv("CI", "false").lower() == "true", - reason="Skipped in CI because of unreliability", -) @pytest.mark.asyncio(loop_scope="session") async 
def test_waits(hatchet: Hatchet) -> None: diff --git a/examples/typescript/simple/workflow-with-child.ts b/examples/typescript/simple/workflow-with-child.ts index f160a8055..2647b3ef8 100644 --- a/examples/typescript/simple/workflow-with-child.ts +++ b/examples/typescript/simple/workflow-with-child.ts @@ -48,7 +48,7 @@ export const child3 = child.task({ export const parent = hatchet.task({ name: 'parent', fn: async (input: ParentInput, ctx) => { - const c = await child.run({ + const c = await ctx.runChild(child, { Message: input.Message, }); diff --git a/frontend/app/src/next/lib/docs/generated/snips/python/concurrency_limit_rr/worker.ts b/frontend/app/src/next/lib/docs/generated/snips/python/concurrency_limit_rr/worker.ts index 6d3a39b53..522cac6d1 100644 --- a/frontend/app/src/next/lib/docs/generated/snips/python/concurrency_limit_rr/worker.ts +++ b/frontend/app/src/next/lib/docs/generated/snips/python/concurrency_limit_rr/worker.ts @@ -3,7 +3,7 @@ import { Snippet } from '@/next/lib/docs/generated/snips/types'; const snippet: Snippet = { language: 'python', content: - 'import time\n\nfrom pydantic import BaseModel\n\nfrom hatchet_sdk import (\n ConcurrencyExpression,\n ConcurrencyLimitStrategy,\n Context,\n Hatchet,\n)\n\nhatchet = Hatchet(debug=True)\n\n\n# > Concurrency Strategy With Key\nclass WorkflowInput(BaseModel):\n group: str\n\n\nconcurrency_limit_rr_workflow = hatchet.workflow(\n name="ConcurrencyDemoWorkflowRR",\n concurrency=ConcurrencyExpression(\n expression="input.group",\n max_runs=1,\n limit_strategy=ConcurrencyLimitStrategy.GROUP_ROUND_ROBIN,\n ),\n input_validator=WorkflowInput,\n)\n\n\n@concurrency_limit_rr_workflow.task()\ndef step1(input: WorkflowInput, ctx: Context) -> None:\n print("starting step1")\n time.sleep(2)\n print("finished step1")\n pass\n\n\ndef main() -> None:\n worker = hatchet.worker(\n "concurrency-demo-worker-rr",\n slots=10,\n workflows=[concurrency_limit_rr_workflow],\n )\n\n worker.start()\n\n\nif __name__ == "__main__":\n main()\n', + 'import time\n\nfrom pydantic import BaseModel\n\nfrom hatchet_sdk import (\n ConcurrencyExpression,\n ConcurrencyLimitStrategy,\n Context,\n Hatchet,\n)\n\nhatchet = Hatchet(debug=True)\n\n\n# > Concurrency Strategy With Key\nclass WorkflowInput(BaseModel):\n group: str\n\n\nconcurrency_limit_rr_workflow = hatchet.workflow(\n name="ConcurrencyDemoWorkflowRR",\n concurrency=ConcurrencyExpression(\n expression="input.group",\n max_runs=1,\n limit_strategy=ConcurrencyLimitStrategy.GROUP_ROUND_ROBIN,\n ),\n input_validator=WorkflowInput,\n)\n\n\n@concurrency_limit_rr_workflow.task()\ndef step1(input: WorkflowInput, ctx: Context) -> None:\n print("starting step1")\n time.sleep(2)\n print("finished step1")\n\n\ndef main() -> None:\n worker = hatchet.worker(\n "concurrency-demo-worker-rr",\n slots=10,\n workflows=[concurrency_limit_rr_workflow],\n )\n\n worker.start()\n\n\nif __name__ == "__main__":\n main()\n', source: 'out/python/concurrency_limit_rr/worker.py', blocks: { concurrency_strategy_with_key: { diff --git a/frontend/app/src/next/lib/docs/generated/snips/python/dedupe/worker.ts b/frontend/app/src/next/lib/docs/generated/snips/python/dedupe/worker.ts index b72817d17..781698426 100644 --- a/frontend/app/src/next/lib/docs/generated/snips/python/dedupe/worker.ts +++ b/frontend/app/src/next/lib/docs/generated/snips/python/dedupe/worker.ts @@ -3,7 +3,7 @@ import { Snippet } from '@/next/lib/docs/generated/snips/types'; const snippet: Snippet = { language: 'python', content: - 'import asyncio\nfrom datetime import 
timedelta\nfrom typing import Any\n\nfrom hatchet_sdk import Context, EmptyModel, Hatchet, TriggerWorkflowOptions\nfrom hatchet_sdk.clients.admin import DedupeViolationErr\n\nhatchet = Hatchet(debug=True)\n\ndedupe_parent_wf = hatchet.workflow(name="DedupeParent")\ndedupe_child_wf = hatchet.workflow(name="DedupeChild")\n\n\n@dedupe_parent_wf.task(execution_timeout=timedelta(minutes=1))\nasync def spawn(input: EmptyModel, ctx: Context) -> dict[str, list[Any]]:\n print("spawning child")\n\n results = []\n\n for i in range(2):\n try:\n results.append(\n (\n dedupe_child_wf.aio_run(\n options=TriggerWorkflowOptions(\n additional_metadata={"dedupe": "test"}, key=f"child{i}"\n ),\n )\n )\n )\n except DedupeViolationErr as e:\n print(f"dedupe violation {e}")\n continue\n\n result = await asyncio.gather(*results)\n print(f"results {result}")\n\n return {"results": result}\n\n\n@dedupe_child_wf.task()\nasync def process(input: EmptyModel, ctx: Context) -> dict[str, str]:\n await asyncio.sleep(3)\n\n print("child process")\n return {"status": "success"}\n\n\n@dedupe_child_wf.task()\nasync def process2(input: EmptyModel, ctx: Context) -> dict[str, str]:\n print("child process2")\n return {"status2": "success"}\n\n\ndef main() -> None:\n worker = hatchet.worker(\n "fanout-worker", slots=100, workflows=[dedupe_parent_wf, dedupe_child_wf]\n )\n worker.start()\n\n\nif __name__ == "__main__":\n main()\n', + 'import asyncio\nfrom datetime import timedelta\nfrom typing import Any\n\nfrom hatchet_sdk import Context, EmptyModel, Hatchet, TriggerWorkflowOptions\nfrom hatchet_sdk.exceptions import DedupeViolationError\n\nhatchet = Hatchet(debug=True)\n\ndedupe_parent_wf = hatchet.workflow(name="DedupeParent")\ndedupe_child_wf = hatchet.workflow(name="DedupeChild")\n\n\n@dedupe_parent_wf.task(execution_timeout=timedelta(minutes=1))\nasync def spawn(input: EmptyModel, ctx: Context) -> dict[str, list[Any]]:\n print("spawning child")\n\n results = []\n\n for i in range(2):\n try:\n results.append(\n dedupe_child_wf.aio_run(\n options=TriggerWorkflowOptions(\n additional_metadata={"dedupe": "test"}, key=f"child{i}"\n ),\n )\n )\n except DedupeViolationError as e:\n print(f"dedupe violation {e}")\n continue\n\n result = await asyncio.gather(*results)\n print(f"results {result}")\n\n return {"results": result}\n\n\n@dedupe_child_wf.task()\nasync def process(input: EmptyModel, ctx: Context) -> dict[str, str]:\n await asyncio.sleep(3)\n\n print("child process")\n return {"status": "success"}\n\n\n@dedupe_child_wf.task()\nasync def process2(input: EmptyModel, ctx: Context) -> dict[str, str]:\n print("child process2")\n return {"status2": "success"}\n\n\ndef main() -> None:\n worker = hatchet.worker(\n "fanout-worker", slots=100, workflows=[dedupe_parent_wf, dedupe_child_wf]\n )\n worker.start()\n\n\nif __name__ == "__main__":\n main()\n', source: 'out/python/dedupe/worker.py', blocks: {}, highlights: {}, diff --git a/frontend/app/src/next/lib/docs/generated/snips/python/durable/test_durable.ts b/frontend/app/src/next/lib/docs/generated/snips/python/durable/test_durable.ts index 442876dd2..438d35717 100644 --- a/frontend/app/src/next/lib/docs/generated/snips/python/durable/test_durable.ts +++ b/frontend/app/src/next/lib/docs/generated/snips/python/durable/test_durable.ts @@ -3,7 +3,7 @@ import { Snippet } from '@/next/lib/docs/generated/snips/types'; const snippet: Snippet = { language: 'python', content: - 'import asyncio\nimport os\n\nimport pytest\n\nfrom examples.durable.worker import EVENT_KEY, SLEEP_TIME, 
durable_workflow\nfrom hatchet_sdk import Hatchet\n\n\n@pytest.mark.skipif(\n os.getenv("CI", "false").lower() == "true",\n reason="Skipped in CI because of unreliability",\n)\n@pytest.mark.asyncio(loop_scope="session")\nasync def test_durable(hatchet: Hatchet) -> None:\n ref = durable_workflow.run_no_wait()\n\n await asyncio.sleep(SLEEP_TIME + 10)\n\n hatchet.event.push(EVENT_KEY, {})\n\n result = await ref.aio_result()\n\n workers = await hatchet.workers.aio_list()\n\n assert workers.rows\n\n active_workers = [w for w in workers.rows if w.status == "ACTIVE"]\n\n assert len(active_workers) == 2\n assert any(w.name == "e2e-test-worker" for w in active_workers)\n assert any(w.name.endswith("e2e-test-worker_durable") for w in active_workers)\n assert result["durable_task"]["status"] == "success"\n', + 'import asyncio\n\nimport pytest\n\nfrom examples.durable.worker import EVENT_KEY, SLEEP_TIME, durable_workflow\nfrom hatchet_sdk import Hatchet\n\n\n@pytest.mark.asyncio(loop_scope="session")\nasync def test_durable(hatchet: Hatchet) -> None:\n ref = durable_workflow.run_no_wait()\n\n await asyncio.sleep(SLEEP_TIME + 10)\n\n hatchet.event.push(EVENT_KEY, {})\n\n result = await ref.aio_result()\n\n workers = await hatchet.workers.aio_list()\n\n assert workers.rows\n\n active_workers = [w for w in workers.rows if w.status == "ACTIVE"]\n\n assert len(active_workers) == 2\n assert any(\n w.name == hatchet.config.apply_namespace("e2e-test-worker")\n for w in active_workers\n )\n assert any(\n w.name == hatchet.config.apply_namespace("e2e-test-worker_durable")\n for w in active_workers\n )\n assert result["durable_task"]["status"] == "success"\n', source: 'out/python/durable/test_durable.py', blocks: {}, highlights: {}, diff --git a/frontend/app/src/next/lib/docs/generated/snips/python/events/filter.ts b/frontend/app/src/next/lib/docs/generated/snips/python/events/filter.ts index 45ac9f1c7..bf14d081a 100644 --- a/frontend/app/src/next/lib/docs/generated/snips/python/events/filter.ts +++ b/frontend/app/src/next/lib/docs/generated/snips/python/events/filter.ts @@ -3,7 +3,7 @@ import { Snippet } from '@/next/lib/docs/generated/snips/types'; const snippet: Snippet = { language: 'python', content: - 'from examples.events.worker import EVENT_KEY, event_workflow\nfrom hatchet_sdk import Hatchet, PushEventOptions\n\nhatchet = Hatchet()\n\n# > Create a filter\nhatchet.filters.create(\n workflow_id=event_workflow.id,\n expression="input.should_skip == false",\n scope="foobarbaz",\n payload={\n "main_character": "Anna",\n "supporting_character": "Stiva",\n "location": "Moscow",\n },\n)\n\n# > Skip a run\nhatchet.event.push(\n event_key=EVENT_KEY,\n payload={\n "should_skip": True,\n },\n options=PushEventOptions(\n scope="foobarbaz",\n ),\n)\n\n# > Trigger a run\nhatchet.event.push(\n event_key=EVENT_KEY,\n payload={\n "should_skip": True,\n },\n options=PushEventOptions(\n scope="foobarbaz",\n ),\n)\n', + 'from examples.events.worker import EVENT_KEY, event_workflow\nfrom hatchet_sdk import Hatchet, PushEventOptions\n\nhatchet = Hatchet()\n\n# > Create a filter\nhatchet.filters.create(\n workflow_id=event_workflow.id,\n expression="input.should_skip == false",\n scope="foobarbaz",\n payload={\n "main_character": "Anna",\n "supporting_character": "Stiva",\n "location": "Moscow",\n },\n)\n\n# > Skip a run\nhatchet.event.push(\n event_key=EVENT_KEY,\n payload={\n "should_skip": True,\n },\n options=PushEventOptions(\n scope="foobarbaz",\n ),\n)\n\n# > Trigger a run\nhatchet.event.push(\n event_key=EVENT_KEY,\n 
payload={\n "should_skip": False,\n },\n options=PushEventOptions(\n scope="foobarbaz",\n ),\n)\n', source: 'out/python/events/filter.py', blocks: { create_a_filter: { diff --git a/frontend/app/src/next/lib/docs/generated/snips/python/events/test_event.ts b/frontend/app/src/next/lib/docs/generated/snips/python/events/test_event.ts index 7c6f62a92..265f307bc 100644 --- a/frontend/app/src/next/lib/docs/generated/snips/python/events/test_event.ts +++ b/frontend/app/src/next/lib/docs/generated/snips/python/events/test_event.ts @@ -3,7 +3,7 @@ import { Snippet } from '@/next/lib/docs/generated/snips/types'; const snippet: Snippet = { language: 'python', content: - 'import asyncio\nimport json\nfrom contextlib import asynccontextmanager\nfrom datetime import datetime, timedelta, timezone\nfrom typing import AsyncGenerator, cast\nfrom uuid import uuid4\n\nimport pytest\nfrom pydantic import BaseModel\n\nfrom examples.events.worker import (\n EVENT_KEY,\n SECONDARY_KEY,\n WILDCARD_KEY,\n EventWorkflowInput,\n event_workflow,\n)\nfrom hatchet_sdk.clients.events import (\n BulkPushEventOptions,\n BulkPushEventWithMetadata,\n PushEventOptions,\n)\nfrom hatchet_sdk.clients.rest.models.v1_task_status import V1TaskStatus\nfrom hatchet_sdk.clients.rest.models.v1_task_summary import V1TaskSummary\nfrom hatchet_sdk.contracts.events_pb2 import Event\nfrom hatchet_sdk.hatchet import Hatchet\n\n\nclass ProcessedEvent(BaseModel):\n id: str\n payload: dict[str, str | bool]\n meta: dict[str, str | bool | int]\n should_have_runs: bool\n test_run_id: str\n\n def __hash__(self) -> int:\n return hash(self.model_dump_json())\n\n\n@asynccontextmanager\nasync def event_filter(\n hatchet: Hatchet,\n test_run_id: str,\n expression: str | None = None,\n payload: dict[str, str] = {},\n) -> AsyncGenerator[None, None]:\n expression = (\n expression\n or f"input.should_skip == false && payload.test_run_id == \'{test_run_id}\'"\n )\n\n f = await hatchet.filters.aio_create(\n workflow_id=event_workflow.id,\n expression=expression,\n scope=test_run_id,\n payload={"test_run_id": test_run_id, **payload},\n )\n\n try:\n yield\n finally:\n await hatchet.filters.aio_delete(f.metadata.id)\n\n\nasync def fetch_runs_for_event(\n hatchet: Hatchet, event: Event\n) -> tuple[ProcessedEvent, list[V1TaskSummary]]:\n runs = await hatchet.runs.aio_list(triggering_event_external_id=event.eventId)\n\n meta = (\n cast(dict[str, str | int | bool], json.loads(event.additionalMetadata))\n if event.additionalMetadata\n else {}\n )\n payload = (\n cast(dict[str, str | bool], json.loads(event.payload)) if event.payload else {}\n )\n\n processed_event = ProcessedEvent(\n id=event.eventId,\n payload=payload,\n meta=meta,\n should_have_runs=meta.get("should_have_runs", False) is True,\n test_run_id=cast(str, meta["test_run_id"]),\n )\n\n if not all([r.output for r in runs.rows]):\n return (processed_event, [])\n\n return (\n processed_event,\n runs.rows or [],\n )\n\n\nasync def wait_for_result(\n hatchet: Hatchet, events: list[Event]\n) -> dict[ProcessedEvent, list[V1TaskSummary]]:\n await asyncio.sleep(3)\n\n since = datetime.now(tz=timezone.utc) - timedelta(minutes=2)\n\n persisted = (await hatchet.event.aio_list(limit=100, since=since)).rows or []\n\n assert {e.eventId for e in events}.issubset({e.metadata.id for e in persisted})\n\n iters = 0\n while True:\n print("Waiting for event runs to complete...")\n if iters > 15:\n print("Timed out waiting for event runs to complete.")\n return {\n ProcessedEvent(\n id=event.eventId,\n 
payload=json.loads(event.payload) if event.payload else {},\n meta=(\n json.loads(event.additionalMetadata)\n if event.additionalMetadata\n else {}\n ),\n should_have_runs=False,\n test_run_id=cast(\n str, json.loads(event.additionalMetadata).get("test_run_id", "")\n ),\n ): []\n for event in events\n }\n\n iters += 1\n\n event_runs = await asyncio.gather(\n *[fetch_runs_for_event(hatchet, event) for event in events]\n )\n\n all_empty = all(not event_run for _, event_run in event_runs)\n\n if all_empty:\n await asyncio.sleep(1)\n continue\n\n event_id_to_runs = {event_id: runs for (event_id, runs) in event_runs}\n\n any_queued_or_running = any(\n run.status in [V1TaskStatus.QUEUED, V1TaskStatus.RUNNING]\n for runs in event_id_to_runs.values()\n for run in runs\n )\n\n if any_queued_or_running:\n await asyncio.sleep(1)\n continue\n\n break\n\n return event_id_to_runs\n\n\nasync def wait_for_result_and_assert(hatchet: Hatchet, events: list[Event]) -> None:\n event_to_runs = await wait_for_result(hatchet, events)\n\n for event, runs in event_to_runs.items():\n await assert_event_runs_processed(event, runs)\n\n\nasync def assert_event_runs_processed(\n event: ProcessedEvent,\n runs: list[V1TaskSummary],\n) -> None:\n runs = [\n run\n for run in runs\n if (run.additional_metadata or {}).get("hatchet__event_id") == event.id\n ]\n\n if event.should_have_runs:\n assert len(runs) > 0\n\n for run in runs:\n assert run.status == V1TaskStatus.COMPLETED\n assert run.output.get("test_run_id") == event.test_run_id\n else:\n assert len(runs) == 0\n\n\ndef bpi(\n index: int = 1,\n test_run_id: str = "",\n should_skip: bool = False,\n should_have_runs: bool = True,\n key: str = EVENT_KEY,\n payload: dict[str, str] = {},\n scope: str | None = None,\n) -> BulkPushEventWithMetadata:\n return BulkPushEventWithMetadata(\n key=key,\n payload={\n "should_skip": should_skip,\n **payload,\n },\n additional_metadata={\n "should_have_runs": should_have_runs,\n "test_run_id": test_run_id,\n "key": index,\n },\n scope=scope,\n )\n\n\ndef cp(should_skip: bool) -> dict[str, bool]:\n return EventWorkflowInput(should_skip=should_skip).model_dump()\n\n\n@pytest.mark.asyncio(loop_scope="session")\nasync def test_event_push(hatchet: Hatchet) -> None:\n e = hatchet.event.push(EVENT_KEY, cp(False))\n\n assert e.eventId is not None\n\n\n@pytest.mark.asyncio(loop_scope="session")\nasync def test_async_event_push(hatchet: Hatchet) -> None:\n e = await hatchet.event.aio_push(EVENT_KEY, cp(False))\n\n assert e.eventId is not None\n\n\n@pytest.mark.asyncio(loop_scope="session")\nasync def test_async_event_bulk_push(hatchet: Hatchet) -> None:\n events = [\n BulkPushEventWithMetadata(\n key="event1",\n payload={"message": "This is event 1", "should_skip": False},\n additional_metadata={"source": "test", "user_id": "user123"},\n ),\n BulkPushEventWithMetadata(\n key="event2",\n payload={"message": "This is event 2", "should_skip": False},\n additional_metadata={"source": "test", "user_id": "user456"},\n ),\n BulkPushEventWithMetadata(\n key="event3",\n payload={"message": "This is event 3", "should_skip": False},\n additional_metadata={"source": "test", "user_id": "user789"},\n ),\n ]\n opts = BulkPushEventOptions(namespace="bulk-test")\n\n e = await hatchet.event.aio_bulk_push(events, opts)\n\n assert len(e) == 3\n\n # Sort both lists of events by their key to ensure comparison order\n sorted_events = sorted(events, key=lambda x: x.key)\n sorted_returned_events = sorted(e, key=lambda x: x.key)\n namespace = "bulk-test"\n\n # Check that 
the returned events match the original events\n for original_event, returned_event in zip(sorted_events, sorted_returned_events):\n assert returned_event.key == namespace + original_event.key\n\n\n@pytest.fixture(scope="function")\ndef test_run_id() -> str:\n return str(uuid4())\n\n\n@pytest.mark.asyncio(loop_scope="session")\nasync def test_event_engine_behavior(hatchet: Hatchet) -> None:\n test_run_id = str(uuid4())\n events = [\n bpi(\n test_run_id=test_run_id,\n ),\n bpi(\n test_run_id=test_run_id,\n key="thisisafakeeventfoobarbaz",\n should_have_runs=False,\n ),\n ]\n\n result = await hatchet.event.aio_bulk_push(events)\n\n await wait_for_result_and_assert(hatchet, result)\n\n\ndef gen_bulk_events(test_run_id: str) -> list[BulkPushEventWithMetadata]:\n return [\n ## No scope, so it shouldn\'t have any runs\n bpi(\n index=1,\n test_run_id=test_run_id,\n should_skip=False,\n should_have_runs=False,\n ),\n ## No scope, so it shouldn\'t have any runs\n bpi(\n index=2,\n test_run_id=test_run_id,\n should_skip=True,\n should_have_runs=False,\n ),\n ## Scope is set and `should_skip` is False, so it should have runs\n bpi(\n index=3,\n test_run_id=test_run_id,\n should_skip=False,\n should_have_runs=True,\n scope=test_run_id,\n ),\n ## Scope is set and `should_skip` is True, so it shouldn\'t have runs\n bpi(\n index=4,\n test_run_id=test_run_id,\n should_skip=True,\n should_have_runs=False,\n scope=test_run_id,\n ),\n ## Scope is set, `should_skip` is False, but key is different, so it shouldn\'t have runs\n bpi(\n index=5,\n test_run_id=test_run_id,\n should_skip=True,\n should_have_runs=False,\n scope=test_run_id,\n key="thisisafakeeventfoobarbaz",\n ),\n ## Scope is set, `should_skip` is False, but key is different, so it shouldn\'t have runs\n bpi(\n index=6,\n test_run_id=test_run_id,\n should_skip=False,\n should_have_runs=False,\n scope=test_run_id,\n key="thisisafakeeventfoobarbaz",\n ),\n ]\n\n\n@pytest.mark.asyncio(loop_scope="session")\nasync def test_event_skipping_filtering(hatchet: Hatchet, test_run_id: str) -> None:\n async with event_filter(hatchet, test_run_id):\n events = gen_bulk_events(test_run_id)\n\n result = await hatchet.event.aio_bulk_push(events)\n\n await wait_for_result_and_assert(hatchet, result)\n\n\nasync def bulk_to_single(hatchet: Hatchet, event: BulkPushEventWithMetadata) -> Event:\n return await hatchet.event.aio_push(\n event_key=event.key,\n payload=event.payload,\n options=PushEventOptions(\n scope=event.scope,\n additional_metadata=event.additional_metadata,\n priority=event.priority,\n ),\n )\n\n\n@pytest.mark.asyncio(loop_scope="session")\nasync def test_event_skipping_filtering_no_bulk(\n hatchet: Hatchet, test_run_id: str\n) -> None:\n async with event_filter(hatchet, test_run_id):\n raw_events = gen_bulk_events(test_run_id)\n events = await asyncio.gather(\n *[bulk_to_single(hatchet, event) for event in raw_events]\n )\n\n await wait_for_result_and_assert(hatchet, events)\n\n\n@pytest.mark.asyncio(loop_scope="session")\nasync def test_event_payload_filtering(hatchet: Hatchet, test_run_id: str) -> None:\n async with event_filter(\n hatchet,\n test_run_id,\n "input.should_skip == false && payload.foobar == \'baz\'",\n {"foobar": "qux"},\n ):\n event = await hatchet.event.aio_push(\n event_key=EVENT_KEY,\n payload={"message": "This is event 1", "should_skip": False},\n options=PushEventOptions(\n scope=test_run_id,\n additional_metadata={\n "should_have_runs": False,\n "test_run_id": test_run_id,\n "key": 1,\n },\n ),\n )\n\n await 
wait_for_result_and_assert(hatchet, [event])\n\n\n@pytest.mark.asyncio(loop_scope="session")\nasync def test_event_payload_filtering_with_payload_match(\n hatchet: Hatchet, test_run_id: str\n) -> None:\n async with event_filter(\n hatchet,\n test_run_id,\n "input.should_skip == false && payload.foobar == \'baz\'",\n {"foobar": "baz"},\n ):\n event = await hatchet.event.aio_push(\n event_key=EVENT_KEY,\n payload={"message": "This is event 1", "should_skip": False},\n options=PushEventOptions(\n scope=test_run_id,\n additional_metadata={\n "should_have_runs": True,\n "test_run_id": test_run_id,\n "key": 1,\n },\n ),\n )\n\n await wait_for_result_and_assert(hatchet, [event])\n\n\n@pytest.mark.asyncio(loop_scope="session")\nasync def test_filtering_by_event_key(hatchet: Hatchet, test_run_id: str) -> None:\n async with event_filter(\n hatchet,\n test_run_id,\n f"event_key == \'{SECONDARY_KEY}\'",\n ):\n event_1 = await hatchet.event.aio_push(\n event_key=SECONDARY_KEY,\n payload={\n "message": "Should run because filter matches",\n "should_skip": False,\n },\n options=PushEventOptions(\n scope=test_run_id,\n additional_metadata={\n "should_have_runs": True,\n "test_run_id": test_run_id,\n },\n ),\n )\n event_2 = await hatchet.event.aio_push(\n event_key=EVENT_KEY,\n payload={\n "message": "Should skip because filter does not match",\n "should_skip": False,\n },\n options=PushEventOptions(\n scope=test_run_id,\n additional_metadata={\n "should_have_runs": False,\n "test_run_id": test_run_id,\n },\n ),\n )\n\n await wait_for_result_and_assert(hatchet, [event_1, event_2])\n\n\n@pytest.mark.asyncio(loop_scope="session")\nasync def test_key_wildcards(hatchet: Hatchet, test_run_id: str) -> None:\n keys = [\n WILDCARD_KEY.replace("*", "1"),\n WILDCARD_KEY.replace("*", "2"),\n "foobar",\n EVENT_KEY,\n ]\n\n async with event_filter(\n hatchet,\n test_run_id,\n ):\n events = [\n await hatchet.event.aio_push(\n event_key=key,\n payload={\n "should_skip": False,\n },\n options=PushEventOptions(\n scope=test_run_id,\n additional_metadata={\n "should_have_runs": key != "foobar",\n "test_run_id": test_run_id,\n },\n ),\n )\n for key in keys\n ]\n\n await wait_for_result_and_assert(hatchet, events)\n\n\n@pytest.mark.asyncio(loop_scope="session")\nasync def test_multiple_runs_for_multiple_scope_matches(\n hatchet: Hatchet, test_run_id: str\n) -> None:\n async with event_filter(\n hatchet, test_run_id, payload={"filter_id": "1"}, expression="1 == 1"\n ):\n async with event_filter(\n hatchet, test_run_id, payload={"filter_id": "2"}, expression="2 == 2"\n ):\n event = await hatchet.event.aio_push(\n event_key=EVENT_KEY,\n payload={\n "should_skip": False,\n },\n options=PushEventOptions(\n scope=test_run_id,\n additional_metadata={\n "should_have_runs": True,\n "test_run_id": test_run_id,\n },\n ),\n )\n\n event_to_runs = await wait_for_result(hatchet, [event])\n\n assert len(event_to_runs.keys()) == 1\n\n runs = list(event_to_runs.values())[0]\n\n assert len(runs) == 2\n\n assert {r.output.get("filter_id") for r in runs} == {"1", "2"}\n', + 'import asyncio\nimport json\nfrom collections.abc import AsyncGenerator\nfrom contextlib import asynccontextmanager\nfrom datetime import datetime, timedelta, timezone\nfrom typing import cast\nfrom uuid import uuid4\n\nimport pytest\nfrom pydantic import BaseModel\n\nfrom examples.events.worker import (\n EVENT_KEY,\n SECONDARY_KEY,\n WILDCARD_KEY,\n EventWorkflowInput,\n event_workflow,\n)\nfrom hatchet_sdk.clients.events import (\n BulkPushEventOptions,\n 
BulkPushEventWithMetadata,\n PushEventOptions,\n)\nfrom hatchet_sdk.clients.rest.models.v1_task_status import V1TaskStatus\nfrom hatchet_sdk.clients.rest.models.v1_task_summary import V1TaskSummary\nfrom hatchet_sdk.contracts.events_pb2 import Event\nfrom hatchet_sdk.hatchet import Hatchet\n\n\nclass ProcessedEvent(BaseModel):\n id: str\n payload: dict[str, str | bool]\n meta: dict[str, str | bool | int]\n should_have_runs: bool\n test_run_id: str\n\n def __hash__(self) -> int:\n return hash(self.model_dump_json())\n\n\n@asynccontextmanager\nasync def event_filter(\n hatchet: Hatchet,\n test_run_id: str,\n expression: str | None = None,\n payload: dict[str, str] = {},\n) -> AsyncGenerator[None, None]:\n expression = (\n expression\n or f"input.should_skip == false && payload.test_run_id == \'{test_run_id}\'"\n )\n\n f = await hatchet.filters.aio_create(\n workflow_id=event_workflow.id,\n expression=expression,\n scope=test_run_id,\n payload={"test_run_id": test_run_id, **payload},\n )\n\n try:\n yield\n finally:\n await hatchet.filters.aio_delete(f.metadata.id)\n\n\nasync def fetch_runs_for_event(\n hatchet: Hatchet, event: Event\n) -> tuple[ProcessedEvent, list[V1TaskSummary]]:\n runs = await hatchet.runs.aio_list(triggering_event_external_id=event.eventId)\n\n meta = (\n cast(dict[str, str | int | bool], json.loads(event.additionalMetadata))\n if event.additionalMetadata\n else {}\n )\n payload = (\n cast(dict[str, str | bool], json.loads(event.payload)) if event.payload else {}\n )\n\n processed_event = ProcessedEvent(\n id=event.eventId,\n payload=payload,\n meta=meta,\n should_have_runs=meta.get("should_have_runs", False) is True,\n test_run_id=cast(str, meta["test_run_id"]),\n )\n\n if not all([r.output for r in runs.rows]):\n return (processed_event, [])\n\n return (\n processed_event,\n runs.rows or [],\n )\n\n\nasync def wait_for_result(\n hatchet: Hatchet, events: list[Event]\n) -> dict[ProcessedEvent, list[V1TaskSummary]]:\n await asyncio.sleep(3)\n\n since = datetime.now(tz=timezone.utc) - timedelta(minutes=2)\n\n persisted = (await hatchet.event.aio_list(limit=100, since=since)).rows or []\n\n assert {e.eventId for e in events}.issubset({e.metadata.id for e in persisted})\n\n iters = 0\n while True:\n print("Waiting for event runs to complete...")\n if iters > 15:\n print("Timed out waiting for event runs to complete.")\n return {\n ProcessedEvent(\n id=event.eventId,\n payload=json.loads(event.payload) if event.payload else {},\n meta=(\n json.loads(event.additionalMetadata)\n if event.additionalMetadata\n else {}\n ),\n should_have_runs=False,\n test_run_id=cast(\n str, json.loads(event.additionalMetadata).get("test_run_id", "")\n ),\n ): []\n for event in events\n }\n\n iters += 1\n\n event_runs = await asyncio.gather(\n *[fetch_runs_for_event(hatchet, event) for event in events]\n )\n\n all_empty = all(not event_run for _, event_run in event_runs)\n\n if all_empty:\n await asyncio.sleep(1)\n continue\n\n event_id_to_runs = {event_id: runs for (event_id, runs) in event_runs}\n\n any_queued_or_running = any(\n run.status in [V1TaskStatus.QUEUED, V1TaskStatus.RUNNING]\n for runs in event_id_to_runs.values()\n for run in runs\n )\n\n if any_queued_or_running:\n await asyncio.sleep(1)\n continue\n\n break\n\n return event_id_to_runs\n\n\nasync def wait_for_result_and_assert(hatchet: Hatchet, events: list[Event]) -> None:\n event_to_runs = await wait_for_result(hatchet, events)\n\n for event, runs in event_to_runs.items():\n await assert_event_runs_processed(event, 
runs)\n\n\nasync def assert_event_runs_processed(\n event: ProcessedEvent,\n runs: list[V1TaskSummary],\n) -> None:\n runs = [\n run\n for run in runs\n if (run.additional_metadata or {}).get("hatchet__event_id") == event.id\n ]\n\n if event.should_have_runs:\n assert len(runs) > 0\n\n for run in runs:\n assert run.status == V1TaskStatus.COMPLETED\n assert run.output.get("test_run_id") == event.test_run_id\n else:\n assert len(runs) == 0\n\n\ndef bpi(\n index: int = 1,\n test_run_id: str = "",\n should_skip: bool = False,\n should_have_runs: bool = True,\n key: str = EVENT_KEY,\n payload: dict[str, str] = {},\n scope: str | None = None,\n) -> BulkPushEventWithMetadata:\n return BulkPushEventWithMetadata(\n key=key,\n payload={\n "should_skip": should_skip,\n **payload,\n },\n additional_metadata={\n "should_have_runs": should_have_runs,\n "test_run_id": test_run_id,\n "key": index,\n },\n scope=scope,\n )\n\n\ndef cp(should_skip: bool) -> dict[str, bool]:\n return EventWorkflowInput(should_skip=should_skip).model_dump()\n\n\n@pytest.mark.asyncio(loop_scope="session")\nasync def test_event_push(hatchet: Hatchet) -> None:\n e = hatchet.event.push(EVENT_KEY, cp(False))\n\n assert e.eventId is not None\n\n\n@pytest.mark.asyncio(loop_scope="session")\nasync def test_async_event_push(hatchet: Hatchet) -> None:\n e = await hatchet.event.aio_push(EVENT_KEY, cp(False))\n\n assert e.eventId is not None\n\n\n@pytest.mark.asyncio(loop_scope="session")\nasync def test_async_event_bulk_push(hatchet: Hatchet) -> None:\n events = [\n BulkPushEventWithMetadata(\n key="event1",\n payload={"message": "This is event 1", "should_skip": False},\n additional_metadata={"source": "test", "user_id": "user123"},\n ),\n BulkPushEventWithMetadata(\n key="event2",\n payload={"message": "This is event 2", "should_skip": False},\n additional_metadata={"source": "test", "user_id": "user456"},\n ),\n BulkPushEventWithMetadata(\n key="event3",\n payload={"message": "This is event 3", "should_skip": False},\n additional_metadata={"source": "test", "user_id": "user789"},\n ),\n ]\n opts = BulkPushEventOptions(namespace="bulk-test")\n\n e = await hatchet.event.aio_bulk_push(events, opts)\n\n assert len(e) == 3\n\n # Sort both lists of events by their key to ensure comparison order\n sorted_events = sorted(events, key=lambda x: x.key)\n sorted_returned_events = sorted(e, key=lambda x: x.key)\n namespace = "bulk-test"\n\n # Check that the returned events match the original events\n for original_event, returned_event in zip(\n sorted_events, sorted_returned_events, strict=False\n ):\n assert returned_event.key == namespace + original_event.key\n\n\n@pytest.fixture(scope="function")\ndef test_run_id() -> str:\n return str(uuid4())\n\n\n@pytest.mark.asyncio(loop_scope="session")\nasync def test_event_engine_behavior(hatchet: Hatchet) -> None:\n test_run_id = str(uuid4())\n events = [\n bpi(\n test_run_id=test_run_id,\n ),\n bpi(\n test_run_id=test_run_id,\n key="thisisafakeeventfoobarbaz",\n should_have_runs=False,\n ),\n ]\n\n result = await hatchet.event.aio_bulk_push(events)\n\n await wait_for_result_and_assert(hatchet, result)\n\n\ndef gen_bulk_events(test_run_id: str) -> list[BulkPushEventWithMetadata]:\n return [\n ## No scope, so it shouldn\'t have any runs\n bpi(\n index=1,\n test_run_id=test_run_id,\n should_skip=False,\n should_have_runs=False,\n ),\n ## No scope, so it shouldn\'t have any runs\n bpi(\n index=2,\n test_run_id=test_run_id,\n should_skip=True,\n should_have_runs=False,\n ),\n ## Scope is set and 
`should_skip` is False, so it should have runs\n bpi(\n index=3,\n test_run_id=test_run_id,\n should_skip=False,\n should_have_runs=True,\n scope=test_run_id,\n ),\n ## Scope is set and `should_skip` is True, so it shouldn\'t have runs\n bpi(\n index=4,\n test_run_id=test_run_id,\n should_skip=True,\n should_have_runs=False,\n scope=test_run_id,\n ),\n ## Scope is set, `should_skip` is False, but key is different, so it shouldn\'t have runs\n bpi(\n index=5,\n test_run_id=test_run_id,\n should_skip=True,\n should_have_runs=False,\n scope=test_run_id,\n key="thisisafakeeventfoobarbaz",\n ),\n ## Scope is set, `should_skip` is False, but key is different, so it shouldn\'t have runs\n bpi(\n index=6,\n test_run_id=test_run_id,\n should_skip=False,\n should_have_runs=False,\n scope=test_run_id,\n key="thisisafakeeventfoobarbaz",\n ),\n ]\n\n\n@pytest.mark.asyncio(loop_scope="session")\nasync def test_event_skipping_filtering(hatchet: Hatchet, test_run_id: str) -> None:\n async with event_filter(hatchet, test_run_id):\n events = gen_bulk_events(test_run_id)\n\n result = await hatchet.event.aio_bulk_push(events)\n\n await wait_for_result_and_assert(hatchet, result)\n\n\nasync def bulk_to_single(hatchet: Hatchet, event: BulkPushEventWithMetadata) -> Event:\n return await hatchet.event.aio_push(\n event_key=event.key,\n payload=event.payload,\n options=PushEventOptions(\n scope=event.scope,\n additional_metadata=event.additional_metadata,\n priority=event.priority,\n ),\n )\n\n\n@pytest.mark.asyncio(loop_scope="session")\nasync def test_event_skipping_filtering_no_bulk(\n hatchet: Hatchet, test_run_id: str\n) -> None:\n async with event_filter(hatchet, test_run_id):\n raw_events = gen_bulk_events(test_run_id)\n events = await asyncio.gather(\n *[bulk_to_single(hatchet, event) for event in raw_events]\n )\n\n await wait_for_result_and_assert(hatchet, events)\n\n\n@pytest.mark.asyncio(loop_scope="session")\nasync def test_event_payload_filtering(hatchet: Hatchet, test_run_id: str) -> None:\n async with event_filter(\n hatchet,\n test_run_id,\n "input.should_skip == false && payload.foobar == \'baz\'",\n {"foobar": "qux"},\n ):\n event = await hatchet.event.aio_push(\n event_key=EVENT_KEY,\n payload={"message": "This is event 1", "should_skip": False},\n options=PushEventOptions(\n scope=test_run_id,\n additional_metadata={\n "should_have_runs": False,\n "test_run_id": test_run_id,\n "key": 1,\n },\n ),\n )\n\n await wait_for_result_and_assert(hatchet, [event])\n\n\n@pytest.mark.asyncio(loop_scope="session")\nasync def test_event_payload_filtering_with_payload_match(\n hatchet: Hatchet, test_run_id: str\n) -> None:\n async with event_filter(\n hatchet,\n test_run_id,\n "input.should_skip == false && payload.foobar == \'baz\'",\n {"foobar": "baz"},\n ):\n event = await hatchet.event.aio_push(\n event_key=EVENT_KEY,\n payload={"message": "This is event 1", "should_skip": False},\n options=PushEventOptions(\n scope=test_run_id,\n additional_metadata={\n "should_have_runs": True,\n "test_run_id": test_run_id,\n "key": 1,\n },\n ),\n )\n\n await wait_for_result_and_assert(hatchet, [event])\n\n\n@pytest.mark.asyncio(loop_scope="session")\nasync def test_filtering_by_event_key(hatchet: Hatchet, test_run_id: str) -> None:\n async with event_filter(\n hatchet,\n test_run_id,\n f"event_key == \'{SECONDARY_KEY}\'",\n ):\n event_1 = await hatchet.event.aio_push(\n event_key=SECONDARY_KEY,\n payload={\n "message": "Should run because filter matches",\n "should_skip": False,\n },\n options=PushEventOptions(\n 
scope=test_run_id,\n additional_metadata={\n "should_have_runs": True,\n "test_run_id": test_run_id,\n },\n ),\n )\n event_2 = await hatchet.event.aio_push(\n event_key=EVENT_KEY,\n payload={\n "message": "Should skip because filter does not match",\n "should_skip": False,\n },\n options=PushEventOptions(\n scope=test_run_id,\n additional_metadata={\n "should_have_runs": False,\n "test_run_id": test_run_id,\n },\n ),\n )\n\n await wait_for_result_and_assert(hatchet, [event_1, event_2])\n\n\n@pytest.mark.asyncio(loop_scope="session")\nasync def test_key_wildcards(hatchet: Hatchet, test_run_id: str) -> None:\n keys = [\n WILDCARD_KEY.replace("*", "1"),\n WILDCARD_KEY.replace("*", "2"),\n "foobar",\n EVENT_KEY,\n ]\n\n async with event_filter(\n hatchet,\n test_run_id,\n ):\n events = [\n await hatchet.event.aio_push(\n event_key=key,\n payload={\n "should_skip": False,\n },\n options=PushEventOptions(\n scope=test_run_id,\n additional_metadata={\n "should_have_runs": key != "foobar",\n "test_run_id": test_run_id,\n },\n ),\n )\n for key in keys\n ]\n\n await wait_for_result_and_assert(hatchet, events)\n\n\n@pytest.mark.asyncio(loop_scope="session")\nasync def test_multiple_runs_for_multiple_scope_matches(\n hatchet: Hatchet, test_run_id: str\n) -> None:\n async with event_filter(\n hatchet, test_run_id, payload={"filter_id": "1"}, expression="1 == 1"\n ):\n async with event_filter(\n hatchet, test_run_id, payload={"filter_id": "2"}, expression="2 == 2"\n ):\n event = await hatchet.event.aio_push(\n event_key=EVENT_KEY,\n payload={\n "should_skip": False,\n },\n options=PushEventOptions(\n scope=test_run_id,\n additional_metadata={\n "should_have_runs": True,\n "test_run_id": test_run_id,\n },\n ),\n )\n\n event_to_runs = await wait_for_result(hatchet, [event])\n\n assert len(event_to_runs.keys()) == 1\n\n runs = list(event_to_runs.values())[0]\n\n assert len(runs) == 2\n\n assert {r.output.get("filter_id") for r in runs} == {"1", "2"}\n', source: 'out/python/events/test_event.py', blocks: {}, highlights: {}, diff --git a/frontend/app/src/next/lib/docs/generated/snips/python/events/worker.ts b/frontend/app/src/next/lib/docs/generated/snips/python/events/worker.ts index 435faf3e1..c2c5c61ff 100644 --- a/frontend/app/src/next/lib/docs/generated/snips/python/events/worker.ts +++ b/frontend/app/src/next/lib/docs/generated/snips/python/events/worker.ts @@ -3,7 +3,7 @@ import { Snippet } from '@/next/lib/docs/generated/snips/types'; const snippet: Snippet = { language: 'python', content: - 'from pydantic import BaseModel\n\nfrom hatchet_sdk import Context, DefaultFilter, Hatchet\n\nhatchet = Hatchet()\n\n\n# > Event trigger\nEVENT_KEY = "user:create"\nSECONDARY_KEY = "foobarbaz"\nWILDCARD_KEY = "subscription:*"\n\n\nclass EventWorkflowInput(BaseModel):\n should_skip: bool\n\n\nevent_workflow = hatchet.workflow(\n name="EventWorkflow",\n on_events=[EVENT_KEY, SECONDARY_KEY, WILDCARD_KEY],\n input_validator=EventWorkflowInput,\n)\n\n# > Event trigger with filter\nevent_workflow_with_filter = hatchet.workflow(\n name="EventWorkflow",\n on_events=[EVENT_KEY, SECONDARY_KEY, WILDCARD_KEY],\n input_validator=EventWorkflowInput,\n default_filters=[\n DefaultFilter(\n expression="true",\n scope="example-scope",\n payload={\n "main_character": "Anna",\n "supporting_character": "Stiva",\n "location": "Moscow",\n },\n )\n ],\n)\n\n\n@event_workflow.task()\ndef task(input: EventWorkflowInput, ctx: Context) -> dict[str, str]:\n print("event received")\n\n return dict(ctx.filter_payload)\n\n\n# > Accessing the 
filter payload\n@event_workflow_with_filter.task()\ndef filtered_task(input: EventWorkflowInput, ctx: Context) -> None:\n print(ctx.filter_payload)\n\n\n\n\ndef main() -> None:\n worker = hatchet.worker(name="EventWorker", workflows=[event_workflow])\n\n worker.start()\n\n\nif __name__ == "__main__":\n main()\n', + 'from pydantic import BaseModel\n\nfrom hatchet_sdk import Context, DefaultFilter, Hatchet\n\nhatchet = Hatchet()\n\n\n# > Event trigger\nEVENT_KEY = "user:create"\nSECONDARY_KEY = "foobarbaz"\nWILDCARD_KEY = "subscription:*"\n\n\nclass EventWorkflowInput(BaseModel):\n should_skip: bool\n\n\nevent_workflow = hatchet.workflow(\n name="EventWorkflow",\n on_events=[EVENT_KEY, SECONDARY_KEY, WILDCARD_KEY],\n input_validator=EventWorkflowInput,\n)\n\n# > Event trigger with filter\nevent_workflow_with_filter = hatchet.workflow(\n name="EventWorkflow",\n on_events=[EVENT_KEY, SECONDARY_KEY, WILDCARD_KEY],\n input_validator=EventWorkflowInput,\n default_filters=[\n DefaultFilter(\n expression="true",\n scope="example-scope",\n payload={\n "main_character": "Anna",\n "supporting_character": "Stiva",\n "location": "Moscow",\n },\n )\n ],\n)\n\n\n@event_workflow.task()\ndef task(input: EventWorkflowInput, ctx: Context) -> dict[str, str]:\n print("event received")\n\n return ctx.filter_payload\n\n\n# > Accessing the filter payload\n@event_workflow_with_filter.task()\ndef filtered_task(input: EventWorkflowInput, ctx: Context) -> None:\n print(ctx.filter_payload)\n\n\n\n\ndef main() -> None:\n worker = hatchet.worker(name="EventWorker", workflows=[event_workflow])\n\n worker.start()\n\n\nif __name__ == "__main__":\n main()\n', source: 'out/python/events/worker.py', blocks: { event_trigger: { diff --git a/frontend/app/src/next/lib/docs/generated/snips/python/lifespans/simple.ts b/frontend/app/src/next/lib/docs/generated/snips/python/lifespans/simple.ts index b64970a1e..186bacf6f 100644 --- a/frontend/app/src/next/lib/docs/generated/snips/python/lifespans/simple.ts +++ b/frontend/app/src/next/lib/docs/generated/snips/python/lifespans/simple.ts @@ -3,12 +3,12 @@ import { Snippet } from '@/next/lib/docs/generated/snips/types'; const snippet: Snippet = { language: 'python', content: - '# > Lifespan\n\nfrom typing import AsyncGenerator, cast\n\nfrom pydantic import BaseModel\n\nfrom hatchet_sdk import Context, EmptyModel, Hatchet\n\nhatchet = Hatchet(debug=True)\n\n\nclass Lifespan(BaseModel):\n foo: str\n pi: float\n\n\nasync def lifespan() -> AsyncGenerator[Lifespan, None]:\n yield Lifespan(foo="bar", pi=3.14)\n\n\n@hatchet.task(name="LifespanWorkflow")\ndef lifespan_task(input: EmptyModel, ctx: Context) -> Lifespan:\n return cast(Lifespan, ctx.lifespan)\n\n\ndef main() -> None:\n worker = hatchet.worker(\n "test-worker", slots=1, workflows=[lifespan_task], lifespan=lifespan\n )\n worker.start()\n\n\n\nif __name__ == "__main__":\n main()\n', + '# > Lifespan\n\nfrom collections.abc import AsyncGenerator\nfrom typing import cast\n\nfrom pydantic import BaseModel\n\nfrom hatchet_sdk import Context, EmptyModel, Hatchet\n\nhatchet = Hatchet(debug=True)\n\n\nclass Lifespan(BaseModel):\n foo: str\n pi: float\n\n\nasync def lifespan() -> AsyncGenerator[Lifespan, None]:\n yield Lifespan(foo="bar", pi=3.14)\n\n\n@hatchet.task(name="LifespanWorkflow")\ndef lifespan_task(input: EmptyModel, ctx: Context) -> Lifespan:\n return cast(Lifespan, ctx.lifespan)\n\n\ndef main() -> None:\n worker = hatchet.worker(\n "test-worker", slots=1, workflows=[lifespan_task], lifespan=lifespan\n )\n worker.start()\n\n\n\nif 
__name__ == "__main__":\n main()\n', source: 'out/python/lifespans/simple.py', blocks: { lifespan: { start: 2, - stop: 32, + stop: 33, }, }, highlights: {}, diff --git a/frontend/app/src/next/lib/docs/generated/snips/python/lifespans/worker.ts b/frontend/app/src/next/lib/docs/generated/snips/python/lifespans/worker.ts index 00ed3d1ce..7ffba68cc 100644 --- a/frontend/app/src/next/lib/docs/generated/snips/python/lifespans/worker.ts +++ b/frontend/app/src/next/lib/docs/generated/snips/python/lifespans/worker.ts @@ -3,16 +3,16 @@ import { Snippet } from '@/next/lib/docs/generated/snips/types'; const snippet: Snippet = { language: 'python', content: - 'from typing import AsyncGenerator, cast\nfrom uuid import UUID\n\nfrom psycopg_pool import ConnectionPool\nfrom pydantic import BaseModel, ConfigDict\n\nfrom hatchet_sdk import Context, EmptyModel, Hatchet\n\nhatchet = Hatchet(debug=True)\n\n\n# > Use the lifespan in a task\nclass TaskOutput(BaseModel):\n num_rows: int\n external_ids: list[UUID]\n\n\nlifespan_workflow = hatchet.workflow(name="LifespanWorkflow")\n\n\n@lifespan_workflow.task()\ndef sync_lifespan_task(input: EmptyModel, ctx: Context) -> TaskOutput:\n pool = cast(Lifespan, ctx.lifespan).pool\n\n with pool.connection() as conn:\n query = conn.execute("SELECT * FROM v1_lookup_table_olap LIMIT 5;")\n rows = query.fetchall()\n\n for row in rows:\n print(row)\n\n print("executed sync task with lifespan", ctx.lifespan)\n\n return TaskOutput(\n num_rows=len(rows),\n external_ids=[cast(UUID, row[0]) for row in rows],\n )\n\n\n\n\n@lifespan_workflow.task()\nasync def async_lifespan_task(input: EmptyModel, ctx: Context) -> TaskOutput:\n pool = cast(Lifespan, ctx.lifespan).pool\n\n with pool.connection() as conn:\n query = conn.execute("SELECT * FROM v1_lookup_table_olap LIMIT 5;")\n rows = query.fetchall()\n\n for row in rows:\n print(row)\n\n print("executed async task with lifespan", ctx.lifespan)\n\n return TaskOutput(\n num_rows=len(rows),\n external_ids=[cast(UUID, row[0]) for row in rows],\n )\n\n\n# > Define a lifespan\nclass Lifespan(BaseModel):\n model_config = ConfigDict(arbitrary_types_allowed=True)\n\n foo: str\n pool: ConnectionPool\n\n\nasync def lifespan() -> AsyncGenerator[Lifespan, None]:\n print("Running lifespan!")\n with ConnectionPool("postgres://hatchet:hatchet@localhost:5431/hatchet") as pool:\n yield Lifespan(\n foo="bar",\n pool=pool,\n )\n\n print("Cleaning up lifespan!")\n\n\nworker = hatchet.worker(\n "test-worker", slots=1, workflows=[lifespan_workflow], lifespan=lifespan\n)\n\n\ndef main() -> None:\n worker.start()\n\n\nif __name__ == "__main__":\n main()\n', + 'from collections.abc import AsyncGenerator\nfrom typing import cast\nfrom uuid import UUID\n\nfrom psycopg_pool import ConnectionPool\nfrom pydantic import BaseModel, ConfigDict\n\nfrom hatchet_sdk import Context, EmptyModel, Hatchet\n\nhatchet = Hatchet(debug=True)\n\n\n# > Use the lifespan in a task\nclass TaskOutput(BaseModel):\n num_rows: int\n external_ids: list[UUID]\n\n\nlifespan_workflow = hatchet.workflow(name="LifespanWorkflow")\n\n\n@lifespan_workflow.task()\ndef sync_lifespan_task(input: EmptyModel, ctx: Context) -> TaskOutput:\n pool = cast(Lifespan, ctx.lifespan).pool\n\n with pool.connection() as conn:\n query = conn.execute("SELECT * FROM v1_lookup_table_olap LIMIT 5;")\n rows = query.fetchall()\n\n for row in rows:\n print(row)\n\n print("executed sync task with lifespan", ctx.lifespan)\n\n return TaskOutput(\n num_rows=len(rows),\n external_ids=[cast(UUID, row[0]) for row in rows],\n 
)\n\n\n\n\n@lifespan_workflow.task()\nasync def async_lifespan_task(input: EmptyModel, ctx: Context) -> TaskOutput:\n pool = cast(Lifespan, ctx.lifespan).pool\n\n with pool.connection() as conn:\n query = conn.execute("SELECT * FROM v1_lookup_table_olap LIMIT 5;")\n rows = query.fetchall()\n\n for row in rows:\n print(row)\n\n print("executed async task with lifespan", ctx.lifespan)\n\n return TaskOutput(\n num_rows=len(rows),\n external_ids=[cast(UUID, row[0]) for row in rows],\n )\n\n\n# > Define a lifespan\nclass Lifespan(BaseModel):\n model_config = ConfigDict(arbitrary_types_allowed=True)\n\n foo: str\n pool: ConnectionPool\n\n\nasync def lifespan() -> AsyncGenerator[Lifespan, None]:\n print("Running lifespan!")\n with ConnectionPool("postgres://hatchet:hatchet@localhost:5431/hatchet") as pool:\n yield Lifespan(\n foo="bar",\n pool=pool,\n )\n\n print("Cleaning up lifespan!")\n\n\nworker = hatchet.worker(\n "test-worker", slots=1, workflows=[lifespan_workflow], lifespan=lifespan\n)\n\n\ndef main() -> None:\n worker.start()\n\n\nif __name__ == "__main__":\n main()\n', source: 'out/python/lifespans/worker.py', blocks: { use_the_lifespan_in_a_task: { - start: 13, - stop: 39, + start: 14, + stop: 40, }, define_a_lifespan: { - start: 62, - stop: 82, + start: 63, + stop: 83, }, }, highlights: {}, diff --git a/frontend/app/src/next/lib/docs/generated/snips/python/logger/workflow.ts b/frontend/app/src/next/lib/docs/generated/snips/python/logger/workflow.ts index 9606c00f9..48fe75854 100644 --- a/frontend/app/src/next/lib/docs/generated/snips/python/logger/workflow.ts +++ b/frontend/app/src/next/lib/docs/generated/snips/python/logger/workflow.ts @@ -3,7 +3,7 @@ import { Snippet } from '@/next/lib/docs/generated/snips/types'; const snippet: Snippet = { language: 'python', content: - '# > LoggingWorkflow\n\nimport logging\nimport time\n\nfrom examples.logger.client import hatchet\nfrom hatchet_sdk import Context, EmptyModel\n\nlogger = logging.getLogger(__name__)\n\nlogging_workflow = hatchet.workflow(\n name="LoggingWorkflow",\n)\n\n\n@logging_workflow.task()\ndef root_logger(input: EmptyModel, ctx: Context) -> dict[str, str]:\n for i in range(12):\n logger.info("executed step1 - {}".format(i))\n logger.info({"step1": "step1"})\n\n time.sleep(0.1)\n\n return {"status": "success"}\n\n\n\n# > ContextLogger\n\n\n@logging_workflow.task()\ndef context_logger(input: EmptyModel, ctx: Context) -> dict[str, str]:\n for i in range(12):\n ctx.log("executed step1 - {}".format(i))\n ctx.log({"step1": "step1"})\n\n time.sleep(0.1)\n\n return {"status": "success"}\n\n\n', + '# > LoggingWorkflow\n\nimport logging\nimport time\n\nfrom examples.logger.client import hatchet\nfrom hatchet_sdk import Context, EmptyModel\n\nlogger = logging.getLogger(__name__)\n\nlogging_workflow = hatchet.workflow(\n name="LoggingWorkflow",\n)\n\n\n@logging_workflow.task()\ndef root_logger(input: EmptyModel, ctx: Context) -> dict[str, str]:\n for i in range(12):\n logger.info(f"executed step1 - {i}")\n logger.info({"step1": "step1"})\n\n time.sleep(0.1)\n\n return {"status": "success"}\n\n\n\n# > ContextLogger\n\n\n@logging_workflow.task()\ndef context_logger(input: EmptyModel, ctx: Context) -> dict[str, str]:\n for i in range(12):\n ctx.log(f"executed step1 - {i}")\n ctx.log({"step1": "step1"})\n\n time.sleep(0.1)\n\n return {"status": "success"}\n\n\n', source: 'out/python/logger/workflow.py', blocks: { loggingworkflow: { diff --git a/frontend/app/src/next/lib/docs/generated/snips/python/migration_guides/mergent.ts 
b/frontend/app/src/next/lib/docs/generated/snips/python/migration_guides/mergent.ts index 9b0480c75..5cb922f23 100644 --- a/frontend/app/src/next/lib/docs/generated/snips/python/migration_guides/mergent.ts +++ b/frontend/app/src/next/lib/docs/generated/snips/python/migration_guides/mergent.ts @@ -3,32 +3,32 @@ import { Snippet } from '@/next/lib/docs/generated/snips/types'; const snippet: Snippet = { language: 'python', content: - 'from datetime import datetime, timedelta, timezone\nfrom typing import Any, Dict, List, Mapping\n\nimport requests\nfrom pydantic import BaseModel\nfrom requests import Response\n\nfrom hatchet_sdk.context.context import Context\n\nfrom .hatchet_client import hatchet\n\n\nasync def process_image(image_url: str, filters: List[str]) -> Dict[str, Any]:\n # Do some image processing\n return {"url": image_url, "size": 100, "format": "png"}\n\n\n# > Before (Mergent)\nasync def process_image_task(request: Any) -> Dict[str, Any]:\n image_url = request.json["image_url"]\n filters = request.json["filters"]\n try:\n result = await process_image(image_url, filters)\n return {"success": True, "processed_url": result["url"]}\n except Exception as e:\n print(f"Image processing failed: {e}")\n raise\n\n\n\n\n# > After (Hatchet)\nclass ImageProcessInput(BaseModel):\n image_url: str\n filters: List[str]\n\n\nclass ImageProcessOutput(BaseModel):\n processed_url: str\n metadata: Dict[str, Any]\n\n\n@hatchet.task(\n name="image-processor",\n retries=3,\n execution_timeout="10m",\n input_validator=ImageProcessInput,\n)\nasync def image_processor(input: ImageProcessInput, ctx: Context) -> ImageProcessOutput:\n # Do some image processing\n result = await process_image(input.image_url, input.filters)\n\n if not result["url"]:\n raise ValueError("Processing failed to generate URL")\n\n return ImageProcessOutput(\n processed_url=result["url"],\n metadata={\n "size": result["size"],\n "format": result["format"],\n "applied_filters": input.filters,\n },\n )\n\n\n\n\nasync def run() -> None:\n # > Running a task (Mergent)\n headers: Mapping[str, str] = {\n "Authorization": "Bearer ",\n "Content-Type": "application/json",\n }\n\n task_data = {\n "name": "4cf95241-fa19-47ef-8a67-71e483747649",\n "queue": "default",\n "request": {\n "url": "https://example.com",\n "headers": {\n "Authorization": "fake-secret-token",\n "Content-Type": "application/json",\n },\n "body": "Hello, world!",\n },\n }\n\n try:\n response: Response = requests.post(\n "https://api.mergent.co/v2/tasks",\n headers=headers,\n json=task_data,\n )\n print(response.json())\n except Exception as e:\n print(f"Error: {e}")\n\n # > Running a task (Hatchet)\n result = await image_processor.aio_run(\n ImageProcessInput(image_url="https://example.com/image.png", filters=["blur"])\n )\n\n # you can await fully typed results\n print(result)\n\n\nasync def schedule() -> None:\n # > Scheduling tasks (Mergent)\n options = {\n # same options as before\n "json": {\n # same body as before\n "delay": "5m"\n }\n }\n\n print(options)\n\n # > Scheduling tasks (Hatchet)\n # Schedule the task to run at a specific time\n run_at = datetime.now(tz=timezone.utc) + timedelta(days=1)\n await image_processor.aio_schedule(\n run_at,\n ImageProcessInput(image_url="https://example.com/image.png", filters=["blur"]),\n )\n\n # Schedule the task to run every hour\n await image_processor.aio_create_cron(\n "run-hourly",\n "0 * * * *",\n ImageProcessInput(image_url="https://example.com/image.png", filters=["blur"]),\n )\n', + 'from collections.abc import 
Mapping\nfrom datetime import datetime, timedelta, timezone\nfrom typing import Any\n\nimport requests\nfrom pydantic import BaseModel\nfrom requests import Response\n\nfrom hatchet_sdk.context.context import Context\n\nfrom .hatchet_client import hatchet\n\n\nasync def process_image(image_url: str, filters: list[str]) -> dict[str, Any]:\n # Do some image processing\n return {"url": image_url, "size": 100, "format": "png"}\n\n\n# > Before (Mergent)\nasync def process_image_task(request: Any) -> dict[str, Any]:\n image_url = request.json["image_url"]\n filters = request.json["filters"]\n try:\n result = await process_image(image_url, filters)\n return {"success": True, "processed_url": result["url"]}\n except Exception as e:\n print(f"Image processing failed: {e}")\n raise\n\n\n\n\n# > After (Hatchet)\nclass ImageProcessInput(BaseModel):\n image_url: str\n filters: list[str]\n\n\nclass ImageProcessOutput(BaseModel):\n processed_url: str\n metadata: dict[str, Any]\n\n\n@hatchet.task(\n name="image-processor",\n retries=3,\n execution_timeout="10m",\n input_validator=ImageProcessInput,\n)\nasync def image_processor(input: ImageProcessInput, ctx: Context) -> ImageProcessOutput:\n # Do some image processing\n result = await process_image(input.image_url, input.filters)\n\n if not result["url"]:\n raise ValueError("Processing failed to generate URL")\n\n return ImageProcessOutput(\n processed_url=result["url"],\n metadata={\n "size": result["size"],\n "format": result["format"],\n "applied_filters": input.filters,\n },\n )\n\n\n\n\nasync def run() -> None:\n # > Running a task (Mergent)\n headers: Mapping[str, str] = {\n "Authorization": "Bearer ",\n "Content-Type": "application/json",\n }\n\n task_data = {\n "name": "4cf95241-fa19-47ef-8a67-71e483747649",\n "queue": "default",\n "request": {\n "url": "https://example.com",\n "headers": {\n "Authorization": "fake-secret-token",\n "Content-Type": "application/json",\n },\n "body": "Hello, world!",\n },\n }\n\n try:\n response: Response = requests.post(\n "https://api.mergent.co/v2/tasks",\n headers=headers,\n json=task_data,\n )\n print(response.json())\n except Exception as e:\n print(f"Error: {e}")\n\n # > Running a task (Hatchet)\n result = await image_processor.aio_run(\n ImageProcessInput(image_url="https://example.com/image.png", filters=["blur"])\n )\n\n # you can await fully typed results\n print(result)\n\n\nasync def schedule() -> None:\n # > Scheduling tasks (Mergent)\n options = {\n # same options as before\n "json": {\n # same body as before\n "delay": "5m"\n }\n }\n\n print(options)\n\n # > Scheduling tasks (Hatchet)\n # Schedule the task to run at a specific time\n run_at = datetime.now(tz=timezone.utc) + timedelta(days=1)\n await image_processor.aio_schedule(\n run_at,\n ImageProcessInput(image_url="https://example.com/image.png", filters=["blur"]),\n )\n\n # Schedule the task to run every hour\n await image_processor.aio_create_cron(\n "run-hourly",\n "0 * * * *",\n ImageProcessInput(image_url="https://example.com/image.png", filters=["blur"]),\n )\n', source: 'out/python/migration_guides/mergent.py', blocks: { before_mergent: { - start: 19, - stop: 29, + start: 20, + stop: 30, }, after_hatchet: { - start: 33, - stop: 65, + start: 34, + stop: 66, }, running_a_task_mergent: { - start: 70, - stop: 96, + start: 71, + stop: 97, }, running_a_task_hatchet: { - start: 99, - stop: 104, + start: 100, + stop: 105, }, scheduling_tasks_mergent: { - start: 109, - stop: 115, + start: 110, + stop: 116, }, scheduling_tasks_hatchet: { - start: 
120, - stop: 132, + start: 121, + stop: 133, }, }, highlights: {}, diff --git a/frontend/app/src/next/lib/docs/generated/snips/python/non_retryable/test_no_retry.ts b/frontend/app/src/next/lib/docs/generated/snips/python/non_retryable/test_no_retry.ts index 5c2fbaee8..d903b0047 100644 --- a/frontend/app/src/next/lib/docs/generated/snips/python/non_retryable/test_no_retry.ts +++ b/frontend/app/src/next/lib/docs/generated/snips/python/non_retryable/test_no_retry.ts @@ -3,7 +3,7 @@ import { Snippet } from '@/next/lib/docs/generated/snips/types'; const snippet: Snippet = { language: 'python', content: - 'import pytest\n\nfrom examples.non_retryable.worker import (\n non_retryable_workflow,\n should_not_retry,\n should_not_retry_successful_task,\n should_retry_wrong_exception_type,\n)\nfrom hatchet_sdk import Hatchet\nfrom hatchet_sdk.clients.rest.models.v1_task_event_type import V1TaskEventType\nfrom hatchet_sdk.clients.rest.models.v1_workflow_run_details import V1WorkflowRunDetails\n\n\ndef find_id(runs: V1WorkflowRunDetails, match: str) -> str:\n return next(t.metadata.id for t in runs.tasks if match in t.display_name)\n\n\n@pytest.mark.asyncio(loop_scope="session")\nasync def test_no_retry(hatchet: Hatchet) -> None:\n ref = await non_retryable_workflow.aio_run_no_wait()\n\n with pytest.raises(Exception, match="retry"):\n await ref.aio_result()\n\n runs = await hatchet.runs.aio_get(ref.workflow_run_id)\n task_to_id = {\n task: find_id(runs, task.name)\n for task in [\n should_not_retry_successful_task,\n should_retry_wrong_exception_type,\n should_not_retry,\n ]\n }\n\n retrying_events = [\n e for e in runs.task_events if e.event_type == V1TaskEventType.RETRYING\n ]\n\n """Only one task should be retried."""\n assert len(retrying_events) == 1\n\n """The task id of the retrying events should match the tasks that are retried"""\n assert {e.task_id for e in retrying_events} == {\n task_to_id[should_retry_wrong_exception_type],\n }\n\n """Three failed events should emit, one each for the two failing initial runs and one for the retry."""\n assert (\n len([e for e in runs.task_events if e.event_type == V1TaskEventType.FAILED])\n == 3\n )\n', + 'import asyncio\n\nimport pytest\n\nfrom examples.non_retryable.worker import (\n non_retryable_workflow,\n should_not_retry,\n should_not_retry_successful_task,\n should_retry_wrong_exception_type,\n)\nfrom hatchet_sdk import Hatchet\nfrom hatchet_sdk.clients.rest.models.v1_task_event_type import V1TaskEventType\nfrom hatchet_sdk.clients.rest.models.v1_workflow_run_details import V1WorkflowRunDetails\nfrom hatchet_sdk.exceptions import FailedTaskRunExceptionGroup\n\n\ndef find_id(runs: V1WorkflowRunDetails, match: str) -> str:\n return next(t.metadata.id for t in runs.tasks if match in t.display_name)\n\n\n@pytest.mark.asyncio(loop_scope="session")\nasync def test_no_retry(hatchet: Hatchet) -> None:\n ref = await non_retryable_workflow.aio_run_no_wait()\n\n with pytest.raises(FailedTaskRunExceptionGroup) as exc_info:\n await ref.aio_result()\n\n exception_group = exc_info.value\n\n assert len(exception_group.exceptions) == 2\n\n exc_text = [e.exc for e in exception_group.exceptions]\n\n non_retries = [\n e\n for e in exc_text\n if "This task should retry because it\'s not a NonRetryableException" in e\n ]\n\n other_errors = [e for e in exc_text if "This task should not retry" in e]\n\n assert len(non_retries) == 1\n assert len(other_errors) == 1\n\n await asyncio.sleep(3)\n\n runs = await hatchet.runs.aio_get(ref.workflow_run_id)\n task_to_id = {\n task: 
find_id(runs, task.name)\n for task in [\n should_not_retry_successful_task,\n should_retry_wrong_exception_type,\n should_not_retry,\n ]\n }\n\n retrying_events = [\n e for e in runs.task_events if e.event_type == V1TaskEventType.RETRYING\n ]\n\n """Only one task should be retried."""\n assert len(retrying_events) == 1\n\n """The task id of the retrying events should match the tasks that are retried"""\n assert retrying_events[0].task_id == task_to_id[should_retry_wrong_exception_type]\n\n """Three failed events should emit, one each for the two failing initial runs and one for the retry."""\n assert (\n len([e for e in runs.task_events if e.event_type == V1TaskEventType.FAILED])\n == 3\n )\n', source: 'out/python/non_retryable/test_no_retry.py', blocks: {}, highlights: {}, diff --git a/frontend/app/src/next/lib/docs/generated/snips/python/opentelemetry_instrumentation/langfuse/client.ts b/frontend/app/src/next/lib/docs/generated/snips/python/opentelemetry_instrumentation/langfuse/client.ts index 4c1bd0f9f..d5c0900db 100644 --- a/frontend/app/src/next/lib/docs/generated/snips/python/opentelemetry_instrumentation/langfuse/client.ts +++ b/frontend/app/src/next/lib/docs/generated/snips/python/opentelemetry_instrumentation/langfuse/client.ts @@ -3,7 +3,7 @@ import { Snippet } from '@/next/lib/docs/generated/snips/types'; const snippet: Snippet = { language: 'python', content: - 'import base64\nimport os\n\nfrom langfuse import Langfuse # type: ignore[import-untyped]\nfrom langfuse.openai import AsyncOpenAI # type: ignore[import-untyped]\n\n# > Configure Langfuse\nLANGFUSE_AUTH = base64.b64encode(\n f"{os.getenv(\'LANGFUSE_PUBLIC_KEY\')}:{os.getenv(\'LANGFUSE_SECRET_KEY\')}".encode()\n).decode()\n\nos.environ["OTEL_EXPORTER_OTLP_ENDPOINT"] = (\n os.getenv("LANGFUSE_HOST", "https://us.cloud.langfuse.com") + "/api/public/otel"\n)\nos.environ["OTEL_EXPORTER_OTLP_HEADERS"] = f"Authorization=Basic {LANGFUSE_AUTH}"\n\n## Note: Langfuse sets the global tracer provider, so you don\'t need to worry about it\nlf = Langfuse(\n public_key=os.getenv("LANGFUSE_PUBLIC_KEY"),\n secret_key=os.getenv("LANGFUSE_SECRET_KEY"),\n host=os.getenv("LANGFUSE_HOST", "https://app.langfuse.com"),\n)\n\n# > Create OpenAI client\nopenai = AsyncOpenAI(\n api_key=os.getenv("OPENAI_API_KEY"),\n)\n', + 'import base64\nimport os\n\nfrom langfuse import Langfuse # type: ignore\nfrom langfuse.openai import AsyncOpenAI # type: ignore\n\n# > Configure Langfuse\nLANGFUSE_AUTH = base64.b64encode(\n f"{os.getenv(\'LANGFUSE_PUBLIC_KEY\')}:{os.getenv(\'LANGFUSE_SECRET_KEY\')}".encode()\n).decode()\n\nos.environ["OTEL_EXPORTER_OTLP_ENDPOINT"] = (\n os.getenv("LANGFUSE_HOST", "https://us.cloud.langfuse.com") + "/api/public/otel"\n)\nos.environ["OTEL_EXPORTER_OTLP_HEADERS"] = f"Authorization=Basic {LANGFUSE_AUTH}"\n\n## Note: Langfuse sets the global tracer provider, so you don\'t need to worry about it\nlf = Langfuse(\n public_key=os.getenv("LANGFUSE_PUBLIC_KEY"),\n secret_key=os.getenv("LANGFUSE_SECRET_KEY"),\n host=os.getenv("LANGFUSE_HOST", "https://app.langfuse.com"),\n)\n\n# > Create OpenAI client\nopenai = AsyncOpenAI(\n api_key=os.getenv("OPENAI_API_KEY"),\n)\n', source: 'out/python/opentelemetry_instrumentation/langfuse/client.py', blocks: { configure_langfuse: { diff --git a/frontend/app/src/next/lib/docs/generated/snips/python/opentelemetry_instrumentation/langfuse/trigger.ts b/frontend/app/src/next/lib/docs/generated/snips/python/opentelemetry_instrumentation/langfuse/trigger.ts index 4a19b2561..4d485006a 100644 --- 
a/frontend/app/src/next/lib/docs/generated/snips/python/opentelemetry_instrumentation/langfuse/trigger.ts +++ b/frontend/app/src/next/lib/docs/generated/snips/python/opentelemetry_instrumentation/langfuse/trigger.ts @@ -3,7 +3,7 @@ import { Snippet } from '@/next/lib/docs/generated/snips/types'; const snippet: Snippet = { language: 'python', content: - 'import asyncio\n\nfrom langfuse import get_client # type: ignore[import-untyped]\nfrom opentelemetry.trace import StatusCode\n\nfrom examples.opentelemetry_instrumentation.langfuse.worker import langfuse_task\n\n# > Trigger task\ntracer = get_client()\n\n\nasync def main() -> None:\n # Traces will send to Langfuse\n # Use `_otel_tracer` to access the OpenTelemetry tracer if you need\n # to e.g. log statuses or attributes manually.\n with tracer._otel_tracer.start_as_current_span(name="trigger") as span:\n result = await langfuse_task.aio_run()\n location = result.get("location")\n\n if not location:\n span.set_status(StatusCode.ERROR)\n return\n\n span.set_attribute("location", location)\n\n\n\nif __name__ == "__main__":\n asyncio.run(main())\n', + 'import asyncio\n\nfrom langfuse import get_client # type: ignore\nfrom opentelemetry.trace import StatusCode\n\nfrom examples.opentelemetry_instrumentation.langfuse.worker import langfuse_task\n\n# > Trigger task\ntracer = get_client()\n\n\nasync def main() -> None:\n # Traces will send to Langfuse\n # Use `_otel_tracer` to access the OpenTelemetry tracer if you need\n # to e.g. log statuses or attributes manually.\n with tracer._otel_tracer.start_as_current_span(name="trigger") as span:\n result = await langfuse_task.aio_run()\n location = result.get("location")\n\n if not location:\n span.set_status(StatusCode.ERROR)\n return\n\n span.set_attribute("location", location)\n\n\n\nif __name__ == "__main__":\n asyncio.run(main())\n', source: 'out/python/opentelemetry_instrumentation/langfuse/trigger.py', blocks: { trigger_task: { diff --git a/frontend/app/src/next/lib/docs/generated/snips/python/priority/test_priority.ts b/frontend/app/src/next/lib/docs/generated/snips/python/priority/test_priority.ts index dd2577a1d..a1ac0988f 100644 --- a/frontend/app/src/next/lib/docs/generated/snips/python/priority/test_priority.ts +++ b/frontend/app/src/next/lib/docs/generated/snips/python/priority/test_priority.ts @@ -3,7 +3,7 @@ import { Snippet } from '@/next/lib/docs/generated/snips/types'; const snippet: Snippet = { language: 'python', content: - 'import asyncio\nfrom datetime import datetime, timedelta, timezone\nfrom random import choice\nfrom subprocess import Popen\nfrom typing import Any, AsyncGenerator, Literal\nfrom uuid import uuid4\n\nimport pytest\nimport pytest_asyncio\nfrom pydantic import BaseModel\n\nfrom examples.priority.worker import DEFAULT_PRIORITY, SLEEP_TIME, priority_workflow\nfrom hatchet_sdk import Hatchet, ScheduleTriggerWorkflowOptions, TriggerWorkflowOptions\nfrom hatchet_sdk.clients.rest.models.v1_task_status import V1TaskStatus\n\nPriority = Literal["low", "medium", "high", "default"]\n\n\nclass RunPriorityStartedAt(BaseModel):\n priority: Priority\n started_at: datetime\n finished_at: datetime\n\n\ndef priority_to_int(priority: Priority) -> int:\n match priority:\n case "high":\n return 3\n case "medium":\n return 2\n case "low":\n return 1\n case "default":\n return DEFAULT_PRIORITY\n case _:\n raise ValueError(f"Invalid priority: {priority}")\n\n\n@pytest_asyncio.fixture(loop_scope="session", scope="function")\nasync def dummy_runs() -> None:\n priority: Priority = 
"high"\n\n await priority_workflow.aio_run_many_no_wait(\n [\n priority_workflow.create_bulk_run_item(\n options=TriggerWorkflowOptions(\n priority=(priority_to_int(priority)),\n additional_metadata={\n "priority": priority,\n "key": ix,\n "type": "dummy",\n },\n )\n )\n for ix in range(40)\n ]\n )\n\n await asyncio.sleep(3)\n\n return None\n\n\n@pytest.mark.parametrize(\n "on_demand_worker",\n [\n (\n ["poetry", "run", "python", "examples/priority/worker.py", "--slots", "1"],\n 8003,\n )\n ],\n indirect=True,\n)\n@pytest.mark.asyncio(loop_scope="session")\nasync def test_priority(\n hatchet: Hatchet, dummy_runs: None, on_demand_worker: Popen[Any]\n) -> None:\n test_run_id = str(uuid4())\n choices: list[Priority] = ["low", "medium", "high", "default"]\n N = 30\n\n run_refs = await priority_workflow.aio_run_many_no_wait(\n [\n priority_workflow.create_bulk_run_item(\n options=TriggerWorkflowOptions(\n priority=(priority_to_int(priority := choice(choices))),\n additional_metadata={\n "priority": priority,\n "key": ix,\n "test_run_id": test_run_id,\n },\n )\n )\n for ix in range(N)\n ]\n )\n\n await asyncio.gather(*[r.aio_result() for r in run_refs])\n\n workflows = (\n await hatchet.workflows.aio_list(workflow_name=priority_workflow.name)\n ).rows\n\n assert workflows\n\n workflow = next((w for w in workflows if w.name == priority_workflow.name), None)\n\n assert workflow\n\n assert workflow.name == priority_workflow.name\n\n runs = await hatchet.runs.aio_list(\n workflow_ids=[workflow.metadata.id],\n additional_metadata={\n "test_run_id": test_run_id,\n },\n limit=1_000,\n )\n\n runs_ids_started_ats: list[RunPriorityStartedAt] = sorted(\n [\n RunPriorityStartedAt(\n priority=(r.additional_metadata or {}).get("priority") or "low",\n started_at=r.started_at or datetime.min,\n finished_at=r.finished_at or datetime.min,\n )\n for r in runs.rows\n ],\n key=lambda x: x.started_at,\n )\n\n assert len(runs_ids_started_ats) == len(run_refs)\n assert len(runs_ids_started_ats) == N\n\n for i in range(len(runs_ids_started_ats) - 1):\n curr = runs_ids_started_ats[i]\n nxt = runs_ids_started_ats[i + 1]\n\n """Run start times should be in order of priority"""\n assert priority_to_int(curr.priority) >= priority_to_int(nxt.priority)\n\n """Runs should proceed one at a time"""\n assert curr.finished_at <= nxt.finished_at\n assert nxt.finished_at >= nxt.started_at\n\n """Runs should finish after starting (this is mostly a test for engine datetime handling bugs)"""\n assert curr.finished_at >= curr.started_at\n\n\n@pytest.mark.parametrize(\n "on_demand_worker",\n [\n (\n ["poetry", "run", "python", "examples/priority/worker.py", "--slots", "1"],\n 8003,\n )\n ],\n indirect=True,\n)\n@pytest.mark.asyncio(loop_scope="session")\nasync def test_priority_via_scheduling(\n hatchet: Hatchet, dummy_runs: None, on_demand_worker: Popen[Any]\n) -> None:\n test_run_id = str(uuid4())\n sleep_time = 3\n n = 30\n choices: list[Priority] = ["low", "medium", "high", "default"]\n run_at = datetime.now(tz=timezone.utc) + timedelta(seconds=sleep_time)\n\n versions = await asyncio.gather(\n *[\n priority_workflow.aio_schedule(\n run_at=run_at,\n options=ScheduleTriggerWorkflowOptions(\n priority=(priority_to_int(priority := choice(choices))),\n additional_metadata={\n "priority": priority,\n "key": ix,\n "test_run_id": test_run_id,\n },\n ),\n )\n for ix in range(n)\n ]\n )\n\n await asyncio.sleep(sleep_time * 2)\n\n workflow_id = versions[0].workflow_id\n\n attempts = 0\n\n while True:\n if attempts >= SLEEP_TIME * n * 2:\n raise 
TimeoutError("Timed out waiting for runs to finish")\n\n attempts += 1\n await asyncio.sleep(1)\n runs = await hatchet.runs.aio_list(\n workflow_ids=[workflow_id],\n additional_metadata={\n "test_run_id": test_run_id,\n },\n limit=1_000,\n )\n\n if not runs.rows:\n continue\n\n if any(\n r.status in [V1TaskStatus.FAILED, V1TaskStatus.CANCELLED] for r in runs.rows\n ):\n raise ValueError("One or more runs failed or were cancelled")\n\n if all(r.status == V1TaskStatus.COMPLETED for r in runs.rows):\n break\n\n runs_ids_started_ats: list[RunPriorityStartedAt] = sorted(\n [\n RunPriorityStartedAt(\n priority=(r.additional_metadata or {}).get("priority") or "low",\n started_at=r.started_at or datetime.min,\n finished_at=r.finished_at or datetime.min,\n )\n for r in runs.rows\n ],\n key=lambda x: x.started_at,\n )\n\n assert len(runs_ids_started_ats) == len(versions)\n\n for i in range(len(runs_ids_started_ats) - 1):\n curr = runs_ids_started_ats[i]\n nxt = runs_ids_started_ats[i + 1]\n\n """Run start times should be in order of priority"""\n assert priority_to_int(curr.priority) >= priority_to_int(nxt.priority)\n\n """Runs should proceed one at a time"""\n assert curr.finished_at <= nxt.finished_at\n assert nxt.finished_at >= nxt.started_at\n\n """Runs should finish after starting (this is mostly a test for engine datetime handling bugs)"""\n assert curr.finished_at >= curr.started_at\n\n\n@pytest_asyncio.fixture(loop_scope="session", scope="function")\nasync def crons(\n hatchet: Hatchet, dummy_runs: None\n) -> AsyncGenerator[tuple[str, str, int], None]:\n test_run_id = str(uuid4())\n choices: list[Priority] = ["low", "medium", "high"]\n n = 30\n\n crons = await asyncio.gather(\n *[\n hatchet.cron.aio_create(\n workflow_name=priority_workflow.name,\n cron_name=f"{test_run_id}-cron-{i}",\n expression="* * * * *",\n input={},\n additional_metadata={\n "trigger": "cron",\n "test_run_id": test_run_id,\n "priority": (priority := choice(choices)),\n "key": str(i),\n },\n priority=(priority_to_int(priority)),\n )\n for i in range(n)\n ]\n )\n\n yield crons[0].workflow_id, test_run_id, n\n\n await asyncio.gather(*[hatchet.cron.aio_delete(cron.metadata.id) for cron in crons])\n\n\ndef time_until_next_minute() -> float:\n now = datetime.now(tz=timezone.utc)\n next_minute = (now + timedelta(minutes=1)).replace(second=0, microsecond=0)\n\n return (next_minute - now).total_seconds()\n\n\n@pytest.mark.skip(\n reason="Test is flaky because the first jobs that are picked up don\'t necessarily go in priority order"\n)\n@pytest.mark.parametrize(\n "on_demand_worker",\n [\n (\n ["poetry", "run", "python", "examples/priority/worker.py", "--slots", "1"],\n 8003,\n )\n ],\n indirect=True,\n)\n@pytest.mark.asyncio(loop_scope="session")\nasync def test_priority_via_cron(\n hatchet: Hatchet, crons: tuple[str, str, int], on_demand_worker: Popen[Any]\n) -> None:\n workflow_id, test_run_id, n = crons\n\n await asyncio.sleep(time_until_next_minute() + 10)\n\n attempts = 0\n\n while True:\n if attempts >= SLEEP_TIME * n * 2:\n raise TimeoutError("Timed out waiting for runs to finish")\n\n attempts += 1\n await asyncio.sleep(1)\n runs = await hatchet.runs.aio_list(\n workflow_ids=[workflow_id],\n additional_metadata={\n "test_run_id": test_run_id,\n },\n limit=1_000,\n )\n\n if not runs.rows:\n continue\n\n if any(\n r.status in [V1TaskStatus.FAILED, V1TaskStatus.CANCELLED] for r in runs.rows\n ):\n raise ValueError("One or more runs failed or were cancelled")\n\n if all(r.status == V1TaskStatus.COMPLETED for r in 
runs.rows):\n break\n\n runs_ids_started_ats: list[RunPriorityStartedAt] = sorted(\n [\n RunPriorityStartedAt(\n priority=(r.additional_metadata or {}).get("priority") or "low",\n started_at=r.started_at or datetime.min,\n finished_at=r.finished_at or datetime.min,\n )\n for r in runs.rows\n ],\n key=lambda x: x.started_at,\n )\n\n assert len(runs_ids_started_ats) == n\n\n for i in range(len(runs_ids_started_ats) - 1):\n curr = runs_ids_started_ats[i]\n nxt = runs_ids_started_ats[i + 1]\n\n """Run start times should be in order of priority"""\n assert priority_to_int(curr.priority) >= priority_to_int(nxt.priority)\n\n """Runs should proceed one at a time"""\n assert curr.finished_at <= nxt.finished_at\n assert nxt.finished_at >= nxt.started_at\n\n """Runs should finish after starting (this is mostly a test for engine datetime handling bugs)"""\n assert curr.finished_at >= curr.started_at\n', + 'import asyncio\nfrom collections.abc import AsyncGenerator\nfrom datetime import datetime, timedelta, timezone\nfrom random import choice\nfrom subprocess import Popen\nfrom typing import Any, Literal\nfrom uuid import uuid4\n\nimport pytest\nimport pytest_asyncio\nfrom pydantic import BaseModel\n\nfrom examples.priority.worker import DEFAULT_PRIORITY, SLEEP_TIME, priority_workflow\nfrom hatchet_sdk import Hatchet, ScheduleTriggerWorkflowOptions, TriggerWorkflowOptions\nfrom hatchet_sdk.clients.rest.models.v1_task_status import V1TaskStatus\n\nPriority = Literal["low", "medium", "high", "default"]\n\n\nclass RunPriorityStartedAt(BaseModel):\n priority: Priority\n started_at: datetime\n finished_at: datetime\n\n\ndef priority_to_int(priority: Priority) -> int:\n match priority:\n case "high":\n return 3\n case "medium":\n return 2\n case "low":\n return 1\n case "default":\n return DEFAULT_PRIORITY\n case _:\n raise ValueError(f"Invalid priority: {priority}")\n\n\n@pytest_asyncio.fixture(loop_scope="session", scope="function")\nasync def dummy_runs() -> None:\n priority: Priority = "high"\n\n await priority_workflow.aio_run_many_no_wait(\n [\n priority_workflow.create_bulk_run_item(\n options=TriggerWorkflowOptions(\n priority=(priority_to_int(priority)),\n additional_metadata={\n "priority": priority,\n "key": ix,\n "type": "dummy",\n },\n )\n )\n for ix in range(40)\n ]\n )\n\n await asyncio.sleep(3)\n\n return\n\n\n@pytest.mark.parametrize(\n "on_demand_worker",\n [\n (\n ["poetry", "run", "python", "examples/priority/worker.py", "--slots", "1"],\n 8003,\n )\n ],\n indirect=True,\n)\n@pytest.mark.asyncio(loop_scope="session")\nasync def test_priority(\n hatchet: Hatchet, dummy_runs: None, on_demand_worker: Popen[Any]\n) -> None:\n test_run_id = str(uuid4())\n choices: list[Priority] = ["low", "medium", "high", "default"]\n N = 30\n\n run_refs = await priority_workflow.aio_run_many_no_wait(\n [\n priority_workflow.create_bulk_run_item(\n options=TriggerWorkflowOptions(\n priority=(priority_to_int(priority := choice(choices))),\n additional_metadata={\n "priority": priority,\n "key": ix,\n "test_run_id": test_run_id,\n },\n )\n )\n for ix in range(N)\n ]\n )\n\n await asyncio.gather(*[r.aio_result() for r in run_refs])\n\n workflows = (\n await hatchet.workflows.aio_list(workflow_name=priority_workflow.name)\n ).rows\n\n assert workflows\n\n workflow = next((w for w in workflows if w.name == priority_workflow.name), None)\n\n assert workflow\n\n assert workflow.name == priority_workflow.name\n\n runs = await hatchet.runs.aio_list(\n workflow_ids=[workflow.metadata.id],\n additional_metadata={\n 
"test_run_id": test_run_id,\n },\n limit=1_000,\n )\n\n runs_ids_started_ats: list[RunPriorityStartedAt] = sorted(\n [\n RunPriorityStartedAt(\n priority=(r.additional_metadata or {}).get("priority") or "low",\n started_at=r.started_at or datetime.min,\n finished_at=r.finished_at or datetime.min,\n )\n for r in runs.rows\n ],\n key=lambda x: x.started_at,\n )\n\n assert len(runs_ids_started_ats) == len(run_refs)\n assert len(runs_ids_started_ats) == N\n\n for i in range(len(runs_ids_started_ats) - 1):\n curr = runs_ids_started_ats[i]\n nxt = runs_ids_started_ats[i + 1]\n\n """Run start times should be in order of priority"""\n assert priority_to_int(curr.priority) >= priority_to_int(nxt.priority)\n\n """Runs should proceed one at a time"""\n assert curr.finished_at <= nxt.finished_at\n assert nxt.finished_at >= nxt.started_at\n\n """Runs should finish after starting (this is mostly a test for engine datetime handling bugs)"""\n assert curr.finished_at >= curr.started_at\n\n\n@pytest.mark.parametrize(\n "on_demand_worker",\n [\n (\n ["poetry", "run", "python", "examples/priority/worker.py", "--slots", "1"],\n 8003,\n )\n ],\n indirect=True,\n)\n@pytest.mark.asyncio(loop_scope="session")\nasync def test_priority_via_scheduling(\n hatchet: Hatchet, dummy_runs: None, on_demand_worker: Popen[Any]\n) -> None:\n test_run_id = str(uuid4())\n sleep_time = 3\n n = 30\n choices: list[Priority] = ["low", "medium", "high", "default"]\n run_at = datetime.now(tz=timezone.utc) + timedelta(seconds=sleep_time)\n\n versions = await asyncio.gather(\n *[\n priority_workflow.aio_schedule(\n run_at=run_at,\n options=ScheduleTriggerWorkflowOptions(\n priority=(priority_to_int(priority := choice(choices))),\n additional_metadata={\n "priority": priority,\n "key": ix,\n "test_run_id": test_run_id,\n },\n ),\n )\n for ix in range(n)\n ]\n )\n\n await asyncio.sleep(sleep_time * 2)\n\n workflow_id = versions[0].workflow_id\n\n attempts = 0\n\n while True:\n if attempts >= SLEEP_TIME * n * 2:\n raise TimeoutError("Timed out waiting for runs to finish")\n\n attempts += 1\n await asyncio.sleep(1)\n runs = await hatchet.runs.aio_list(\n workflow_ids=[workflow_id],\n additional_metadata={\n "test_run_id": test_run_id,\n },\n limit=1_000,\n )\n\n if not runs.rows:\n continue\n\n if any(\n r.status in [V1TaskStatus.FAILED, V1TaskStatus.CANCELLED] for r in runs.rows\n ):\n raise ValueError("One or more runs failed or were cancelled")\n\n if all(r.status == V1TaskStatus.COMPLETED for r in runs.rows):\n break\n\n runs_ids_started_ats: list[RunPriorityStartedAt] = sorted(\n [\n RunPriorityStartedAt(\n priority=(r.additional_metadata or {}).get("priority") or "low",\n started_at=r.started_at or datetime.min,\n finished_at=r.finished_at or datetime.min,\n )\n for r in runs.rows\n ],\n key=lambda x: x.started_at,\n )\n\n assert len(runs_ids_started_ats) == len(versions)\n\n for i in range(len(runs_ids_started_ats) - 1):\n curr = runs_ids_started_ats[i]\n nxt = runs_ids_started_ats[i + 1]\n\n """Run start times should be in order of priority"""\n assert priority_to_int(curr.priority) >= priority_to_int(nxt.priority)\n\n """Runs should proceed one at a time"""\n assert curr.finished_at <= nxt.finished_at\n assert nxt.finished_at >= nxt.started_at\n\n """Runs should finish after starting (this is mostly a test for engine datetime handling bugs)"""\n assert curr.finished_at >= curr.started_at\n\n\n@pytest_asyncio.fixture(loop_scope="session", scope="function")\nasync def crons(\n hatchet: Hatchet, dummy_runs: None\n) -> 
AsyncGenerator[tuple[str, str, int], None]:\n test_run_id = str(uuid4())\n choices: list[Priority] = ["low", "medium", "high"]\n n = 30\n\n crons = await asyncio.gather(\n *[\n hatchet.cron.aio_create(\n workflow_name=priority_workflow.name,\n cron_name=f"{test_run_id}-cron-{i}",\n expression="* * * * *",\n input={},\n additional_metadata={\n "trigger": "cron",\n "test_run_id": test_run_id,\n "priority": (priority := choice(choices)),\n "key": str(i),\n },\n priority=(priority_to_int(priority)),\n )\n for i in range(n)\n ]\n )\n\n yield crons[0].workflow_id, test_run_id, n\n\n await asyncio.gather(*[hatchet.cron.aio_delete(cron.metadata.id) for cron in crons])\n\n\ndef time_until_next_minute() -> float:\n now = datetime.now(tz=timezone.utc)\n next_minute = (now + timedelta(minutes=1)).replace(second=0, microsecond=0)\n\n return (next_minute - now).total_seconds()\n\n\n@pytest.mark.skip(\n reason="Test is flaky because the first jobs that are picked up don\'t necessarily go in priority order"\n)\n@pytest.mark.parametrize(\n "on_demand_worker",\n [\n (\n ["poetry", "run", "python", "examples/priority/worker.py", "--slots", "1"],\n 8003,\n )\n ],\n indirect=True,\n)\n@pytest.mark.asyncio(loop_scope="session")\nasync def test_priority_via_cron(\n hatchet: Hatchet, crons: tuple[str, str, int], on_demand_worker: Popen[Any]\n) -> None:\n workflow_id, test_run_id, n = crons\n\n await asyncio.sleep(time_until_next_minute() + 10)\n\n attempts = 0\n\n while True:\n if attempts >= SLEEP_TIME * n * 2:\n raise TimeoutError("Timed out waiting for runs to finish")\n\n attempts += 1\n await asyncio.sleep(1)\n runs = await hatchet.runs.aio_list(\n workflow_ids=[workflow_id],\n additional_metadata={\n "test_run_id": test_run_id,\n },\n limit=1_000,\n )\n\n if not runs.rows:\n continue\n\n if any(\n r.status in [V1TaskStatus.FAILED, V1TaskStatus.CANCELLED] for r in runs.rows\n ):\n raise ValueError("One or more runs failed or were cancelled")\n\n if all(r.status == V1TaskStatus.COMPLETED for r in runs.rows):\n break\n\n runs_ids_started_ats: list[RunPriorityStartedAt] = sorted(\n [\n RunPriorityStartedAt(\n priority=(r.additional_metadata or {}).get("priority") or "low",\n started_at=r.started_at or datetime.min,\n finished_at=r.finished_at or datetime.min,\n )\n for r in runs.rows\n ],\n key=lambda x: x.started_at,\n )\n\n assert len(runs_ids_started_ats) == n\n\n for i in range(len(runs_ids_started_ats) - 1):\n curr = runs_ids_started_ats[i]\n nxt = runs_ids_started_ats[i + 1]\n\n """Run start times should be in order of priority"""\n assert priority_to_int(curr.priority) >= priority_to_int(nxt.priority)\n\n """Runs should proceed one at a time"""\n assert curr.finished_at <= nxt.finished_at\n assert nxt.finished_at >= nxt.started_at\n\n """Runs should finish after starting (this is mostly a test for engine datetime handling bugs)"""\n assert curr.finished_at >= curr.started_at\n', source: 'out/python/priority/test_priority.py', blocks: {}, highlights: {}, diff --git a/frontend/app/src/next/lib/docs/generated/snips/python/streaming/async_stream.ts b/frontend/app/src/next/lib/docs/generated/snips/python/streaming/async_stream.ts index 3ad78e486..5b65c775f 100644 --- a/frontend/app/src/next/lib/docs/generated/snips/python/streaming/async_stream.ts +++ b/frontend/app/src/next/lib/docs/generated/snips/python/streaming/async_stream.ts @@ -3,7 +3,7 @@ import { Snippet } from '@/next/lib/docs/generated/snips/types'; const snippet: Snippet = { language: 'python', content: - 'import asyncio\n\nfrom 
examples.streaming.worker import streaming_workflow\n\n\nasync def main() -> None:\n ref = await streaming_workflow.aio_run_no_wait()\n await asyncio.sleep(1)\n\n stream = ref.stream()\n\n async for chunk in stream:\n print(chunk)\n\n\nif __name__ == "__main__":\n import asyncio\n\n asyncio.run(main())\n', + 'import asyncio\n\nfrom examples.streaming.worker import stream_task\nfrom hatchet_sdk.clients.listeners.run_event_listener import StepRunEventType\n\n\nasync def main() -> None:\n ref = await stream_task.aio_run_no_wait()\n\n async for chunk in ref.stream():\n if chunk.type == StepRunEventType.STEP_RUN_EVENT_TYPE_STREAM:\n print(chunk.payload, flush=True, end="")\n\n\nif __name__ == "__main__":\n asyncio.run(main())\n', source: 'out/python/streaming/async_stream.py', blocks: {}, highlights: {}, diff --git a/frontend/app/src/next/lib/docs/generated/snips/python/streaming/index.ts b/frontend/app/src/next/lib/docs/generated/snips/python/streaming/index.ts index 5899df729..9af2c25ff 100644 --- a/frontend/app/src/next/lib/docs/generated/snips/python/streaming/index.ts +++ b/frontend/app/src/next/lib/docs/generated/snips/python/streaming/index.ts @@ -1,7 +1,9 @@ import async_stream from './async_stream'; import sync_stream from './sync_stream'; +import test_streaming from './test_streaming'; import worker from './worker'; export { async_stream }; export { sync_stream }; +export { test_streaming }; export { worker }; diff --git a/frontend/app/src/next/lib/docs/generated/snips/python/streaming/sync_stream.ts b/frontend/app/src/next/lib/docs/generated/snips/python/streaming/sync_stream.ts index babbaade7..243feeb14 100644 --- a/frontend/app/src/next/lib/docs/generated/snips/python/streaming/sync_stream.ts +++ b/frontend/app/src/next/lib/docs/generated/snips/python/streaming/sync_stream.ts @@ -3,7 +3,7 @@ import { Snippet } from '@/next/lib/docs/generated/snips/types'; const snippet: Snippet = { language: 'python', content: - 'import time\n\nfrom examples.streaming.worker import streaming_workflow\n\n\ndef main() -> None:\n ref = streaming_workflow.run_no_wait()\n time.sleep(1)\n\n stream = ref.stream()\n\n for chunk in stream:\n print(chunk)\n\n\nif __name__ == "__main__":\n main()\n', + 'import time\n\nfrom examples.streaming.worker import stream_task\n\n\ndef main() -> None:\n ref = stream_task.run_no_wait()\n time.sleep(1)\n\n stream = ref.stream()\n\n for chunk in stream:\n print(chunk)\n\n\nif __name__ == "__main__":\n main()\n', source: 'out/python/streaming/sync_stream.py', blocks: {}, highlights: {}, diff --git a/frontend/app/src/next/lib/docs/generated/snips/python/streaming/test_streaming.ts b/frontend/app/src/next/lib/docs/generated/snips/python/streaming/test_streaming.ts new file mode 100644 index 000000000..64ffd592e --- /dev/null +++ b/frontend/app/src/next/lib/docs/generated/snips/python/streaming/test_streaming.ts @@ -0,0 +1,12 @@ +import { Snippet } from '@/next/lib/docs/generated/snips/types'; + +const snippet: Snippet = { + language: 'python', + content: + 'import asyncio\nfrom datetime import datetime, timedelta, timezone\nfrom subprocess import Popen\nfrom typing import Any\n\nimport pytest\n\nfrom examples.streaming.worker import chunks, stream_task\nfrom hatchet_sdk import Hatchet\nfrom hatchet_sdk.clients.listeners.run_event_listener import (\n StepRunEvent,\n StepRunEventType,\n)\n\n\n@pytest.mark.parametrize(\n "on_demand_worker",\n [\n (\n ["poetry", "run", "python", "examples/streaming/worker.py", "--slots", "1"],\n 8008,\n )\n ],\n 
indirect=True,\n)\n@pytest.mark.parametrize("execution_number", range(1))\n@pytest.mark.asyncio(loop_scope="session")\nasync def test_streaming_ordering_and_completeness(\n execution_number: int,\n hatchet: Hatchet,\n on_demand_worker: Popen[Any],\n) -> None:\n ref = await stream_task.aio_run_no_wait()\n\n ix = 0\n anna_karenina = ""\n\n async for chunk in ref.stream():\n if chunk.type == StepRunEventType.STEP_RUN_EVENT_TYPE_STREAM:\n assert chunks[ix] == chunk.payload\n ix += 1\n anna_karenina += chunk.payload\n\n assert ix == len(chunks)\n assert anna_karenina == "".join(chunks)\n\n await ref.aio_result()\n', + source: 'out/python/streaming/test_streaming.py', + blocks: {}, + highlights: {}, +}; + +export default snippet; diff --git a/frontend/app/src/next/lib/docs/generated/snips/python/streaming/worker.ts b/frontend/app/src/next/lib/docs/generated/snips/python/streaming/worker.ts index b1fdcd3d0..57e0888bb 100644 --- a/frontend/app/src/next/lib/docs/generated/snips/python/streaming/worker.ts +++ b/frontend/app/src/next/lib/docs/generated/snips/python/streaming/worker.ts @@ -3,12 +3,12 @@ import { Snippet } from '@/next/lib/docs/generated/snips/types'; const snippet: Snippet = { language: 'python', content: - 'import asyncio\n\nfrom hatchet_sdk import Context, EmptyModel, Hatchet\n\nhatchet = Hatchet(debug=True)\n\n# > Streaming\n\nstreaming_workflow = hatchet.workflow(name="StreamingWorkflow")\n\n\n@streaming_workflow.task()\nasync def step1(input: EmptyModel, ctx: Context) -> None:\n for i in range(10):\n await asyncio.sleep(1)\n ctx.put_stream(f"Processing {i}")\n\n\ndef main() -> None:\n worker = hatchet.worker("test-worker", workflows=[streaming_workflow])\n worker.start()\n\n\n\nif __name__ == "__main__":\n main()\n', + 'import asyncio\nfrom datetime import datetime, timedelta, timezone\nfrom typing import Generator\n\nfrom hatchet_sdk import Context, EmptyModel, Hatchet\n\nhatchet = Hatchet(debug=False)\n\n# > Streaming\n\ncontent = """\nHappy families are all alike; every unhappy family is unhappy in its own way.\n\nEverything was in confusion in the Oblonskys\' house. The wife had discovered that the husband was carrying on an intrigue with a French girl, who had been a governess in their family, and she had announced to her husband that she could not go on living in the same house with him. This position of affairs had now lasted three days, and not only the husband and wife themselves, but all the members of their family and household, were painfully conscious of it. Every person in the house felt that there was so sense in their living together, and that the stray people brought together by chance in any inn had more in common with one another than they, the members of the family and household of the Oblonskys. The wife did not leave her own room, the husband had not been at home for three days. 
The children ran wild all over the house; the English governess quarreled with the housekeeper, and wrote to a friend asking her to look out for a new situation for her; the man-cook had walked off the day before just at dinner time; the kitchen-maid, and the coachman had given warning.\n"""\n\n\ndef create_chunks(content: str, n: int) -> Generator[str, None, None]:\n for i in range(0, len(content), n):\n yield content[i : i + n]\n\n\nchunks = list(create_chunks(content, 10))\n\n\n@hatchet.task()\nasync def stream_task(input: EmptyModel, ctx: Context) -> None:\n await asyncio.sleep(2)\n\n for chunk in chunks:\n ctx.put_stream(chunk)\n await asyncio.sleep(0.05)\n\n\ndef main() -> None:\n worker = hatchet.worker("test-worker", workflows=[stream_task])\n worker.start()\n\n\n\nif __name__ == "__main__":\n main()\n', source: 'out/python/streaming/worker.py', blocks: { streaming: { - start: 8, - stop: 23, + start: 10, + stop: 39, }, }, highlights: {}, diff --git a/frontend/app/src/next/lib/docs/generated/snips/python/timeout/test_timeout.ts b/frontend/app/src/next/lib/docs/generated/snips/python/timeout/test_timeout.ts index 3a1ea55c2..b2454801c 100644 --- a/frontend/app/src/next/lib/docs/generated/snips/python/timeout/test_timeout.ts +++ b/frontend/app/src/next/lib/docs/generated/snips/python/timeout/test_timeout.ts @@ -3,7 +3,7 @@ import { Snippet } from '@/next/lib/docs/generated/snips/types'; const snippet: Snippet = { language: 'python', content: - 'import pytest\n\nfrom examples.timeout.worker import refresh_timeout_wf, timeout_wf\n\n\n@pytest.mark.asyncio(loop_scope="session")\nasync def test_execution_timeout() -> None:\n run = timeout_wf.run_no_wait()\n\n with pytest.raises(Exception, match="(Task exceeded timeout|TIMED_OUT)"):\n await run.aio_result()\n\n\n@pytest.mark.asyncio(loop_scope="session")\nasync def test_run_refresh_timeout() -> None:\n result = await refresh_timeout_wf.aio_run()\n\n assert result["refresh_task"]["status"] == "success"\n', + 'import pytest\n\nfrom examples.timeout.worker import refresh_timeout_wf, timeout_wf\n\n\n@pytest.mark.asyncio(loop_scope="session")\nasync def test_execution_timeout() -> None:\n run = timeout_wf.run_no_wait()\n\n with pytest.raises(\n Exception,\n match="(Task exceeded timeout|TIMED_OUT|Workflow run .* failed with multiple errors)",\n ):\n await run.aio_result()\n\n\n@pytest.mark.asyncio(loop_scope="session")\nasync def test_run_refresh_timeout() -> None:\n result = await refresh_timeout_wf.aio_run()\n\n assert result["refresh_task"]["status"] == "success"\n', source: 'out/python/timeout/test_timeout.py', blocks: {}, highlights: {}, diff --git a/frontend/app/src/next/lib/docs/generated/snips/python/waits/test_waits.ts b/frontend/app/src/next/lib/docs/generated/snips/python/waits/test_waits.ts index c499c811e..471d191f0 100644 --- a/frontend/app/src/next/lib/docs/generated/snips/python/waits/test_waits.ts +++ b/frontend/app/src/next/lib/docs/generated/snips/python/waits/test_waits.ts @@ -3,7 +3,7 @@ import { Snippet } from '@/next/lib/docs/generated/snips/types'; const snippet: Snippet = { language: 'python', content: - 'import asyncio\nimport os\n\nimport pytest\n\nfrom examples.waits.worker import task_condition_workflow\nfrom hatchet_sdk import Hatchet\n\n\n@pytest.mark.skipif(\n os.getenv("CI", "false").lower() == "true",\n reason="Skipped in CI because of unreliability",\n)\n@pytest.mark.asyncio(loop_scope="session")\nasync def test_waits(hatchet: Hatchet) -> None:\n\n ref = task_condition_workflow.run_no_wait()\n\n await 
asyncio.sleep(15)\n\n hatchet.event.push("skip_on_event:skip", {})\n hatchet.event.push("wait_for_event:start", {})\n\n result = await ref.aio_result()\n\n assert result["skip_on_event"] == {"skipped": True}\n\n first_random_number = result["start"]["random_number"]\n wait_for_event_random_number = result["wait_for_event"]["random_number"]\n wait_for_sleep_random_number = result["wait_for_sleep"]["random_number"]\n\n left_branch = result["left_branch"]\n right_branch = result["right_branch"]\n\n assert left_branch.get("skipped") is True or right_branch.get("skipped") is True\n\n branch_random_number = left_branch.get("random_number") or right_branch.get(\n "random_number"\n )\n\n result_sum = result["sum"]["sum"]\n\n assert (\n result_sum\n == first_random_number\n + wait_for_event_random_number\n + wait_for_sleep_random_number\n + branch_random_number\n )\n', + 'import asyncio\n\nimport pytest\n\nfrom examples.waits.worker import task_condition_workflow\nfrom hatchet_sdk import Hatchet\n\n\n@pytest.mark.asyncio(loop_scope="session")\nasync def test_waits(hatchet: Hatchet) -> None:\n\n ref = task_condition_workflow.run_no_wait()\n\n await asyncio.sleep(15)\n\n hatchet.event.push("skip_on_event:skip", {})\n hatchet.event.push("wait_for_event:start", {})\n\n result = await ref.aio_result()\n\n assert result["skip_on_event"] == {"skipped": True}\n\n first_random_number = result["start"]["random_number"]\n wait_for_event_random_number = result["wait_for_event"]["random_number"]\n wait_for_sleep_random_number = result["wait_for_sleep"]["random_number"]\n\n left_branch = result["left_branch"]\n right_branch = result["right_branch"]\n\n assert left_branch.get("skipped") is True or right_branch.get("skipped") is True\n\n branch_random_number = left_branch.get("random_number") or right_branch.get(\n "random_number"\n )\n\n result_sum = result["sum"]["sum"]\n\n assert (\n result_sum\n == first_random_number\n + wait_for_event_random_number\n + wait_for_sleep_random_number\n + branch_random_number\n )\n', source: 'out/python/waits/test_waits.py', blocks: {}, highlights: {}, diff --git a/frontend/docs/lib/generated/snips/python/concurrency_limit_rr/worker.ts b/frontend/docs/lib/generated/snips/python/concurrency_limit_rr/worker.ts index c99db0b61..b0e58831b 100644 --- a/frontend/docs/lib/generated/snips/python/concurrency_limit_rr/worker.ts +++ b/frontend/docs/lib/generated/snips/python/concurrency_limit_rr/worker.ts @@ -2,7 +2,7 @@ import { Snippet } from '@/lib/generated/snips/types'; const snippet: Snippet = { "language": "python", - "content": "import time\n\nfrom pydantic import BaseModel\n\nfrom hatchet_sdk import (\n ConcurrencyExpression,\n ConcurrencyLimitStrategy,\n Context,\n Hatchet,\n)\n\nhatchet = Hatchet(debug=True)\n\n\n# > Concurrency Strategy With Key\nclass WorkflowInput(BaseModel):\n group: str\n\n\nconcurrency_limit_rr_workflow = hatchet.workflow(\n name=\"ConcurrencyDemoWorkflowRR\",\n concurrency=ConcurrencyExpression(\n expression=\"input.group\",\n max_runs=1,\n limit_strategy=ConcurrencyLimitStrategy.GROUP_ROUND_ROBIN,\n ),\n input_validator=WorkflowInput,\n)\n\n\n@concurrency_limit_rr_workflow.task()\ndef step1(input: WorkflowInput, ctx: Context) -> None:\n print(\"starting step1\")\n time.sleep(2)\n print(\"finished step1\")\n pass\n\n\ndef main() -> None:\n worker = hatchet.worker(\n \"concurrency-demo-worker-rr\",\n slots=10,\n workflows=[concurrency_limit_rr_workflow],\n )\n\n worker.start()\n\n\nif __name__ == \"__main__\":\n main()\n", + "content": "import time\n\nfrom 
pydantic import BaseModel\n\nfrom hatchet_sdk import (\n ConcurrencyExpression,\n ConcurrencyLimitStrategy,\n Context,\n Hatchet,\n)\n\nhatchet = Hatchet(debug=True)\n\n\n# > Concurrency Strategy With Key\nclass WorkflowInput(BaseModel):\n group: str\n\n\nconcurrency_limit_rr_workflow = hatchet.workflow(\n name=\"ConcurrencyDemoWorkflowRR\",\n concurrency=ConcurrencyExpression(\n expression=\"input.group\",\n max_runs=1,\n limit_strategy=ConcurrencyLimitStrategy.GROUP_ROUND_ROBIN,\n ),\n input_validator=WorkflowInput,\n)\n\n\n@concurrency_limit_rr_workflow.task()\ndef step1(input: WorkflowInput, ctx: Context) -> None:\n print(\"starting step1\")\n time.sleep(2)\n print(\"finished step1\")\n\n\ndef main() -> None:\n worker = hatchet.worker(\n \"concurrency-demo-worker-rr\",\n slots=10,\n workflows=[concurrency_limit_rr_workflow],\n )\n\n worker.start()\n\n\nif __name__ == \"__main__\":\n main()\n", "source": "out/python/concurrency_limit_rr/worker.py", "blocks": { "concurrency_strategy_with_key": { diff --git a/frontend/docs/lib/generated/snips/python/dedupe/worker.ts b/frontend/docs/lib/generated/snips/python/dedupe/worker.ts index 4b6967137..64bc67b1c 100644 --- a/frontend/docs/lib/generated/snips/python/dedupe/worker.ts +++ b/frontend/docs/lib/generated/snips/python/dedupe/worker.ts @@ -2,7 +2,7 @@ import { Snippet } from '@/lib/generated/snips/types'; const snippet: Snippet = { "language": "python", - "content": "import asyncio\nfrom datetime import timedelta\nfrom typing import Any\n\nfrom hatchet_sdk import Context, EmptyModel, Hatchet, TriggerWorkflowOptions\nfrom hatchet_sdk.clients.admin import DedupeViolationErr\n\nhatchet = Hatchet(debug=True)\n\ndedupe_parent_wf = hatchet.workflow(name=\"DedupeParent\")\ndedupe_child_wf = hatchet.workflow(name=\"DedupeChild\")\n\n\n@dedupe_parent_wf.task(execution_timeout=timedelta(minutes=1))\nasync def spawn(input: EmptyModel, ctx: Context) -> dict[str, list[Any]]:\n print(\"spawning child\")\n\n results = []\n\n for i in range(2):\n try:\n results.append(\n (\n dedupe_child_wf.aio_run(\n options=TriggerWorkflowOptions(\n additional_metadata={\"dedupe\": \"test\"}, key=f\"child{i}\"\n ),\n )\n )\n )\n except DedupeViolationErr as e:\n print(f\"dedupe violation {e}\")\n continue\n\n result = await asyncio.gather(*results)\n print(f\"results {result}\")\n\n return {\"results\": result}\n\n\n@dedupe_child_wf.task()\nasync def process(input: EmptyModel, ctx: Context) -> dict[str, str]:\n await asyncio.sleep(3)\n\n print(\"child process\")\n return {\"status\": \"success\"}\n\n\n@dedupe_child_wf.task()\nasync def process2(input: EmptyModel, ctx: Context) -> dict[str, str]:\n print(\"child process2\")\n return {\"status2\": \"success\"}\n\n\ndef main() -> None:\n worker = hatchet.worker(\n \"fanout-worker\", slots=100, workflows=[dedupe_parent_wf, dedupe_child_wf]\n )\n worker.start()\n\n\nif __name__ == \"__main__\":\n main()\n", + "content": "import asyncio\nfrom datetime import timedelta\nfrom typing import Any\n\nfrom hatchet_sdk import Context, EmptyModel, Hatchet, TriggerWorkflowOptions\nfrom hatchet_sdk.exceptions import DedupeViolationError\n\nhatchet = Hatchet(debug=True)\n\ndedupe_parent_wf = hatchet.workflow(name=\"DedupeParent\")\ndedupe_child_wf = hatchet.workflow(name=\"DedupeChild\")\n\n\n@dedupe_parent_wf.task(execution_timeout=timedelta(minutes=1))\nasync def spawn(input: EmptyModel, ctx: Context) -> dict[str, list[Any]]:\n print(\"spawning child\")\n\n results = []\n\n for i in range(2):\n try:\n results.append(\n 
dedupe_child_wf.aio_run(\n options=TriggerWorkflowOptions(\n additional_metadata={\"dedupe\": \"test\"}, key=f\"child{i}\"\n ),\n )\n )\n except DedupeViolationError as e:\n print(f\"dedupe violation {e}\")\n continue\n\n result = await asyncio.gather(*results)\n print(f\"results {result}\")\n\n return {\"results\": result}\n\n\n@dedupe_child_wf.task()\nasync def process(input: EmptyModel, ctx: Context) -> dict[str, str]:\n await asyncio.sleep(3)\n\n print(\"child process\")\n return {\"status\": \"success\"}\n\n\n@dedupe_child_wf.task()\nasync def process2(input: EmptyModel, ctx: Context) -> dict[str, str]:\n print(\"child process2\")\n return {\"status2\": \"success\"}\n\n\ndef main() -> None:\n worker = hatchet.worker(\n \"fanout-worker\", slots=100, workflows=[dedupe_parent_wf, dedupe_child_wf]\n )\n worker.start()\n\n\nif __name__ == \"__main__\":\n main()\n", "source": "out/python/dedupe/worker.py", "blocks": {}, "highlights": {} diff --git a/frontend/docs/lib/generated/snips/python/durable/test_durable.ts b/frontend/docs/lib/generated/snips/python/durable/test_durable.ts index 5dfc12448..4161a81c5 100644 --- a/frontend/docs/lib/generated/snips/python/durable/test_durable.ts +++ b/frontend/docs/lib/generated/snips/python/durable/test_durable.ts @@ -2,7 +2,7 @@ import { Snippet } from '@/lib/generated/snips/types'; const snippet: Snippet = { "language": "python", - "content": "import asyncio\nimport os\n\nimport pytest\n\nfrom examples.durable.worker import EVENT_KEY, SLEEP_TIME, durable_workflow\nfrom hatchet_sdk import Hatchet\n\n\n@pytest.mark.skipif(\n os.getenv(\"CI\", \"false\").lower() == \"true\",\n reason=\"Skipped in CI because of unreliability\",\n)\n@pytest.mark.asyncio(loop_scope=\"session\")\nasync def test_durable(hatchet: Hatchet) -> None:\n ref = durable_workflow.run_no_wait()\n\n await asyncio.sleep(SLEEP_TIME + 10)\n\n hatchet.event.push(EVENT_KEY, {})\n\n result = await ref.aio_result()\n\n workers = await hatchet.workers.aio_list()\n\n assert workers.rows\n\n active_workers = [w for w in workers.rows if w.status == \"ACTIVE\"]\n\n assert len(active_workers) == 2\n assert any(w.name == \"e2e-test-worker\" for w in active_workers)\n assert any(w.name.endswith(\"e2e-test-worker_durable\") for w in active_workers)\n assert result[\"durable_task\"][\"status\"] == \"success\"\n", + "content": "import asyncio\n\nimport pytest\n\nfrom examples.durable.worker import EVENT_KEY, SLEEP_TIME, durable_workflow\nfrom hatchet_sdk import Hatchet\n\n\n@pytest.mark.asyncio(loop_scope=\"session\")\nasync def test_durable(hatchet: Hatchet) -> None:\n ref = durable_workflow.run_no_wait()\n\n await asyncio.sleep(SLEEP_TIME + 10)\n\n hatchet.event.push(EVENT_KEY, {})\n\n result = await ref.aio_result()\n\n workers = await hatchet.workers.aio_list()\n\n assert workers.rows\n\n active_workers = [w for w in workers.rows if w.status == \"ACTIVE\"]\n\n assert len(active_workers) == 2\n assert any(\n w.name == hatchet.config.apply_namespace(\"e2e-test-worker\")\n for w in active_workers\n )\n assert any(\n w.name == hatchet.config.apply_namespace(\"e2e-test-worker_durable\")\n for w in active_workers\n )\n assert result[\"durable_task\"][\"status\"] == \"success\"\n", "source": "out/python/durable/test_durable.py", "blocks": {}, "highlights": {} diff --git a/frontend/docs/lib/generated/snips/python/events/filter.ts b/frontend/docs/lib/generated/snips/python/events/filter.ts index 548dbed02..f1e6d63b1 100644 --- a/frontend/docs/lib/generated/snips/python/events/filter.ts +++ 
b/frontend/docs/lib/generated/snips/python/events/filter.ts @@ -2,7 +2,7 @@ import { Snippet } from '@/lib/generated/snips/types'; const snippet: Snippet = { "language": "python", - "content": "from examples.events.worker import EVENT_KEY, event_workflow\nfrom hatchet_sdk import Hatchet, PushEventOptions\n\nhatchet = Hatchet()\n\n# > Create a filter\nhatchet.filters.create(\n workflow_id=event_workflow.id,\n expression=\"input.should_skip == false\",\n scope=\"foobarbaz\",\n payload={\n \"main_character\": \"Anna\",\n \"supporting_character\": \"Stiva\",\n \"location\": \"Moscow\",\n },\n)\n\n# > Skip a run\nhatchet.event.push(\n event_key=EVENT_KEY,\n payload={\n \"should_skip\": True,\n },\n options=PushEventOptions(\n scope=\"foobarbaz\",\n ),\n)\n\n# > Trigger a run\nhatchet.event.push(\n event_key=EVENT_KEY,\n payload={\n \"should_skip\": True,\n },\n options=PushEventOptions(\n scope=\"foobarbaz\",\n ),\n)\n", + "content": "from examples.events.worker import EVENT_KEY, event_workflow\nfrom hatchet_sdk import Hatchet, PushEventOptions\n\nhatchet = Hatchet()\n\n# > Create a filter\nhatchet.filters.create(\n workflow_id=event_workflow.id,\n expression=\"input.should_skip == false\",\n scope=\"foobarbaz\",\n payload={\n \"main_character\": \"Anna\",\n \"supporting_character\": \"Stiva\",\n \"location\": \"Moscow\",\n },\n)\n\n# > Skip a run\nhatchet.event.push(\n event_key=EVENT_KEY,\n payload={\n \"should_skip\": True,\n },\n options=PushEventOptions(\n scope=\"foobarbaz\",\n ),\n)\n\n# > Trigger a run\nhatchet.event.push(\n event_key=EVENT_KEY,\n payload={\n \"should_skip\": False,\n },\n options=PushEventOptions(\n scope=\"foobarbaz\",\n ),\n)\n", "source": "out/python/events/filter.py", "blocks": { "create_a_filter": { diff --git a/frontend/docs/lib/generated/snips/python/events/test_event.ts b/frontend/docs/lib/generated/snips/python/events/test_event.ts index 27a7d78a3..c30528230 100644 --- a/frontend/docs/lib/generated/snips/python/events/test_event.ts +++ b/frontend/docs/lib/generated/snips/python/events/test_event.ts @@ -2,7 +2,7 @@ import { Snippet } from '@/lib/generated/snips/types'; const snippet: Snippet = { "language": "python", - "content": "import asyncio\nimport json\nfrom contextlib import asynccontextmanager\nfrom datetime import datetime, timedelta, timezone\nfrom typing import AsyncGenerator, cast\nfrom uuid import uuid4\n\nimport pytest\nfrom pydantic import BaseModel\n\nfrom examples.events.worker import (\n EVENT_KEY,\n SECONDARY_KEY,\n WILDCARD_KEY,\n EventWorkflowInput,\n event_workflow,\n)\nfrom hatchet_sdk.clients.events import (\n BulkPushEventOptions,\n BulkPushEventWithMetadata,\n PushEventOptions,\n)\nfrom hatchet_sdk.clients.rest.models.v1_task_status import V1TaskStatus\nfrom hatchet_sdk.clients.rest.models.v1_task_summary import V1TaskSummary\nfrom hatchet_sdk.contracts.events_pb2 import Event\nfrom hatchet_sdk.hatchet import Hatchet\n\n\nclass ProcessedEvent(BaseModel):\n id: str\n payload: dict[str, str | bool]\n meta: dict[str, str | bool | int]\n should_have_runs: bool\n test_run_id: str\n\n def __hash__(self) -> int:\n return hash(self.model_dump_json())\n\n\n@asynccontextmanager\nasync def event_filter(\n hatchet: Hatchet,\n test_run_id: str,\n expression: str | None = None,\n payload: dict[str, str] = {},\n) -> AsyncGenerator[None, None]:\n expression = (\n expression\n or f\"input.should_skip == false && payload.test_run_id == '{test_run_id}'\"\n )\n\n f = await hatchet.filters.aio_create(\n workflow_id=event_workflow.id,\n 
expression=expression,\n scope=test_run_id,\n payload={\"test_run_id\": test_run_id, **payload},\n )\n\n try:\n yield\n finally:\n await hatchet.filters.aio_delete(f.metadata.id)\n\n\nasync def fetch_runs_for_event(\n hatchet: Hatchet, event: Event\n) -> tuple[ProcessedEvent, list[V1TaskSummary]]:\n runs = await hatchet.runs.aio_list(triggering_event_external_id=event.eventId)\n\n meta = (\n cast(dict[str, str | int | bool], json.loads(event.additionalMetadata))\n if event.additionalMetadata\n else {}\n )\n payload = (\n cast(dict[str, str | bool], json.loads(event.payload)) if event.payload else {}\n )\n\n processed_event = ProcessedEvent(\n id=event.eventId,\n payload=payload,\n meta=meta,\n should_have_runs=meta.get(\"should_have_runs\", False) is True,\n test_run_id=cast(str, meta[\"test_run_id\"]),\n )\n\n if not all([r.output for r in runs.rows]):\n return (processed_event, [])\n\n return (\n processed_event,\n runs.rows or [],\n )\n\n\nasync def wait_for_result(\n hatchet: Hatchet, events: list[Event]\n) -> dict[ProcessedEvent, list[V1TaskSummary]]:\n await asyncio.sleep(3)\n\n since = datetime.now(tz=timezone.utc) - timedelta(minutes=2)\n\n persisted = (await hatchet.event.aio_list(limit=100, since=since)).rows or []\n\n assert {e.eventId for e in events}.issubset({e.metadata.id for e in persisted})\n\n iters = 0\n while True:\n print(\"Waiting for event runs to complete...\")\n if iters > 15:\n print(\"Timed out waiting for event runs to complete.\")\n return {\n ProcessedEvent(\n id=event.eventId,\n payload=json.loads(event.payload) if event.payload else {},\n meta=(\n json.loads(event.additionalMetadata)\n if event.additionalMetadata\n else {}\n ),\n should_have_runs=False,\n test_run_id=cast(\n str, json.loads(event.additionalMetadata).get(\"test_run_id\", \"\")\n ),\n ): []\n for event in events\n }\n\n iters += 1\n\n event_runs = await asyncio.gather(\n *[fetch_runs_for_event(hatchet, event) for event in events]\n )\n\n all_empty = all(not event_run for _, event_run in event_runs)\n\n if all_empty:\n await asyncio.sleep(1)\n continue\n\n event_id_to_runs = {event_id: runs for (event_id, runs) in event_runs}\n\n any_queued_or_running = any(\n run.status in [V1TaskStatus.QUEUED, V1TaskStatus.RUNNING]\n for runs in event_id_to_runs.values()\n for run in runs\n )\n\n if any_queued_or_running:\n await asyncio.sleep(1)\n continue\n\n break\n\n return event_id_to_runs\n\n\nasync def wait_for_result_and_assert(hatchet: Hatchet, events: list[Event]) -> None:\n event_to_runs = await wait_for_result(hatchet, events)\n\n for event, runs in event_to_runs.items():\n await assert_event_runs_processed(event, runs)\n\n\nasync def assert_event_runs_processed(\n event: ProcessedEvent,\n runs: list[V1TaskSummary],\n) -> None:\n runs = [\n run\n for run in runs\n if (run.additional_metadata or {}).get(\"hatchet__event_id\") == event.id\n ]\n\n if event.should_have_runs:\n assert len(runs) > 0\n\n for run in runs:\n assert run.status == V1TaskStatus.COMPLETED\n assert run.output.get(\"test_run_id\") == event.test_run_id\n else:\n assert len(runs) == 0\n\n\ndef bpi(\n index: int = 1,\n test_run_id: str = \"\",\n should_skip: bool = False,\n should_have_runs: bool = True,\n key: str = EVENT_KEY,\n payload: dict[str, str] = {},\n scope: str | None = None,\n) -> BulkPushEventWithMetadata:\n return BulkPushEventWithMetadata(\n key=key,\n payload={\n \"should_skip\": should_skip,\n **payload,\n },\n additional_metadata={\n \"should_have_runs\": should_have_runs,\n \"test_run_id\": test_run_id,\n 
\"key\": index,\n },\n scope=scope,\n )\n\n\ndef cp(should_skip: bool) -> dict[str, bool]:\n return EventWorkflowInput(should_skip=should_skip).model_dump()\n\n\n@pytest.mark.asyncio(loop_scope=\"session\")\nasync def test_event_push(hatchet: Hatchet) -> None:\n e = hatchet.event.push(EVENT_KEY, cp(False))\n\n assert e.eventId is not None\n\n\n@pytest.mark.asyncio(loop_scope=\"session\")\nasync def test_async_event_push(hatchet: Hatchet) -> None:\n e = await hatchet.event.aio_push(EVENT_KEY, cp(False))\n\n assert e.eventId is not None\n\n\n@pytest.mark.asyncio(loop_scope=\"session\")\nasync def test_async_event_bulk_push(hatchet: Hatchet) -> None:\n events = [\n BulkPushEventWithMetadata(\n key=\"event1\",\n payload={\"message\": \"This is event 1\", \"should_skip\": False},\n additional_metadata={\"source\": \"test\", \"user_id\": \"user123\"},\n ),\n BulkPushEventWithMetadata(\n key=\"event2\",\n payload={\"message\": \"This is event 2\", \"should_skip\": False},\n additional_metadata={\"source\": \"test\", \"user_id\": \"user456\"},\n ),\n BulkPushEventWithMetadata(\n key=\"event3\",\n payload={\"message\": \"This is event 3\", \"should_skip\": False},\n additional_metadata={\"source\": \"test\", \"user_id\": \"user789\"},\n ),\n ]\n opts = BulkPushEventOptions(namespace=\"bulk-test\")\n\n e = await hatchet.event.aio_bulk_push(events, opts)\n\n assert len(e) == 3\n\n # Sort both lists of events by their key to ensure comparison order\n sorted_events = sorted(events, key=lambda x: x.key)\n sorted_returned_events = sorted(e, key=lambda x: x.key)\n namespace = \"bulk-test\"\n\n # Check that the returned events match the original events\n for original_event, returned_event in zip(sorted_events, sorted_returned_events):\n assert returned_event.key == namespace + original_event.key\n\n\n@pytest.fixture(scope=\"function\")\ndef test_run_id() -> str:\n return str(uuid4())\n\n\n@pytest.mark.asyncio(loop_scope=\"session\")\nasync def test_event_engine_behavior(hatchet: Hatchet) -> None:\n test_run_id = str(uuid4())\n events = [\n bpi(\n test_run_id=test_run_id,\n ),\n bpi(\n test_run_id=test_run_id,\n key=\"thisisafakeeventfoobarbaz\",\n should_have_runs=False,\n ),\n ]\n\n result = await hatchet.event.aio_bulk_push(events)\n\n await wait_for_result_and_assert(hatchet, result)\n\n\ndef gen_bulk_events(test_run_id: str) -> list[BulkPushEventWithMetadata]:\n return [\n ## No scope, so it shouldn't have any runs\n bpi(\n index=1,\n test_run_id=test_run_id,\n should_skip=False,\n should_have_runs=False,\n ),\n ## No scope, so it shouldn't have any runs\n bpi(\n index=2,\n test_run_id=test_run_id,\n should_skip=True,\n should_have_runs=False,\n ),\n ## Scope is set and `should_skip` is False, so it should have runs\n bpi(\n index=3,\n test_run_id=test_run_id,\n should_skip=False,\n should_have_runs=True,\n scope=test_run_id,\n ),\n ## Scope is set and `should_skip` is True, so it shouldn't have runs\n bpi(\n index=4,\n test_run_id=test_run_id,\n should_skip=True,\n should_have_runs=False,\n scope=test_run_id,\n ),\n ## Scope is set, `should_skip` is False, but key is different, so it shouldn't have runs\n bpi(\n index=5,\n test_run_id=test_run_id,\n should_skip=True,\n should_have_runs=False,\n scope=test_run_id,\n key=\"thisisafakeeventfoobarbaz\",\n ),\n ## Scope is set, `should_skip` is False, but key is different, so it shouldn't have runs\n bpi(\n index=6,\n test_run_id=test_run_id,\n should_skip=False,\n should_have_runs=False,\n scope=test_run_id,\n key=\"thisisafakeeventfoobarbaz\",\n ),\n 
]\n\n\n@pytest.mark.asyncio(loop_scope=\"session\")\nasync def test_event_skipping_filtering(hatchet: Hatchet, test_run_id: str) -> None:\n async with event_filter(hatchet, test_run_id):\n events = gen_bulk_events(test_run_id)\n\n result = await hatchet.event.aio_bulk_push(events)\n\n await wait_for_result_and_assert(hatchet, result)\n\n\nasync def bulk_to_single(hatchet: Hatchet, event: BulkPushEventWithMetadata) -> Event:\n return await hatchet.event.aio_push(\n event_key=event.key,\n payload=event.payload,\n options=PushEventOptions(\n scope=event.scope,\n additional_metadata=event.additional_metadata,\n priority=event.priority,\n ),\n )\n\n\n@pytest.mark.asyncio(loop_scope=\"session\")\nasync def test_event_skipping_filtering_no_bulk(\n hatchet: Hatchet, test_run_id: str\n) -> None:\n async with event_filter(hatchet, test_run_id):\n raw_events = gen_bulk_events(test_run_id)\n events = await asyncio.gather(\n *[bulk_to_single(hatchet, event) for event in raw_events]\n )\n\n await wait_for_result_and_assert(hatchet, events)\n\n\n@pytest.mark.asyncio(loop_scope=\"session\")\nasync def test_event_payload_filtering(hatchet: Hatchet, test_run_id: str) -> None:\n async with event_filter(\n hatchet,\n test_run_id,\n \"input.should_skip == false && payload.foobar == 'baz'\",\n {\"foobar\": \"qux\"},\n ):\n event = await hatchet.event.aio_push(\n event_key=EVENT_KEY,\n payload={\"message\": \"This is event 1\", \"should_skip\": False},\n options=PushEventOptions(\n scope=test_run_id,\n additional_metadata={\n \"should_have_runs\": False,\n \"test_run_id\": test_run_id,\n \"key\": 1,\n },\n ),\n )\n\n await wait_for_result_and_assert(hatchet, [event])\n\n\n@pytest.mark.asyncio(loop_scope=\"session\")\nasync def test_event_payload_filtering_with_payload_match(\n hatchet: Hatchet, test_run_id: str\n) -> None:\n async with event_filter(\n hatchet,\n test_run_id,\n \"input.should_skip == false && payload.foobar == 'baz'\",\n {\"foobar\": \"baz\"},\n ):\n event = await hatchet.event.aio_push(\n event_key=EVENT_KEY,\n payload={\"message\": \"This is event 1\", \"should_skip\": False},\n options=PushEventOptions(\n scope=test_run_id,\n additional_metadata={\n \"should_have_runs\": True,\n \"test_run_id\": test_run_id,\n \"key\": 1,\n },\n ),\n )\n\n await wait_for_result_and_assert(hatchet, [event])\n\n\n@pytest.mark.asyncio(loop_scope=\"session\")\nasync def test_filtering_by_event_key(hatchet: Hatchet, test_run_id: str) -> None:\n async with event_filter(\n hatchet,\n test_run_id,\n f\"event_key == '{SECONDARY_KEY}'\",\n ):\n event_1 = await hatchet.event.aio_push(\n event_key=SECONDARY_KEY,\n payload={\n \"message\": \"Should run because filter matches\",\n \"should_skip\": False,\n },\n options=PushEventOptions(\n scope=test_run_id,\n additional_metadata={\n \"should_have_runs\": True,\n \"test_run_id\": test_run_id,\n },\n ),\n )\n event_2 = await hatchet.event.aio_push(\n event_key=EVENT_KEY,\n payload={\n \"message\": \"Should skip because filter does not match\",\n \"should_skip\": False,\n },\n options=PushEventOptions(\n scope=test_run_id,\n additional_metadata={\n \"should_have_runs\": False,\n \"test_run_id\": test_run_id,\n },\n ),\n )\n\n await wait_for_result_and_assert(hatchet, [event_1, event_2])\n\n\n@pytest.mark.asyncio(loop_scope=\"session\")\nasync def test_key_wildcards(hatchet: Hatchet, test_run_id: str) -> None:\n keys = [\n WILDCARD_KEY.replace(\"*\", \"1\"),\n WILDCARD_KEY.replace(\"*\", \"2\"),\n \"foobar\",\n EVENT_KEY,\n ]\n\n async with event_filter(\n hatchet,\n 
test_run_id,\n ):\n events = [\n await hatchet.event.aio_push(\n event_key=key,\n payload={\n \"should_skip\": False,\n },\n options=PushEventOptions(\n scope=test_run_id,\n additional_metadata={\n \"should_have_runs\": key != \"foobar\",\n \"test_run_id\": test_run_id,\n },\n ),\n )\n for key in keys\n ]\n\n await wait_for_result_and_assert(hatchet, events)\n\n\n@pytest.mark.asyncio(loop_scope=\"session\")\nasync def test_multiple_runs_for_multiple_scope_matches(\n hatchet: Hatchet, test_run_id: str\n) -> None:\n async with event_filter(\n hatchet, test_run_id, payload={\"filter_id\": \"1\"}, expression=\"1 == 1\"\n ):\n async with event_filter(\n hatchet, test_run_id, payload={\"filter_id\": \"2\"}, expression=\"2 == 2\"\n ):\n event = await hatchet.event.aio_push(\n event_key=EVENT_KEY,\n payload={\n \"should_skip\": False,\n },\n options=PushEventOptions(\n scope=test_run_id,\n additional_metadata={\n \"should_have_runs\": True,\n \"test_run_id\": test_run_id,\n },\n ),\n )\n\n event_to_runs = await wait_for_result(hatchet, [event])\n\n assert len(event_to_runs.keys()) == 1\n\n runs = list(event_to_runs.values())[0]\n\n assert len(runs) == 2\n\n assert {r.output.get(\"filter_id\") for r in runs} == {\"1\", \"2\"}\n", + "content": "import asyncio\nimport json\nfrom collections.abc import AsyncGenerator\nfrom contextlib import asynccontextmanager\nfrom datetime import datetime, timedelta, timezone\nfrom typing import cast\nfrom uuid import uuid4\n\nimport pytest\nfrom pydantic import BaseModel\n\nfrom examples.events.worker import (\n EVENT_KEY,\n SECONDARY_KEY,\n WILDCARD_KEY,\n EventWorkflowInput,\n event_workflow,\n)\nfrom hatchet_sdk.clients.events import (\n BulkPushEventOptions,\n BulkPushEventWithMetadata,\n PushEventOptions,\n)\nfrom hatchet_sdk.clients.rest.models.v1_task_status import V1TaskStatus\nfrom hatchet_sdk.clients.rest.models.v1_task_summary import V1TaskSummary\nfrom hatchet_sdk.contracts.events_pb2 import Event\nfrom hatchet_sdk.hatchet import Hatchet\n\n\nclass ProcessedEvent(BaseModel):\n id: str\n payload: dict[str, str | bool]\n meta: dict[str, str | bool | int]\n should_have_runs: bool\n test_run_id: str\n\n def __hash__(self) -> int:\n return hash(self.model_dump_json())\n\n\n@asynccontextmanager\nasync def event_filter(\n hatchet: Hatchet,\n test_run_id: str,\n expression: str | None = None,\n payload: dict[str, str] = {},\n) -> AsyncGenerator[None, None]:\n expression = (\n expression\n or f\"input.should_skip == false && payload.test_run_id == '{test_run_id}'\"\n )\n\n f = await hatchet.filters.aio_create(\n workflow_id=event_workflow.id,\n expression=expression,\n scope=test_run_id,\n payload={\"test_run_id\": test_run_id, **payload},\n )\n\n try:\n yield\n finally:\n await hatchet.filters.aio_delete(f.metadata.id)\n\n\nasync def fetch_runs_for_event(\n hatchet: Hatchet, event: Event\n) -> tuple[ProcessedEvent, list[V1TaskSummary]]:\n runs = await hatchet.runs.aio_list(triggering_event_external_id=event.eventId)\n\n meta = (\n cast(dict[str, str | int | bool], json.loads(event.additionalMetadata))\n if event.additionalMetadata\n else {}\n )\n payload = (\n cast(dict[str, str | bool], json.loads(event.payload)) if event.payload else {}\n )\n\n processed_event = ProcessedEvent(\n id=event.eventId,\n payload=payload,\n meta=meta,\n should_have_runs=meta.get(\"should_have_runs\", False) is True,\n test_run_id=cast(str, meta[\"test_run_id\"]),\n )\n\n if not all([r.output for r in runs.rows]):\n return (processed_event, [])\n\n return (\n processed_event,\n 
runs.rows or [],\n )\n\n\nasync def wait_for_result(\n hatchet: Hatchet, events: list[Event]\n) -> dict[ProcessedEvent, list[V1TaskSummary]]:\n await asyncio.sleep(3)\n\n since = datetime.now(tz=timezone.utc) - timedelta(minutes=2)\n\n persisted = (await hatchet.event.aio_list(limit=100, since=since)).rows or []\n\n assert {e.eventId for e in events}.issubset({e.metadata.id for e in persisted})\n\n iters = 0\n while True:\n print(\"Waiting for event runs to complete...\")\n if iters > 15:\n print(\"Timed out waiting for event runs to complete.\")\n return {\n ProcessedEvent(\n id=event.eventId,\n payload=json.loads(event.payload) if event.payload else {},\n meta=(\n json.loads(event.additionalMetadata)\n if event.additionalMetadata\n else {}\n ),\n should_have_runs=False,\n test_run_id=cast(\n str, json.loads(event.additionalMetadata).get(\"test_run_id\", \"\")\n ),\n ): []\n for event in events\n }\n\n iters += 1\n\n event_runs = await asyncio.gather(\n *[fetch_runs_for_event(hatchet, event) for event in events]\n )\n\n all_empty = all(not event_run for _, event_run in event_runs)\n\n if all_empty:\n await asyncio.sleep(1)\n continue\n\n event_id_to_runs = {event_id: runs for (event_id, runs) in event_runs}\n\n any_queued_or_running = any(\n run.status in [V1TaskStatus.QUEUED, V1TaskStatus.RUNNING]\n for runs in event_id_to_runs.values()\n for run in runs\n )\n\n if any_queued_or_running:\n await asyncio.sleep(1)\n continue\n\n break\n\n return event_id_to_runs\n\n\nasync def wait_for_result_and_assert(hatchet: Hatchet, events: list[Event]) -> None:\n event_to_runs = await wait_for_result(hatchet, events)\n\n for event, runs in event_to_runs.items():\n await assert_event_runs_processed(event, runs)\n\n\nasync def assert_event_runs_processed(\n event: ProcessedEvent,\n runs: list[V1TaskSummary],\n) -> None:\n runs = [\n run\n for run in runs\n if (run.additional_metadata or {}).get(\"hatchet__event_id\") == event.id\n ]\n\n if event.should_have_runs:\n assert len(runs) > 0\n\n for run in runs:\n assert run.status == V1TaskStatus.COMPLETED\n assert run.output.get(\"test_run_id\") == event.test_run_id\n else:\n assert len(runs) == 0\n\n\ndef bpi(\n index: int = 1,\n test_run_id: str = \"\",\n should_skip: bool = False,\n should_have_runs: bool = True,\n key: str = EVENT_KEY,\n payload: dict[str, str] = {},\n scope: str | None = None,\n) -> BulkPushEventWithMetadata:\n return BulkPushEventWithMetadata(\n key=key,\n payload={\n \"should_skip\": should_skip,\n **payload,\n },\n additional_metadata={\n \"should_have_runs\": should_have_runs,\n \"test_run_id\": test_run_id,\n \"key\": index,\n },\n scope=scope,\n )\n\n\ndef cp(should_skip: bool) -> dict[str, bool]:\n return EventWorkflowInput(should_skip=should_skip).model_dump()\n\n\n@pytest.mark.asyncio(loop_scope=\"session\")\nasync def test_event_push(hatchet: Hatchet) -> None:\n e = hatchet.event.push(EVENT_KEY, cp(False))\n\n assert e.eventId is not None\n\n\n@pytest.mark.asyncio(loop_scope=\"session\")\nasync def test_async_event_push(hatchet: Hatchet) -> None:\n e = await hatchet.event.aio_push(EVENT_KEY, cp(False))\n\n assert e.eventId is not None\n\n\n@pytest.mark.asyncio(loop_scope=\"session\")\nasync def test_async_event_bulk_push(hatchet: Hatchet) -> None:\n events = [\n BulkPushEventWithMetadata(\n key=\"event1\",\n payload={\"message\": \"This is event 1\", \"should_skip\": False},\n additional_metadata={\"source\": \"test\", \"user_id\": \"user123\"},\n ),\n BulkPushEventWithMetadata(\n key=\"event2\",\n payload={\"message\": 
\"This is event 2\", \"should_skip\": False},\n additional_metadata={\"source\": \"test\", \"user_id\": \"user456\"},\n ),\n BulkPushEventWithMetadata(\n key=\"event3\",\n payload={\"message\": \"This is event 3\", \"should_skip\": False},\n additional_metadata={\"source\": \"test\", \"user_id\": \"user789\"},\n ),\n ]\n opts = BulkPushEventOptions(namespace=\"bulk-test\")\n\n e = await hatchet.event.aio_bulk_push(events, opts)\n\n assert len(e) == 3\n\n # Sort both lists of events by their key to ensure comparison order\n sorted_events = sorted(events, key=lambda x: x.key)\n sorted_returned_events = sorted(e, key=lambda x: x.key)\n namespace = \"bulk-test\"\n\n # Check that the returned events match the original events\n for original_event, returned_event in zip(\n sorted_events, sorted_returned_events, strict=False\n ):\n assert returned_event.key == namespace + original_event.key\n\n\n@pytest.fixture(scope=\"function\")\ndef test_run_id() -> str:\n return str(uuid4())\n\n\n@pytest.mark.asyncio(loop_scope=\"session\")\nasync def test_event_engine_behavior(hatchet: Hatchet) -> None:\n test_run_id = str(uuid4())\n events = [\n bpi(\n test_run_id=test_run_id,\n ),\n bpi(\n test_run_id=test_run_id,\n key=\"thisisafakeeventfoobarbaz\",\n should_have_runs=False,\n ),\n ]\n\n result = await hatchet.event.aio_bulk_push(events)\n\n await wait_for_result_and_assert(hatchet, result)\n\n\ndef gen_bulk_events(test_run_id: str) -> list[BulkPushEventWithMetadata]:\n return [\n ## No scope, so it shouldn't have any runs\n bpi(\n index=1,\n test_run_id=test_run_id,\n should_skip=False,\n should_have_runs=False,\n ),\n ## No scope, so it shouldn't have any runs\n bpi(\n index=2,\n test_run_id=test_run_id,\n should_skip=True,\n should_have_runs=False,\n ),\n ## Scope is set and `should_skip` is False, so it should have runs\n bpi(\n index=3,\n test_run_id=test_run_id,\n should_skip=False,\n should_have_runs=True,\n scope=test_run_id,\n ),\n ## Scope is set and `should_skip` is True, so it shouldn't have runs\n bpi(\n index=4,\n test_run_id=test_run_id,\n should_skip=True,\n should_have_runs=False,\n scope=test_run_id,\n ),\n ## Scope is set, `should_skip` is False, but key is different, so it shouldn't have runs\n bpi(\n index=5,\n test_run_id=test_run_id,\n should_skip=True,\n should_have_runs=False,\n scope=test_run_id,\n key=\"thisisafakeeventfoobarbaz\",\n ),\n ## Scope is set, `should_skip` is False, but key is different, so it shouldn't have runs\n bpi(\n index=6,\n test_run_id=test_run_id,\n should_skip=False,\n should_have_runs=False,\n scope=test_run_id,\n key=\"thisisafakeeventfoobarbaz\",\n ),\n ]\n\n\n@pytest.mark.asyncio(loop_scope=\"session\")\nasync def test_event_skipping_filtering(hatchet: Hatchet, test_run_id: str) -> None:\n async with event_filter(hatchet, test_run_id):\n events = gen_bulk_events(test_run_id)\n\n result = await hatchet.event.aio_bulk_push(events)\n\n await wait_for_result_and_assert(hatchet, result)\n\n\nasync def bulk_to_single(hatchet: Hatchet, event: BulkPushEventWithMetadata) -> Event:\n return await hatchet.event.aio_push(\n event_key=event.key,\n payload=event.payload,\n options=PushEventOptions(\n scope=event.scope,\n additional_metadata=event.additional_metadata,\n priority=event.priority,\n ),\n )\n\n\n@pytest.mark.asyncio(loop_scope=\"session\")\nasync def test_event_skipping_filtering_no_bulk(\n hatchet: Hatchet, test_run_id: str\n) -> None:\n async with event_filter(hatchet, test_run_id):\n raw_events = gen_bulk_events(test_run_id)\n events = await 
asyncio.gather(\n *[bulk_to_single(hatchet, event) for event in raw_events]\n )\n\n await wait_for_result_and_assert(hatchet, events)\n\n\n@pytest.mark.asyncio(loop_scope=\"session\")\nasync def test_event_payload_filtering(hatchet: Hatchet, test_run_id: str) -> None:\n async with event_filter(\n hatchet,\n test_run_id,\n \"input.should_skip == false && payload.foobar == 'baz'\",\n {\"foobar\": \"qux\"},\n ):\n event = await hatchet.event.aio_push(\n event_key=EVENT_KEY,\n payload={\"message\": \"This is event 1\", \"should_skip\": False},\n options=PushEventOptions(\n scope=test_run_id,\n additional_metadata={\n \"should_have_runs\": False,\n \"test_run_id\": test_run_id,\n \"key\": 1,\n },\n ),\n )\n\n await wait_for_result_and_assert(hatchet, [event])\n\n\n@pytest.mark.asyncio(loop_scope=\"session\")\nasync def test_event_payload_filtering_with_payload_match(\n hatchet: Hatchet, test_run_id: str\n) -> None:\n async with event_filter(\n hatchet,\n test_run_id,\n \"input.should_skip == false && payload.foobar == 'baz'\",\n {\"foobar\": \"baz\"},\n ):\n event = await hatchet.event.aio_push(\n event_key=EVENT_KEY,\n payload={\"message\": \"This is event 1\", \"should_skip\": False},\n options=PushEventOptions(\n scope=test_run_id,\n additional_metadata={\n \"should_have_runs\": True,\n \"test_run_id\": test_run_id,\n \"key\": 1,\n },\n ),\n )\n\n await wait_for_result_and_assert(hatchet, [event])\n\n\n@pytest.mark.asyncio(loop_scope=\"session\")\nasync def test_filtering_by_event_key(hatchet: Hatchet, test_run_id: str) -> None:\n async with event_filter(\n hatchet,\n test_run_id,\n f\"event_key == '{SECONDARY_KEY}'\",\n ):\n event_1 = await hatchet.event.aio_push(\n event_key=SECONDARY_KEY,\n payload={\n \"message\": \"Should run because filter matches\",\n \"should_skip\": False,\n },\n options=PushEventOptions(\n scope=test_run_id,\n additional_metadata={\n \"should_have_runs\": True,\n \"test_run_id\": test_run_id,\n },\n ),\n )\n event_2 = await hatchet.event.aio_push(\n event_key=EVENT_KEY,\n payload={\n \"message\": \"Should skip because filter does not match\",\n \"should_skip\": False,\n },\n options=PushEventOptions(\n scope=test_run_id,\n additional_metadata={\n \"should_have_runs\": False,\n \"test_run_id\": test_run_id,\n },\n ),\n )\n\n await wait_for_result_and_assert(hatchet, [event_1, event_2])\n\n\n@pytest.mark.asyncio(loop_scope=\"session\")\nasync def test_key_wildcards(hatchet: Hatchet, test_run_id: str) -> None:\n keys = [\n WILDCARD_KEY.replace(\"*\", \"1\"),\n WILDCARD_KEY.replace(\"*\", \"2\"),\n \"foobar\",\n EVENT_KEY,\n ]\n\n async with event_filter(\n hatchet,\n test_run_id,\n ):\n events = [\n await hatchet.event.aio_push(\n event_key=key,\n payload={\n \"should_skip\": False,\n },\n options=PushEventOptions(\n scope=test_run_id,\n additional_metadata={\n \"should_have_runs\": key != \"foobar\",\n \"test_run_id\": test_run_id,\n },\n ),\n )\n for key in keys\n ]\n\n await wait_for_result_and_assert(hatchet, events)\n\n\n@pytest.mark.asyncio(loop_scope=\"session\")\nasync def test_multiple_runs_for_multiple_scope_matches(\n hatchet: Hatchet, test_run_id: str\n) -> None:\n async with event_filter(\n hatchet, test_run_id, payload={\"filter_id\": \"1\"}, expression=\"1 == 1\"\n ):\n async with event_filter(\n hatchet, test_run_id, payload={\"filter_id\": \"2\"}, expression=\"2 == 2\"\n ):\n event = await hatchet.event.aio_push(\n event_key=EVENT_KEY,\n payload={\n \"should_skip\": False,\n },\n options=PushEventOptions(\n scope=test_run_id,\n additional_metadata={\n 
\"should_have_runs\": True,\n \"test_run_id\": test_run_id,\n },\n ),\n )\n\n event_to_runs = await wait_for_result(hatchet, [event])\n\n assert len(event_to_runs.keys()) == 1\n\n runs = list(event_to_runs.values())[0]\n\n assert len(runs) == 2\n\n assert {r.output.get(\"filter_id\") for r in runs} == {\"1\", \"2\"}\n", "source": "out/python/events/test_event.py", "blocks": {}, "highlights": {} diff --git a/frontend/docs/lib/generated/snips/python/events/worker.ts b/frontend/docs/lib/generated/snips/python/events/worker.ts index a0f312ee8..be9caaccd 100644 --- a/frontend/docs/lib/generated/snips/python/events/worker.ts +++ b/frontend/docs/lib/generated/snips/python/events/worker.ts @@ -2,7 +2,7 @@ import { Snippet } from '@/lib/generated/snips/types'; const snippet: Snippet = { "language": "python", - "content": "from pydantic import BaseModel\n\nfrom hatchet_sdk import Context, DefaultFilter, Hatchet\n\nhatchet = Hatchet()\n\n\n# > Event trigger\nEVENT_KEY = \"user:create\"\nSECONDARY_KEY = \"foobarbaz\"\nWILDCARD_KEY = \"subscription:*\"\n\n\nclass EventWorkflowInput(BaseModel):\n should_skip: bool\n\n\nevent_workflow = hatchet.workflow(\n name=\"EventWorkflow\",\n on_events=[EVENT_KEY, SECONDARY_KEY, WILDCARD_KEY],\n input_validator=EventWorkflowInput,\n)\n\n# > Event trigger with filter\nevent_workflow_with_filter = hatchet.workflow(\n name=\"EventWorkflow\",\n on_events=[EVENT_KEY, SECONDARY_KEY, WILDCARD_KEY],\n input_validator=EventWorkflowInput,\n default_filters=[\n DefaultFilter(\n expression=\"true\",\n scope=\"example-scope\",\n payload={\n \"main_character\": \"Anna\",\n \"supporting_character\": \"Stiva\",\n \"location\": \"Moscow\",\n },\n )\n ],\n)\n\n\n@event_workflow.task()\ndef task(input: EventWorkflowInput, ctx: Context) -> dict[str, str]:\n print(\"event received\")\n\n return dict(ctx.filter_payload)\n\n\n# > Accessing the filter payload\n@event_workflow_with_filter.task()\ndef filtered_task(input: EventWorkflowInput, ctx: Context) -> None:\n print(ctx.filter_payload)\n\n\n\n\ndef main() -> None:\n worker = hatchet.worker(name=\"EventWorker\", workflows=[event_workflow])\n\n worker.start()\n\n\nif __name__ == \"__main__\":\n main()\n", + "content": "from pydantic import BaseModel\n\nfrom hatchet_sdk import Context, DefaultFilter, Hatchet\n\nhatchet = Hatchet()\n\n\n# > Event trigger\nEVENT_KEY = \"user:create\"\nSECONDARY_KEY = \"foobarbaz\"\nWILDCARD_KEY = \"subscription:*\"\n\n\nclass EventWorkflowInput(BaseModel):\n should_skip: bool\n\n\nevent_workflow = hatchet.workflow(\n name=\"EventWorkflow\",\n on_events=[EVENT_KEY, SECONDARY_KEY, WILDCARD_KEY],\n input_validator=EventWorkflowInput,\n)\n\n# > Event trigger with filter\nevent_workflow_with_filter = hatchet.workflow(\n name=\"EventWorkflow\",\n on_events=[EVENT_KEY, SECONDARY_KEY, WILDCARD_KEY],\n input_validator=EventWorkflowInput,\n default_filters=[\n DefaultFilter(\n expression=\"true\",\n scope=\"example-scope\",\n payload={\n \"main_character\": \"Anna\",\n \"supporting_character\": \"Stiva\",\n \"location\": \"Moscow\",\n },\n )\n ],\n)\n\n\n@event_workflow.task()\ndef task(input: EventWorkflowInput, ctx: Context) -> dict[str, str]:\n print(\"event received\")\n\n return ctx.filter_payload\n\n\n# > Accessing the filter payload\n@event_workflow_with_filter.task()\ndef filtered_task(input: EventWorkflowInput, ctx: Context) -> None:\n print(ctx.filter_payload)\n\n\n\n\ndef main() -> None:\n worker = hatchet.worker(name=\"EventWorker\", workflows=[event_workflow])\n\n worker.start()\n\n\nif __name__ == 
\"__main__\":\n main()\n", "source": "out/python/events/worker.py", "blocks": { "event_trigger": { diff --git a/frontend/docs/lib/generated/snips/python/lifespans/simple.ts b/frontend/docs/lib/generated/snips/python/lifespans/simple.ts index 0ab5460f3..9ebf4170a 100644 --- a/frontend/docs/lib/generated/snips/python/lifespans/simple.ts +++ b/frontend/docs/lib/generated/snips/python/lifespans/simple.ts @@ -2,12 +2,12 @@ import { Snippet } from '@/lib/generated/snips/types'; const snippet: Snippet = { "language": "python", - "content": "# > Lifespan\n\nfrom typing import AsyncGenerator, cast\n\nfrom pydantic import BaseModel\n\nfrom hatchet_sdk import Context, EmptyModel, Hatchet\n\nhatchet = Hatchet(debug=True)\n\n\nclass Lifespan(BaseModel):\n foo: str\n pi: float\n\n\nasync def lifespan() -> AsyncGenerator[Lifespan, None]:\n yield Lifespan(foo=\"bar\", pi=3.14)\n\n\n@hatchet.task(name=\"LifespanWorkflow\")\ndef lifespan_task(input: EmptyModel, ctx: Context) -> Lifespan:\n return cast(Lifespan, ctx.lifespan)\n\n\ndef main() -> None:\n worker = hatchet.worker(\n \"test-worker\", slots=1, workflows=[lifespan_task], lifespan=lifespan\n )\n worker.start()\n\n\n\nif __name__ == \"__main__\":\n main()\n", + "content": "# > Lifespan\n\nfrom collections.abc import AsyncGenerator\nfrom typing import cast\n\nfrom pydantic import BaseModel\n\nfrom hatchet_sdk import Context, EmptyModel, Hatchet\n\nhatchet = Hatchet(debug=True)\n\n\nclass Lifespan(BaseModel):\n foo: str\n pi: float\n\n\nasync def lifespan() -> AsyncGenerator[Lifespan, None]:\n yield Lifespan(foo=\"bar\", pi=3.14)\n\n\n@hatchet.task(name=\"LifespanWorkflow\")\ndef lifespan_task(input: EmptyModel, ctx: Context) -> Lifespan:\n return cast(Lifespan, ctx.lifespan)\n\n\ndef main() -> None:\n worker = hatchet.worker(\n \"test-worker\", slots=1, workflows=[lifespan_task], lifespan=lifespan\n )\n worker.start()\n\n\n\nif __name__ == \"__main__\":\n main()\n", "source": "out/python/lifespans/simple.py", "blocks": { "lifespan": { "start": 2, - "stop": 32 + "stop": 33 } }, "highlights": {} diff --git a/frontend/docs/lib/generated/snips/python/lifespans/worker.ts b/frontend/docs/lib/generated/snips/python/lifespans/worker.ts index 377e79cdb..215574691 100644 --- a/frontend/docs/lib/generated/snips/python/lifespans/worker.ts +++ b/frontend/docs/lib/generated/snips/python/lifespans/worker.ts @@ -2,16 +2,16 @@ import { Snippet } from '@/lib/generated/snips/types'; const snippet: Snippet = { "language": "python", - "content": "from typing import AsyncGenerator, cast\nfrom uuid import UUID\n\nfrom psycopg_pool import ConnectionPool\nfrom pydantic import BaseModel, ConfigDict\n\nfrom hatchet_sdk import Context, EmptyModel, Hatchet\n\nhatchet = Hatchet(debug=True)\n\n\n# > Use the lifespan in a task\nclass TaskOutput(BaseModel):\n num_rows: int\n external_ids: list[UUID]\n\n\nlifespan_workflow = hatchet.workflow(name=\"LifespanWorkflow\")\n\n\n@lifespan_workflow.task()\ndef sync_lifespan_task(input: EmptyModel, ctx: Context) -> TaskOutput:\n pool = cast(Lifespan, ctx.lifespan).pool\n\n with pool.connection() as conn:\n query = conn.execute(\"SELECT * FROM v1_lookup_table_olap LIMIT 5;\")\n rows = query.fetchall()\n\n for row in rows:\n print(row)\n\n print(\"executed sync task with lifespan\", ctx.lifespan)\n\n return TaskOutput(\n num_rows=len(rows),\n external_ids=[cast(UUID, row[0]) for row in rows],\n )\n\n\n\n\n@lifespan_workflow.task()\nasync def async_lifespan_task(input: EmptyModel, ctx: Context) -> TaskOutput:\n pool = cast(Lifespan, 
ctx.lifespan).pool\n\n with pool.connection() as conn:\n query = conn.execute(\"SELECT * FROM v1_lookup_table_olap LIMIT 5;\")\n rows = query.fetchall()\n\n for row in rows:\n print(row)\n\n print(\"executed async task with lifespan\", ctx.lifespan)\n\n return TaskOutput(\n num_rows=len(rows),\n external_ids=[cast(UUID, row[0]) for row in rows],\n )\n\n\n# > Define a lifespan\nclass Lifespan(BaseModel):\n model_config = ConfigDict(arbitrary_types_allowed=True)\n\n foo: str\n pool: ConnectionPool\n\n\nasync def lifespan() -> AsyncGenerator[Lifespan, None]:\n print(\"Running lifespan!\")\n with ConnectionPool(\"postgres://hatchet:hatchet@localhost:5431/hatchet\") as pool:\n yield Lifespan(\n foo=\"bar\",\n pool=pool,\n )\n\n print(\"Cleaning up lifespan!\")\n\n\nworker = hatchet.worker(\n \"test-worker\", slots=1, workflows=[lifespan_workflow], lifespan=lifespan\n)\n\n\ndef main() -> None:\n worker.start()\n\n\nif __name__ == \"__main__\":\n main()\n", + "content": "from collections.abc import AsyncGenerator\nfrom typing import cast\nfrom uuid import UUID\n\nfrom psycopg_pool import ConnectionPool\nfrom pydantic import BaseModel, ConfigDict\n\nfrom hatchet_sdk import Context, EmptyModel, Hatchet\n\nhatchet = Hatchet(debug=True)\n\n\n# > Use the lifespan in a task\nclass TaskOutput(BaseModel):\n num_rows: int\n external_ids: list[UUID]\n\n\nlifespan_workflow = hatchet.workflow(name=\"LifespanWorkflow\")\n\n\n@lifespan_workflow.task()\ndef sync_lifespan_task(input: EmptyModel, ctx: Context) -> TaskOutput:\n pool = cast(Lifespan, ctx.lifespan).pool\n\n with pool.connection() as conn:\n query = conn.execute(\"SELECT * FROM v1_lookup_table_olap LIMIT 5;\")\n rows = query.fetchall()\n\n for row in rows:\n print(row)\n\n print(\"executed sync task with lifespan\", ctx.lifespan)\n\n return TaskOutput(\n num_rows=len(rows),\n external_ids=[cast(UUID, row[0]) for row in rows],\n )\n\n\n\n\n@lifespan_workflow.task()\nasync def async_lifespan_task(input: EmptyModel, ctx: Context) -> TaskOutput:\n pool = cast(Lifespan, ctx.lifespan).pool\n\n with pool.connection() as conn:\n query = conn.execute(\"SELECT * FROM v1_lookup_table_olap LIMIT 5;\")\n rows = query.fetchall()\n\n for row in rows:\n print(row)\n\n print(\"executed async task with lifespan\", ctx.lifespan)\n\n return TaskOutput(\n num_rows=len(rows),\n external_ids=[cast(UUID, row[0]) for row in rows],\n )\n\n\n# > Define a lifespan\nclass Lifespan(BaseModel):\n model_config = ConfigDict(arbitrary_types_allowed=True)\n\n foo: str\n pool: ConnectionPool\n\n\nasync def lifespan() -> AsyncGenerator[Lifespan, None]:\n print(\"Running lifespan!\")\n with ConnectionPool(\"postgres://hatchet:hatchet@localhost:5431/hatchet\") as pool:\n yield Lifespan(\n foo=\"bar\",\n pool=pool,\n )\n\n print(\"Cleaning up lifespan!\")\n\n\nworker = hatchet.worker(\n \"test-worker\", slots=1, workflows=[lifespan_workflow], lifespan=lifespan\n)\n\n\ndef main() -> None:\n worker.start()\n\n\nif __name__ == \"__main__\":\n main()\n", "source": "out/python/lifespans/worker.py", "blocks": { "use_the_lifespan_in_a_task": { - "start": 13, - "stop": 39 + "start": 14, + "stop": 40 }, "define_a_lifespan": { - "start": 62, - "stop": 82 + "start": 63, + "stop": 83 } }, "highlights": {} diff --git a/frontend/docs/lib/generated/snips/python/logger/workflow.ts b/frontend/docs/lib/generated/snips/python/logger/workflow.ts index 1dbf42ab0..d164d665a 100644 --- a/frontend/docs/lib/generated/snips/python/logger/workflow.ts +++ b/frontend/docs/lib/generated/snips/python/logger/workflow.ts 
@@ -2,7 +2,7 @@ import { Snippet } from '@/lib/generated/snips/types'; const snippet: Snippet = { "language": "python", - "content": "# > LoggingWorkflow\n\nimport logging\nimport time\n\nfrom examples.logger.client import hatchet\nfrom hatchet_sdk import Context, EmptyModel\n\nlogger = logging.getLogger(__name__)\n\nlogging_workflow = hatchet.workflow(\n name=\"LoggingWorkflow\",\n)\n\n\n@logging_workflow.task()\ndef root_logger(input: EmptyModel, ctx: Context) -> dict[str, str]:\n for i in range(12):\n logger.info(\"executed step1 - {}\".format(i))\n logger.info({\"step1\": \"step1\"})\n\n time.sleep(0.1)\n\n return {\"status\": \"success\"}\n\n\n\n# > ContextLogger\n\n\n@logging_workflow.task()\ndef context_logger(input: EmptyModel, ctx: Context) -> dict[str, str]:\n for i in range(12):\n ctx.log(\"executed step1 - {}\".format(i))\n ctx.log({\"step1\": \"step1\"})\n\n time.sleep(0.1)\n\n return {\"status\": \"success\"}\n\n\n", + "content": "# > LoggingWorkflow\n\nimport logging\nimport time\n\nfrom examples.logger.client import hatchet\nfrom hatchet_sdk import Context, EmptyModel\n\nlogger = logging.getLogger(__name__)\n\nlogging_workflow = hatchet.workflow(\n name=\"LoggingWorkflow\",\n)\n\n\n@logging_workflow.task()\ndef root_logger(input: EmptyModel, ctx: Context) -> dict[str, str]:\n for i in range(12):\n logger.info(f\"executed step1 - {i}\")\n logger.info({\"step1\": \"step1\"})\n\n time.sleep(0.1)\n\n return {\"status\": \"success\"}\n\n\n\n# > ContextLogger\n\n\n@logging_workflow.task()\ndef context_logger(input: EmptyModel, ctx: Context) -> dict[str, str]:\n for i in range(12):\n ctx.log(f\"executed step1 - {i}\")\n ctx.log({\"step1\": \"step1\"})\n\n time.sleep(0.1)\n\n return {\"status\": \"success\"}\n\n\n", "source": "out/python/logger/workflow.py", "blocks": { "loggingworkflow": { diff --git a/frontend/docs/lib/generated/snips/python/migration_guides/mergent.ts b/frontend/docs/lib/generated/snips/python/migration_guides/mergent.ts index eb63cd31c..ccd6d60e4 100644 --- a/frontend/docs/lib/generated/snips/python/migration_guides/mergent.ts +++ b/frontend/docs/lib/generated/snips/python/migration_guides/mergent.ts @@ -2,32 +2,32 @@ import { Snippet } from '@/lib/generated/snips/types'; const snippet: Snippet = { "language": "python", - "content": "from datetime import datetime, timedelta, timezone\nfrom typing import Any, Dict, List, Mapping\n\nimport requests\nfrom pydantic import BaseModel\nfrom requests import Response\n\nfrom hatchet_sdk.context.context import Context\n\nfrom .hatchet_client import hatchet\n\n\nasync def process_image(image_url: str, filters: List[str]) -> Dict[str, Any]:\n # Do some image processing\n return {\"url\": image_url, \"size\": 100, \"format\": \"png\"}\n\n\n# > Before (Mergent)\nasync def process_image_task(request: Any) -> Dict[str, Any]:\n image_url = request.json[\"image_url\"]\n filters = request.json[\"filters\"]\n try:\n result = await process_image(image_url, filters)\n return {\"success\": True, \"processed_url\": result[\"url\"]}\n except Exception as e:\n print(f\"Image processing failed: {e}\")\n raise\n\n\n\n\n# > After (Hatchet)\nclass ImageProcessInput(BaseModel):\n image_url: str\n filters: List[str]\n\n\nclass ImageProcessOutput(BaseModel):\n processed_url: str\n metadata: Dict[str, Any]\n\n\n@hatchet.task(\n name=\"image-processor\",\n retries=3,\n execution_timeout=\"10m\",\n input_validator=ImageProcessInput,\n)\nasync def image_processor(input: ImageProcessInput, ctx: Context) -> ImageProcessOutput:\n # Do some image 
processing\n result = await process_image(input.image_url, input.filters)\n\n if not result[\"url\"]:\n raise ValueError(\"Processing failed to generate URL\")\n\n return ImageProcessOutput(\n processed_url=result[\"url\"],\n metadata={\n \"size\": result[\"size\"],\n \"format\": result[\"format\"],\n \"applied_filters\": input.filters,\n },\n )\n\n\n\n\nasync def run() -> None:\n # > Running a task (Mergent)\n headers: Mapping[str, str] = {\n \"Authorization\": \"Bearer \",\n \"Content-Type\": \"application/json\",\n }\n\n task_data = {\n \"name\": \"4cf95241-fa19-47ef-8a67-71e483747649\",\n \"queue\": \"default\",\n \"request\": {\n \"url\": \"https://example.com\",\n \"headers\": {\n \"Authorization\": \"fake-secret-token\",\n \"Content-Type\": \"application/json\",\n },\n \"body\": \"Hello, world!\",\n },\n }\n\n try:\n response: Response = requests.post(\n \"https://api.mergent.co/v2/tasks\",\n headers=headers,\n json=task_data,\n )\n print(response.json())\n except Exception as e:\n print(f\"Error: {e}\")\n\n # > Running a task (Hatchet)\n result = await image_processor.aio_run(\n ImageProcessInput(image_url=\"https://example.com/image.png\", filters=[\"blur\"])\n )\n\n # you can await fully typed results\n print(result)\n\n\nasync def schedule() -> None:\n # > Scheduling tasks (Mergent)\n options = {\n # same options as before\n \"json\": {\n # same body as before\n \"delay\": \"5m\"\n }\n }\n\n print(options)\n\n # > Scheduling tasks (Hatchet)\n # Schedule the task to run at a specific time\n run_at = datetime.now(tz=timezone.utc) + timedelta(days=1)\n await image_processor.aio_schedule(\n run_at,\n ImageProcessInput(image_url=\"https://example.com/image.png\", filters=[\"blur\"]),\n )\n\n # Schedule the task to run every hour\n await image_processor.aio_create_cron(\n \"run-hourly\",\n \"0 * * * *\",\n ImageProcessInput(image_url=\"https://example.com/image.png\", filters=[\"blur\"]),\n )\n", + "content": "from collections.abc import Mapping\nfrom datetime import datetime, timedelta, timezone\nfrom typing import Any\n\nimport requests\nfrom pydantic import BaseModel\nfrom requests import Response\n\nfrom hatchet_sdk.context.context import Context\n\nfrom .hatchet_client import hatchet\n\n\nasync def process_image(image_url: str, filters: list[str]) -> dict[str, Any]:\n # Do some image processing\n return {\"url\": image_url, \"size\": 100, \"format\": \"png\"}\n\n\n# > Before (Mergent)\nasync def process_image_task(request: Any) -> dict[str, Any]:\n image_url = request.json[\"image_url\"]\n filters = request.json[\"filters\"]\n try:\n result = await process_image(image_url, filters)\n return {\"success\": True, \"processed_url\": result[\"url\"]}\n except Exception as e:\n print(f\"Image processing failed: {e}\")\n raise\n\n\n\n\n# > After (Hatchet)\nclass ImageProcessInput(BaseModel):\n image_url: str\n filters: list[str]\n\n\nclass ImageProcessOutput(BaseModel):\n processed_url: str\n metadata: dict[str, Any]\n\n\n@hatchet.task(\n name=\"image-processor\",\n retries=3,\n execution_timeout=\"10m\",\n input_validator=ImageProcessInput,\n)\nasync def image_processor(input: ImageProcessInput, ctx: Context) -> ImageProcessOutput:\n # Do some image processing\n result = await process_image(input.image_url, input.filters)\n\n if not result[\"url\"]:\n raise ValueError(\"Processing failed to generate URL\")\n\n return ImageProcessOutput(\n processed_url=result[\"url\"],\n metadata={\n \"size\": result[\"size\"],\n \"format\": result[\"format\"],\n \"applied_filters\": input.filters,\n 
},\n )\n\n\n\n\nasync def run() -> None:\n # > Running a task (Mergent)\n headers: Mapping[str, str] = {\n \"Authorization\": \"Bearer \",\n \"Content-Type\": \"application/json\",\n }\n\n task_data = {\n \"name\": \"4cf95241-fa19-47ef-8a67-71e483747649\",\n \"queue\": \"default\",\n \"request\": {\n \"url\": \"https://example.com\",\n \"headers\": {\n \"Authorization\": \"fake-secret-token\",\n \"Content-Type\": \"application/json\",\n },\n \"body\": \"Hello, world!\",\n },\n }\n\n try:\n response: Response = requests.post(\n \"https://api.mergent.co/v2/tasks\",\n headers=headers,\n json=task_data,\n )\n print(response.json())\n except Exception as e:\n print(f\"Error: {e}\")\n\n # > Running a task (Hatchet)\n result = await image_processor.aio_run(\n ImageProcessInput(image_url=\"https://example.com/image.png\", filters=[\"blur\"])\n )\n\n # you can await fully typed results\n print(result)\n\n\nasync def schedule() -> None:\n # > Scheduling tasks (Mergent)\n options = {\n # same options as before\n \"json\": {\n # same body as before\n \"delay\": \"5m\"\n }\n }\n\n print(options)\n\n # > Scheduling tasks (Hatchet)\n # Schedule the task to run at a specific time\n run_at = datetime.now(tz=timezone.utc) + timedelta(days=1)\n await image_processor.aio_schedule(\n run_at,\n ImageProcessInput(image_url=\"https://example.com/image.png\", filters=[\"blur\"]),\n )\n\n # Schedule the task to run every hour\n await image_processor.aio_create_cron(\n \"run-hourly\",\n \"0 * * * *\",\n ImageProcessInput(image_url=\"https://example.com/image.png\", filters=[\"blur\"]),\n )\n", "source": "out/python/migration_guides/mergent.py", "blocks": { "before_mergent": { - "start": 19, - "stop": 29 + "start": 20, + "stop": 30 }, "after_hatchet": { - "start": 33, - "stop": 65 + "start": 34, + "stop": 66 }, "running_a_task_mergent": { - "start": 70, - "stop": 96 + "start": 71, + "stop": 97 }, "running_a_task_hatchet": { - "start": 99, - "stop": 104 + "start": 100, + "stop": 105 }, "scheduling_tasks_mergent": { - "start": 109, - "stop": 115 + "start": 110, + "stop": 116 }, "scheduling_tasks_hatchet": { - "start": 120, - "stop": 132 + "start": 121, + "stop": 133 } }, "highlights": {} diff --git a/frontend/docs/lib/generated/snips/python/non_retryable/test_no_retry.ts b/frontend/docs/lib/generated/snips/python/non_retryable/test_no_retry.ts index ec216f096..feb84ecbe 100644 --- a/frontend/docs/lib/generated/snips/python/non_retryable/test_no_retry.ts +++ b/frontend/docs/lib/generated/snips/python/non_retryable/test_no_retry.ts @@ -2,7 +2,7 @@ import { Snippet } from '@/lib/generated/snips/types'; const snippet: Snippet = { "language": "python", - "content": "import pytest\n\nfrom examples.non_retryable.worker import (\n non_retryable_workflow,\n should_not_retry,\n should_not_retry_successful_task,\n should_retry_wrong_exception_type,\n)\nfrom hatchet_sdk import Hatchet\nfrom hatchet_sdk.clients.rest.models.v1_task_event_type import V1TaskEventType\nfrom hatchet_sdk.clients.rest.models.v1_workflow_run_details import V1WorkflowRunDetails\n\n\ndef find_id(runs: V1WorkflowRunDetails, match: str) -> str:\n return next(t.metadata.id for t in runs.tasks if match in t.display_name)\n\n\n@pytest.mark.asyncio(loop_scope=\"session\")\nasync def test_no_retry(hatchet: Hatchet) -> None:\n ref = await non_retryable_workflow.aio_run_no_wait()\n\n with pytest.raises(Exception, match=\"retry\"):\n await ref.aio_result()\n\n runs = await hatchet.runs.aio_get(ref.workflow_run_id)\n task_to_id = {\n task: find_id(runs, task.name)\n 
for task in [\n should_not_retry_successful_task,\n should_retry_wrong_exception_type,\n should_not_retry,\n ]\n }\n\n retrying_events = [\n e for e in runs.task_events if e.event_type == V1TaskEventType.RETRYING\n ]\n\n \"\"\"Only one task should be retried.\"\"\"\n assert len(retrying_events) == 1\n\n \"\"\"The task id of the retrying events should match the tasks that are retried\"\"\"\n assert {e.task_id for e in retrying_events} == {\n task_to_id[should_retry_wrong_exception_type],\n }\n\n \"\"\"Three failed events should emit, one each for the two failing initial runs and one for the retry.\"\"\"\n assert (\n len([e for e in runs.task_events if e.event_type == V1TaskEventType.FAILED])\n == 3\n )\n", + "content": "import asyncio\n\nimport pytest\n\nfrom examples.non_retryable.worker import (\n non_retryable_workflow,\n should_not_retry,\n should_not_retry_successful_task,\n should_retry_wrong_exception_type,\n)\nfrom hatchet_sdk import Hatchet\nfrom hatchet_sdk.clients.rest.models.v1_task_event_type import V1TaskEventType\nfrom hatchet_sdk.clients.rest.models.v1_workflow_run_details import V1WorkflowRunDetails\nfrom hatchet_sdk.exceptions import FailedTaskRunExceptionGroup\n\n\ndef find_id(runs: V1WorkflowRunDetails, match: str) -> str:\n return next(t.metadata.id for t in runs.tasks if match in t.display_name)\n\n\n@pytest.mark.asyncio(loop_scope=\"session\")\nasync def test_no_retry(hatchet: Hatchet) -> None:\n ref = await non_retryable_workflow.aio_run_no_wait()\n\n with pytest.raises(FailedTaskRunExceptionGroup) as exc_info:\n await ref.aio_result()\n\n exception_group = exc_info.value\n\n assert len(exception_group.exceptions) == 2\n\n exc_text = [e.exc for e in exception_group.exceptions]\n\n non_retries = [\n e\n for e in exc_text\n if \"This task should retry because it's not a NonRetryableException\" in e\n ]\n\n other_errors = [e for e in exc_text if \"This task should not retry\" in e]\n\n assert len(non_retries) == 1\n assert len(other_errors) == 1\n\n await asyncio.sleep(3)\n\n runs = await hatchet.runs.aio_get(ref.workflow_run_id)\n task_to_id = {\n task: find_id(runs, task.name)\n for task in [\n should_not_retry_successful_task,\n should_retry_wrong_exception_type,\n should_not_retry,\n ]\n }\n\n retrying_events = [\n e for e in runs.task_events if e.event_type == V1TaskEventType.RETRYING\n ]\n\n \"\"\"Only one task should be retried.\"\"\"\n assert len(retrying_events) == 1\n\n \"\"\"The task id of the retrying events should match the tasks that are retried\"\"\"\n assert retrying_events[0].task_id == task_to_id[should_retry_wrong_exception_type]\n\n \"\"\"Three failed events should emit, one each for the two failing initial runs and one for the retry.\"\"\"\n assert (\n len([e for e in runs.task_events if e.event_type == V1TaskEventType.FAILED])\n == 3\n )\n", "source": "out/python/non_retryable/test_no_retry.py", "blocks": {}, "highlights": {} diff --git a/frontend/docs/lib/generated/snips/python/opentelemetry_instrumentation/langfuse/client.ts b/frontend/docs/lib/generated/snips/python/opentelemetry_instrumentation/langfuse/client.ts index 337811a7c..ad273fc35 100644 --- a/frontend/docs/lib/generated/snips/python/opentelemetry_instrumentation/langfuse/client.ts +++ b/frontend/docs/lib/generated/snips/python/opentelemetry_instrumentation/langfuse/client.ts @@ -2,7 +2,7 @@ import { Snippet } from '@/lib/generated/snips/types'; const snippet: Snippet = { "language": "python", - "content": "import base64\nimport os\n\nfrom langfuse import Langfuse # type: 
ignore[import-untyped]\nfrom langfuse.openai import AsyncOpenAI # type: ignore[import-untyped]\n\n# > Configure Langfuse\nLANGFUSE_AUTH = base64.b64encode(\n f\"{os.getenv('LANGFUSE_PUBLIC_KEY')}:{os.getenv('LANGFUSE_SECRET_KEY')}\".encode()\n).decode()\n\nos.environ[\"OTEL_EXPORTER_OTLP_ENDPOINT\"] = (\n os.getenv(\"LANGFUSE_HOST\", \"https://us.cloud.langfuse.com\") + \"/api/public/otel\"\n)\nos.environ[\"OTEL_EXPORTER_OTLP_HEADERS\"] = f\"Authorization=Basic {LANGFUSE_AUTH}\"\n\n## Note: Langfuse sets the global tracer provider, so you don't need to worry about it\nlf = Langfuse(\n public_key=os.getenv(\"LANGFUSE_PUBLIC_KEY\"),\n secret_key=os.getenv(\"LANGFUSE_SECRET_KEY\"),\n host=os.getenv(\"LANGFUSE_HOST\", \"https://app.langfuse.com\"),\n)\n\n# > Create OpenAI client\nopenai = AsyncOpenAI(\n api_key=os.getenv(\"OPENAI_API_KEY\"),\n)\n", + "content": "import base64\nimport os\n\nfrom langfuse import Langfuse # type: ignore\nfrom langfuse.openai import AsyncOpenAI # type: ignore\n\n# > Configure Langfuse\nLANGFUSE_AUTH = base64.b64encode(\n f\"{os.getenv('LANGFUSE_PUBLIC_KEY')}:{os.getenv('LANGFUSE_SECRET_KEY')}\".encode()\n).decode()\n\nos.environ[\"OTEL_EXPORTER_OTLP_ENDPOINT\"] = (\n os.getenv(\"LANGFUSE_HOST\", \"https://us.cloud.langfuse.com\") + \"/api/public/otel\"\n)\nos.environ[\"OTEL_EXPORTER_OTLP_HEADERS\"] = f\"Authorization=Basic {LANGFUSE_AUTH}\"\n\n## Note: Langfuse sets the global tracer provider, so you don't need to worry about it\nlf = Langfuse(\n public_key=os.getenv(\"LANGFUSE_PUBLIC_KEY\"),\n secret_key=os.getenv(\"LANGFUSE_SECRET_KEY\"),\n host=os.getenv(\"LANGFUSE_HOST\", \"https://app.langfuse.com\"),\n)\n\n# > Create OpenAI client\nopenai = AsyncOpenAI(\n api_key=os.getenv(\"OPENAI_API_KEY\"),\n)\n", "source": "out/python/opentelemetry_instrumentation/langfuse/client.py", "blocks": { "configure_langfuse": { diff --git a/frontend/docs/lib/generated/snips/python/opentelemetry_instrumentation/langfuse/trigger.ts b/frontend/docs/lib/generated/snips/python/opentelemetry_instrumentation/langfuse/trigger.ts index ca6209e24..4f584f6c9 100644 --- a/frontend/docs/lib/generated/snips/python/opentelemetry_instrumentation/langfuse/trigger.ts +++ b/frontend/docs/lib/generated/snips/python/opentelemetry_instrumentation/langfuse/trigger.ts @@ -2,7 +2,7 @@ import { Snippet } from '@/lib/generated/snips/types'; const snippet: Snippet = { "language": "python", - "content": "import asyncio\n\nfrom langfuse import get_client # type: ignore[import-untyped]\nfrom opentelemetry.trace import StatusCode\n\nfrom examples.opentelemetry_instrumentation.langfuse.worker import langfuse_task\n\n# > Trigger task\ntracer = get_client()\n\n\nasync def main() -> None:\n # Traces will send to Langfuse\n # Use `_otel_tracer` to access the OpenTelemetry tracer if you need\n # to e.g. 
log statuses or attributes manually.\n with tracer._otel_tracer.start_as_current_span(name=\"trigger\") as span:\n result = await langfuse_task.aio_run()\n location = result.get(\"location\")\n\n if not location:\n span.set_status(StatusCode.ERROR)\n return\n\n span.set_attribute(\"location\", location)\n\n\n\nif __name__ == \"__main__\":\n asyncio.run(main())\n", + "content": "import asyncio\n\nfrom langfuse import get_client # type: ignore\nfrom opentelemetry.trace import StatusCode\n\nfrom examples.opentelemetry_instrumentation.langfuse.worker import langfuse_task\n\n# > Trigger task\ntracer = get_client()\n\n\nasync def main() -> None:\n # Traces will send to Langfuse\n # Use `_otel_tracer` to access the OpenTelemetry tracer if you need\n # to e.g. log statuses or attributes manually.\n with tracer._otel_tracer.start_as_current_span(name=\"trigger\") as span:\n result = await langfuse_task.aio_run()\n location = result.get(\"location\")\n\n if not location:\n span.set_status(StatusCode.ERROR)\n return\n\n span.set_attribute(\"location\", location)\n\n\n\nif __name__ == \"__main__\":\n asyncio.run(main())\n", "source": "out/python/opentelemetry_instrumentation/langfuse/trigger.py", "blocks": { "trigger_task": { diff --git a/frontend/docs/lib/generated/snips/python/priority/test_priority.ts b/frontend/docs/lib/generated/snips/python/priority/test_priority.ts index 53e92f654..41e2e9222 100644 --- a/frontend/docs/lib/generated/snips/python/priority/test_priority.ts +++ b/frontend/docs/lib/generated/snips/python/priority/test_priority.ts @@ -2,7 +2,7 @@ import { Snippet } from '@/lib/generated/snips/types'; const snippet: Snippet = { "language": "python", - "content": "import asyncio\nfrom datetime import datetime, timedelta, timezone\nfrom random import choice\nfrom subprocess import Popen\nfrom typing import Any, AsyncGenerator, Literal\nfrom uuid import uuid4\n\nimport pytest\nimport pytest_asyncio\nfrom pydantic import BaseModel\n\nfrom examples.priority.worker import DEFAULT_PRIORITY, SLEEP_TIME, priority_workflow\nfrom hatchet_sdk import Hatchet, ScheduleTriggerWorkflowOptions, TriggerWorkflowOptions\nfrom hatchet_sdk.clients.rest.models.v1_task_status import V1TaskStatus\n\nPriority = Literal[\"low\", \"medium\", \"high\", \"default\"]\n\n\nclass RunPriorityStartedAt(BaseModel):\n priority: Priority\n started_at: datetime\n finished_at: datetime\n\n\ndef priority_to_int(priority: Priority) -> int:\n match priority:\n case \"high\":\n return 3\n case \"medium\":\n return 2\n case \"low\":\n return 1\n case \"default\":\n return DEFAULT_PRIORITY\n case _:\n raise ValueError(f\"Invalid priority: {priority}\")\n\n\n@pytest_asyncio.fixture(loop_scope=\"session\", scope=\"function\")\nasync def dummy_runs() -> None:\n priority: Priority = \"high\"\n\n await priority_workflow.aio_run_many_no_wait(\n [\n priority_workflow.create_bulk_run_item(\n options=TriggerWorkflowOptions(\n priority=(priority_to_int(priority)),\n additional_metadata={\n \"priority\": priority,\n \"key\": ix,\n \"type\": \"dummy\",\n },\n )\n )\n for ix in range(40)\n ]\n )\n\n await asyncio.sleep(3)\n\n return None\n\n\n@pytest.mark.parametrize(\n \"on_demand_worker\",\n [\n (\n [\"poetry\", \"run\", \"python\", \"examples/priority/worker.py\", \"--slots\", \"1\"],\n 8003,\n )\n ],\n indirect=True,\n)\n@pytest.mark.asyncio(loop_scope=\"session\")\nasync def test_priority(\n hatchet: Hatchet, dummy_runs: None, on_demand_worker: Popen[Any]\n) -> None:\n test_run_id = str(uuid4())\n choices: list[Priority] = [\"low\", 
\"medium\", \"high\", \"default\"]\n N = 30\n\n run_refs = await priority_workflow.aio_run_many_no_wait(\n [\n priority_workflow.create_bulk_run_item(\n options=TriggerWorkflowOptions(\n priority=(priority_to_int(priority := choice(choices))),\n additional_metadata={\n \"priority\": priority,\n \"key\": ix,\n \"test_run_id\": test_run_id,\n },\n )\n )\n for ix in range(N)\n ]\n )\n\n await asyncio.gather(*[r.aio_result() for r in run_refs])\n\n workflows = (\n await hatchet.workflows.aio_list(workflow_name=priority_workflow.name)\n ).rows\n\n assert workflows\n\n workflow = next((w for w in workflows if w.name == priority_workflow.name), None)\n\n assert workflow\n\n assert workflow.name == priority_workflow.name\n\n runs = await hatchet.runs.aio_list(\n workflow_ids=[workflow.metadata.id],\n additional_metadata={\n \"test_run_id\": test_run_id,\n },\n limit=1_000,\n )\n\n runs_ids_started_ats: list[RunPriorityStartedAt] = sorted(\n [\n RunPriorityStartedAt(\n priority=(r.additional_metadata or {}).get(\"priority\") or \"low\",\n started_at=r.started_at or datetime.min,\n finished_at=r.finished_at or datetime.min,\n )\n for r in runs.rows\n ],\n key=lambda x: x.started_at,\n )\n\n assert len(runs_ids_started_ats) == len(run_refs)\n assert len(runs_ids_started_ats) == N\n\n for i in range(len(runs_ids_started_ats) - 1):\n curr = runs_ids_started_ats[i]\n nxt = runs_ids_started_ats[i + 1]\n\n \"\"\"Run start times should be in order of priority\"\"\"\n assert priority_to_int(curr.priority) >= priority_to_int(nxt.priority)\n\n \"\"\"Runs should proceed one at a time\"\"\"\n assert curr.finished_at <= nxt.finished_at\n assert nxt.finished_at >= nxt.started_at\n\n \"\"\"Runs should finish after starting (this is mostly a test for engine datetime handling bugs)\"\"\"\n assert curr.finished_at >= curr.started_at\n\n\n@pytest.mark.parametrize(\n \"on_demand_worker\",\n [\n (\n [\"poetry\", \"run\", \"python\", \"examples/priority/worker.py\", \"--slots\", \"1\"],\n 8003,\n )\n ],\n indirect=True,\n)\n@pytest.mark.asyncio(loop_scope=\"session\")\nasync def test_priority_via_scheduling(\n hatchet: Hatchet, dummy_runs: None, on_demand_worker: Popen[Any]\n) -> None:\n test_run_id = str(uuid4())\n sleep_time = 3\n n = 30\n choices: list[Priority] = [\"low\", \"medium\", \"high\", \"default\"]\n run_at = datetime.now(tz=timezone.utc) + timedelta(seconds=sleep_time)\n\n versions = await asyncio.gather(\n *[\n priority_workflow.aio_schedule(\n run_at=run_at,\n options=ScheduleTriggerWorkflowOptions(\n priority=(priority_to_int(priority := choice(choices))),\n additional_metadata={\n \"priority\": priority,\n \"key\": ix,\n \"test_run_id\": test_run_id,\n },\n ),\n )\n for ix in range(n)\n ]\n )\n\n await asyncio.sleep(sleep_time * 2)\n\n workflow_id = versions[0].workflow_id\n\n attempts = 0\n\n while True:\n if attempts >= SLEEP_TIME * n * 2:\n raise TimeoutError(\"Timed out waiting for runs to finish\")\n\n attempts += 1\n await asyncio.sleep(1)\n runs = await hatchet.runs.aio_list(\n workflow_ids=[workflow_id],\n additional_metadata={\n \"test_run_id\": test_run_id,\n },\n limit=1_000,\n )\n\n if not runs.rows:\n continue\n\n if any(\n r.status in [V1TaskStatus.FAILED, V1TaskStatus.CANCELLED] for r in runs.rows\n ):\n raise ValueError(\"One or more runs failed or were cancelled\")\n\n if all(r.status == V1TaskStatus.COMPLETED for r in runs.rows):\n break\n\n runs_ids_started_ats: list[RunPriorityStartedAt] = sorted(\n [\n RunPriorityStartedAt(\n priority=(r.additional_metadata or {}).get(\"priority\") 
or \"low\",\n started_at=r.started_at or datetime.min,\n finished_at=r.finished_at or datetime.min,\n )\n for r in runs.rows\n ],\n key=lambda x: x.started_at,\n )\n\n assert len(runs_ids_started_ats) == len(versions)\n\n for i in range(len(runs_ids_started_ats) - 1):\n curr = runs_ids_started_ats[i]\n nxt = runs_ids_started_ats[i + 1]\n\n \"\"\"Run start times should be in order of priority\"\"\"\n assert priority_to_int(curr.priority) >= priority_to_int(nxt.priority)\n\n \"\"\"Runs should proceed one at a time\"\"\"\n assert curr.finished_at <= nxt.finished_at\n assert nxt.finished_at >= nxt.started_at\n\n \"\"\"Runs should finish after starting (this is mostly a test for engine datetime handling bugs)\"\"\"\n assert curr.finished_at >= curr.started_at\n\n\n@pytest_asyncio.fixture(loop_scope=\"session\", scope=\"function\")\nasync def crons(\n hatchet: Hatchet, dummy_runs: None\n) -> AsyncGenerator[tuple[str, str, int], None]:\n test_run_id = str(uuid4())\n choices: list[Priority] = [\"low\", \"medium\", \"high\"]\n n = 30\n\n crons = await asyncio.gather(\n *[\n hatchet.cron.aio_create(\n workflow_name=priority_workflow.name,\n cron_name=f\"{test_run_id}-cron-{i}\",\n expression=\"* * * * *\",\n input={},\n additional_metadata={\n \"trigger\": \"cron\",\n \"test_run_id\": test_run_id,\n \"priority\": (priority := choice(choices)),\n \"key\": str(i),\n },\n priority=(priority_to_int(priority)),\n )\n for i in range(n)\n ]\n )\n\n yield crons[0].workflow_id, test_run_id, n\n\n await asyncio.gather(*[hatchet.cron.aio_delete(cron.metadata.id) for cron in crons])\n\n\ndef time_until_next_minute() -> float:\n now = datetime.now(tz=timezone.utc)\n next_minute = (now + timedelta(minutes=1)).replace(second=0, microsecond=0)\n\n return (next_minute - now).total_seconds()\n\n\n@pytest.mark.skip(\n reason=\"Test is flaky because the first jobs that are picked up don't necessarily go in priority order\"\n)\n@pytest.mark.parametrize(\n \"on_demand_worker\",\n [\n (\n [\"poetry\", \"run\", \"python\", \"examples/priority/worker.py\", \"--slots\", \"1\"],\n 8003,\n )\n ],\n indirect=True,\n)\n@pytest.mark.asyncio(loop_scope=\"session\")\nasync def test_priority_via_cron(\n hatchet: Hatchet, crons: tuple[str, str, int], on_demand_worker: Popen[Any]\n) -> None:\n workflow_id, test_run_id, n = crons\n\n await asyncio.sleep(time_until_next_minute() + 10)\n\n attempts = 0\n\n while True:\n if attempts >= SLEEP_TIME * n * 2:\n raise TimeoutError(\"Timed out waiting for runs to finish\")\n\n attempts += 1\n await asyncio.sleep(1)\n runs = await hatchet.runs.aio_list(\n workflow_ids=[workflow_id],\n additional_metadata={\n \"test_run_id\": test_run_id,\n },\n limit=1_000,\n )\n\n if not runs.rows:\n continue\n\n if any(\n r.status in [V1TaskStatus.FAILED, V1TaskStatus.CANCELLED] for r in runs.rows\n ):\n raise ValueError(\"One or more runs failed or were cancelled\")\n\n if all(r.status == V1TaskStatus.COMPLETED for r in runs.rows):\n break\n\n runs_ids_started_ats: list[RunPriorityStartedAt] = sorted(\n [\n RunPriorityStartedAt(\n priority=(r.additional_metadata or {}).get(\"priority\") or \"low\",\n started_at=r.started_at or datetime.min,\n finished_at=r.finished_at or datetime.min,\n )\n for r in runs.rows\n ],\n key=lambda x: x.started_at,\n )\n\n assert len(runs_ids_started_ats) == n\n\n for i in range(len(runs_ids_started_ats) - 1):\n curr = runs_ids_started_ats[i]\n nxt = runs_ids_started_ats[i + 1]\n\n \"\"\"Run start times should be in order of priority\"\"\"\n assert priority_to_int(curr.priority) 
>= priority_to_int(nxt.priority)\n\n \"\"\"Runs should proceed one at a time\"\"\"\n assert curr.finished_at <= nxt.finished_at\n assert nxt.finished_at >= nxt.started_at\n\n \"\"\"Runs should finish after starting (this is mostly a test for engine datetime handling bugs)\"\"\"\n assert curr.finished_at >= curr.started_at\n", + "content": "import asyncio\nfrom collections.abc import AsyncGenerator\nfrom datetime import datetime, timedelta, timezone\nfrom random import choice\nfrom subprocess import Popen\nfrom typing import Any, Literal\nfrom uuid import uuid4\n\nimport pytest\nimport pytest_asyncio\nfrom pydantic import BaseModel\n\nfrom examples.priority.worker import DEFAULT_PRIORITY, SLEEP_TIME, priority_workflow\nfrom hatchet_sdk import Hatchet, ScheduleTriggerWorkflowOptions, TriggerWorkflowOptions\nfrom hatchet_sdk.clients.rest.models.v1_task_status import V1TaskStatus\n\nPriority = Literal[\"low\", \"medium\", \"high\", \"default\"]\n\n\nclass RunPriorityStartedAt(BaseModel):\n priority: Priority\n started_at: datetime\n finished_at: datetime\n\n\ndef priority_to_int(priority: Priority) -> int:\n match priority:\n case \"high\":\n return 3\n case \"medium\":\n return 2\n case \"low\":\n return 1\n case \"default\":\n return DEFAULT_PRIORITY\n case _:\n raise ValueError(f\"Invalid priority: {priority}\")\n\n\n@pytest_asyncio.fixture(loop_scope=\"session\", scope=\"function\")\nasync def dummy_runs() -> None:\n priority: Priority = \"high\"\n\n await priority_workflow.aio_run_many_no_wait(\n [\n priority_workflow.create_bulk_run_item(\n options=TriggerWorkflowOptions(\n priority=(priority_to_int(priority)),\n additional_metadata={\n \"priority\": priority,\n \"key\": ix,\n \"type\": \"dummy\",\n },\n )\n )\n for ix in range(40)\n ]\n )\n\n await asyncio.sleep(3)\n\n return\n\n\n@pytest.mark.parametrize(\n \"on_demand_worker\",\n [\n (\n [\"poetry\", \"run\", \"python\", \"examples/priority/worker.py\", \"--slots\", \"1\"],\n 8003,\n )\n ],\n indirect=True,\n)\n@pytest.mark.asyncio(loop_scope=\"session\")\nasync def test_priority(\n hatchet: Hatchet, dummy_runs: None, on_demand_worker: Popen[Any]\n) -> None:\n test_run_id = str(uuid4())\n choices: list[Priority] = [\"low\", \"medium\", \"high\", \"default\"]\n N = 30\n\n run_refs = await priority_workflow.aio_run_many_no_wait(\n [\n priority_workflow.create_bulk_run_item(\n options=TriggerWorkflowOptions(\n priority=(priority_to_int(priority := choice(choices))),\n additional_metadata={\n \"priority\": priority,\n \"key\": ix,\n \"test_run_id\": test_run_id,\n },\n )\n )\n for ix in range(N)\n ]\n )\n\n await asyncio.gather(*[r.aio_result() for r in run_refs])\n\n workflows = (\n await hatchet.workflows.aio_list(workflow_name=priority_workflow.name)\n ).rows\n\n assert workflows\n\n workflow = next((w for w in workflows if w.name == priority_workflow.name), None)\n\n assert workflow\n\n assert workflow.name == priority_workflow.name\n\n runs = await hatchet.runs.aio_list(\n workflow_ids=[workflow.metadata.id],\n additional_metadata={\n \"test_run_id\": test_run_id,\n },\n limit=1_000,\n )\n\n runs_ids_started_ats: list[RunPriorityStartedAt] = sorted(\n [\n RunPriorityStartedAt(\n priority=(r.additional_metadata or {}).get(\"priority\") or \"low\",\n started_at=r.started_at or datetime.min,\n finished_at=r.finished_at or datetime.min,\n )\n for r in runs.rows\n ],\n key=lambda x: x.started_at,\n )\n\n assert len(runs_ids_started_ats) == len(run_refs)\n assert len(runs_ids_started_ats) == N\n\n for i in range(len(runs_ids_started_ats) - 
1):\n curr = runs_ids_started_ats[i]\n nxt = runs_ids_started_ats[i + 1]\n\n \"\"\"Run start times should be in order of priority\"\"\"\n assert priority_to_int(curr.priority) >= priority_to_int(nxt.priority)\n\n \"\"\"Runs should proceed one at a time\"\"\"\n assert curr.finished_at <= nxt.finished_at\n assert nxt.finished_at >= nxt.started_at\n\n \"\"\"Runs should finish after starting (this is mostly a test for engine datetime handling bugs)\"\"\"\n assert curr.finished_at >= curr.started_at\n\n\n@pytest.mark.parametrize(\n \"on_demand_worker\",\n [\n (\n [\"poetry\", \"run\", \"python\", \"examples/priority/worker.py\", \"--slots\", \"1\"],\n 8003,\n )\n ],\n indirect=True,\n)\n@pytest.mark.asyncio(loop_scope=\"session\")\nasync def test_priority_via_scheduling(\n hatchet: Hatchet, dummy_runs: None, on_demand_worker: Popen[Any]\n) -> None:\n test_run_id = str(uuid4())\n sleep_time = 3\n n = 30\n choices: list[Priority] = [\"low\", \"medium\", \"high\", \"default\"]\n run_at = datetime.now(tz=timezone.utc) + timedelta(seconds=sleep_time)\n\n versions = await asyncio.gather(\n *[\n priority_workflow.aio_schedule(\n run_at=run_at,\n options=ScheduleTriggerWorkflowOptions(\n priority=(priority_to_int(priority := choice(choices))),\n additional_metadata={\n \"priority\": priority,\n \"key\": ix,\n \"test_run_id\": test_run_id,\n },\n ),\n )\n for ix in range(n)\n ]\n )\n\n await asyncio.sleep(sleep_time * 2)\n\n workflow_id = versions[0].workflow_id\n\n attempts = 0\n\n while True:\n if attempts >= SLEEP_TIME * n * 2:\n raise TimeoutError(\"Timed out waiting for runs to finish\")\n\n attempts += 1\n await asyncio.sleep(1)\n runs = await hatchet.runs.aio_list(\n workflow_ids=[workflow_id],\n additional_metadata={\n \"test_run_id\": test_run_id,\n },\n limit=1_000,\n )\n\n if not runs.rows:\n continue\n\n if any(\n r.status in [V1TaskStatus.FAILED, V1TaskStatus.CANCELLED] for r in runs.rows\n ):\n raise ValueError(\"One or more runs failed or were cancelled\")\n\n if all(r.status == V1TaskStatus.COMPLETED for r in runs.rows):\n break\n\n runs_ids_started_ats: list[RunPriorityStartedAt] = sorted(\n [\n RunPriorityStartedAt(\n priority=(r.additional_metadata or {}).get(\"priority\") or \"low\",\n started_at=r.started_at or datetime.min,\n finished_at=r.finished_at or datetime.min,\n )\n for r in runs.rows\n ],\n key=lambda x: x.started_at,\n )\n\n assert len(runs_ids_started_ats) == len(versions)\n\n for i in range(len(runs_ids_started_ats) - 1):\n curr = runs_ids_started_ats[i]\n nxt = runs_ids_started_ats[i + 1]\n\n \"\"\"Run start times should be in order of priority\"\"\"\n assert priority_to_int(curr.priority) >= priority_to_int(nxt.priority)\n\n \"\"\"Runs should proceed one at a time\"\"\"\n assert curr.finished_at <= nxt.finished_at\n assert nxt.finished_at >= nxt.started_at\n\n \"\"\"Runs should finish after starting (this is mostly a test for engine datetime handling bugs)\"\"\"\n assert curr.finished_at >= curr.started_at\n\n\n@pytest_asyncio.fixture(loop_scope=\"session\", scope=\"function\")\nasync def crons(\n hatchet: Hatchet, dummy_runs: None\n) -> AsyncGenerator[tuple[str, str, int], None]:\n test_run_id = str(uuid4())\n choices: list[Priority] = [\"low\", \"medium\", \"high\"]\n n = 30\n\n crons = await asyncio.gather(\n *[\n hatchet.cron.aio_create(\n workflow_name=priority_workflow.name,\n cron_name=f\"{test_run_id}-cron-{i}\",\n expression=\"* * * * *\",\n input={},\n additional_metadata={\n \"trigger\": \"cron\",\n \"test_run_id\": test_run_id,\n \"priority\": (priority := 
choice(choices)),\n \"key\": str(i),\n },\n priority=(priority_to_int(priority)),\n )\n for i in range(n)\n ]\n )\n\n yield crons[0].workflow_id, test_run_id, n\n\n await asyncio.gather(*[hatchet.cron.aio_delete(cron.metadata.id) for cron in crons])\n\n\ndef time_until_next_minute() -> float:\n now = datetime.now(tz=timezone.utc)\n next_minute = (now + timedelta(minutes=1)).replace(second=0, microsecond=0)\n\n return (next_minute - now).total_seconds()\n\n\n@pytest.mark.skip(\n reason=\"Test is flaky because the first jobs that are picked up don't necessarily go in priority order\"\n)\n@pytest.mark.parametrize(\n \"on_demand_worker\",\n [\n (\n [\"poetry\", \"run\", \"python\", \"examples/priority/worker.py\", \"--slots\", \"1\"],\n 8003,\n )\n ],\n indirect=True,\n)\n@pytest.mark.asyncio(loop_scope=\"session\")\nasync def test_priority_via_cron(\n hatchet: Hatchet, crons: tuple[str, str, int], on_demand_worker: Popen[Any]\n) -> None:\n workflow_id, test_run_id, n = crons\n\n await asyncio.sleep(time_until_next_minute() + 10)\n\n attempts = 0\n\n while True:\n if attempts >= SLEEP_TIME * n * 2:\n raise TimeoutError(\"Timed out waiting for runs to finish\")\n\n attempts += 1\n await asyncio.sleep(1)\n runs = await hatchet.runs.aio_list(\n workflow_ids=[workflow_id],\n additional_metadata={\n \"test_run_id\": test_run_id,\n },\n limit=1_000,\n )\n\n if not runs.rows:\n continue\n\n if any(\n r.status in [V1TaskStatus.FAILED, V1TaskStatus.CANCELLED] for r in runs.rows\n ):\n raise ValueError(\"One or more runs failed or were cancelled\")\n\n if all(r.status == V1TaskStatus.COMPLETED for r in runs.rows):\n break\n\n runs_ids_started_ats: list[RunPriorityStartedAt] = sorted(\n [\n RunPriorityStartedAt(\n priority=(r.additional_metadata or {}).get(\"priority\") or \"low\",\n started_at=r.started_at or datetime.min,\n finished_at=r.finished_at or datetime.min,\n )\n for r in runs.rows\n ],\n key=lambda x: x.started_at,\n )\n\n assert len(runs_ids_started_ats) == n\n\n for i in range(len(runs_ids_started_ats) - 1):\n curr = runs_ids_started_ats[i]\n nxt = runs_ids_started_ats[i + 1]\n\n \"\"\"Run start times should be in order of priority\"\"\"\n assert priority_to_int(curr.priority) >= priority_to_int(nxt.priority)\n\n \"\"\"Runs should proceed one at a time\"\"\"\n assert curr.finished_at <= nxt.finished_at\n assert nxt.finished_at >= nxt.started_at\n\n \"\"\"Runs should finish after starting (this is mostly a test for engine datetime handling bugs)\"\"\"\n assert curr.finished_at >= curr.started_at\n", "source": "out/python/priority/test_priority.py", "blocks": {}, "highlights": {} diff --git a/frontend/docs/lib/generated/snips/python/streaming/async_stream.ts b/frontend/docs/lib/generated/snips/python/streaming/async_stream.ts index 0c6db6e74..b04f1f935 100644 --- a/frontend/docs/lib/generated/snips/python/streaming/async_stream.ts +++ b/frontend/docs/lib/generated/snips/python/streaming/async_stream.ts @@ -2,7 +2,7 @@ import { Snippet } from '@/lib/generated/snips/types'; const snippet: Snippet = { "language": "python", - "content": "import asyncio\n\nfrom examples.streaming.worker import streaming_workflow\n\n\nasync def main() -> None:\n ref = await streaming_workflow.aio_run_no_wait()\n await asyncio.sleep(1)\n\n stream = ref.stream()\n\n async for chunk in stream:\n print(chunk)\n\n\nif __name__ == \"__main__\":\n import asyncio\n\n asyncio.run(main())\n", + "content": "import asyncio\n\nfrom examples.streaming.worker import stream_task\nfrom 
hatchet_sdk.clients.listeners.run_event_listener import StepRunEventType\n\n\nasync def main() -> None:\n ref = await stream_task.aio_run_no_wait()\n\n async for chunk in ref.stream():\n if chunk.type == StepRunEventType.STEP_RUN_EVENT_TYPE_STREAM:\n print(chunk.payload, flush=True, end=\"\")\n\n\nif __name__ == \"__main__\":\n asyncio.run(main())\n", "source": "out/python/streaming/async_stream.py", "blocks": {}, "highlights": {} diff --git a/frontend/docs/lib/generated/snips/python/streaming/index.ts b/frontend/docs/lib/generated/snips/python/streaming/index.ts index 92b431fdd..4169e8185 100644 --- a/frontend/docs/lib/generated/snips/python/streaming/index.ts +++ b/frontend/docs/lib/generated/snips/python/streaming/index.ts @@ -1,7 +1,9 @@ import async_stream from './async_stream'; import sync_stream from './sync_stream'; +import test_streaming from './test_streaming'; import worker from './worker'; export { async_stream } export { sync_stream } +export { test_streaming } export { worker } diff --git a/frontend/docs/lib/generated/snips/python/streaming/sync_stream.ts b/frontend/docs/lib/generated/snips/python/streaming/sync_stream.ts index cd64673fb..0a5c84682 100644 --- a/frontend/docs/lib/generated/snips/python/streaming/sync_stream.ts +++ b/frontend/docs/lib/generated/snips/python/streaming/sync_stream.ts @@ -2,7 +2,7 @@ import { Snippet } from '@/lib/generated/snips/types'; const snippet: Snippet = { "language": "python", - "content": "import time\n\nfrom examples.streaming.worker import streaming_workflow\n\n\ndef main() -> None:\n ref = streaming_workflow.run_no_wait()\n time.sleep(1)\n\n stream = ref.stream()\n\n for chunk in stream:\n print(chunk)\n\n\nif __name__ == \"__main__\":\n main()\n", + "content": "import time\n\nfrom examples.streaming.worker import stream_task\n\n\ndef main() -> None:\n ref = stream_task.run_no_wait()\n time.sleep(1)\n\n stream = ref.stream()\n\n for chunk in stream:\n print(chunk)\n\n\nif __name__ == \"__main__\":\n main()\n", "source": "out/python/streaming/sync_stream.py", "blocks": {}, "highlights": {} diff --git a/frontend/docs/lib/generated/snips/python/streaming/test_streaming.ts b/frontend/docs/lib/generated/snips/python/streaming/test_streaming.ts new file mode 100644 index 000000000..0b9e6c889 --- /dev/null +++ b/frontend/docs/lib/generated/snips/python/streaming/test_streaming.ts @@ -0,0 +1,11 @@ +import { Snippet } from '@/lib/generated/snips/types'; + +const snippet: Snippet = { + "language": "python", + "content": "import asyncio\nfrom datetime import datetime, timedelta, timezone\nfrom subprocess import Popen\nfrom typing import Any\n\nimport pytest\n\nfrom examples.streaming.worker import chunks, stream_task\nfrom hatchet_sdk import Hatchet\nfrom hatchet_sdk.clients.listeners.run_event_listener import (\n StepRunEvent,\n StepRunEventType,\n)\n\n\n@pytest.mark.parametrize(\n \"on_demand_worker\",\n [\n (\n [\"poetry\", \"run\", \"python\", \"examples/streaming/worker.py\", \"--slots\", \"1\"],\n 8008,\n )\n ],\n indirect=True,\n)\n@pytest.mark.parametrize(\"execution_number\", range(1))\n@pytest.mark.asyncio(loop_scope=\"session\")\nasync def test_streaming_ordering_and_completeness(\n execution_number: int,\n hatchet: Hatchet,\n on_demand_worker: Popen[Any],\n) -> None:\n ref = await stream_task.aio_run_no_wait()\n\n ix = 0\n anna_karenina = \"\"\n\n async for chunk in ref.stream():\n if chunk.type == StepRunEventType.STEP_RUN_EVENT_TYPE_STREAM:\n assert chunks[ix] == chunk.payload\n ix += 1\n anna_karenina += chunk.payload\n\n assert 
ix == len(chunks)\n assert anna_karenina == \"\".join(chunks)\n\n await ref.aio_result()\n", + "source": "out/python/streaming/test_streaming.py", + "blocks": {}, + "highlights": {} +}; + +export default snippet; diff --git a/frontend/docs/lib/generated/snips/python/streaming/worker.ts b/frontend/docs/lib/generated/snips/python/streaming/worker.ts index 2ffefd918..0e238df66 100644 --- a/frontend/docs/lib/generated/snips/python/streaming/worker.ts +++ b/frontend/docs/lib/generated/snips/python/streaming/worker.ts @@ -2,12 +2,12 @@ import { Snippet } from '@/lib/generated/snips/types'; const snippet: Snippet = { "language": "python", - "content": "import asyncio\n\nfrom hatchet_sdk import Context, EmptyModel, Hatchet\n\nhatchet = Hatchet(debug=True)\n\n# > Streaming\n\nstreaming_workflow = hatchet.workflow(name=\"StreamingWorkflow\")\n\n\n@streaming_workflow.task()\nasync def step1(input: EmptyModel, ctx: Context) -> None:\n for i in range(10):\n await asyncio.sleep(1)\n ctx.put_stream(f\"Processing {i}\")\n\n\ndef main() -> None:\n worker = hatchet.worker(\"test-worker\", workflows=[streaming_workflow])\n worker.start()\n\n\n\nif __name__ == \"__main__\":\n main()\n", + "content": "import asyncio\nfrom datetime import datetime, timedelta, timezone\nfrom typing import Generator\n\nfrom hatchet_sdk import Context, EmptyModel, Hatchet\n\nhatchet = Hatchet(debug=False)\n\n# > Streaming\n\ncontent = \"\"\"\nHappy families are all alike; every unhappy family is unhappy in its own way.\n\nEverything was in confusion in the Oblonskys' house. The wife had discovered that the husband was carrying on an intrigue with a French girl, who had been a governess in their family, and she had announced to her husband that she could not go on living in the same house with him. This position of affairs had now lasted three days, and not only the husband and wife themselves, but all the members of their family and household, were painfully conscious of it. Every person in the house felt that there was so sense in their living together, and that the stray people brought together by chance in any inn had more in common with one another than they, the members of the family and household of the Oblonskys. The wife did not leave her own room, the husband had not been at home for three days. 
The children ran wild all over the house; the English governess quarreled with the housekeeper, and wrote to a friend asking her to look out for a new situation for her; the man-cook had walked off the day before just at dinner time; the kitchen-maid, and the coachman had given warning.\n\"\"\"\n\n\ndef create_chunks(content: str, n: int) -> Generator[str, None, None]:\n for i in range(0, len(content), n):\n yield content[i : i + n]\n\n\nchunks = list(create_chunks(content, 10))\n\n\n@hatchet.task()\nasync def stream_task(input: EmptyModel, ctx: Context) -> None:\n await asyncio.sleep(2)\n\n for chunk in chunks:\n ctx.put_stream(chunk)\n await asyncio.sleep(0.05)\n\n\ndef main() -> None:\n worker = hatchet.worker(\"test-worker\", workflows=[stream_task])\n worker.start()\n\n\n\nif __name__ == \"__main__\":\n main()\n", "source": "out/python/streaming/worker.py", "blocks": { "streaming": { - "start": 8, - "stop": 23 + "start": 10, + "stop": 39 } }, "highlights": {} diff --git a/frontend/docs/lib/generated/snips/python/timeout/test_timeout.ts b/frontend/docs/lib/generated/snips/python/timeout/test_timeout.ts index 322785a4b..c63ae84e7 100644 --- a/frontend/docs/lib/generated/snips/python/timeout/test_timeout.ts +++ b/frontend/docs/lib/generated/snips/python/timeout/test_timeout.ts @@ -2,7 +2,7 @@ import { Snippet } from '@/lib/generated/snips/types'; const snippet: Snippet = { "language": "python", - "content": "import pytest\n\nfrom examples.timeout.worker import refresh_timeout_wf, timeout_wf\n\n\n@pytest.mark.asyncio(loop_scope=\"session\")\nasync def test_execution_timeout() -> None:\n run = timeout_wf.run_no_wait()\n\n with pytest.raises(Exception, match=\"(Task exceeded timeout|TIMED_OUT)\"):\n await run.aio_result()\n\n\n@pytest.mark.asyncio(loop_scope=\"session\")\nasync def test_run_refresh_timeout() -> None:\n result = await refresh_timeout_wf.aio_run()\n\n assert result[\"refresh_task\"][\"status\"] == \"success\"\n", + "content": "import pytest\n\nfrom examples.timeout.worker import refresh_timeout_wf, timeout_wf\n\n\n@pytest.mark.asyncio(loop_scope=\"session\")\nasync def test_execution_timeout() -> None:\n run = timeout_wf.run_no_wait()\n\n with pytest.raises(\n Exception,\n match=\"(Task exceeded timeout|TIMED_OUT|Workflow run .* failed with multiple errors)\",\n ):\n await run.aio_result()\n\n\n@pytest.mark.asyncio(loop_scope=\"session\")\nasync def test_run_refresh_timeout() -> None:\n result = await refresh_timeout_wf.aio_run()\n\n assert result[\"refresh_task\"][\"status\"] == \"success\"\n", "source": "out/python/timeout/test_timeout.py", "blocks": {}, "highlights": {} diff --git a/frontend/docs/lib/generated/snips/python/waits/test_waits.ts b/frontend/docs/lib/generated/snips/python/waits/test_waits.ts index 7698dc725..2d4433df0 100644 --- a/frontend/docs/lib/generated/snips/python/waits/test_waits.ts +++ b/frontend/docs/lib/generated/snips/python/waits/test_waits.ts @@ -2,7 +2,7 @@ import { Snippet } from '@/lib/generated/snips/types'; const snippet: Snippet = { "language": "python", - "content": "import asyncio\nimport os\n\nimport pytest\n\nfrom examples.waits.worker import task_condition_workflow\nfrom hatchet_sdk import Hatchet\n\n\n@pytest.mark.skipif(\n os.getenv(\"CI\", \"false\").lower() == \"true\",\n reason=\"Skipped in CI because of unreliability\",\n)\n@pytest.mark.asyncio(loop_scope=\"session\")\nasync def test_waits(hatchet: Hatchet) -> None:\n\n ref = task_condition_workflow.run_no_wait()\n\n await asyncio.sleep(15)\n\n 
hatchet.event.push(\"skip_on_event:skip\", {})\n hatchet.event.push(\"wait_for_event:start\", {})\n\n result = await ref.aio_result()\n\n assert result[\"skip_on_event\"] == {\"skipped\": True}\n\n first_random_number = result[\"start\"][\"random_number\"]\n wait_for_event_random_number = result[\"wait_for_event\"][\"random_number\"]\n wait_for_sleep_random_number = result[\"wait_for_sleep\"][\"random_number\"]\n\n left_branch = result[\"left_branch\"]\n right_branch = result[\"right_branch\"]\n\n assert left_branch.get(\"skipped\") is True or right_branch.get(\"skipped\") is True\n\n branch_random_number = left_branch.get(\"random_number\") or right_branch.get(\n \"random_number\"\n )\n\n result_sum = result[\"sum\"][\"sum\"]\n\n assert (\n result_sum\n == first_random_number\n + wait_for_event_random_number\n + wait_for_sleep_random_number\n + branch_random_number\n )\n", + "content": "import asyncio\n\nimport pytest\n\nfrom examples.waits.worker import task_condition_workflow\nfrom hatchet_sdk import Hatchet\n\n\n@pytest.mark.asyncio(loop_scope=\"session\")\nasync def test_waits(hatchet: Hatchet) -> None:\n\n ref = task_condition_workflow.run_no_wait()\n\n await asyncio.sleep(15)\n\n hatchet.event.push(\"skip_on_event:skip\", {})\n hatchet.event.push(\"wait_for_event:start\", {})\n\n result = await ref.aio_result()\n\n assert result[\"skip_on_event\"] == {\"skipped\": True}\n\n first_random_number = result[\"start\"][\"random_number\"]\n wait_for_event_random_number = result[\"wait_for_event\"][\"random_number\"]\n wait_for_sleep_random_number = result[\"wait_for_sleep\"][\"random_number\"]\n\n left_branch = result[\"left_branch\"]\n right_branch = result[\"right_branch\"]\n\n assert left_branch.get(\"skipped\") is True or right_branch.get(\"skipped\") is True\n\n branch_random_number = left_branch.get(\"random_number\") or right_branch.get(\n \"random_number\"\n )\n\n result_sum = result[\"sum\"][\"sum\"]\n\n assert (\n result_sum\n == first_random_number\n + wait_for_event_random_number\n + wait_for_sleep_random_number\n + branch_random_number\n )\n", "source": "out/python/waits/test_waits.py", "blocks": {}, "highlights": {} diff --git a/frontend/docs/pages/blog/_meta.js b/frontend/docs/pages/blog/_meta.js index 48eb54b3f..856b3bdf4 100644 --- a/frontend/docs/pages/blog/_meta.js +++ b/frontend/docs/pages/blog/_meta.js @@ -3,7 +3,7 @@ export default { title: "Why Go is a good fit for agents", }, "warning-event-loop-blocked": { - title: "Warning: The Event Loop May Be Blocked", + title: "Warning! The Event Loop May Be Blocked", }, "fastest-postgres-inserts": { title: "The fastest Postgres inserts", diff --git a/frontend/docs/pages/blog/warning-event-loop-blocked.mdx b/frontend/docs/pages/blog/warning-event-loop-blocked.mdx index 6f121312a..0308d2844 100644 --- a/frontend/docs/pages/blog/warning-event-loop-blocked.mdx +++ b/frontend/docs/pages/blog/warning-event-loop-blocked.mdx @@ -197,6 +197,10 @@ First line of defense: look for things that are obviously blocking. API calls, d As a last resort, you can also change your tasks from being async to sync, although we don't recommend this in the majority of cases. +### Use a linter + +[Ruff](https://docs.astral.sh/ruff/), via `flake8` (for example), has an [`ASYNC` linting rule](https://docs.astral.sh/ruff/rules/#flake8-async-async) to help you catch potential issues in async code. 
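As a rough illustration of what that rule family is meant to catch (hedged: exact rule codes and behavior depend on your Ruff version and configuration, e.g. adding `"ASYNC"` to `extend-select` under `[tool.ruff.lint]`), here is the kind of blocking call inside a coroutine that an async-aware linter should flag, next to the non-blocking alternative:

```python
# Illustrative sketch only: a blocking sleep inside an `async def` stalls the
# event loop and is the sort of call Ruff's ASYNC rules are designed to flag;
# the awaited variant yields control back to the loop instead.
import asyncio
import time


async def blocking_task() -> None:
    time.sleep(2)  # blocks the event loop; an ASYNC lint rule should flag this


async def non_blocking_task() -> None:
    await asyncio.sleep(2)  # non-blocking: other coroutines can run meanwhile


if __name__ == "__main__":
    asyncio.run(non_blocking_task())
```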
+ ### Instrument your code If you've resolved all of the obvious issues but the Scary Warning ™️ is still popping up, instrumenting your code can help find the bottleneck. Hatchet's Python SDK provides [an OpenTelemetry Instrumentor](../home/opentelemetry.mdx), which allows you to easily export traces and spans from your Hatchet workers. If you have some long-running tasks (or long start times), you can use the traces to get a better sense for what might be blocking. In particular, if there are some async operations that appear to just be hanging for significantly longer durations than they should take, this is a good indication they're being blocked by something. diff --git a/frontend/docs/pages/sdks/python/_meta.js b/frontend/docs/pages/sdks/python/_meta.js index a2b187f63..575cead22 100644 --- a/frontend/docs/pages/sdks/python/_meta.js +++ b/frontend/docs/pages/sdks/python/_meta.js @@ -6,6 +6,13 @@ export default { }, }, + context: { + title: "Context", + theme: { + toc: true, + }, + }, + "feature-clients": { title: "Feature Clients", theme: { diff --git a/frontend/docs/pages/sdks/python/client.mdx b/frontend/docs/pages/sdks/python/client.mdx index c16d44840..febb40568 100644 --- a/frontend/docs/pages/sdks/python/client.mdx +++ b/frontend/docs/pages/sdks/python/client.mdx @@ -1,6 +1,6 @@ # Hatchet Python SDK Reference -This is the Python SDK reference, documenting methods available for interacting with Hatchet resources. Check out the [user guide](../../home) for an introduction to getting your first tasks running. +This is the Python SDK reference, documenting methods available for interacting with Hatchet resources. Check out the [user guide](../../home) for an introduction for getting your first tasks running. ## The Hatchet Python Client @@ -13,7 +13,7 @@ Methods: | Name | Description | | -------------- | ------------------------------------------------------------------------------------------------------------- | | `worker` | Create a Hatchet worker on which to run workflows. | -| `workflow` | Define a Hatchet workflow, which can then declare `task`s and be `run`, `scheduled`, and so on. | +| `workflow` | Define a Hatchet workflow, which can then declare `task`s and be `run`, `schedule`d, and so on. | | `task` | A decorator to transform a function into a standalone Hatchet task that runs as part of a workflow. | | `durable_task` | A decorator to transform a function into a standalone Hatchet _durable_ task that runs as part of a workflow. | @@ -73,14 +73,14 @@ Create a Hatchet worker on which to run workflows. Parameters: -| Name | Type | Description | Default | -| --------------- | ---------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------ | ---------- | -| `name` | `str` | The name of the worker. | _required_ | -| `slots` | `int` | The number of workflow slots on the worker. In other words, the number of concurrent tasks the worker can run at any point in time. | `100` | -| `durable_slots` | `int` | The number of durable workflow slots on the worker. In other words, the number of concurrent tasks the worker can run at any point in time that are durable. | `1000` | -| `labels` | `dict[str, Union[str, int]]` | A dictionary of labels to assign to the worker. For more details, view examples on affinity and worker labels. 
| `{}` | -| `workflows` | `list[BaseWorkflow[Any]]` | A list of workflows to register on the worker, as a shorthand for calling `register_workflow` on each or `register_workflows` on all of them. | `[]` | -| `lifespan` | `LifespanFn \| None` | A lifespan function to run on the worker. This function will be called when the worker is started, and can be used to perform any setup or teardown tasks. | `None` | +| Name | Type | Description | Default | +| --------------- | --------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------ | ---------- | +| `name` | `str` | The name of the worker. | _required_ | +| `slots` | `int` | The number of workflow slots on the worker. In other words, the number of concurrent tasks the worker can run at any point in time. | `100` | +| `durable_slots` | `int` | The number of durable workflow slots on the worker. In other words, the number of concurrent tasks the worker can run at any point in time that are durable. | `1000` | +| `labels` | `dict[str, str \| int] \| None` | A dictionary of labels to assign to the worker. For more details, view examples on affinity and worker labels. | `None` | +| `workflows` | `list[BaseWorkflow[Any]] \| None` | A list of workflows to register on the worker, as a shorthand for calling `register_workflow` on each or `register_workflows` on all of them. | `None` | +| `lifespan` | `LifespanFn \| None` | A lifespan function to run on the worker. This function will be called when the worker is started, and can be used to perform any setup or teardown tasks. | `None` | Returns: @@ -90,7 +90,7 @@ Returns: #### `workflow` -Define a Hatchet workflow, which can then declare `task`s and be `run`, `scheduled`, and so on. +Define a Hatchet workflow, which can then declare `task`s and be `run`, `schedule`d, and so on. Parameters: @@ -98,15 +98,15 @@ Parameters: | ------------------ | -------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------- | | `name` | `str` | The name of the workflow. | _required_ | | `description` | `str \| None` | A description for the workflow. | `None` | -| `input_validator` | `Type[TWorkflowInput] \| None` | A Pydantic model to use as a validator for the `input` to the tasks in the workflow. If no validator is provided, defaults to an `EmptyModel` under the hood. The `EmptyModel` is a Pydantic model with no fields specified, and with the `extra` config option set to `"allow"`. | `None` | -| `on_events` | `list[str]` | A list of event triggers for the workflow - events which cause the workflow to be run. | `[]` | -| `on_crons` | `list[str]` | A list of cron triggers for the workflow. | `[]` | +| `input_validator` | `type[TWorkflowInput] \| None` | A Pydantic model to use as a validator for the `input` to the tasks in the workflow. If no validator is provided, defaults to an `EmptyModel` under the hood. The `EmptyModel` is a Pydantic model with no fields specified, and with the `extra` config option set to `"allow"`. | `None` | +| `on_events` | `list[str] \| None` | A list of event triggers for the workflow - events which cause the workflow to be run. 
| `None` | +| `on_crons` | `list[str] \| None` | A list of cron triggers for the workflow. | `None` | | `version` | `str \| None` | A version for the workflow. | `None` | | `sticky` | `StickyStrategy \| None` | A sticky strategy for the workflow. | `None` | | `default_priority` | `int` | The priority of the workflow. Higher values will cause this workflow to have priority in scheduling over other, lower priority ones. | `1` | | `concurrency` | `ConcurrencyExpression \| list[ConcurrencyExpression] \| None` | A concurrency object controlling the concurrency settings for this workflow. | `None` | | `task_defaults` | `TaskDefaults` | A `TaskDefaults` object controlling the default task settings for this workflow. | `TaskDefaults()` | -| `default_filters` | `list[DefaultFilter]` | A list of filters to create with the workflow is created. Note that this is a helper to allow you to create filters "declaratively" without needing to make a separate API call once the workflow is created to create them. | `[]` | +| `default_filters` | `list[DefaultFilter] \| None` | A list of filters to create with the workflow is created. Note that this is a helper to allow you to create filters "declaratively" without needing to make a separate API call once the workflow is created to create them. | `None` | Returns: @@ -124,9 +124,9 @@ Parameters: | ----------------------- | -------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------- | | `name` | `str \| None` | The name of the task. If not specified, defaults to the name of the function being wrapped by the `task` decorator. | `None` | | `description` | `str \| None` | An optional description for the task. | `None` | -| `input_validator` | `Type[TWorkflowInput] \| None` | A Pydantic model to use as a validator for the input to the task. If no validator is provided, defaults to an `EmptyModel`. | `None` | -| `on_events` | `list[str]` | A list of event triggers for the task - events which cause the task to be run. | `[]` | -| `on_crons` | `list[str]` | A list of cron triggers for the task. | `[]` | +| `input_validator` | `type[TWorkflowInput] \| None` | A Pydantic model to use as a validator for the input to the task. If no validator is provided, defaults to an `EmptyModel`. | `None` | +| `on_events` | `list[str] \| None` | A list of event triggers for the task - events which cause the task to be run. | `None` | +| `on_crons` | `list[str] \| None` | A list of cron triggers for the task. | `None` | | `version` | `str \| None` | A version for the task. | `None` | | `sticky` | `StickyStrategy \| None` | A sticky strategy for the task. | `None` | | `default_priority` | `int` | The priority of the task. Higher values will cause this task to have priority in scheduling. | `1` | @@ -134,11 +134,11 @@ Parameters: | `schedule_timeout` | `Duration` | The maximum time allowed for scheduling the task. | `timedelta(minutes=5)` | | `execution_timeout` | `Duration` | The maximum time allowed for executing the task. | `timedelta(seconds=60)` | | `retries` | `int` | The number of times to retry the task before failing. | `0` | -| `rate_limits` | `list[RateLimit]` | A list of rate limit configurations for the task. 
| `[]` | -| `desired_worker_labels` | `dict[str, DesiredWorkerLabel]`                                 | A dictionary of desired worker labels that determine to which worker the task should be assigned.                                                                                                                       | `{}`                    | +| `rate_limits`           | `list[RateLimit] \| None`                                       | A list of rate limit configurations for the task.                                                                                                                                                                        | `None`                  | +| `desired_worker_labels` | `dict[str, DesiredWorkerLabel] \| None`                         | A dictionary of desired worker labels that determine to which worker the task should be assigned.                                                                                                                       | `None`                  | | `backoff_factor`        | `float \| None`                                                 | The backoff factor for controlling exponential backoff in retries.                                                                                                                                                      | `None`                  | | `backoff_max_seconds`   | `int \| None`                                                   | The maximum number of seconds to allow retries with exponential backoff to continue.                                                                                                                                    | `None`                  | -| `default_filters`       | `list[DefaultFilter]`                                           | A list of filters to create with the task is created. Note that this is a helper to allow you to create filters "declaratively" without needing to make a separate API call once the task is created to create them.   | `[]`                    | +| `default_filters`       | `list[DefaultFilter] \| None`                                   | A list of filters to create when the task is created. Note that this is a helper to allow you to create filters "declaratively" without needing to make a separate API call once the task is created to create them.   | `None`                  | Returns: @@ -156,9 +156,9 @@ Parameters: | ----------------------- | -------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------- | | `name`                  | `str \| None`                                                   | The name of the task. If not specified, defaults to the name of the function being wrapped by the `task` decorator.                                                                                                     | `None`                  | | `description`           | `str \| None`                                                   | An optional description for the task.                                                                                                                                                                                   | `None`                  | -| `input_validator`       | `Type[TWorkflowInput] \| None`                                  | A Pydantic model to use as a validator for the input to the task. If no validator is provided, defaults to an `EmptyModel`.                                                                                             | `None`                  | -| `on_events`             | `list[str]`                                                     | A list of event triggers for the task - events which cause the task to be run.                                                                                                                                          | `[]`                    | -| `on_crons`              | `list[str]`                                                     | A list of cron triggers for the task.                                                                                                                                                                                   | `[]`                    | +| `input_validator`       | `type[TWorkflowInput] \| None`                                  | A Pydantic model to use as a validator for the input to the task. If no validator is provided, defaults to an `EmptyModel`.                                                                                             | `None`                  | +| `on_events`             | `list[str] \| None`                                             | A list of event triggers for the task - events which cause the task to be run.                                                                                                                                          | `None`                  | +| `on_crons`              | `list[str] \| None`                                             | A list of cron triggers for the task.                                                                                                                                                                                   | `None`                  | | `version`               | `str \| None`                                                   | A version for the task.                                                                                                                                                                                                 | `None`                  | | `sticky`                | `StickyStrategy \| None`                                        | A sticky strategy for the task.                                                                                                                                                                                         | `None`                  | | `default_priority`      | `int`                                                           | The priority of the task. Higher values will cause this task to have priority in scheduling.                                                                                                                            | `1`                     | @@ -166,11 +166,11 @@ Parameters: | `schedule_timeout`      | `Duration`                                                      | The maximum time allowed for scheduling the task.                                                                                                                                                                        | `timedelta(minutes=5)`  | | `execution_timeout`     | `Duration`                                                      | The maximum time allowed for executing the task.                                                                                                                                                                         | `timedelta(seconds=60)` | | `retries`               | `int`                                                           | The number of times to retry the task before failing.                                                                                                                                                                    | `0`                     | -| `rate_limits`           | `list[RateLimit]`                                               | A list of rate limit configurations for the task. 
| `[]` | -| `desired_worker_labels` | `dict[str, DesiredWorkerLabel]`                                 | A dictionary of desired worker labels that determine to which worker the task should be assigned.                                                                                                                       | `{}`                    | +| `rate_limits`           | `list[RateLimit] \| None`                                       | A list of rate limit configurations for the task.                                                                                                                                                                        | `None`                  | +| `desired_worker_labels` | `dict[str, DesiredWorkerLabel] \| None`                         | A dictionary of desired worker labels that determine to which worker the task should be assigned.                                                                                                                       | `None`                  | | `backoff_factor`        | `float \| None`                                                 | The backoff factor for controlling exponential backoff in retries.                                                                                                                                                      | `None`                  | | `backoff_max_seconds`   | `int \| None`                                                   | The maximum number of seconds to allow retries with exponential backoff to continue.                                                                                                                                    | `None`                  | -| `default_filters`       | `list[DefaultFilter]`                                           | A list of filters to create with the task is created. Note that this is a helper to allow you to create filters "declaratively" without needing to make a separate API call once the task is created to create them.   | `[]`                    | +| `default_filters`       | `list[DefaultFilter] \| None`                                   | A list of filters to create when the task is created. Note that this is a helper to allow you to create filters "declaratively" without needing to make a separate API call once the task is created to create them.   | `None`                  | Returns: diff --git a/frontend/docs/pages/sdks/python/context.mdx b/frontend/docs/pages/sdks/python/context.mdx new file mode 100644 index 000000000..9b161b919 --- /dev/null +++ b/frontend/docs/pages/sdks/python/context.mdx @@ -0,0 +1,330 @@ +# Context + +The Hatchet Context class provides helper methods and useful data to tasks at runtime. It is passed as the second argument to all tasks and durable tasks. + +There are two types of context classes you'll encounter: + +- `Context`: The standard context for regular tasks with methods for logging, task output retrieval, cancellation, and more. +- `DurableContext`: An extended context for durable tasks that includes additional methods for durable execution like `aio_wait_for` and `aio_sleep_for`. + +## Context + +### Methods + +| Name                   | Description                                                                                                                                                                | +| ---------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `was_skipped`          | Check if a given task was skipped. You can read about skipping in [the docs](../../home/conditional-workflows#skip_if).                                                   | +| `task_output`          | Get the output of a parent task in a DAG.                                                                                                                                  | +| `cancel`               | Cancel the current task run. This will call the Hatchet API to cancel the step run and set the exit flag to True.                                                          | +| `aio_cancel`           | Cancel the current task run. This will call the Hatchet API to cancel the step run and set the exit flag to True.                                                          | +| `done`                 | Check if the current task run has been cancelled.                                                                                                                          | +| `log`                  | Log a line to the Hatchet API. This will send the log line to the Hatchet API and return immediately.                                                                      | +| `release_slot`         | Manually release the slot for the current step run to free up a slot on the worker. Note that this is an advanced feature and should be used with caution.                 | +| `put_stream`           | Put a stream event to the Hatchet API. This will send the data to the Hatchet API and return immediately. You can then subscribe to the stream from a separate consumer.   | +| `refresh_timeout`      | Refresh the timeout for the current task run. You can read about refreshing timeouts in [the docs](../../home/timeouts#refreshing-timeouts). 
| +| `fetch_task_run_error` | A helper intended to be used in an on-failure step to retrieve the error that occurred in a specific upstream task run. | + +### Attributes + +#### `was_triggered_by_event` + +A property that indicates whether the workflow was triggered by an event. + +Returns: + +| Type | Description | +| ------ | ---------------------------------------------------------------- | +| `bool` | True if the workflow was triggered by an event, False otherwise. | + +#### `workflow_input` + +The input to the workflow, as a dictionary. It's recommended to use the `input` parameter to the task (the first argument passed into the task at runtime) instead of this property. + +Returns: + +| Type | Description | +| ------------------------- | -------------------------- | +| `JSONSerializableMapping` | The input to the workflow. | + +#### `lifespan` + +The worker lifespan, if it exists. You can read about lifespans in [the docs](../../home/lifespans). + +**Note: You'll need to cast the return type of this property to the type returned by your lifespan generator.** + +#### `workflow_run_id` + +The id of the current workflow run. + +Returns: + +| Type | Description | +| ----- | ----------------------------------- | +| `str` | The id of the current workflow run. | + +#### `retry_count` + +The retry count of the current task run, which corresponds to the number of times the task has been retried. + +Returns: + +| Type | Description | +| ----- | ---------------------------------------- | +| `int` | The retry count of the current task run. | + +#### `attempt_number` + +The attempt number of the current task run, which corresponds to the number of times the task has been attempted, including the initial attempt. This is one more than the retry count. + +Returns: + +| Type | Description | +| ----- | ------------------------------------------- | +| `int` | The attempt number of the current task run. | + +#### `additional_metadata` + +The additional metadata sent with the current task run. + +Returns: + +| Type | Description | +| --------------------------------- | --------------------------------------------------------------------------------------------------- | +| `JSONSerializableMapping \| None` | The additional metadata sent with the current task run, or None if no additional metadata was sent. | + +#### `parent_workflow_run_id` + +The parent workflow run id of the current task run, if it exists. This is useful for knowing which workflow run spawned this run as a child. + +Returns: + +| Type | Description | +| ------------- | --------------------------------------------------------------------------------- | +| `str \| None` | The parent workflow run id of the current task run, or None if it does not exist. | + +#### `priority` + +The priority that the current task was run with. + +Returns: + +| Type | Description | +| ------------- | --------------------------------------------------------------------- | +| `int \| None` | The priority of the current task run, or None if no priority was set. | + +#### `workflow_id` + +The id of the workflow that this task belongs to. + +Returns: + +| Type | Description | +| ------------- | ------------------------------------------------- | +| `str \| None` | The id of the workflow that this task belongs to. | + +#### `workflow_version_id` + +The id of the workflow version that this task belongs to. 
+ +Returns: + +| Type | Description | +| ------------- | --------------------------------------------------------- | +| `str \| None` | The id of the workflow version that this task belongs to. | + +#### `task_run_errors` + +A helper intended to be used in an on-failure step to retrieve the errors that occurred in upstream task runs. + +Returns: + +| Type | Description | +| ---------------- | -------------------------------------------------------- | +| `dict[str, str]` | A dictionary mapping task names to their error messages. | + +### Functions + +#### `was_skipped` + +Check if a given task was skipped. You can read about skipping in [the docs](../../home/conditional-workflows#skip_if). + +Parameters: + +| Name | Type | Description | Default | +| ------ | ------------------------- | ------------------------------------------------- | ---------- | +| `task` | `Task[TWorkflowInput, R]` | The task to check the status of (skipped or not). | _required_ | + +Returns: + +| Type | Description | +| ------ | ---------------------------------------------- | +| `bool` | True if the task was skipped, False otherwise. | + +#### `task_output` + +Get the output of a parent task in a DAG. + +Parameters: + +| Name | Type | Description | Default | +| ------ | ------------------------- | ------------------------------------------- | ---------- | +| `task` | `Task[TWorkflowInput, R]` | The task whose output you want to retrieve. | _required_ | + +Returns: + +| Type | Description | +| ---- | ----------------------------------------------------------------------- | +| `R` | The output of the parent task, validated against the task's validators. | + +Raises: + +| Type | Description | +| ------------ | ------------------------------------------------------------------------ | +| `ValueError` | If the task was skipped or if the step output for the task is not found. | + +#### `cancel` + +Cancel the current task run. This will call the Hatchet API to cancel the step run and set the exit flag to True. + +Returns: + +| Type | Description | +| ------ | ----------- | +| `None` | None | + +#### `aio_cancel` + +Cancel the current task run. This will call the Hatchet API to cancel the step run and set the exit flag to True. + +Returns: + +| Type | Description | +| ------ | ----------- | +| `None` | None | + +#### `done` + +Check if the current task run has been cancelled. + +Returns: + +| Type | Description | +| ------ | --------------------------------------------------------- | +| `bool` | True if the task run has been cancelled, False otherwise. | + +#### `log` + +Log a line to the Hatchet API. This will send the log line to the Hatchet API and return immediately. + +Parameters: + +| Name | Type | Description | Default | +| ---------------- | -------------------------------- | --------------------------------------------------------------------- | ---------- | +| `line` | `str \| JSONSerializableMapping` | The line to log. Can be a string or a JSON serializable mapping. | _required_ | +| `raise_on_error` | `bool` | If True, will raise an exception if the log fails. Defaults to False. | `False` | + +Returns: + +| Type | Description | +| ------ | ----------- | +| `None` | None | + +#### `release_slot` + +Manually release the slot for the current step run to free up a slot on the worker. Note that this is an advanced feature and should be used with caution. + +Returns: + +| Type | Description | +| ------ | ----------- | +| `None` | None | + +#### `put_stream` + +Put a stream event to the Hatchet API. 
This will send the data to the Hatchet API and return immediately. You can then subscribe to the stream from a separate consumer. + +Parameters: + +| Name | Type | Description | Default | +| ------ | -------------- | -------------------------------------------------------------- | ---------- | +| `data` | `str \| bytes` | The data to send to the Hatchet API. Can be a string or bytes. | _required_ | + +Returns: + +| Type | Description | +| ------ | ----------- | +| `None` | None | + +#### `refresh_timeout` + +Refresh the timeout for the current task run. You can read about refreshing timeouts in [the docs](../../home/timeouts#refreshing-timeouts). + +Parameters: + +| Name | Type | Description | Default | +| -------------- | ------------------ | -------------------------------------------------------------------------------------------------- | ---------- | +| `increment_by` | `str \| timedelta` | The amount of time to increment the timeout by. Can be a string (e.g. "5m") or a timedelta object. | _required_ | + +Returns: + +| Type | Description | +| ------ | ----------- | +| `None` | None | + +#### `fetch_task_run_error` + +A helper intended to be used in an on-failure step to retrieve the error that occurred in a specific upstream task run. + +Parameters: + +| Name | Type | Description | Default | +| ------ | ------------------------- | ------------------------------------------ | ---------- | +| `task` | `Task[TWorkflowInput, R]` | The task whose error you want to retrieve. | _required_ | + +Returns: + +| Type | Description | +| ------------- | ---------------------------------------------------------------- | +| `str \| None` | The error message of the task run, or None if no error occurred. | + +## DurableContext + +Bases: `Context` + +### Methods + +| Name | Description | +| --------------- | -------------------------------------------------------------------------------------------------------------------------- | +| `aio_wait_for` | Durably wait for either a sleep or an event. | +| `aio_sleep_for` | Lightweight wrapper for durable sleep. Allows for shorthand usage of `ctx.aio_wait_for` when specifying a sleep condition. | + +### Functions + +#### `aio_wait_for` + +Durably wait for either a sleep or an event. + +Parameters: + +| Name | Type | Description | Default | +| ------------- | -------------------------------------- | -------------------------------------------------------------------------------------------- | ---------- | +| `signal_key` | `str` | The key to use for the durable event. This is used to identify the event in the Hatchet API. | _required_ | +| `*conditions` | `SleepCondition \| UserEventCondition` | The conditions to wait for. Can be a SleepCondition or UserEventCondition. | `()` | + +Returns: + +| Type | Description | +| ---------------- | ------------------------------------------------ | +| `dict[str, Any]` | A dictionary containing the results of the wait. | + +Raises: + +| Type | Description | +| ------------ | ----------------------------------------------- | +| `ValueError` | If the durable event listener is not available. | + +#### `aio_sleep_for` + +Lightweight wrapper for durable sleep. Allows for shorthand usage of `ctx.aio_wait_for` when specifying a sleep condition. + +For more complicated conditions, use `ctx.aio_wait_for` directly. 
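As a rough illustration of how these pieces fit together, the sketch below shows a regular task reading a parent's output with `ctx.task_output` and logging with `ctx.log`, plus a durable task that sleeps with `ctx.aio_sleep_for`. The workflow and task names are invented for this example, and the exact import locations for `Context` and `DurableContext` (as well as the precise `aio_sleep_for` signature) should be checked against the SDK version you have installed.

```python
from datetime import timedelta

from hatchet_sdk import Context, DurableContext, EmptyModel, Hatchet

hatchet = Hatchet()

# Hypothetical workflow used only to demonstrate the Context helpers above.
wf = hatchet.workflow(name="context-example")


@wf.task()
def fetch(input: EmptyModel, ctx: Context) -> dict:
    ctx.log("fetching data")  # ships a log line to the Hatchet API
    return {"value": 42}


@wf.task(parents=[fetch])
def process(input: EmptyModel, ctx: Context) -> dict:
    upstream = ctx.task_output(fetch)  # read the parent task's output in the DAG
    return {"doubled": upstream["value"] * 2}


@wf.durable_task()
async def wait_then_finish(input: EmptyModel, ctx: DurableContext) -> dict:
    # Durable sleep: the wait is tracked by Hatchet rather than held in worker memory.
    await ctx.aio_sleep_for(timedelta(seconds=30))
    return {"done": True}
```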
diff --git a/frontend/docs/pages/sdks/python/feature-clients/filters.mdx b/frontend/docs/pages/sdks/python/feature-clients/filters.mdx index c1a001cf9..f5eac72cc 100644 --- a/frontend/docs/pages/sdks/python/feature-clients/filters.mdx +++ b/frontend/docs/pages/sdks/python/feature-clients/filters.mdx @@ -27,12 +27,12 @@ Create a new filter. Parameters: -| Name | Type | Description | Default | -| ------------- | ------------------------- | ---------------------------------------------------- | ---------- | -| `workflow_id` | `str` | The ID of the workflow to associate with the filter. | _required_ | -| `expression` | `str` | The expression to evaluate for the filter. | _required_ | -| `scope` | `str` | The scope for the filter. | _required_ | -| `payload` | `JSONSerializableMapping` | The payload to send with the filter. | `{}` | +| Name | Type | Description | Default | +| ------------- | --------------------------------- | ---------------------------------------------------- | ---------- | +| `workflow_id` | `str` | The ID of the workflow to associate with the filter. | _required_ | +| `expression` | `str` | The expression to evaluate for the filter. | _required_ | +| `scope` | `str` | The scope for the filter. | _required_ | +| `payload` | `JSONSerializableMapping \| None` | The payload to send with the filter. | `None` | Returns: @@ -113,12 +113,12 @@ Create a new filter. Parameters: -| Name | Type | Description | Default | -| ------------- | ------------------------- | ---------------------------------------------------- | ---------- | -| `workflow_id` | `str` | The ID of the workflow to associate with the filter. | _required_ | -| `expression` | `str` | The expression to evaluate for the filter. | _required_ | -| `scope` | `str` | The scope for the filter. | _required_ | -| `payload` | `JSONSerializableMapping` | The payload to send with the filter. | `{}` | +| Name | Type | Description | Default | +| ------------- | --------------------------------- | ---------------------------------------------------- | ---------- | +| `workflow_id` | `str` | The ID of the workflow to associate with the filter. | _required_ | +| `expression` | `str` | The expression to evaluate for the filter. | _required_ | +| `scope` | `str` | The scope for the filter. | _required_ | +| `payload` | `JSONSerializableMapping \| None` | The payload to send with the filter. | `None` | Returns: diff --git a/frontend/docs/pages/sdks/python/feature-clients/runs.mdx b/frontend/docs/pages/sdks/python/feature-clients/runs.mdx index 9b80883e1..7de5e6db6 100644 --- a/frontend/docs/pages/sdks/python/feature-clients/runs.mdx +++ b/frontend/docs/pages/sdks/python/feature-clients/runs.mdx @@ -154,12 +154,12 @@ IMPORTANT: It's preferable to use `Workflow.run` (and similar) to trigger workfl Parameters: -| Name | Type | Description | Default | -| --------------------- | ------------------------- | ----------------------------------------------------- | ---------- | -| `workflow_name` | `str` | The name of the workflow to trigger. | _required_ | -| `input` | `JSONSerializableMapping` | The input data for the workflow run. | _required_ | -| `additional_metadata` | `JSONSerializableMapping` | Additional metadata associated with the workflow run. | `{}` | -| `priority` | `int \| None` | The priority of the workflow run. 
| `None` | +| Name | Type | Description | Default | +| --------------------- | --------------------------------- | ----------------------------------------------------- | ---------- | +| `workflow_name` | `str` | The name of the workflow to trigger. | _required_ | +| `input` | `JSONSerializableMapping` | The input data for the workflow run. | _required_ | +| `additional_metadata` | `JSONSerializableMapping \| None` | Additional metadata associated with the workflow run. | `None` | +| `priority` | `int \| None` | The priority of the workflow run. | `None` | Returns: @@ -175,12 +175,12 @@ IMPORTANT: It's preferable to use `Workflow.run` (and similar) to trigger workfl Parameters: -| Name | Type | Description | Default | -| --------------------- | ------------------------- | ----------------------------------------------------- | ---------- | -| `workflow_name` | `str` | The name of the workflow to trigger. | _required_ | -| `input` | `JSONSerializableMapping` | The input data for the workflow run. | _required_ | -| `additional_metadata` | `JSONSerializableMapping` | Additional metadata associated with the workflow run. | `{}` | -| `priority` | `int \| None` | The priority of the workflow run. | `None` | +| Name | Type | Description | Default | +| --------------------- | --------------------------------- | ----------------------------------------------------- | ---------- | +| `workflow_name` | `str` | The name of the workflow to trigger. | _required_ | +| `input` | `JSONSerializableMapping` | The input data for the workflow run. | _required_ | +| `additional_metadata` | `JSONSerializableMapping \| None` | Additional metadata associated with the workflow run. | `None` | +| `priority` | `int \| None` | The priority of the workflow run. | `None` | Returns: diff --git a/frontend/docs/pages/sdks/python/feature-clients/scheduled.mdx b/frontend/docs/pages/sdks/python/feature-clients/scheduled.mdx index ddfd187eb..8146aa9ce 100644 --- a/frontend/docs/pages/sdks/python/feature-clients/scheduled.mdx +++ b/frontend/docs/pages/sdks/python/feature-clients/scheduled.mdx @@ -78,16 +78,16 @@ Retrieves a list of scheduled workflows based on provided filters. Parameters: -| Name | Type | Description | Default | -| ------------------------ | ------------------------------------------ | ---------------------------------------------------- | ------- | -| `offset` | `int \| None` | The offset to use in pagination. | `None` | -| `limit` | `int \| None` | The maximum number of scheduled workflows to return. | `None` | -| `workflow_id` | `str \| None` | The ID of the workflow to filter by. | `None` | -| `parent_workflow_run_id` | `str \| None` | The ID of the parent workflow run to filter by. | `None` | -| `statuses` | `list[ScheduledRunStatus] \| None` | A list of statuses to filter by. | `None` | -| `additional_metadata` | `Optional[JSONSerializableMapping]` | Additional metadata to filter by. | `None` | -| `order_by_field` | `Optional[ScheduledWorkflowsOrderByField]` | The field to order the results by. | `None` | -| `order_by_direction` | `Optional[WorkflowRunOrderByDirection]` | The direction to order the results by. | `None` | +| Name | Type | Description | Default | +| ------------------------ | ---------------------------------------- | ---------------------------------------------------- | ------- | +| `offset` | `int \| None` | The offset to use in pagination. | `None` | +| `limit` | `int \| None` | The maximum number of scheduled workflows to return. 
| `None` | +| `workflow_id` | `str \| None` | The ID of the workflow to filter by. | `None` | +| `parent_workflow_run_id` | `str \| None` | The ID of the parent workflow run to filter by. | `None` | +| `statuses` | `list[ScheduledRunStatus] \| None` | A list of statuses to filter by. | `None` | +| `additional_metadata` | `JSONSerializableMapping \| None` | Additional metadata to filter by. | `None` | +| `order_by_field` | `ScheduledWorkflowsOrderByField \| None` | The field to order the results by. | `None` | +| `order_by_direction` | `WorkflowRunOrderByDirection \| None` | The direction to order the results by. | `None` | Returns: @@ -154,16 +154,16 @@ Retrieves a list of scheduled workflows based on provided filters. Parameters: -| Name | Type | Description | Default | -| ------------------------ | ------------------------------------------ | ---------------------------------------------------- | ------- | -| `offset` | `int \| None` | The offset to use in pagination. | `None` | -| `limit` | `int \| None` | The maximum number of scheduled workflows to return. | `None` | -| `workflow_id` | `str \| None` | The ID of the workflow to filter by. | `None` | -| `parent_workflow_run_id` | `str \| None` | The ID of the parent workflow run to filter by. | `None` | -| `statuses` | `list[ScheduledRunStatus] \| None` | A list of statuses to filter by. | `None` | -| `additional_metadata` | `Optional[JSONSerializableMapping]` | Additional metadata to filter by. | `None` | -| `order_by_field` | `Optional[ScheduledWorkflowsOrderByField]` | The field to order the results by. | `None` | -| `order_by_direction` | `Optional[WorkflowRunOrderByDirection]` | The direction to order the results by. | `None` | +| Name | Type | Description | Default | +| ------------------------ | ---------------------------------------- | ---------------------------------------------------- | ------- | +| `offset` | `int \| None` | The offset to use in pagination. | `None` | +| `limit` | `int \| None` | The maximum number of scheduled workflows to return. | `None` | +| `workflow_id` | `str \| None` | The ID of the workflow to filter by. | `None` | +| `parent_workflow_run_id` | `str \| None` | The ID of the parent workflow run to filter by. | `None` | +| `statuses` | `list[ScheduledRunStatus] \| None` | A list of statuses to filter by. | `None` | +| `additional_metadata` | `JSONSerializableMapping \| None` | Additional metadata to filter by. | `None` | +| `order_by_field` | `ScheduledWorkflowsOrderByField \| None` | The field to order the results by. | `None` | +| `order_by_direction` | `WorkflowRunOrderByDirection \| None` | The direction to order the results by. | `None` | Returns: diff --git a/frontend/docs/pages/sdks/python/runnables.mdx b/frontend/docs/pages/sdks/python/runnables.mdx index f72bc03f3..84934dcaf 100644 --- a/frontend/docs/pages/sdks/python/runnables.mdx +++ b/frontend/docs/pages/sdks/python/runnables.mdx @@ -44,29 +44,29 @@ Tasks within workflows can be defined with `@workflow.task()` or `@workflow.dura Methods: -| Name | Description | -| ---------------------- | -------------------------------------------------------------------------------------------------------------------------- | -| `task` | A decorator to transform a function into a Hatchet task that runs as part of a workflow. | -| `durable_task` | A decorator to transform a function into a durable Hatchet task that runs as part of a workflow. 
| -| `on_failure_task` | A decorator to transform a function into a Hatchet on-failure task that runs as the last step in a workflow with failures. | -| `on_success_task` | A decorator to transform a function into a Hatchet on-success task that runs as the last step in a successful workflow. | -| `run` | Run the workflow synchronously and wait for it to complete. | -| `aio_run` | Run the workflow asynchronously and wait for it to complete. | -| `run_no_wait` | Synchronously trigger a workflow run without waiting for it to complete. | -| `aio_run_no_wait` | Asynchronously trigger a workflow run without waiting for it to complete. | -| `run_many` | Run a workflow in bulk and wait for all runs to complete. | -| `aio_run_many` | Run a workflow in bulk and wait for all runs to complete. | -| `run_many_no_wait` | Run a workflow in bulk without waiting for all runs to complete. | -| `aio_run_many_no_wait` | Run a workflow in bulk without waiting for all runs to complete. | -| `schedule` | Schedule a workflow to run at a specific time. | -| `aio_schedule` | Schedule a workflow to run at a specific time. | -| `create_cron` | Create a cron job for the workflow. | -| `aio_create_cron` | Create a cron job for the workflow. | -| `create_bulk_run_item` | Create a bulk run item for the workflow. Intended for use with `run_many` methods. | -| `list_runs` | List runs of the workflow. | -| `aio_list_runs` | List runs of the workflow. | -| `create_filter` | Create a new filter. | -| `aio_create_filter` | Create a new filter. | +| Name | Description | +| ---------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------ | +| `task` | A decorator to transform a function into a Hatchet task that runs as part of a workflow. | +| `durable_task` | A decorator to transform a function into a durable Hatchet task that runs as part of a workflow. | +| `on_failure_task` | A decorator to transform a function into a Hatchet on-failure task that runs as the last step in a workflow that had at least one task fail. | +| `on_success_task` | A decorator to transform a function into a Hatchet on-success task that runs as the last step in a workflow that had all upstream tasks succeed. | +| `run` | Run the workflow synchronously and wait for it to complete. | +| `aio_run` | Run the workflow asynchronously and wait for it to complete. | +| `run_no_wait` | Synchronously trigger a workflow run without waiting for it to complete. | +| `aio_run_no_wait` | Asynchronously trigger a workflow run without waiting for it to complete. | +| `run_many` | Run a workflow in bulk and wait for all runs to complete. | +| `aio_run_many` | Run a workflow in bulk and wait for all runs to complete. | +| `run_many_no_wait` | Run a workflow in bulk without waiting for all runs to complete. | +| `aio_run_many_no_wait` | Run a workflow in bulk without waiting for all runs to complete. | +| `schedule` | Schedule a workflow to run at a specific time. | +| `aio_schedule` | Schedule a workflow to run at a specific time. | +| `create_cron` | Create a cron job for the workflow. | +| `aio_create_cron` | Create a cron job for the workflow. | +| `create_bulk_run_item` | Create a bulk run item for the workflow. This is intended to be used in conjunction with the various `run_many` methods. | +| `list_runs` | List runs of the workflow. | +| `aio_list_runs` | List runs of the workflow. | +| `create_filter` | Create a new filter. 
| +| `aio_create_filter` | Create a new filter. | ### Attributes @@ -100,21 +100,21 @@ A decorator to transform a function into a Hatchet task that runs as part of a w Parameters: -| Name | Type | Description | Default | -| ----------------------- | --------------------------------- | ---------------------------------------------------------------------- | ----------------------- | -| `name` | `str \| None` | The name of the task. Defaults to the name of the function. | `None` | -| `schedule_timeout` | `Duration` | The maximum time to wait for the task to be scheduled. | `timedelta(minutes=5)` | -| `execution_timeout` | `Duration` | The maximum time to wait for the task to complete. | `timedelta(seconds=60)` | -| `parents` | `list[Task[TWorkflowInput, Any]]` | A list of tasks that are parents of the task. | `[]` | -| `retries` | `int` | The number of times to retry the task before failing. | `0` | -| `rate_limits` | `list[RateLimit]` | A list of rate limit configurations for the task. | `[]` | -| `desired_worker_labels` | `dict[str, DesiredWorkerLabel]` | A dictionary determining worker assignment. | `{}` | -| `backoff_factor` | `float \| None` | The backoff factor for exponential backoff in retries. | `None` | -| `backoff_max_seconds` | `int \| None` | The maximum number of seconds for retries with exponential backoff. | `None` | -| `concurrency` | `list[ConcurrencyExpression]` | A list of concurrency expressions for the task. | `[]` | -| `wait_for` | `list[Condition \| OrGroup]` | A list of conditions that must be met before the task can run. | `[]` | -| `skip_if` | `list[Condition \| OrGroup]` | A list of conditions that, if met, will cause the task to be skipped. | `[]` | -| `cancel_if` | `list[Condition \| OrGroup]` | A list of conditions that, if met, will cause the task to be canceled. | `[]` | +| Name | Type | Description | Default | +| ----------------------- | ----------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------- | +| `name` | `str \| None` | The name of the task. If not specified, defaults to the name of the function being wrapped by the `task` decorator. | `None` | +| `schedule_timeout` | `Duration` | The maximum time to wait for the task to be scheduled. The run will be canceled if the task does not begin within this time. | `timedelta(minutes=5)` | +| `execution_timeout` | `Duration` | The maximum time to wait for the task to complete. The run will be canceled if the task does not complete within this time. | `timedelta(seconds=60)` | +| `parents` | `list[Task[TWorkflowInput, Any]] \| None` | A list of tasks that are parents of the task. Note: Parents must be defined before their children. | `None` | +| `retries` | `int` | The number of times to retry the task before failing. | `0` | +| `rate_limits` | `list[RateLimit] \| None` | A list of rate limit configurations for the task. | `None` | +| `desired_worker_labels` | `dict[str, DesiredWorkerLabel] \| None` | A dictionary of desired worker labels that determine to which worker the task should be assigned. See documentation and examples on affinity and worker labels for more details. | `None` | +| `backoff_factor` | `float \| None` | The backoff factor for controlling exponential backoff in retries. | `None` | +| `backoff_max_seconds` | `int \| None` | The maximum number of seconds to allow retries with exponential backoff to continue. 
| `None` | +| `concurrency` | `list[ConcurrencyExpression] \| None` | A list of concurrency expressions for the task. | `None` | +| `wait_for` | `list[Condition \| OrGroup] \| None` | A list of conditions that must be met before the task can run. | `None` | +| `skip_if` | `list[Condition \| OrGroup] \| None` | A list of conditions that, if met, will cause the task to be skipped. | `None` | +| `cancel_if` | `list[Condition \| OrGroup] \| None` | A list of conditions that, if met, will cause the task to be canceled. | `None` | Returns: @@ -126,25 +126,27 @@ Returns: A decorator to transform a function into a durable Hatchet task that runs as part of a workflow. -**IMPORTANT:** This decorator creates a _durable_ task, which works using Hatchet's durable execution capabilities. +**IMPORTANT:** This decorator creates a _durable_ task, which works using Hatchet's durable execution capabilities. This is an advanced feature of Hatchet. + +See the Hatchet docs for more information on durable execution to decide if this is right for you. Parameters: -| Name | Type | Description | Default | -| ----------------------- | --------------------------------- | ---------------------------------------------------------------------- | ----------------------- | -| `name` | `str \| None` | The name of the task. Defaults to the name of the function. | `None` | -| `schedule_timeout` | `Duration` | The maximum time to wait for the task to be scheduled. | `timedelta(minutes=5)` | -| `execution_timeout` | `Duration` | The maximum time to wait for the task to complete. | `timedelta(seconds=60)` | -| `parents` | `list[Task[TWorkflowInput, Any]]` | A list of tasks that are parents of the task. | `[]` | -| `retries` | `int` | The number of times to retry the task before failing. | `0` | -| `rate_limits` | `list[RateLimit]` | A list of rate limit configurations for the task. | `[]` | -| `desired_worker_labels` | `dict[str, DesiredWorkerLabel]` | A dictionary determining worker assignment. | `{}` | -| `backoff_factor` | `float \| None` | The backoff factor for exponential backoff in retries. | `None` | -| `backoff_max_seconds` | `int \| None` | The maximum number of seconds for retries with exponential backoff. | `None` | -| `concurrency` | `list[ConcurrencyExpression]` | A list of concurrency expressions for the task. | `[]` | -| `wait_for` | `list[Condition \| OrGroup]` | A list of conditions that must be met before the task can run. | `[]` | -| `skip_if` | `list[Condition \| OrGroup]` | A list of conditions that, if met, will cause the task to be skipped. | `[]` | -| `cancel_if` | `list[Condition \| OrGroup]` | A list of conditions that, if met, will cause the task to be canceled. | `[]` | +| Name | Type | Description | Default | +| ----------------------- | ----------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------- | +| `name` | `str \| None` | The name of the task. If not specified, defaults to the name of the function being wrapped by the `task` decorator. | `None` | +| `schedule_timeout` | `Duration` | The maximum time to wait for the task to be scheduled. The run will be canceled if the task does not begin within this time. | `timedelta(minutes=5)` | +| `execution_timeout` | `Duration` | The maximum time to wait for the task to complete. The run will be canceled if the task does not complete within this time. 
| `timedelta(seconds=60)` | +| `parents` | `list[Task[TWorkflowInput, Any]] \| None` | A list of tasks that are parents of the task. Note: Parents must be defined before their children. | `None` | +| `retries` | `int` | The number of times to retry the task before failing. | `0` | +| `rate_limits` | `list[RateLimit] \| None` | A list of rate limit configurations for the task. | `None` | +| `desired_worker_labels` | `dict[str, DesiredWorkerLabel] \| None` | A dictionary of desired worker labels that determine to which worker the task should be assigned. See documentation and examples on affinity and worker labels for more details. | `None` | +| `backoff_factor` | `float \| None` | The backoff factor for controlling exponential backoff in retries. | `None` | +| `backoff_max_seconds` | `int \| None` | The maximum number of seconds to allow retries with exponential backoff to continue. | `None` | +| `concurrency` | `list[ConcurrencyExpression] \| None` | A list of concurrency expressions for the task. | `None` | +| `wait_for` | `list[Condition \| OrGroup] \| None` | A list of conditions that must be met before the task can run. | `None` | +| `skip_if` | `list[Condition \| OrGroup] \| None` | A list of conditions that, if met, will cause the task to be skipped. | `None` | +| `cancel_if` | `list[Condition \| OrGroup] \| None` | A list of conditions that, if met, will cause the task to be canceled. | `None` | Returns: @@ -158,16 +160,16 @@ A decorator to transform a function into a Hatchet on-failure task that runs as Parameters: -| Name | Type | Description | Default | -| --------------------- | ----------------------------- | ------------------------------------------------------------------------------------ | ----------------------- | -| `name` | `str \| None` | The name of the on-failure task. Defaults to the name of the function. | `None` | -| `schedule_timeout` | `Duration` | The maximum time to wait for the task to be scheduled. | `timedelta(minutes=5)` | -| `execution_timeout` | `Duration` | The maximum time to wait for the task to complete. | `timedelta(seconds=60)` | -| `retries` | `int` | The number of times to retry the on-failure task before failing. | `0` | -| `rate_limits` | `list[RateLimit]` | A list of rate limit configurations for the on-failure task. | `[]` | -| `backoff_factor` | `float \| None` | The backoff factor for controlling exponential backoff in retries. | `None` | -| `backoff_max_seconds` | `int \| None` | The maximum number of seconds to allow retries with exponential backoff to continue. | `None` | -| `concurrency` | `list[ConcurrencyExpression]` | A list of concurrency expressions for the on-success task. | `[]` | +| Name | Type | Description | Default | +| --------------------- | ------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | ----------------------- | +| `name` | `str \| None` | The name of the on-failure task. If not specified, defaults to the name of the function being wrapped by the `on_failure_task` decorator. | `None` | +| `schedule_timeout` | `Duration` | The maximum time to wait for the task to be scheduled. The run will be canceled if the task does not begin within this time. | `timedelta(minutes=5)` | +| `execution_timeout` | `Duration` | The maximum time to wait for the task to complete. The run will be canceled if the task does not complete within this time. 
| `timedelta(seconds=60)` | +| `retries`             | `int`                                 | The number of times to retry the on-failure task before failing.                                                                                                                                                          | `0`                     | +| `rate_limits`         | `list[RateLimit] \| None`             | A list of rate limit configurations for the on-failure task.                                                                                                                                                              | `None`                  | +| `backoff_factor`      | `float \| None`                       | The backoff factor for controlling exponential backoff in retries.                                                                                                                                                        | `None`                  | +| `backoff_max_seconds` | `int \| None`                         | The maximum number of seconds to allow retries with exponential backoff to continue.                                                                                                                                      | `None`                  | +| `concurrency`         | `list[ConcurrencyExpression] \| None` | A list of concurrency expressions for the on-failure task.                                                                                                                                                                | `None`                  | Returns: @@ -181,16 +183,16 @@ A decorator to transform a function into a Hatchet on-success task that runs as Parameters: -| Name                  | Type                          | Description                                                                            | Default                 | -| --------------------- | ----------------------------- | ------------------------------------------------------------------------------------ | ----------------------- | -| `name`                | `str \| None`                 | The name of the on-success task. Defaults to the name of the function.                | `None`                  | -| `schedule_timeout`    | `Duration`                    | The maximum time to wait for the task to be scheduled.                                | `timedelta(minutes=5)`  | -| `execution_timeout`   | `Duration`                    | The maximum time to wait for the task to complete.                                    | `timedelta(seconds=60)` | -| `retries`             | `int`                         | The number of times to retry the on-success task before failing.                      | `0`                     | -| `rate_limits`         | `list[RateLimit]`             | A list of rate limit configurations for the on-success task.                          | `[]`                    | -| `backoff_factor`      | `float \| None`               | The backoff factor for controlling exponential backoff in retries.                    | `None`                  | -| `backoff_max_seconds` | `int \| None`                 | The maximum number of seconds to allow retries with exponential backoff to continue.  | `None`                  | -| `concurrency`         | `list[ConcurrencyExpression]` | A list of concurrency expressions for the on-success task.                            | `[]`                    | +| Name                  | Type                                  | Description                                                                                                                                 | Default                 | +| --------------------- | ------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | ----------------------- | +| `name`                | `str \| None`                         | The name of the on-success task. If not specified, defaults to the name of the function being wrapped by the `on_success_task` decorator.  | `None`                  | +| `schedule_timeout`    | `Duration`                            | The maximum time to wait for the task to be scheduled. The run will be canceled if the task does not begin within this time.               | `timedelta(minutes=5)`  | +| `execution_timeout`   | `Duration`                            | The maximum time to wait for the task to complete. The run will be canceled if the task does not complete within this time.                | `timedelta(seconds=60)` | +| `retries`             | `int`                                 | The number of times to retry the on-success task before failing.                                                                           | `0`                     | +| `rate_limits`         | `list[RateLimit] \| None`             | A list of rate limit configurations for the on-success task.                                                                               | `None`                  | +| `backoff_factor`      | `float \| None`                       | The backoff factor for controlling exponential backoff in retries.                                                                         | `None`                  | +| `backoff_max_seconds` | `int \| None`                         | The maximum number of seconds to allow retries with exponential backoff to continue.                                                       | `None`                  | +| `concurrency`         | `list[ConcurrencyExpression] \| None` | A list of concurrency expressions for the on-success task.                                                                                 | `None`                  | Returns: @@ -221,7 +223,7 @@ Returns: Run the workflow asynchronously and wait for it to complete. -This method triggers a workflow run, blocks until completion, and returns the final result. 
+This method triggers a workflow run, awaits until completion, and returns the final result. Parameters: @@ -304,7 +306,9 @@ Returns: #### `run_many_no_wait` -Run a workflow in bulk without waiting for all runs to complete. This method triggers multiple workflow runs and immediately returns a list of references to the runs without blocking while the workflows run. +Run a workflow in bulk without waiting for all runs to complete. + +This method triggers multiple workflow runs and immediately returns a list of references to the runs without blocking while the workflows run. Parameters: @@ -320,7 +324,9 @@ Returns: #### `aio_run_many_no_wait` -Run a workflow in bulk without waiting for all runs to complete. This method triggers multiple workflow runs and immediately returns a list of references to the runs without blocking while the workflows run. +Run a workflow in bulk without waiting for all runs to complete. + +This method triggers multiple workflow runs and immediately returns a list of references to the runs without blocking while the workflows run. Parameters: @@ -376,13 +382,13 @@ Create a cron job for the workflow. Parameters: -| Name | Type | Description | Default | -| --------------------- | ------------------------- | ----------------------------------------------------------------- | ------------------------------------ | -| `cron_name` | `str` | The name of the cron job. | _required_ | -| `expression` | `str` | The cron expression that defines the schedule for the cron job. | _required_ | -| `input` | `TWorkflowInput` | The input data for the workflow. | `cast(TWorkflowInput, EmptyModel())` | -| `additional_metadata` | `JSONSerializableMapping` | Additional metadata for the cron job. | `{}` | -| `priority` | `int \| None` | The priority of the cron job. Must be between 1 and 3, inclusive. | `None` | +| Name | Type | Description | Default | +| --------------------- | --------------------------------- | ----------------------------------------------------------------- | ------------------------------------ | +| `cron_name` | `str` | The name of the cron job. | _required_ | +| `expression` | `str` | The cron expression that defines the schedule for the cron job. | _required_ | +| `input` | `TWorkflowInput` | The input data for the workflow. | `cast(TWorkflowInput, EmptyModel())` | +| `additional_metadata` | `JSONSerializableMapping \| None` | Additional metadata for the cron job. | `None` | +| `priority` | `int \| None` | The priority of the cron job. Must be between 1 and 3, inclusive. | `None` | Returns: @@ -396,13 +402,13 @@ Create a cron job for the workflow. Parameters: -| Name | Type | Description | Default | -| --------------------- | ------------------------- | ----------------------------------------------------------------- | ------------------------------------ | -| `cron_name` | `str` | The name of the cron job. | _required_ | -| `expression` | `str` | The cron expression that defines the schedule for the cron job. | _required_ | -| `input` | `TWorkflowInput` | The input data for the workflow. | `cast(TWorkflowInput, EmptyModel())` | -| `additional_metadata` | `JSONSerializableMapping` | Additional metadata for the cron job. | `{}` | -| `priority` | `int \| None` | The priority of the cron job. Must be between 1 and 3, inclusive. 
| `None` | +| Name | Type | Description | Default | +| --------------------- | --------------------------------- | ----------------------------------------------------------------- | ------------------------------------ | +| `cron_name` | `str` | The name of the cron job. | _required_ | +| `expression` | `str` | The cron expression that defines the schedule for the cron job. | _required_ | +| `input` | `TWorkflowInput` | The input data for the workflow. | `cast(TWorkflowInput, EmptyModel())` | +| `additional_metadata` | `JSONSerializableMapping \| None` | Additional metadata for the cron job. | `None` | +| `priority` | `int \| None` | The priority of the cron job. Must be between 1 and 3, inclusive. | `None` | Returns: @@ -416,17 +422,17 @@ Create a bulk run item for the workflow. This is intended to be used in conjunct Parameters: -| Name | Type | Description | Default | -| --------- | ------------------------ | ---------------------------------------- | ------------------------------------ | -| `input` | `TWorkflowInput` | The input data for the workflow. | `cast(TWorkflowInput, EmptyModel())` | -| `key` | `str \| None` | The key for the workflow run. | `None` | -| `options` | `TriggerWorkflowOptions` | Additional options for the workflow run. | `TriggerWorkflowOptions()` | +| Name | Type | Description | Default | +| --------- | ------------------------ | ----------------------------------------------------------------------------------------------------------- | ------------------------------------ | +| `input` | `TWorkflowInput` | The input data for the workflow. | `cast(TWorkflowInput, EmptyModel())` | +| `key` | `str \| None` | The key for the workflow run. This is used to identify the run in the bulk operation and for deduplication. | `None` | +| `options` | `TriggerWorkflowOptions` | Additional options for the workflow run. | `TriggerWorkflowOptions()` | Returns: -| Type | Description | -| -------------------------- | -------------------------------------------------------------------------------------------- | -| `WorkflowRunTriggerConfig` | A `WorkflowRunTriggerConfig` object to trigger the workflow run, used in `run_many` methods. | +| Type | Description | +| -------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | +| `WorkflowRunTriggerConfig` | A `WorkflowRunTriggerConfig` object that can be used to trigger the workflow run, which you then pass into the `run_many` methods. | #### `list_runs` @@ -484,11 +490,11 @@ Create a new filter. Parameters: -| Name | Type | Description | Default | -| ------------ | ------------------------- | ------------------------------------------ | ---------- | -| `expression` | `str` | The expression to evaluate for the filter. | _required_ | -| `scope` | `str` | The scope for the filter. | _required_ | -| `payload` | `JSONSerializableMapping` | The payload to send with the filter. | `{}` | +| Name | Type | Description | Default | +| ------------ | --------------------------------- | ------------------------------------------ | ---------- | +| `expression` | `str` | The expression to evaluate for the filter. | _required_ | +| `scope` | `str` | The scope for the filter. | _required_ | +| `payload` | `JSONSerializableMapping \| None` | The payload to send with the filter. | `None` | Returns: @@ -502,11 +508,11 @@ Create a new filter. 
Parameters: -| Name | Type | Description | Default | -| ------------ | ------------------------- | ------------------------------------------ | ---------- | -| `expression` | `str` | The expression to evaluate for the filter. | _required_ | -| `scope` | `str` | The scope for the filter. | _required_ | -| `payload` | `JSONSerializableMapping` | The payload to send with the filter. | `{}` | +| Name | Type | Description | Default | +| ------------ | --------------------------------- | ------------------------------------------ | ---------- | +| `expression` | `str` | The expression to evaluate for the filter. | _required_ | +| `scope` | `str` | The scope for the filter. | _required_ | +| `payload` | `JSONSerializableMapping \| None` | The payload to send with the filter. | `None` | Returns: @@ -520,31 +526,33 @@ Bases: `BaseWorkflow[TWorkflowInput]`, `Generic[TWorkflowInput, R]` Methods: -| Name | Description | -| ---------------------- | ---------------------------------------------------------------------------------- | -| `run` | Synchronously trigger a workflow run without waiting for it to complete. | -| `aio_run` | Run the workflow asynchronously and wait for it to complete. | -| `run_no_wait` | Run the workflow synchronously and wait for it to complete. | -| `aio_run_no_wait` | Asynchronously trigger a workflow run without waiting for it to complete. | -| `run_many` | Run a workflow in bulk and wait for all runs to complete. | -| `aio_run_many` | Run a workflow in bulk and wait for all runs to complete. | -| `run_many_no_wait` | Run a workflow in bulk without waiting for all runs to complete. | -| `aio_run_many_no_wait` | Run a workflow in bulk without waiting for all runs to complete. | -| `schedule` | Schedule a workflow to run at a specific time. | -| `aio_schedule` | Schedule a workflow to run at a specific time. | -| `create_cron` | Create a cron job for the workflow. | -| `aio_create_cron` | Create a cron job for the workflow. | -| `create_bulk_run_item` | Create a bulk run item for the workflow. Intended for use with `run_many` methods. | -| `list_runs` | List runs of the workflow. | -| `aio_list_runs` | List runs of the workflow. | -| `create_filter` | Create a new filter. | -| `aio_create_filter` | Create a new filter. | +| Name | Description | +| ---------------------- | ------------------------------------------------------------------------------------------------------------------------ | +| `run` | Run the workflow synchronously and wait for it to complete. | +| `aio_run` | Run the workflow asynchronously and wait for it to complete. | +| `run_no_wait` | Trigger a workflow run without waiting for it to complete. | +| `aio_run_no_wait` | Asynchronously trigger a workflow run without waiting for it to complete. | +| `run_many` | Run a workflow in bulk and wait for all runs to complete. | +| `aio_run_many` | Run a workflow in bulk and wait for all runs to complete. | +| `run_many_no_wait` | Run a workflow in bulk without waiting for all runs to complete. | +| `aio_run_many_no_wait` | Run a workflow in bulk without waiting for all runs to complete. | +| `schedule` | Schedule a workflow to run at a specific time. | +| `aio_schedule` | Schedule a workflow to run at a specific time. | +| `create_cron` | Create a cron job for the workflow. | +| `aio_create_cron` | Create a cron job for the workflow. | +| `create_bulk_run_item` | Create a bulk run item for the workflow. This is intended to be used in conjunction with the various `run_many` methods. 
| +| `list_runs` | List runs of the workflow. | +| `aio_list_runs` | List runs of the workflow. | +| `create_filter` | Create a new filter. | +| `aio_create_filter` | Create a new filter. | ### Functions #### `run` -Synchronously trigger a workflow run without waiting for it to complete. This method is useful for starting a workflow run and immediately returning a reference to the run without blocking while the workflow runs. +Run the workflow synchronously and wait for it to complete. + +This method triggers a workflow run, blocks until completion, and returns the extracted result. Parameters: @@ -555,15 +563,15 @@ Parameters: Returns: -| Type | Description | -| ---- | ------------------------------------------------------------------------- | -| `R` | A `WorkflowRunRef` object representing the reference to the workflow run. | +| Type | Description | +| ---- | ----------------------------------------------- | +| `R` | The extracted result of the workflow execution. | #### `aio_run` Run the workflow asynchronously and wait for it to complete. -This method triggers a workflow run, blocks until completion, and returns the final result. +This method triggers a workflow run, awaits until completion, and returns the extracted result. Parameters: @@ -574,15 +582,15 @@ Parameters: Returns: -| Type | Description | -| ---- | ----------------------------------------------------- | -| `R` | The result of the workflow execution as a dictionary. | +| Type | Description | +| ---- | ----------------------------------------------- | +| `R` | The extracted result of the workflow execution. | #### `run_no_wait` -Run the workflow synchronously and wait for it to complete. +Trigger a workflow run without waiting for it to complete. -This method triggers a workflow run, blocks until completion, and returns the final result. +This method triggers a workflow run and immediately returns a reference to the run without blocking while the workflow runs. Parameters: @@ -593,9 +601,9 @@ Parameters: Returns: -| Type | Description | -| ------------------------------- | ----------------------------------------------------- | -| `TaskRunRef[TWorkflowInput, R]` | The result of the workflow execution as a dictionary. | +| Type | Description | +| ------------------------------- | --------------------------------------------------------------------- | +| `TaskRunRef[TWorkflowInput, R]` | A `TaskRunRef` object representing the reference to the workflow run. | #### `aio_run_no_wait` @@ -610,9 +618,9 @@ Parameters: Returns: -| Type | Description | -| ------------------------------- | ------------------------------------------------------------------------- | -| `TaskRunRef[TWorkflowInput, R]` | A `WorkflowRunRef` object representing the reference to the workflow run. | +| Type | Description | +| ------------------------------- | --------------------------------------------------------------------- | +| `TaskRunRef[TWorkflowInput, R]` | A `TaskRunRef` object representing the reference to the workflow run. | #### `run_many` @@ -648,7 +656,9 @@ Returns: #### `run_many_no_wait` -Run a workflow in bulk without waiting for all runs to complete. This method triggers multiple workflow runs and immediately returns a list of references to the runs without blocking while the workflows run. +Run a workflow in bulk without waiting for all runs to complete. + +This method triggers multiple workflow runs and immediately returns a list of references to the runs without blocking while the workflows run. 
Parameters: @@ -664,7 +674,9 @@ Returns: #### `aio_run_many_no_wait` -Run a workflow in bulk without waiting for all runs to complete. This method triggers multiple workflow runs and immediately returns a list of references to the runs without blocking while the workflows run. +Run a workflow in bulk without waiting for all runs to complete. + +This method triggers multiple workflow runs and immediately returns a list of references to the runs without blocking while the workflows run. Parameters: @@ -720,13 +732,13 @@ Create a cron job for the workflow. Parameters: -| Name | Type | Description | Default | -| --------------------- | ------------------------- | ----------------------------------------------------------------- | ------------------------------------ | -| `cron_name` | `str` | The name of the cron job. | _required_ | -| `expression` | `str` | The cron expression that defines the schedule for the cron job. | _required_ | -| `input` | `TWorkflowInput` | The input data for the workflow. | `cast(TWorkflowInput, EmptyModel())` | -| `additional_metadata` | `JSONSerializableMapping` | Additional metadata for the cron job. | `{}` | -| `priority` | `int \| None` | The priority of the cron job. Must be between 1 and 3, inclusive. | `None` | +| Name | Type | Description | Default | +| --------------------- | --------------------------------- | ----------------------------------------------------------------- | ------------------------------------ | +| `cron_name` | `str` | The name of the cron job. | _required_ | +| `expression` | `str` | The cron expression that defines the schedule for the cron job. | _required_ | +| `input` | `TWorkflowInput` | The input data for the workflow. | `cast(TWorkflowInput, EmptyModel())` | +| `additional_metadata` | `JSONSerializableMapping \| None` | Additional metadata for the cron job. | `None` | +| `priority` | `int \| None` | The priority of the cron job. Must be between 1 and 3, inclusive. | `None` | Returns: @@ -740,13 +752,13 @@ Create a cron job for the workflow. Parameters: -| Name | Type | Description | Default | -| --------------------- | ------------------------- | ----------------------------------------------------------------- | ------------------------------------ | -| `cron_name` | `str` | The name of the cron job. | _required_ | -| `expression` | `str` | The cron expression that defines the schedule for the cron job. | _required_ | -| `input` | `TWorkflowInput` | The input data for the workflow. | `cast(TWorkflowInput, EmptyModel())` | -| `additional_metadata` | `JSONSerializableMapping` | Additional metadata for the cron job. | `{}` | -| `priority` | `int \| None` | The priority of the cron job. Must be between 1 and 3, inclusive. | `None` | +| Name | Type | Description | Default | +| --------------------- | --------------------------------- | ----------------------------------------------------------------- | ------------------------------------ | +| `cron_name` | `str` | The name of the cron job. | _required_ | +| `expression` | `str` | The cron expression that defines the schedule for the cron job. | _required_ | +| `input` | `TWorkflowInput` | The input data for the workflow. | `cast(TWorkflowInput, EmptyModel())` | +| `additional_metadata` | `JSONSerializableMapping \| None` | Additional metadata for the cron job. | `None` | +| `priority` | `int \| None` | The priority of the cron job. Must be between 1 and 3, inclusive. | `None` | Returns: @@ -760,17 +772,17 @@ Create a bulk run item for the workflow. 
This is intended to be used in conjunct Parameters: -| Name | Type | Description | Default | -| --------- | ------------------------ | ---------------------------------------- | ------------------------------------ | -| `input` | `TWorkflowInput` | The input data for the workflow. | `cast(TWorkflowInput, EmptyModel())` | -| `key` | `str \| None` | The key for the workflow run. | `None` | -| `options` | `TriggerWorkflowOptions` | Additional options for the workflow run. | `TriggerWorkflowOptions()` | +| Name | Type | Description | Default | +| --------- | ------------------------ | ----------------------------------------------------------------------------------------------------------- | ------------------------------------ | +| `input` | `TWorkflowInput` | The input data for the workflow. | `cast(TWorkflowInput, EmptyModel())` | +| `key` | `str \| None` | The key for the workflow run. This is used to identify the run in the bulk operation and for deduplication. | `None` | +| `options` | `TriggerWorkflowOptions` | Additional options for the workflow run. | `TriggerWorkflowOptions()` | Returns: -| Type | Description | -| -------------------------- | -------------------------------------------------------------------------------------------- | -| `WorkflowRunTriggerConfig` | A `WorkflowRunTriggerConfig` object to trigger the workflow run, used in `run_many` methods. | +| Type | Description | +| -------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | +| `WorkflowRunTriggerConfig` | A `WorkflowRunTriggerConfig` object that can be used to trigger the workflow run, which you then pass into the `run_many` methods. | #### `list_runs` @@ -828,11 +840,11 @@ Create a new filter. Parameters: -| Name | Type | Description | Default | -| ------------ | ------------------------- | ------------------------------------------ | ---------- | -| `expression` | `str` | The expression to evaluate for the filter. | _required_ | -| `scope` | `str` | The scope for the filter. | _required_ | -| `payload` | `JSONSerializableMapping` | The payload to send with the filter. | `{}` | +| Name | Type | Description | Default | +| ------------ | --------------------------------- | ------------------------------------------ | ---------- | +| `expression` | `str` | The expression to evaluate for the filter. | _required_ | +| `scope` | `str` | The scope for the filter. | _required_ | +| `payload` | `JSONSerializableMapping \| None` | The payload to send with the filter. | `None` | Returns: @@ -846,11 +858,11 @@ Create a new filter. Parameters: -| Name | Type | Description | Default | -| ------------ | ------------------------- | ------------------------------------------ | ---------- | -| `expression` | `str` | The expression to evaluate for the filter. | _required_ | -| `scope` | `str` | The scope for the filter. | _required_ | -| `payload` | `JSONSerializableMapping` | The payload to send with the filter. | `{}` | +| Name | Type | Description | Default | +| ------------ | --------------------------------- | ------------------------------------------ | ---------- | +| `expression` | `str` | The expression to evaluate for the filter. | _required_ | +| `scope` | `str` | The scope for the filter. | _required_ | +| `payload` | `JSONSerializableMapping \| None` | The payload to send with the filter. 
| `None` | Returns: diff --git a/internal/services/controllers/v1/olap/process_task_status_updates.go b/internal/services/controllers/v1/olap/process_task_status_updates.go index 424601d7e..90e1bbafe 100644 --- a/internal/services/controllers/v1/olap/process_task_status_updates.go +++ b/internal/services/controllers/v1/olap/process_task_status_updates.go @@ -8,6 +8,7 @@ import ( "github.com/hatchet-dev/hatchet/internal/telemetry" "github.com/hatchet-dev/hatchet/pkg/repository/postgres/dbsqlc" "github.com/hatchet-dev/hatchet/pkg/repository/postgres/sqlchelpers" + "github.com/hatchet-dev/hatchet/pkg/repository/v1/sqlcv1" ) func (o *OLAPControllerImpl) runTenantTaskStatusUpdates(ctx context.Context) func() { @@ -45,6 +46,10 @@ func (o *OLAPControllerImpl) updateTaskStatuses(ctx context.Context, tenantId st payloads := make([]tasktypes.NotifyFinalizedPayload, 0, len(rows)) for _, row := range rows { + if row.ReadableStatus != sqlcv1.V1ReadableStatusOlapCOMPLETED && row.ReadableStatus != sqlcv1.V1ReadableStatusOlapCANCELLED && row.ReadableStatus != sqlcv1.V1ReadableStatusOlapFAILED { + continue + } + payloads = append(payloads, tasktypes.NotifyFinalizedPayload{ ExternalId: sqlchelpers.UUIDToStr(row.ExternalId), Status: row.ReadableStatus, diff --git a/internal/services/dispatcher/contracts/dispatcher.pb.go b/internal/services/dispatcher/contracts/dispatcher.pb.go index 48fc42cc6..c62d51062 100644 --- a/internal/services/dispatcher/contracts/dispatcher.pb.go +++ b/internal/services/dispatcher/contracts/dispatcher.pb.go @@ -1599,6 +1599,7 @@ type WorkflowEvent struct { StepRetries *int32 `protobuf:"varint,8,opt,name=stepRetries,proto3,oneof" json:"stepRetries,omitempty"` // (optional) the retry count of this step RetryCount *int32 `protobuf:"varint,9,opt,name=retryCount,proto3,oneof" json:"retryCount,omitempty"` + EventIndex *int64 `protobuf:"varint,10,opt,name=eventIndex,proto3,oneof" json:"eventIndex,omitempty"` } func (x *WorkflowEvent) Reset() { @@ -1696,6 +1697,13 @@ func (x *WorkflowEvent) GetRetryCount() int32 { return 0 } +func (x *WorkflowEvent) GetEventIndex() int64 { + if x != nil && x.EventIndex != nil { + return *x.EventIndex + } + return 0 +} + type WorkflowRunEvent struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -2464,7 +2472,7 @@ var file_dispatcher_proto_rawDesc = []byte{ 0x6f, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x24, 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x49, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x77, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x49, 0x64, 0x22, 0xa5, 0x03, 0x0a, 0x0d, 0x57, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x49, 0x64, 0x22, 0xd9, 0x03, 0x0a, 0x0d, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x24, 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x49, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, @@ -2489,186 +2497,189 @@ var file_dispatcher_proto_rawDesc = []byte{ 0x73, 0x74, 0x65, 0x70, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x88, 0x01, 0x01, 0x12, 0x23, 0x0a, 0x0a, 0x72, 0x65, 0x74, 0x72, 0x79, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x48, 0x01, 0x52, 0x0a, 0x72, 0x65, 0x74, 0x72, 0x79, 0x43, 0x6f, 0x75, 0x6e, 0x74, - 0x88, 0x01, 0x01, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x73, 0x74, 0x65, 0x70, 
0x52, 0x65, 0x74, 0x72, - 0x69, 0x65, 0x73, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x43, 0x6f, 0x75, - 0x6e, 0x74, 0x22, 0xdb, 0x01, 0x0a, 0x10, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, - 0x75, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x24, 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x49, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, - 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x49, 0x64, 0x12, 0x33, 0x0a, - 0x09, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x15, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x45, 0x76, - 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x42, 0x0a, 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x28, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, - 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x53, 0x74, 0x65, 0x70, 0x52, 0x75, - 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, - 0x22, 0xbe, 0x01, 0x0a, 0x0d, 0x53, 0x74, 0x65, 0x70, 0x52, 0x75, 0x6e, 0x52, 0x65, 0x73, 0x75, - 0x6c, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x74, 0x65, 0x70, 0x52, 0x75, 0x6e, 0x49, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x74, 0x65, 0x70, 0x52, 0x75, 0x6e, 0x49, 0x64, - 0x12, 0x26, 0x0a, 0x0e, 0x73, 0x74, 0x65, 0x70, 0x52, 0x65, 0x61, 0x64, 0x61, 0x62, 0x6c, 0x65, - 0x49, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x74, 0x65, 0x70, 0x52, 0x65, - 0x61, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6a, 0x6f, 0x62, 0x52, - 0x75, 0x6e, 0x49, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, 0x6f, 0x62, 0x52, - 0x75, 0x6e, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x88, 0x01, 0x01, 0x12, - 0x1b, 0x0a, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, - 0x01, 0x52, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x88, 0x01, 0x01, 0x42, 0x08, 0x0a, 0x06, - 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, - 0x74, 0x22, 0x7f, 0x0a, 0x0d, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x44, 0x61, - 0x74, 0x61, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x74, 0x65, 0x70, 0x52, 0x75, 0x6e, 0x49, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x74, 0x65, 0x70, 0x52, 0x75, 0x6e, 0x49, 0x64, - 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x70, 0x61, 0x74, 0x68, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x26, 0x0a, 0x0e, 0x63, 0x61, - 0x6c, 0x6c, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0e, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x65, 0x6e, 0x61, - 0x6d, 0x65, 0x22, 0x17, 0x0a, 0x15, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x44, - 
0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x6c, 0x0a, 0x10, 0x48, - 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x64, 0x12, 0x3c, 0x0a, 0x0b, 0x68, - 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x41, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x68, 0x65, - 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x41, 0x74, 0x22, 0x13, 0x0a, 0x11, 0x48, 0x65, 0x61, - 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x65, - 0x0a, 0x15, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x74, 0x65, 0x70, 0x52, - 0x75, 0x6e, 0x49, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x74, 0x65, 0x70, - 0x52, 0x75, 0x6e, 0x49, 0x64, 0x12, 0x2e, 0x0a, 0x12, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x12, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, - 0x6f, 0x75, 0x74, 0x42, 0x79, 0x22, 0x52, 0x0a, 0x16, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, - 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x41, 0x74, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, - 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x41, 0x74, 0x22, 0x32, 0x0a, 0x12, 0x52, 0x65, 0x6c, - 0x65, 0x61, 0x73, 0x65, 0x53, 0x6c, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x1c, 0x0a, 0x09, 0x73, 0x74, 0x65, 0x70, 0x52, 0x75, 0x6e, 0x49, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x09, 0x73, 0x74, 0x65, 0x70, 0x52, 0x75, 0x6e, 0x49, 0x64, 0x22, 0x15, 0x0a, - 0x13, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x53, 0x6c, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x2a, 0x37, 0x0a, 0x04, 0x53, 0x44, 0x4b, 0x53, 0x12, 0x0b, 0x0a, 0x07, - 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x06, 0x0a, 0x02, 0x47, 0x4f, 0x10, - 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x59, 0x54, 0x48, 0x4f, 0x4e, 0x10, 0x02, 0x12, 0x0e, 0x0a, - 0x0a, 0x54, 0x59, 0x50, 0x45, 0x53, 0x43, 0x52, 0x49, 0x50, 0x54, 0x10, 0x03, 0x2a, 0x4e, 0x0a, - 0x0a, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x0e, 0x53, - 0x54, 0x41, 0x52, 0x54, 0x5f, 0x53, 0x54, 0x45, 0x50, 0x5f, 0x52, 0x55, 0x4e, 0x10, 0x00, 0x12, - 0x13, 0x0a, 0x0f, 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x5f, 0x53, 0x54, 0x45, 0x50, 0x5f, 0x52, - 0x55, 0x4e, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x53, 0x54, 0x41, 0x52, 0x54, 0x5f, 0x47, 0x45, - 0x54, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x5f, 0x4b, 0x45, 0x59, 0x10, 0x02, 0x2a, 0xa2, 0x01, - 0x0a, 0x17, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4b, 0x65, 0x79, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x20, 0x0a, 0x1c, 0x47, 0x52, 0x4f, - 0x55, 0x50, 0x5f, 0x4b, 
0x45, 0x59, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x20, 0x0a, 0x1c, 0x47, - 0x52, 0x4f, 0x55, 0x50, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x41, 0x52, 0x54, 0x45, 0x44, 0x10, 0x01, 0x12, 0x22, 0x0a, - 0x1e, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, - 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, - 0x02, 0x12, 0x1f, 0x0a, 0x1b, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x45, - 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, - 0x10, 0x03, 0x2a, 0xac, 0x01, 0x0a, 0x13, 0x53, 0x74, 0x65, 0x70, 0x41, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x17, 0x53, 0x54, - 0x45, 0x50, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, - 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x17, 0x53, 0x54, 0x45, 0x50, 0x5f, - 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x41, 0x52, 0x54, - 0x45, 0x44, 0x10, 0x01, 0x12, 0x1d, 0x0a, 0x19, 0x53, 0x54, 0x45, 0x50, 0x5f, 0x45, 0x56, 0x45, - 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, - 0x44, 0x10, 0x02, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x54, 0x45, 0x50, 0x5f, 0x45, 0x56, 0x45, 0x4e, - 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x03, 0x12, - 0x20, 0x0a, 0x1c, 0x53, 0x54, 0x45, 0x50, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x41, 0x43, 0x4b, 0x4e, 0x4f, 0x57, 0x4c, 0x45, 0x44, 0x47, 0x45, 0x44, 0x10, - 0x04, 0x2a, 0x65, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x19, 0x0a, 0x15, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, - 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, - 0x45, 0x50, 0x5f, 0x52, 0x55, 0x4e, 0x10, 0x01, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x45, 0x53, 0x4f, - 0x55, 0x52, 0x43, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x57, 0x4f, 0x52, 0x4b, 0x46, 0x4c, - 0x4f, 0x57, 0x5f, 0x52, 0x55, 0x4e, 0x10, 0x02, 0x2a, 0xfe, 0x01, 0x0a, 0x11, 0x52, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, - 0x0a, 0x1b, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, - 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, - 0x1f, 0x0a, 0x1b, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x45, 0x56, 0x45, 0x4e, - 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x41, 0x52, 0x54, 0x45, 0x44, 0x10, 0x01, - 0x12, 0x21, 0x0a, 0x1d, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x45, 0x56, 0x45, - 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, - 0x44, 0x10, 0x02, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, - 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, - 0x44, 0x10, 0x03, 0x12, 0x21, 0x0a, 0x1d, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, - 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 
0x50, 0x45, 0x5f, 0x43, 0x41, 0x4e, 0x43, 0x45, - 0x4c, 0x4c, 0x45, 0x44, 0x10, 0x04, 0x12, 0x21, 0x0a, 0x1d, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, - 0x43, 0x45, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x54, 0x49, - 0x4d, 0x45, 0x44, 0x5f, 0x4f, 0x55, 0x54, 0x10, 0x05, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x45, 0x53, - 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x53, 0x54, 0x52, 0x45, 0x41, 0x4d, 0x10, 0x06, 0x2a, 0x3c, 0x0a, 0x14, 0x57, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x24, 0x0a, 0x20, 0x57, 0x4f, 0x52, 0x4b, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x52, 0x55, - 0x4e, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x4e, - 0x49, 0x53, 0x48, 0x45, 0x44, 0x10, 0x00, 0x32, 0xf8, 0x06, 0x0a, 0x0a, 0x44, 0x69, 0x73, 0x70, - 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x3d, 0x0a, 0x08, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, - 0x65, 0x72, 0x12, 0x16, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x67, 0x69, 0x73, - 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x57, 0x6f, 0x72, - 0x6b, 0x65, 0x72, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x33, 0x0a, 0x06, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x12, - 0x14, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0f, 0x2e, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, - 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x30, 0x01, 0x12, 0x35, 0x0a, 0x08, 0x4c, 0x69, - 0x73, 0x74, 0x65, 0x6e, 0x56, 0x32, 0x12, 0x14, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, - 0x69, 0x73, 0x74, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0f, 0x2e, 0x41, - 0x73, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x30, - 0x01, 0x12, 0x34, 0x0a, 0x09, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x12, 0x11, - 0x2e, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x12, 0x2e, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x52, 0x0a, 0x19, 0x53, 0x75, 0x62, 0x73, 0x63, - 0x72, 0x69, 0x62, 0x65, 0x54, 0x6f, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x76, - 0x65, 0x6e, 0x74, 0x73, 0x12, 0x21, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, - 0x54, 0x6f, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0e, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x22, 0x00, 0x30, 0x01, 0x12, 0x53, 0x0a, 0x17, 0x53, - 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x54, 0x6f, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x73, 0x12, 0x1f, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, - 0x62, 0x65, 0x54, 0x6f, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, - 0x12, 0x3f, 0x0a, 0x13, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x65, 0x70, 0x41, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x10, 0x2e, 0x53, 0x74, 
0x65, 0x70, 0x41, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x1a, 0x14, 0x2e, 0x41, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0x47, 0x0a, 0x17, 0x53, 0x65, 0x6e, 0x64, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4b, 0x65, - 0x79, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x14, 0x2e, 0x47, - 0x72, 0x6f, 0x75, 0x70, 0x4b, 0x65, 0x79, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, - 0x6e, 0x74, 0x1a, 0x14, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3c, 0x0a, 0x10, 0x50, 0x75, - 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x44, 0x61, 0x74, 0x61, 0x12, 0x0e, - 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x44, 0x61, 0x74, 0x61, 0x1a, 0x16, - 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x46, 0x0a, 0x0b, 0x55, 0x6e, 0x73, 0x75, - 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x12, 0x19, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, - 0x55, 0x6e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x55, 0x6e, 0x73, 0x75, 0x62, - 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x12, 0x43, 0x0a, 0x0e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x69, 0x6d, 0x65, 0x6f, - 0x75, 0x74, 0x12, 0x16, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x69, 0x6d, 0x65, - 0x6f, 0x75, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x52, 0x65, 0x66, + 0x88, 0x01, 0x01, 0x12, 0x23, 0x0a, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x6e, 0x64, 0x65, + 0x78, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, + 0x49, 0x6e, 0x64, 0x65, 0x78, 0x88, 0x01, 0x01, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x73, 0x74, 0x65, + 0x70, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x72, 0x65, 0x74, + 0x72, 0x79, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x22, 0xdb, 0x01, 0x0a, 0x10, 0x57, 0x6f, 0x72, 0x6b, 0x66, + 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x24, 0x0a, 0x0d, 0x77, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x49, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x49, + 0x64, 0x12, 0x33, 0x0a, 0x09, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, + 0x75, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x65, 0x76, 0x65, + 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x42, 0x0a, 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0e, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x28, 0x0a, 0x07, 0x72, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x53, 0x74, + 
0x65, 0x70, 0x52, 0x75, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x73, 0x22, 0xbe, 0x01, 0x0a, 0x0d, 0x53, 0x74, 0x65, 0x70, 0x52, 0x75, 0x6e, + 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x74, 0x65, 0x70, 0x52, 0x75, + 0x6e, 0x49, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x74, 0x65, 0x70, 0x52, + 0x75, 0x6e, 0x49, 0x64, 0x12, 0x26, 0x0a, 0x0e, 0x73, 0x74, 0x65, 0x70, 0x52, 0x65, 0x61, 0x64, + 0x61, 0x62, 0x6c, 0x65, 0x49, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x74, + 0x65, 0x70, 0x52, 0x65, 0x61, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, + 0x6a, 0x6f, 0x62, 0x52, 0x75, 0x6e, 0x49, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x6a, 0x6f, 0x62, 0x52, 0x75, 0x6e, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x88, 0x01, 0x01, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x09, 0x48, 0x01, 0x52, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x88, 0x01, 0x01, + 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x6f, + 0x75, 0x74, 0x70, 0x75, 0x74, 0x22, 0x7f, 0x0a, 0x0d, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, + 0x65, 0x73, 0x44, 0x61, 0x74, 0x61, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x74, 0x65, 0x70, 0x52, 0x75, + 0x6e, 0x49, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x74, 0x65, 0x70, 0x52, + 0x75, 0x6e, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x26, + 0x0a, 0x0e, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x46, 0x69, + 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x17, 0x0a, 0x15, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, + 0x64, 0x65, 0x73, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x6c, 0x0a, 0x10, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x64, 0x12, + 0x3c, 0x0a, 0x0b, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x41, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x52, 0x0b, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x41, 0x74, 0x22, 0x13, 0x0a, + 0x11, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x65, 0x0a, 0x15, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x69, 0x6d, + 0x65, 0x6f, 0x75, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x73, + 0x74, 0x65, 0x70, 0x52, 0x75, 0x6e, 0x49, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x73, 0x74, 0x65, 0x70, 0x52, 0x75, 0x6e, 0x49, 0x64, 0x12, 0x2e, 0x0a, 0x12, 0x69, 0x6e, 0x63, + 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x18, + 0x02, 0x20, 0x01, 0x28, 
0x09, 0x52, 0x12, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x22, 0x52, 0x0a, 0x16, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3a, 0x0a, 0x0b, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, - 0x53, 0x6c, 0x6f, 0x74, 0x12, 0x13, 0x2e, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x53, 0x6c, - 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x52, 0x65, 0x6c, 0x65, - 0x61, 0x73, 0x65, 0x53, 0x6c, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0x4f, 0x0a, 0x12, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, - 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x1a, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, - 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x57, 0x6f, 0x72, 0x6b, - 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x42, 0x47, 0x5a, 0x45, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x68, 0x61, 0x74, 0x63, 0x68, 0x65, 0x74, 0x2d, 0x64, 0x65, 0x76, 0x2f, 0x68, 0x61, 0x74, - 0x63, 0x68, 0x65, 0x74, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x64, 0x69, 0x73, 0x70, 0x61, 0x74, 0x63, 0x68, 0x65, - 0x72, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, + 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x41, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x41, 0x74, 0x22, 0x32, 0x0a, + 0x12, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x53, 0x6c, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x74, 0x65, 0x70, 0x52, 0x75, 0x6e, 0x49, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x74, 0x65, 0x70, 0x52, 0x75, 0x6e, 0x49, + 0x64, 0x22, 0x15, 0x0a, 0x13, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x53, 0x6c, 0x6f, 0x74, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2a, 0x37, 0x0a, 0x04, 0x53, 0x44, 0x4b, 0x53, + 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x06, 0x0a, + 0x02, 0x47, 0x4f, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x59, 0x54, 0x48, 0x4f, 0x4e, 0x10, + 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x53, 0x43, 0x52, 0x49, 0x50, 0x54, 0x10, + 0x03, 0x2a, 0x4e, 0x0a, 0x0a, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x12, 0x0a, 0x0e, 0x53, 0x54, 0x41, 0x52, 0x54, 0x5f, 0x53, 0x54, 0x45, 0x50, 0x5f, 0x52, 0x55, + 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x5f, 0x53, 0x54, + 0x45, 0x50, 0x5f, 0x52, 0x55, 0x4e, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x53, 0x54, 0x41, 0x52, + 0x54, 0x5f, 0x47, 0x45, 0x54, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x5f, 0x4b, 0x45, 0x59, 0x10, + 0x02, 0x2a, 0xa2, 0x01, 0x0a, 0x17, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4b, 0x65, 0x79, 0x41, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x20, 0x0a, + 0x1c, 0x47, 0x52, 0x4f, 0x55, 
0x50, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, + 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, + 0x20, 0x0a, 0x1c, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x45, 0x56, 0x45, + 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x41, 0x52, 0x54, 0x45, 0x44, 0x10, + 0x01, 0x12, 0x22, 0x0a, 0x1e, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x45, + 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, + 0x54, 0x45, 0x44, 0x10, 0x02, 0x12, 0x1f, 0x0a, 0x1b, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x5f, 0x4b, + 0x45, 0x59, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x41, + 0x49, 0x4c, 0x45, 0x44, 0x10, 0x03, 0x2a, 0xac, 0x01, 0x0a, 0x13, 0x53, 0x74, 0x65, 0x70, 0x41, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1b, + 0x0a, 0x17, 0x53, 0x54, 0x45, 0x50, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x17, 0x53, + 0x54, 0x45, 0x50, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, + 0x54, 0x41, 0x52, 0x54, 0x45, 0x44, 0x10, 0x01, 0x12, 0x1d, 0x0a, 0x19, 0x53, 0x54, 0x45, 0x50, + 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4f, 0x4d, 0x50, + 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x02, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x54, 0x45, 0x50, 0x5f, + 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, + 0x44, 0x10, 0x03, 0x12, 0x20, 0x0a, 0x1c, 0x53, 0x54, 0x45, 0x50, 0x5f, 0x45, 0x56, 0x45, 0x4e, + 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x41, 0x43, 0x4b, 0x4e, 0x4f, 0x57, 0x4c, 0x45, 0x44, + 0x47, 0x45, 0x44, 0x10, 0x04, 0x2a, 0x65, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x15, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, + 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, + 0x12, 0x1a, 0x0a, 0x16, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x53, 0x54, 0x45, 0x50, 0x5f, 0x52, 0x55, 0x4e, 0x10, 0x01, 0x12, 0x1e, 0x0a, 0x1a, + 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x57, 0x4f, + 0x52, 0x4b, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x52, 0x55, 0x4e, 0x10, 0x02, 0x2a, 0xfe, 0x01, 0x0a, + 0x11, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x45, + 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, + 0x4e, 0x10, 0x00, 0x12, 0x1f, 0x0a, 0x1b, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, + 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x41, 0x52, 0x54, + 0x45, 0x44, 0x10, 0x01, 0x12, 0x21, 0x0a, 0x1d, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, + 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4f, 0x4d, 0x50, + 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x02, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x45, 0x53, 0x4f, 0x55, + 0x52, 0x43, 0x45, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, + 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x03, 0x12, 0x21, 0x0a, 0x1d, 0x52, 0x45, 0x53, 0x4f, 0x55, + 0x52, 0x43, 0x45, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 
0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, + 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x4c, 0x45, 0x44, 0x10, 0x04, 0x12, 0x21, 0x0a, 0x1d, 0x52, 0x45, + 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x54, 0x49, 0x4d, 0x45, 0x44, 0x5f, 0x4f, 0x55, 0x54, 0x10, 0x05, 0x12, 0x1e, 0x0a, + 0x1a, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x45, 0x41, 0x4d, 0x10, 0x06, 0x2a, 0x3c, 0x0a, + 0x14, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x45, 0x76, 0x65, 0x6e, + 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x24, 0x0a, 0x20, 0x57, 0x4f, 0x52, 0x4b, 0x46, 0x4c, 0x4f, + 0x57, 0x5f, 0x52, 0x55, 0x4e, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x46, 0x49, 0x4e, 0x49, 0x53, 0x48, 0x45, 0x44, 0x10, 0x00, 0x32, 0xf8, 0x06, 0x0a, 0x0a, + 0x44, 0x69, 0x73, 0x70, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x3d, 0x0a, 0x08, 0x52, 0x65, + 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x12, 0x16, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x52, + 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, + 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x33, 0x0a, 0x06, 0x4c, 0x69, 0x73, + 0x74, 0x65, 0x6e, 0x12, 0x14, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, + 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0f, 0x2e, 0x41, 0x73, 0x73, 0x69, + 0x67, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x30, 0x01, 0x12, 0x35, + 0x0a, 0x08, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x56, 0x32, 0x12, 0x14, 0x2e, 0x57, 0x6f, 0x72, + 0x6b, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x0f, 0x2e, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x22, 0x00, 0x30, 0x01, 0x12, 0x34, 0x0a, 0x09, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, + 0x61, 0x74, 0x12, 0x11, 0x2e, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, + 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x52, 0x0a, 0x19, 0x53, + 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x54, 0x6f, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x21, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, + 0x72, 0x69, 0x62, 0x65, 0x54, 0x6f, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x76, + 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0e, 0x2e, 0x57, 0x6f, + 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x22, 0x00, 0x30, 0x01, 0x12, + 0x53, 0x0a, 0x17, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x54, 0x6f, 0x57, 0x6f, + 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x73, 0x12, 0x1f, 0x2e, 0x53, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x54, 0x6f, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, + 0x52, 0x75, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x57, 0x6f, + 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x22, 0x00, + 0x28, 0x01, 0x30, 0x01, 0x12, 0x3f, 0x0a, 0x13, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x65, 0x70, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x10, 
0x2e, 0x53, 0x74, + 0x65, 0x70, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x1a, 0x14, 0x2e, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x47, 0x0a, 0x17, 0x53, 0x65, 0x6e, 0x64, 0x47, 0x72, 0x6f, + 0x75, 0x70, 0x4b, 0x65, 0x79, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, + 0x12, 0x14, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4b, 0x65, 0x79, 0x41, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x1a, 0x14, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, + 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3c, + 0x0a, 0x10, 0x50, 0x75, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x44, 0x61, + 0x74, 0x61, 0x12, 0x0e, 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x44, 0x61, + 0x74, 0x61, 0x1a, 0x16, 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x44, 0x61, + 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x46, 0x0a, 0x0b, + 0x55, 0x6e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x12, 0x19, 0x2e, 0x57, 0x6f, + 0x72, 0x6b, 0x65, 0x72, 0x55, 0x6e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x55, + 0x6e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x43, 0x0a, 0x0e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, + 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x16, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, + 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, + 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3a, 0x0a, 0x0b, 0x52, 0x65, 0x6c, + 0x65, 0x61, 0x73, 0x65, 0x53, 0x6c, 0x6f, 0x74, 0x12, 0x13, 0x2e, 0x52, 0x65, 0x6c, 0x65, 0x61, + 0x73, 0x65, 0x53, 0x6c, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, + 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x53, 0x6c, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4f, 0x0a, 0x12, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x57, + 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x1a, 0x2e, 0x55, 0x70, + 0x73, 0x65, 0x72, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, + 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x47, 0x5a, 0x45, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x74, 0x63, 0x68, 0x65, 0x74, 0x2d, 0x64, 0x65, 0x76, + 0x2f, 0x68, 0x61, 0x74, 0x63, 0x68, 0x65, 0x74, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x64, 0x69, 0x73, 0x70, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x73, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/internal/services/dispatcher/server_v1.go b/internal/services/dispatcher/server_v1.go index 471902fb3..7ac0fc2bb 100644 --- a/internal/services/dispatcher/server_v1.go +++ b/internal/services/dispatcher/server_v1.go @@ -27,6 +27,287 @@ 
import ( tasktypes "github.com/hatchet-dev/hatchet/internal/services/shared/tasktypes/v1" ) +type timeoutEvent struct { + events []*contracts.WorkflowEvent + timeoutAt time.Time +} + +type StreamEventBuffer struct { + stepRunIdToWorkflowEvents map[string][]*contracts.WorkflowEvent + stepRunIdToExpectedIndex map[string]int64 + stepRunIdToLastSeenTime map[string]time.Time + stepRunIdToCompletionTime map[string]time.Time + mu sync.Mutex + timeoutDuration time.Duration + gracePeriod time.Duration + eventsChan chan *contracts.WorkflowEvent + timedOutEventProducer chan timeoutEvent + ctx context.Context + cancel context.CancelFunc +} + +func NewStreamEventBuffer(timeout time.Duration) *StreamEventBuffer { + ctx, cancel := context.WithCancel(context.Background()) + + buffer := &StreamEventBuffer{ + stepRunIdToWorkflowEvents: make(map[string][]*contracts.WorkflowEvent), + stepRunIdToExpectedIndex: make(map[string]int64), + stepRunIdToLastSeenTime: make(map[string]time.Time), + stepRunIdToCompletionTime: make(map[string]time.Time), + timeoutDuration: timeout, + gracePeriod: 2 * time.Second, // Wait 2 seconds after completion for late events + eventsChan: make(chan *contracts.WorkflowEvent, 100), + timedOutEventProducer: make(chan timeoutEvent, 100), + ctx: ctx, + cancel: cancel, + } + + go buffer.processTimeoutEvents() + go buffer.periodicCleanup() + + return buffer +} + +func isTerminalEvent(event *contracts.WorkflowEvent) bool { + if event == nil { + return false + } + + return event.ResourceType == contracts.ResourceType_RESOURCE_TYPE_STEP_RUN && + (event.EventType == contracts.ResourceEventType_RESOURCE_EVENT_TYPE_COMPLETED || + event.EventType == contracts.ResourceEventType_RESOURCE_EVENT_TYPE_FAILED || + event.EventType == contracts.ResourceEventType_RESOURCE_EVENT_TYPE_CANCELLED) +} + +func sortByEventIndex(a, b *contracts.WorkflowEvent) int { + if a.EventIndex == nil && b.EventIndex == nil { + if a.EventTimestamp.AsTime().Before(b.EventTimestamp.AsTime()) { + return -1 + } + + if a.EventTimestamp.AsTime().After(b.EventTimestamp.AsTime()) { + return 1 + } + + return 0 + } + + if *a.EventIndex < *b.EventIndex { + return -1 + } + + if *a.EventIndex > *b.EventIndex { + return 1 + } + + return 0 +} + +func (b *StreamEventBuffer) processTimeoutEvents() { + for { + select { + case <-b.ctx.Done(): + return + case timeoutEvent := <-b.timedOutEventProducer: + timer := time.NewTimer(time.Until(timeoutEvent.timeoutAt)) + + select { + case <-b.ctx.Done(): + timer.Stop() + return + case <-timer.C: + b.mu.Lock() + for _, event := range timeoutEvent.events { + stepRunId := event.ResourceId + + if bufferedEvents, exists := b.stepRunIdToWorkflowEvents[stepRunId]; exists { + for _, e := range bufferedEvents { + select { + case b.eventsChan <- e: + case <-b.ctx.Done(): + b.mu.Unlock() + return + } + } + + delete(b.stepRunIdToWorkflowEvents, stepRunId) + delete(b.stepRunIdToLastSeenTime, stepRunId) + b.stepRunIdToExpectedIndex[stepRunId] = -1 + } + } + b.mu.Unlock() + } + } + } +} + +func (b *StreamEventBuffer) Events() <-chan *contracts.WorkflowEvent { + return b.eventsChan +} + +func (b *StreamEventBuffer) Close() { + b.cancel() + close(b.eventsChan) + close(b.timedOutEventProducer) +} + +func (b *StreamEventBuffer) periodicCleanup() { + ticker := time.NewTicker(time.Second) + defer ticker.Stop() + + for { + select { + case <-b.ctx.Done(): + return + case <-ticker.C: + b.mu.Lock() + now := time.Now() + + for stepRunId, completionTime := range b.stepRunIdToCompletionTime { + if now.Sub(completionTime) > 
b.gracePeriod { + delete(b.stepRunIdToWorkflowEvents, stepRunId) + delete(b.stepRunIdToExpectedIndex, stepRunId) + delete(b.stepRunIdToLastSeenTime, stepRunId) + delete(b.stepRunIdToCompletionTime, stepRunId) + } + } + + b.mu.Unlock() + } + } +} + +func (b *StreamEventBuffer) AddEvent(event *contracts.WorkflowEvent) { + b.mu.Lock() + defer b.mu.Unlock() + + stepRunId := event.ResourceId + now := time.Now() + + if event.ResourceType != contracts.ResourceType_RESOURCE_TYPE_STEP_RUN || + event.EventType != contracts.ResourceEventType_RESOURCE_EVENT_TYPE_STREAM { + + if isTerminalEvent(event) { + if events, exists := b.stepRunIdToWorkflowEvents[stepRunId]; exists && len(events) > 0 { + slices.SortFunc(events, sortByEventIndex) + + for _, e := range events { + select { + case b.eventsChan <- e: + case <-b.ctx.Done(): + return + } + } + + delete(b.stepRunIdToWorkflowEvents, stepRunId) + delete(b.stepRunIdToExpectedIndex, stepRunId) + delete(b.stepRunIdToLastSeenTime, stepRunId) + } + + b.stepRunIdToCompletionTime[stepRunId] = now + } + + select { + case b.eventsChan <- event: + case <-b.ctx.Done(): + return + } + return + } + + b.stepRunIdToLastSeenTime[stepRunId] = now + + if _, exists := b.stepRunIdToExpectedIndex[stepRunId]; !exists { + // IMPORTANT: Events are zero-indexed + b.stepRunIdToExpectedIndex[stepRunId] = 0 + } + + // If EventIndex is nil, don't buffer - just release the event immediately + if event.EventIndex == nil { + select { + case b.eventsChan <- event: + case <-b.ctx.Done(): + return + } + return + } + + expectedIndex := b.stepRunIdToExpectedIndex[stepRunId] + + // IMPORTANT: if expected index is -1, it means we're starting fresh after a timeout + if expectedIndex == -1 && event.EventIndex != nil { + b.stepRunIdToExpectedIndex[stepRunId] = *event.EventIndex + expectedIndex = *event.EventIndex + } + + // For stream events: if this event is the next expected one, send it immediately + // Only buffer if it's out of order + if *event.EventIndex == expectedIndex { + if bufferedEvents, exists := b.stepRunIdToWorkflowEvents[stepRunId]; exists && len(bufferedEvents) > 0 { + b.stepRunIdToWorkflowEvents[stepRunId] = append(bufferedEvents, event) + slices.SortFunc(b.stepRunIdToWorkflowEvents[stepRunId], sortByEventIndex) + + b.sendReadyEvents(stepRunId) + } else { + b.stepRunIdToExpectedIndex[stepRunId] = expectedIndex + 1 + select { + case b.eventsChan <- event: + case <-b.ctx.Done(): + return + } + } + return + } + + if _, exists := b.stepRunIdToWorkflowEvents[stepRunId]; !exists { + b.stepRunIdToWorkflowEvents[stepRunId] = make([]*contracts.WorkflowEvent, 0) + } + + b.stepRunIdToWorkflowEvents[stepRunId] = append(b.stepRunIdToWorkflowEvents[stepRunId], event) + slices.SortFunc(b.stepRunIdToWorkflowEvents[stepRunId], sortByEventIndex) + + b.sendReadyEvents(stepRunId) + + b.scheduleTimeoutIfNeeded(stepRunId, now) +} + +func (b *StreamEventBuffer) scheduleTimeoutIfNeeded(stepRunId string, eventTime time.Time) { + if events, exists := b.stepRunIdToWorkflowEvents[stepRunId]; exists && len(events) > 0 { + timeoutAt := eventTime.Add(b.timeoutDuration) + + timeoutEvent := timeoutEvent{ + events: append([]*contracts.WorkflowEvent{}, events...), + timeoutAt: timeoutAt, + } + + select { + case b.timedOutEventProducer <- timeoutEvent: + case <-b.ctx.Done(): + return + default: + // If the channel is full, we skip this timeout scheduling + } + } +} + +func (b *StreamEventBuffer) sendReadyEvents(stepRunId string) { + events := b.stepRunIdToWorkflowEvents[stepRunId] + expectedIdx := 
b.stepRunIdToExpectedIndex[stepRunId] + + for len(events) > 0 && events[0].EventIndex != nil && *events[0].EventIndex == expectedIdx { + select { + case b.eventsChan <- events[0]: + case <-b.ctx.Done(): + return + } + events = events[1:] + expectedIdx++ + } + + b.stepRunIdToWorkflowEvents[stepRunId] = events + b.stepRunIdToExpectedIndex[stepRunId] = expectedIdx +} + // SubscribeToWorkflowEvents registers workflow events with the dispatcher func (s *DispatcherImpl) subscribeToWorkflowRunsV1(server contracts.Dispatcher_SubscribeToWorkflowRunsServer) error { tenant := server.Context().Value("tenant").(*dbsqlc.Tenant) @@ -558,6 +839,38 @@ func (s *DispatcherImpl) subscribeToWorkflowEventsByWorkflowRunIdV1(workflowRunI var mu sync.Mutex // Mutex to protect activeRunIds var sendMu sync.Mutex // Mutex to protect sending messages + streamBuffer := NewStreamEventBuffer(5 * time.Second) + defer streamBuffer.Close() + + // Handle events from the stream buffer + go func() { + for { + select { + case <-ctx.Done(): + return + case event, ok := <-streamBuffer.Events(): + if !ok { + return + } + + sendMu.Lock() + err := stream.Send(event) + sendMu.Unlock() + + if err != nil { + s.l.Error().Err(err).Msgf("could not send workflow event to client") + cancel() + return + } + + if event.Hangup { + cancel() + return + } + } + } + }() + f := func(tenantId, msgId string, payloads [][]byte) error { wg.Add(1) defer wg.Done() @@ -631,19 +944,7 @@ func (s *DispatcherImpl) subscribeToWorkflowEventsByWorkflowRunIdV1(workflowRunI // send the task to the client for _, e := range events { - sendMu.Lock() - err = stream.Send(e) - sendMu.Unlock() - - if err != nil { - cancel() - s.l.Error().Err(err).Msgf("could not send workflow event to client") - return nil - } - - if e.Hangup { - cancel() - } + streamBuffer.AddEvent(e) } return nil @@ -924,6 +1225,7 @@ func (s *DispatcherImpl) msgsToWorkflowEvent(msgId string, payloads [][]byte, fi EventType: contracts.ResourceEventType_RESOURCE_EVENT_TYPE_STREAM, EventTimestamp: timestamppb.New(payload.CreatedAt), EventPayload: string(payload.Payload), + EventIndex: payload.EventIndex, }) } case "workflow-run-finished": @@ -972,15 +1274,7 @@ func (s *DispatcherImpl) msgsToWorkflowEvent(msgId string, payloads [][]byte, fi return -1 } - if a.EventTimestamp.AsTime().Before(b.EventTimestamp.AsTime()) { - return -1 - } - - if a.EventTimestamp.AsTime().After(b.EventTimestamp.AsTime()) { - return 1 - } - - return 0 + return sortByEventIndex(a, b) }) return matches, nil diff --git a/internal/services/dispatcher/stream_event_buffer_test.go b/internal/services/dispatcher/stream_event_buffer_test.go new file mode 100644 index 000000000..2a634ce15 --- /dev/null +++ b/internal/services/dispatcher/stream_event_buffer_test.go @@ -0,0 +1,301 @@ +//go:build !e2e && !load && !rampup && !integration + +package dispatcher + +import ( + "testing" + "time" + + "github.com/hatchet-dev/hatchet/internal/services/dispatcher/contracts" + "github.com/stretchr/testify/assert" + "google.golang.org/protobuf/types/known/timestamppb" +) + +func genEvent(payload string, hangup bool, eventIndex *int64) *contracts.WorkflowEvent { + return &contracts.WorkflowEvent{ + WorkflowRunId: "test-run-id", + ResourceId: "test-step-run-id", + ResourceType: contracts.ResourceType_RESOURCE_TYPE_STEP_RUN, + EventType: contracts.ResourceEventType_RESOURCE_EVENT_TYPE_STREAM, + EventTimestamp: timestamppb.Now(), + EventPayload: payload, + Hangup: hangup, + EventIndex: eventIndex, + } +} + +func TestStreamBuffer_BasicEventRelease(t 
*testing.T) { + buffer := NewStreamEventBuffer(5 * time.Second) + defer buffer.Close() + + ix := int64(0) + + event := genEvent("test_payload", false, &ix) + + buffer.AddEvent(event) + + select { + case receivedEvent := <-buffer.Events(): + assert.Equal(t, event, receivedEvent) + case <-time.After(1 * time.Second): + t.Fatal("Expected event was not received") + } +} + +func TestStreamBuffer_OutOfOrderRelease(t *testing.T) { + buffer := NewStreamEventBuffer(5 * time.Second) + defer buffer.Close() + + ix0 := int64(0) + ix1 := int64(1) + ix2 := int64(2) + + event2 := genEvent("test_payload", false, &ix1) + + buffer.AddEvent(event2) + + select { + case <-buffer.Events(): + t.Fatal("Should not receive out-of-order event") + case <-time.After(100 * time.Millisecond): + // Expected - no event should be received + } + + event3 := genEvent("test_payload", false, &ix2) + buffer.AddEvent(event3) + + select { + case <-buffer.Events(): + t.Fatal("Should not receive out-of-order event") + case <-time.After(100 * time.Millisecond): + // Expected - no event should be received + } + + event1 := genEvent("test_payload", false, &ix0) + buffer.AddEvent(event1) + + receivedEvents := make([]*contracts.WorkflowEvent, 0, 3) + for i := 0; i < 3; i++ { + select { + case event := <-buffer.Events(): + receivedEvents = append(receivedEvents, event) + case <-time.After(1 * time.Second): + t.Fatalf("Expected to receive event %d", i) + } + } + + assert.Equal(t, 3, len(receivedEvents)) + assert.Equal(t, event1, receivedEvents[0]) + assert.Equal(t, event2, receivedEvents[1]) + assert.Equal(t, event3, receivedEvents[2]) +} + +func TestStreamBuffer_Timeout(t *testing.T) { + buffer := NewStreamEventBuffer(1 * time.Second) + defer buffer.Close() + + ix1 := int64(1) + ix2 := int64(2) + ix0 := int64(0) + + event2 := genEvent("test_payload", false, &ix1) + buffer.AddEvent(event2) + + select { + case <-buffer.Events(): + t.Fatal("Should not receive out-of-order event") + case <-time.After(100 * time.Millisecond): + // Expected - no event should be received + } + + event3 := genEvent("test_payload", false, &ix2) + buffer.AddEvent(event3) + + select { + case <-buffer.Events(): + t.Fatal("Should not receive out-of-order event") + case <-time.After(100 * time.Millisecond): + // Expected - no event should be received + } + + time.Sleep(2 * time.Second) + + receivedEvents := make([]*contracts.WorkflowEvent, 0, 2) + for i := 0; i < 2; i++ { + select { + case event := <-buffer.Events(): + receivedEvents = append(receivedEvents, event) + case <-time.After(1 * time.Second): + t.Fatalf("Expected to receive timed out event %d", i) + } + } + + assert.Equal(t, 2, len(receivedEvents)) + assert.Equal(t, event2, receivedEvents[0]) + assert.Equal(t, event3, receivedEvents[1]) + + event1 := genEvent("test_payload", false, &ix0) + buffer.AddEvent(event1) + + // This should be released immediately (fresh sequence after timeout) + select { + case receivedEvent := <-buffer.Events(): + assert.Equal(t, event1, receivedEvent) + case <-time.After(1 * time.Second): + t.Fatal("Expected event was not received") + } +} + +func TestStreamBuffer_TimeoutWithSubsequentOrdering(t *testing.T) { + buffer := NewStreamEventBuffer(500 * time.Millisecond) + defer buffer.Close() + + ix1 := int64(1) + ix2 := int64(2) + ix5 := int64(5) + ix6 := int64(6) + + event1 := genEvent("payload1", false, &ix1) + buffer.AddEvent(event1) + + select { + case <-buffer.Events(): + t.Fatal("Should not receive out-of-order event") + case <-time.After(100 * time.Millisecond): + // Expected - 
no event should be received + } + + event2 := genEvent("payload2", false, &ix2) + buffer.AddEvent(event2) + + select { + case <-buffer.Events(): + t.Fatal("Should not receive out-of-order event") + case <-time.After(100 * time.Millisecond): + // Expected - no event should be received + } + + time.Sleep(1 * time.Second) + + receivedEvents := make([]*contracts.WorkflowEvent, 0, 2) + for i := 0; i < 2; i++ { + select { + case event := <-buffer.Events(): + receivedEvents = append(receivedEvents, event) + case <-time.After(1 * time.Second): + t.Fatalf("Expected to receive timed out event %d", i) + } + } + + assert.Equal(t, 2, len(receivedEvents)) + assert.Equal(t, event1, receivedEvents[0]) + assert.Equal(t, event2, receivedEvents[1]) + + // Now start a new sequence - event 5 should start a fresh sequence + event5 := genEvent("payload5", false, &ix5) + buffer.AddEvent(event5) + + select { + case receivedEvent := <-buffer.Events(): + assert.Equal(t, event5, receivedEvent) + case <-time.After(1 * time.Second): + t.Fatal("Expected event was not received") + } + + // Event 6 should be released immediately as it's the next in sequence + event6 := genEvent("payload6", false, &ix6) + buffer.AddEvent(event6) + + select { + case receivedEvent := <-buffer.Events(): + assert.Equal(t, event6, receivedEvent) + case <-time.After(1 * time.Second): + t.Fatal("Expected event was not received") + } +} + +func TestStreamBuffer_HangupHandling(t *testing.T) { + buffer := NewStreamEventBuffer(500 * time.Millisecond) + defer buffer.Close() + + ix0 := int64(0) + ix1 := int64(1) + ix2 := int64(2) + ix3 := int64(3) + + event2 := genEvent("first-event", false, &ix1) + event3 := genEvent("second-event", false, &ix2) + + buffer.AddEvent(event2) + + select { + case <-buffer.Events(): + t.Fatal("Should not receive out-of-order event") + case <-time.After(100 * time.Millisecond): + // Expected - no event should be received + } + + buffer.AddEvent(event3) + + select { + case <-buffer.Events(): + t.Fatal("Should not receive out-of-order event") + case <-time.After(100 * time.Millisecond): + // Expected - no event should be received + } + + eventHangup := genEvent("hangup-event", true, &ix3) + buffer.AddEvent(eventHangup) + + select { + case <-buffer.Events(): + t.Fatal("Should not receive out-of-order event") + case <-time.After(100 * time.Millisecond): + // Expected - no event should be received + } + + event0 := genEvent("first-event", false, &ix0) + buffer.AddEvent(event0) + + receivedEvents := make([]*contracts.WorkflowEvent, 0, 4) + for i := 0; i < 4; i++ { + select { + case event := <-buffer.Events(): + receivedEvents = append(receivedEvents, event) + case <-time.After(1 * time.Second): + t.Fatalf("Expected to receive event %d", i) + } + } + + assert.Equal(t, 4, len(receivedEvents)) + assert.Equal(t, event0, receivedEvents[0]) + assert.Equal(t, event2, receivedEvents[1]) + assert.Equal(t, event3, receivedEvents[2]) + assert.Equal(t, eventHangup, receivedEvents[3]) +} + +func TestStreamBuffer_NoIndexSent(t *testing.T) { + buffer := NewStreamEventBuffer(500 * time.Millisecond) + defer buffer.Close() + + event1 := genEvent("first-event", false, nil) + event2 := genEvent("second-event", false, nil) + + buffer.AddEvent(event2) + + select { + case receivedEvent := <-buffer.Events(): + assert.Equal(t, event2, receivedEvent) + case <-time.After(1 * time.Second): + t.Fatal("Expected event was not received") + } + + buffer.AddEvent(event1) + + select { + case receivedEvent := <-buffer.Events(): + assert.Equal(t, event1, 
receivedEvent) + case <-time.After(1 * time.Second): + t.Fatal("Expected event was not received") + } +} diff --git a/internal/services/ingestor/contracts/events.pb.go b/internal/services/ingestor/contracts/events.pb.go index d061f7a74..a568d48d6 100644 --- a/internal/services/ingestor/contracts/events.pb.go +++ b/internal/services/ingestor/contracts/events.pb.go @@ -313,7 +313,8 @@ type PutStreamEventRequest struct { // the stream event message Message []byte `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"` // associated stream event metadata - Metadata string `protobuf:"bytes,5,opt,name=metadata,proto3" json:"metadata,omitempty"` + Metadata string `protobuf:"bytes,5,opt,name=metadata,proto3" json:"metadata,omitempty"` + EventIndex *int64 `protobuf:"varint,6,opt,name=eventIndex,proto3,oneof" json:"eventIndex,omitempty"` } func (x *PutStreamEventRequest) Reset() { @@ -376,6 +377,13 @@ func (x *PutStreamEventRequest) GetMetadata() string { return "" } +func (x *PutStreamEventRequest) GetEventIndex() int64 { + if x != nil && x.EventIndex != nil { + return *x.EventIndex + } + return 0 +} + type PutStreamEventResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -645,7 +653,7 @@ var file_events_proto_rawDesc = []byte{ 0x01, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x42, 0x11, 0x0a, 0x0f, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x74, 0x72, 0x79, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x10, 0x0a, 0x0e, 0x50, 0x75, 0x74, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0xa5, 0x01, 0x0a, 0x15, 0x50, 0x75, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, + 0x65, 0x22, 0xd9, 0x01, 0x0a, 0x15, 0x50, 0x75, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x74, 0x65, 0x70, 0x52, 0x75, 0x6e, 0x49, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x74, 0x65, 0x70, 0x52, 0x75, 0x6e, 0x49, 0x64, 0x12, 0x38, 0x0a, 0x09, 0x63, 0x72, 0x65, @@ -655,56 +663,59 @@ var file_events_proto_rawDesc = []byte{ 0x64, 0x41, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x18, 0x0a, 0x16, 0x50, 0x75, 0x74, - 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x41, 0x0a, 0x14, 0x42, 0x75, 0x6c, 0x6b, 0x50, 0x75, 0x73, 0x68, 0x45, - 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x29, 0x0a, 0x06, 0x65, - 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x50, 0x75, - 0x73, 0x68, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x06, - 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xa1, 0x02, 0x0a, 0x10, 0x50, 0x75, 0x73, 0x68, 0x45, - 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, - 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x18, 0x0a, - 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, - 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x42, 0x0a, 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 
0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0e, 0x65, 0x76, 0x65, - 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x33, 0x0a, 0x12, 0x61, + 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x23, 0x0a, 0x0a, 0x65, 0x76, 0x65, + 0x6e, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, + 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x88, 0x01, 0x01, 0x42, 0x0d, + 0x0a, 0x0b, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x22, 0x18, 0x0a, + 0x16, 0x50, 0x75, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, 0x0a, 0x14, 0x42, 0x75, 0x6c, 0x6b, 0x50, + 0x75, 0x73, 0x68, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x29, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x11, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xa1, 0x02, 0x0a, 0x10, 0x50, + 0x75, 0x73, 0x68, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x42, 0x0a, 0x0e, 0x65, + 0x76, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, + 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, + 0x33, 0x0a, 0x12, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x12, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x12, 0x61, 0x64, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x88, 0x01, 0x01, - 0x12, 0x1f, 0x0a, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x05, 0x48, 0x01, 0x52, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x88, 0x01, - 0x01, 0x12, 0x19, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, - 0x48, 0x02, 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x88, 0x01, 0x01, 0x42, 0x15, 0x0a, 0x13, - 0x5f, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, - 0x61, 0x74, 0x61, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, - 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x22, 0x2e, 0x0a, 0x12, 0x52, 0x65, - 0x70, 0x6c, 0x61, 0x79, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x18, 0x0a, 0x07, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x07, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x32, 0x88, 0x02, 0x0a, 0x0d, 0x45, - 
0x76, 0x65, 0x6e, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x23, 0x0a, 0x04, - 0x50, 0x75, 0x73, 0x68, 0x12, 0x11, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x45, 0x76, 0x65, 0x6e, 0x74, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x06, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x22, - 0x00, 0x12, 0x2c, 0x0a, 0x08, 0x42, 0x75, 0x6c, 0x6b, 0x50, 0x75, 0x73, 0x68, 0x12, 0x15, 0x2e, - 0x42, 0x75, 0x6c, 0x6b, 0x50, 0x75, 0x73, 0x68, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x07, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x00, 0x12, - 0x32, 0x0a, 0x11, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x45, - 0x76, 0x65, 0x6e, 0x74, 0x12, 0x13, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x45, 0x76, 0x65, - 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x06, 0x2e, 0x45, 0x76, 0x65, 0x6e, - 0x74, 0x22, 0x00, 0x12, 0x2b, 0x0a, 0x06, 0x50, 0x75, 0x74, 0x4c, 0x6f, 0x67, 0x12, 0x0e, 0x2e, - 0x50, 0x75, 0x74, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0f, 0x2e, - 0x50, 0x75, 0x74, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x12, 0x43, 0x0a, 0x0e, 0x50, 0x75, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x76, 0x65, - 0x6e, 0x74, 0x12, 0x16, 0x2e, 0x50, 0x75, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x76, - 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x50, 0x75, 0x74, - 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x45, 0x5a, 0x43, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x74, 0x63, 0x68, 0x65, 0x74, 0x2d, 0x64, 0x65, 0x76, 0x2f, - 0x68, 0x61, 0x74, 0x63, 0x68, 0x65, 0x74, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, - 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x69, 0x6e, 0x67, 0x65, 0x73, 0x74, - 0x6f, 0x72, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x73, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, + 0x61, 0x88, 0x01, 0x01, 0x12, 0x1f, 0x0a, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x48, 0x01, 0x52, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, + 0x74, 0x79, 0x88, 0x01, 0x01, 0x12, 0x19, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x09, 0x48, 0x02, 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x88, 0x01, 0x01, + 0x42, 0x15, 0x0a, 0x13, 0x5f, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x70, 0x72, 0x69, 0x6f, + 0x72, 0x69, 0x74, 0x79, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x22, 0x2e, + 0x0a, 0x12, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x32, 0x88, + 0x02, 0x0a, 0x0d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x12, 0x23, 0x0a, 0x04, 0x50, 0x75, 0x73, 0x68, 0x12, 0x11, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x45, + 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x06, 0x2e, 0x45, 0x76, + 0x65, 0x6e, 0x74, 0x22, 0x00, 0x12, 0x2c, 0x0a, 0x08, 0x42, 0x75, 0x6c, 0x6b, 0x50, 0x75, 0x73, + 0x68, 0x12, 0x15, 0x2e, 0x42, 0x75, 0x6c, 0x6b, 0x50, 0x75, 0x73, 0x68, 0x45, 0x76, 0x65, 0x6e, + 
0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x07, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, + 0x73, 0x22, 0x00, 0x12, 0x32, 0x0a, 0x11, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x53, 0x69, 0x6e, + 0x67, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x13, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x61, + 0x79, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x06, 0x2e, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x22, 0x00, 0x12, 0x2b, 0x0a, 0x06, 0x50, 0x75, 0x74, 0x4c, 0x6f, + 0x67, 0x12, 0x0e, 0x2e, 0x50, 0x75, 0x74, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x0f, 0x2e, 0x50, 0x75, 0x74, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x43, 0x0a, 0x0e, 0x50, 0x75, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x16, 0x2e, 0x50, 0x75, 0x74, 0x53, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, + 0x2e, 0x50, 0x75, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x45, 0x5a, 0x43, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x74, 0x63, 0x68, 0x65, 0x74, 0x2d, + 0x64, 0x65, 0x76, 0x2f, 0x68, 0x61, 0x74, 0x63, 0x68, 0x65, 0x74, 0x2f, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x69, 0x6e, + 0x67, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x73, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -873,6 +884,7 @@ func file_events_proto_init() { } file_events_proto_msgTypes[0].OneofWrappers = []interface{}{} file_events_proto_msgTypes[2].OneofWrappers = []interface{}{} + file_events_proto_msgTypes[4].OneofWrappers = []interface{}{} file_events_proto_msgTypes[7].OneofWrappers = []interface{}{} type x struct{} out := protoimpl.TypeBuilder{ diff --git a/internal/services/ingestor/server_v1.go b/internal/services/ingestor/server_v1.go index 2f37867f4..d32d8c2c2 100644 --- a/internal/services/ingestor/server_v1.go +++ b/internal/services/ingestor/server_v1.go @@ -37,6 +37,7 @@ func (i *IngestorImpl) putStreamEventV1(ctx context.Context, tenant *dbsqlc.Tena StepRunId: req.StepRunId, CreatedAt: req.CreatedAt.AsTime(), Payload: req.Message, + EventIndex: req.EventIndex, }, ) diff --git a/internal/services/shared/tasktypes/v1/event.go b/internal/services/shared/tasktypes/v1/event.go index dfb74890a..6d9fcbf9b 100644 --- a/internal/services/shared/tasktypes/v1/event.go +++ b/internal/services/shared/tasktypes/v1/event.go @@ -32,4 +32,5 @@ type StreamEventPayload struct { CreatedAt time.Time `json:"created_at" validate:"required"` Payload []byte `json:"payload"` RetryCount *int32 `json:"retry_count,omitempty"` + EventIndex *int64 `json:"event_index"` } diff --git a/sdks/python/CHANGELOG.md b/sdks/python/CHANGELOG.md index d268161ba..22f37c09c 100644 --- a/sdks/python/CHANGELOG.md +++ b/sdks/python/CHANGELOG.md @@ -5,7 +5,26 @@ All notable changes to Hatchet's Python SDK will be documented in this changelog The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
-## [1.12.2] - 2025-06-25 +## [1.13.0] - 2025-06-25 + +### Added + +- Documentation for the `Context` classes +- Allows a worker to be terminated after a set number of tasks via the `terminate_worker_after_num_tasks` config option + +### Changed + +- Adds a number of helpful Ruff linting rules +- `DedupeViolationErr` is now `DedupeViolationError` +- Fixes the events documentation so that it correctly includes a skipped-run example +- Changes mutable default arguments (such as `[]`) on many methods to `None` +- Changes `JSONSerializableMapping` from `Mapping` to `dict` +- Handles potential bugs related to `asyncio` tasks being garbage collected +- Improves exception printing with an `ExceptionGroup` implementation +- Fixes a bug with namespacing of user event conditions where the namespace was not respected, causing the task waiting on the event to hang +- Fixes a memory leak in streaming and logging, along with some issues in log capture + +## [1.12.3] - 2025-06-25 ### Changed diff --git a/sdks/python/apply_patches.py b/sdks/python/apply_patches.py index ee8725ed7..9026f883b 100644 --- a/sdks/python/apply_patches.py +++ b/sdks/python/apply_patches.py @@ -1,7 +1,7 @@ import re +from collections.abc import Callable from copy import deepcopy from pathlib import Path -from typing import Callable def prepend_import(content: str, import_statement: str) -> str: diff --git a/sdks/python/conftest.py b/sdks/python/conftest.py index 3bcd28155..7388af6c0 100644 --- a/sdks/python/conftest.py +++ b/sdks/python/conftest.py @@ -1,5 +1,6 @@ +from collections.abc import AsyncGenerator, Generator from subprocess import Popen -from typing import AsyncGenerator, Generator, cast +from typing import cast import pytest import pytest_asyncio diff --git a/sdks/python/docs/context.md b/sdks/python/docs/context.md new file mode 100644 index 000000000..3ade120b4 --- /dev/null +++ b/sdks/python/docs/context.md @@ -0,0 +1,47 @@ +# Context + +The Hatchet Context class provides helper methods and useful data to tasks at runtime. It is passed as the second argument to all tasks and durable tasks.
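For illustration, a minimal sketch of a task that receives the `Context` as its second argument (the task name, logged message, and return value are made up for this example, and the client is assumed to be configured via environment variables):

```python
from hatchet_sdk import Context, EmptyModel, Hatchet

hatchet = Hatchet()


# The context is always passed as the second argument to the task at runtime.
@hatchet.task()
def greet(input: EmptyModel, ctx: Context) -> dict[str, str]:
    ctx.log("task started")      # push a log line for this run to Hatchet
    print(ctx.workflow_run_id)   # metadata about the current run is available on the context
    return {"status": "done"}
```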
+ +There are two types of context classes you'll encounter: + +* `Context` - The standard context for regular tasks with methods for logging, task output retrieval, cancellation, and more +* `DurableContext` - An extended context for durable tasks that includes additional methods for durable execution like `aio_wait_for` and `aio_sleep_for` + + +## Context + +::: context.context.Context + options: + inherited_members: false + members: + - was_skipped + - task_output + - was_triggered_by_event + - workflow_input + - lifespan + - workflow_run_id + - cancel + - aio_cancel + - done + - log + - release_slot + - put_stream + - refresh_timeout + - retry_count + - attempt_number + - additional_metadata + - parent_workflow_run_id + - priority + - workflow_id + - workflow_version_id + - task_run_errors + - fetch_task_run_error + +## DurableContext + +::: context.context.DurableContext + options: + inherited_members: true + members: + - aio_wait_for + - aio_sleep_for diff --git a/sdks/python/examples/concurrency_limit_rr/worker.py b/sdks/python/examples/concurrency_limit_rr/worker.py index 85a8ee95b..e98d77143 100644 --- a/sdks/python/examples/concurrency_limit_rr/worker.py +++ b/sdks/python/examples/concurrency_limit_rr/worker.py @@ -34,7 +34,6 @@ def step1(input: WorkflowInput, ctx: Context) -> None: print("starting step1") time.sleep(2) print("finished step1") - pass def main() -> None: diff --git a/sdks/python/examples/dedupe/worker.py b/sdks/python/examples/dedupe/worker.py index 82e063e9a..68c99acbb 100644 --- a/sdks/python/examples/dedupe/worker.py +++ b/sdks/python/examples/dedupe/worker.py @@ -3,7 +3,7 @@ from datetime import timedelta from typing import Any from hatchet_sdk import Context, EmptyModel, Hatchet, TriggerWorkflowOptions -from hatchet_sdk.clients.admin import DedupeViolationErr +from hatchet_sdk.exceptions import DedupeViolationError hatchet = Hatchet(debug=True) @@ -20,15 +20,13 @@ async def spawn(input: EmptyModel, ctx: Context) -> dict[str, list[Any]]: for i in range(2): try: results.append( - ( - dedupe_child_wf.aio_run( - options=TriggerWorkflowOptions( - additional_metadata={"dedupe": "test"}, key=f"child{i}" - ), - ) + dedupe_child_wf.aio_run( + options=TriggerWorkflowOptions( + additional_metadata={"dedupe": "test"}, key=f"child{i}" + ), ) ) - except DedupeViolationErr as e: + except DedupeViolationError as e: print(f"dedupe violation {e}") continue diff --git a/sdks/python/examples/durable/test_durable.py b/sdks/python/examples/durable/test_durable.py index 193add56a..4698df37a 100644 --- a/sdks/python/examples/durable/test_durable.py +++ b/sdks/python/examples/durable/test_durable.py @@ -1,5 +1,4 @@ import asyncio -import os import pytest @@ -7,10 +6,6 @@ from examples.durable.worker import EVENT_KEY, SLEEP_TIME, durable_workflow from hatchet_sdk import Hatchet -@pytest.mark.skipif( - os.getenv("CI", "false").lower() == "true", - reason="Skipped in CI because of unreliability", -) @pytest.mark.asyncio(loop_scope="session") async def test_durable(hatchet: Hatchet) -> None: ref = durable_workflow.run_no_wait() @@ -28,6 +23,12 @@ async def test_durable(hatchet: Hatchet) -> None: active_workers = [w for w in workers.rows if w.status == "ACTIVE"] assert len(active_workers) == 2 - assert any(w.name == "e2e-test-worker" for w in active_workers) - assert any(w.name.endswith("e2e-test-worker_durable") for w in active_workers) + assert any( + w.name == hatchet.config.apply_namespace("e2e-test-worker") + for w in active_workers + ) + assert any( + w.name == 
hatchet.config.apply_namespace("e2e-test-worker_durable") + for w in active_workers + ) assert result["durable_task"]["status"] == "success" diff --git a/sdks/python/examples/events/filter.py b/sdks/python/examples/events/filter.py index 17a09c125..fc4400276 100644 --- a/sdks/python/examples/events/filter.py +++ b/sdks/python/examples/events/filter.py @@ -32,7 +32,7 @@ hatchet.event.push( hatchet.event.push( event_key=EVENT_KEY, payload={ - "should_skip": True, + "should_skip": False, }, options=PushEventOptions( scope="foobarbaz", diff --git a/sdks/python/examples/events/test_event.py b/sdks/python/examples/events/test_event.py index aa6de1855..9abb531cb 100644 --- a/sdks/python/examples/events/test_event.py +++ b/sdks/python/examples/events/test_event.py @@ -1,8 +1,9 @@ import asyncio import json +from collections.abc import AsyncGenerator from contextlib import asynccontextmanager from datetime import datetime, timedelta, timezone -from typing import AsyncGenerator, cast +from typing import cast from uuid import uuid4 import pytest @@ -255,7 +256,9 @@ async def test_async_event_bulk_push(hatchet: Hatchet) -> None: namespace = "bulk-test" # Check that the returned events match the original events - for original_event, returned_event in zip(sorted_events, sorted_returned_events): + for original_event, returned_event in zip( + sorted_events, sorted_returned_events, strict=False + ): assert returned_event.key == namespace + original_event.key diff --git a/sdks/python/examples/events/worker.py b/sdks/python/examples/events/worker.py index 947aaafc9..8d35c166c 100644 --- a/sdks/python/examples/events/worker.py +++ b/sdks/python/examples/events/worker.py @@ -46,7 +46,7 @@ event_workflow_with_filter = hatchet.workflow( def task(input: EventWorkflowInput, ctx: Context) -> dict[str, str]: print("event received") - return dict(ctx.filter_payload) + return ctx.filter_payload # > Accessing the filter payload diff --git a/sdks/python/examples/lifespans/simple.py b/sdks/python/examples/lifespans/simple.py index 07a511b16..62a6c511e 100644 --- a/sdks/python/examples/lifespans/simple.py +++ b/sdks/python/examples/lifespans/simple.py @@ -1,6 +1,7 @@ # > Lifespan -from typing import AsyncGenerator, cast +from collections.abc import AsyncGenerator +from typing import cast from pydantic import BaseModel diff --git a/sdks/python/examples/lifespans/worker.py b/sdks/python/examples/lifespans/worker.py index ff61b6fe2..33bc85128 100644 --- a/sdks/python/examples/lifespans/worker.py +++ b/sdks/python/examples/lifespans/worker.py @@ -1,4 +1,5 @@ -from typing import AsyncGenerator, cast +from collections.abc import AsyncGenerator +from typing import cast from uuid import UUID from psycopg_pool import ConnectionPool diff --git a/sdks/python/examples/logger/workflow.py b/sdks/python/examples/logger/workflow.py index 34da7e292..da2eaef47 100644 --- a/sdks/python/examples/logger/workflow.py +++ b/sdks/python/examples/logger/workflow.py @@ -16,7 +16,7 @@ logging_workflow = hatchet.workflow( @logging_workflow.task() def root_logger(input: EmptyModel, ctx: Context) -> dict[str, str]: for i in range(12): - logger.info("executed step1 - {}".format(i)) + logger.info(f"executed step1 - {i}") logger.info({"step1": "step1"}) time.sleep(0.1) @@ -32,7 +32,7 @@ def root_logger(input: EmptyModel, ctx: Context) -> dict[str, str]: @logging_workflow.task() def context_logger(input: EmptyModel, ctx: Context) -> dict[str, str]: for i in range(12): - ctx.log("executed step1 - {}".format(i)) + ctx.log(f"executed step1 - {i}") 
ctx.log({"step1": "step1"}) time.sleep(0.1) diff --git a/sdks/python/examples/migration_guides/mergent.py b/sdks/python/examples/migration_guides/mergent.py index 9b06cdd15..611051841 100644 --- a/sdks/python/examples/migration_guides/mergent.py +++ b/sdks/python/examples/migration_guides/mergent.py @@ -1,5 +1,6 @@ +from collections.abc import Mapping from datetime import datetime, timedelta, timezone -from typing import Any, Dict, List, Mapping +from typing import Any import requests from pydantic import BaseModel @@ -10,13 +11,13 @@ from hatchet_sdk.context.context import Context from .hatchet_client import hatchet -async def process_image(image_url: str, filters: List[str]) -> Dict[str, Any]: +async def process_image(image_url: str, filters: list[str]) -> dict[str, Any]: # Do some image processing return {"url": image_url, "size": 100, "format": "png"} # > Before (Mergent) -async def process_image_task(request: Any) -> Dict[str, Any]: +async def process_image_task(request: Any) -> dict[str, Any]: image_url = request.json["image_url"] filters = request.json["filters"] try: @@ -33,12 +34,12 @@ async def process_image_task(request: Any) -> Dict[str, Any]: # > After (Hatchet) class ImageProcessInput(BaseModel): image_url: str - filters: List[str] + filters: list[str] class ImageProcessOutput(BaseModel): processed_url: str - metadata: Dict[str, Any] + metadata: dict[str, Any] @hatchet.task( diff --git a/sdks/python/examples/non_retryable/test_no_retry.py b/sdks/python/examples/non_retryable/test_no_retry.py index 82a58a72a..f1414392e 100644 --- a/sdks/python/examples/non_retryable/test_no_retry.py +++ b/sdks/python/examples/non_retryable/test_no_retry.py @@ -1,3 +1,5 @@ +import asyncio + import pytest from examples.non_retryable.worker import ( @@ -9,6 +11,7 @@ from examples.non_retryable.worker import ( from hatchet_sdk import Hatchet from hatchet_sdk.clients.rest.models.v1_task_event_type import V1TaskEventType from hatchet_sdk.clients.rest.models.v1_workflow_run_details import V1WorkflowRunDetails +from hatchet_sdk.exceptions import FailedTaskRunExceptionGroup def find_id(runs: V1WorkflowRunDetails, match: str) -> str: @@ -19,9 +22,28 @@ def find_id(runs: V1WorkflowRunDetails, match: str) -> str: async def test_no_retry(hatchet: Hatchet) -> None: ref = await non_retryable_workflow.aio_run_no_wait() - with pytest.raises(Exception, match="retry"): + with pytest.raises(FailedTaskRunExceptionGroup) as exc_info: await ref.aio_result() + exception_group = exc_info.value + + assert len(exception_group.exceptions) == 2 + + exc_text = [e.exc for e in exception_group.exceptions] + + non_retries = [ + e + for e in exc_text + if "This task should retry because it's not a NonRetryableException" in e + ] + + other_errors = [e for e in exc_text if "This task should not retry" in e] + + assert len(non_retries) == 1 + assert len(other_errors) == 1 + + await asyncio.sleep(3) + runs = await hatchet.runs.aio_get(ref.workflow_run_id) task_to_id = { task: find_id(runs, task.name) @@ -40,9 +62,7 @@ async def test_no_retry(hatchet: Hatchet) -> None: assert len(retrying_events) == 1 """The task id of the retrying events should match the tasks that are retried""" - assert {e.task_id for e in retrying_events} == { - task_to_id[should_retry_wrong_exception_type], - } + assert retrying_events[0].task_id == task_to_id[should_retry_wrong_exception_type] """Three failed events should emit, one each for the two failing initial runs and one for the retry.""" assert ( diff --git 
a/sdks/python/examples/opentelemetry_instrumentation/langfuse/client.py b/sdks/python/examples/opentelemetry_instrumentation/langfuse/client.py index acb017420..0a9508d3c 100644 --- a/sdks/python/examples/opentelemetry_instrumentation/langfuse/client.py +++ b/sdks/python/examples/opentelemetry_instrumentation/langfuse/client.py @@ -1,8 +1,8 @@ import base64 import os -from langfuse import Langfuse # type: ignore[import-untyped] -from langfuse.openai import AsyncOpenAI # type: ignore[import-untyped] +from langfuse import Langfuse # type: ignore +from langfuse.openai import AsyncOpenAI # type: ignore # > Configure Langfuse LANGFUSE_AUTH = base64.b64encode( diff --git a/sdks/python/examples/opentelemetry_instrumentation/langfuse/trigger.py b/sdks/python/examples/opentelemetry_instrumentation/langfuse/trigger.py index 0ee6a788a..723797e3a 100644 --- a/sdks/python/examples/opentelemetry_instrumentation/langfuse/trigger.py +++ b/sdks/python/examples/opentelemetry_instrumentation/langfuse/trigger.py @@ -1,6 +1,8 @@ +# type: ignore + import asyncio -from langfuse import get_client # type: ignore[import-untyped] +from langfuse import get_client # type: ignore from opentelemetry.trace import StatusCode from examples.opentelemetry_instrumentation.langfuse.worker import langfuse_task diff --git a/sdks/python/examples/priority/test_priority.py b/sdks/python/examples/priority/test_priority.py index 113ac20ca..7a5cbe10a 100644 --- a/sdks/python/examples/priority/test_priority.py +++ b/sdks/python/examples/priority/test_priority.py @@ -1,8 +1,9 @@ import asyncio +from collections.abc import AsyncGenerator from datetime import datetime, timedelta, timezone from random import choice from subprocess import Popen -from typing import Any, AsyncGenerator, Literal +from typing import Any, Literal from uuid import uuid4 import pytest @@ -58,7 +59,7 @@ async def dummy_runs() -> None: await asyncio.sleep(3) - return None + return @pytest.mark.parametrize( diff --git a/sdks/python/examples/streaming/async_stream.py b/sdks/python/examples/streaming/async_stream.py index 289b57c08..ab3d6ba02 100644 --- a/sdks/python/examples/streaming/async_stream.py +++ b/sdks/python/examples/streaming/async_stream.py @@ -1,19 +1,16 @@ import asyncio -from examples.streaming.worker import streaming_workflow +from examples.streaming.worker import stream_task +from hatchet_sdk.clients.listeners.run_event_listener import StepRunEventType async def main() -> None: - ref = await streaming_workflow.aio_run_no_wait() - await asyncio.sleep(1) + ref = await stream_task.aio_run_no_wait() - stream = ref.stream() - - async for chunk in stream: - print(chunk) + async for chunk in ref.stream(): + if chunk.type == StepRunEventType.STEP_RUN_EVENT_TYPE_STREAM: + print(chunk.payload, flush=True, end="") if __name__ == "__main__": - import asyncio - asyncio.run(main()) diff --git a/sdks/python/examples/streaming/sync_stream.py b/sdks/python/examples/streaming/sync_stream.py index 8566de7de..888777b8d 100644 --- a/sdks/python/examples/streaming/sync_stream.py +++ b/sdks/python/examples/streaming/sync_stream.py @@ -1,10 +1,10 @@ import time -from examples.streaming.worker import streaming_workflow +from examples.streaming.worker import stream_task def main() -> None: - ref = streaming_workflow.run_no_wait() + ref = stream_task.run_no_wait() time.sleep(1) stream = ref.stream() diff --git a/sdks/python/examples/streaming/test_streaming.py b/sdks/python/examples/streaming/test_streaming.py new file mode 100644 index 000000000..e61141a18 --- /dev/null +++ 
b/sdks/python/examples/streaming/test_streaming.py @@ -0,0 +1,47 @@ +import asyncio +from datetime import datetime, timedelta, timezone +from subprocess import Popen +from typing import Any + +import pytest + +from examples.streaming.worker import chunks, stream_task +from hatchet_sdk import Hatchet +from hatchet_sdk.clients.listeners.run_event_listener import ( + StepRunEvent, + StepRunEventType, +) + + +@pytest.mark.parametrize( + "on_demand_worker", + [ + ( + ["poetry", "run", "python", "examples/streaming/worker.py", "--slots", "1"], + 8008, + ) + ], + indirect=True, +) +@pytest.mark.parametrize("execution_number", range(5)) # run test multiple times +@pytest.mark.asyncio(loop_scope="session") +async def test_streaming_ordering_and_completeness( + execution_number: int, + hatchet: Hatchet, + on_demand_worker: Popen[Any], +) -> None: + ref = await stream_task.aio_run_no_wait() + + ix = 0 + anna_karenina = "" + + async for chunk in ref.stream(): + if chunk.type == StepRunEventType.STEP_RUN_EVENT_TYPE_STREAM: + assert chunks[ix] == chunk.payload + ix += 1 + anna_karenina += chunk.payload + + assert ix == len(chunks) + assert anna_karenina == "".join(chunks) + + await ref.aio_result() diff --git a/sdks/python/examples/streaming/worker.py b/sdks/python/examples/streaming/worker.py index aba03bf8e..f9a41d8c2 100644 --- a/sdks/python/examples/streaming/worker.py +++ b/sdks/python/examples/streaming/worker.py @@ -1,23 +1,39 @@ import asyncio +from datetime import datetime, timedelta, timezone +from typing import Generator from hatchet_sdk import Context, EmptyModel, Hatchet -hatchet = Hatchet(debug=True) +hatchet = Hatchet(debug=False) # > Streaming -streaming_workflow = hatchet.workflow(name="StreamingWorkflow") +content = """ +Happy families are all alike; every unhappy family is unhappy in its own way. + +Everything was in confusion in the Oblonskys' house. The wife had discovered that the husband was carrying on an intrigue with a French girl, who had been a governess in their family, and she had announced to her husband that she could not go on living in the same house with him. This position of affairs had now lasted three days, and not only the husband and wife themselves, but all the members of their family and household, were painfully conscious of it. Every person in the house felt that there was so sense in their living together, and that the stray people brought together by chance in any inn had more in common with one another than they, the members of the family and household of the Oblonskys. The wife did not leave her own room, the husband had not been at home for three days. The children ran wild all over the house; the English governess quarreled with the housekeeper, and wrote to a friend asking her to look out for a new situation for her; the man-cook had walked off the day before just at dinner time; the kitchen-maid, and the coachman had given warning. 
+""" -@streaming_workflow.task() -async def step1(input: EmptyModel, ctx: Context) -> None: - for i in range(10): - await asyncio.sleep(1) - ctx.put_stream(f"Processing {i}") +def create_chunks(content: str, n: int) -> Generator[str, None, None]: + for i in range(0, len(content), n): + yield content[i : i + n] + + +chunks = list(create_chunks(content, 10)) + + +@hatchet.task() +async def stream_task(input: EmptyModel, ctx: Context) -> None: + await asyncio.sleep(2) + + for chunk in chunks: + ctx.put_stream(chunk) + await asyncio.sleep(0.05) def main() -> None: - worker = hatchet.worker("test-worker", workflows=[streaming_workflow]) + worker = hatchet.worker("test-worker", workflows=[stream_task]) worker.start() diff --git a/sdks/python/examples/timeout/test_timeout.py b/sdks/python/examples/timeout/test_timeout.py index 1942716f3..1f46e52a1 100644 --- a/sdks/python/examples/timeout/test_timeout.py +++ b/sdks/python/examples/timeout/test_timeout.py @@ -7,7 +7,10 @@ from examples.timeout.worker import refresh_timeout_wf, timeout_wf async def test_execution_timeout() -> None: run = timeout_wf.run_no_wait() - with pytest.raises(Exception, match="(Task exceeded timeout|TIMED_OUT)"): + with pytest.raises( + Exception, + match="(Task exceeded timeout|TIMED_OUT|Workflow run .* failed with multiple errors)", + ): await run.aio_result() diff --git a/sdks/python/examples/waits/test_waits.py b/sdks/python/examples/waits/test_waits.py index f0830fb57..2b91c5fb4 100644 --- a/sdks/python/examples/waits/test_waits.py +++ b/sdks/python/examples/waits/test_waits.py @@ -1,5 +1,4 @@ import asyncio -import os import pytest @@ -7,10 +6,6 @@ from examples.waits.worker import task_condition_workflow from hatchet_sdk import Hatchet -@pytest.mark.skipif( - os.getenv("CI", "false").lower() == "true", - reason="Skipped in CI because of unreliability", -) @pytest.mark.asyncio(loop_scope="session") async def test_waits(hatchet: Hatchet) -> None: diff --git a/sdks/python/hatchet_sdk/__init__.py b/sdks/python/hatchet_sdk/__init__.py index ff0c341cf..9dfb23922 100644 --- a/sdks/python/hatchet_sdk/__init__.py +++ b/sdks/python/hatchet_sdk/__init__.py @@ -1,5 +1,4 @@ from hatchet_sdk.clients.admin import ( - DedupeViolationErr, ScheduleTriggerWorkflowOptions, TriggerWorkflowOptions, ) @@ -138,6 +137,11 @@ from hatchet_sdk.contracts.workflows_pb2 import ( RateLimitDuration, WorkerLabelComparator, ) +from hatchet_sdk.exceptions import ( + DedupeViolationError, + FailedTaskRunExceptionGroup, + TaskRunError, +) from hatchet_sdk.features.runs import BulkCancelReplayOpts, RunFilter from hatchet_sdk.hatchet import Hatchet from hatchet_sdk.runnables.task import Task @@ -162,7 +166,6 @@ from hatchet_sdk.waits import ( from hatchet_sdk.worker.worker import Worker, WorkerStartOptions, WorkerStatus __all__ = [ - "AcceptInviteRequest", "APIError", "APIErrors", "APIMeta", @@ -170,11 +173,24 @@ __all__ = [ "APIMetaIntegration", "APIResourceMeta", "APIToken", + "AcceptInviteRequest", + "BulkCancelReplayOpts", + "ClientConfig", + "ClientTLSConfig", + "ConcurrencyExpression", + "ConcurrencyLimitStrategy", + "Condition", + "Context", "CreateAPITokenRequest", "CreateAPITokenResponse", "CreatePullRequestFromStepRun", "CreateTenantInviteRequest", "CreateTenantRequest", + "CreateWorkflowVersionOpts", + "DedupeViolationError", + "DefaultFilter", + "DurableContext", + "EmptyModel", "Event", "EventData", "EventKeyList", @@ -182,10 +198,12 @@ __all__ = [ "EventOrderByDirection", "EventOrderByField", "EventWorkflowRunSummary", + 
"FailedTaskRunExceptionGroup", "GetStepRunDiffResponse", "GithubAppInstallation", "GithubBranch", "GithubRepo", + "Hatchet", "Job", "JobRun", "JobRunStatus", @@ -198,15 +216,30 @@ __all__ = [ "LogLineList", "LogLineOrderByDirection", "LogLineOrderByField", + "OTelAttribute", + "OpenTelemetryConfig", + "OrGroup", "PaginationResponse", + "ParentCondition", "PullRequest", "PullRequestState", + "PushEventOptions", + "RateLimitDuration", + "RegisterDurableEventRequest", "RejectInviteRequest", "ReplayEventRequest", "RerunStepRunRequest", + "RunFilter", + "ScheduleTriggerWorkflowOptions", + "SleepCondition", "StepRun", "StepRunDiff", + "StepRunEventType", "StepRunStatus", + "StickyStrategy", + "Task", + "TaskDefaults", + "TaskRunError", "Tenant", "TenantInvite", "TenantInviteList", @@ -214,20 +247,30 @@ __all__ = [ "TenantMember", "TenantMemberList", "TenantMemberRole", + "TriggerWorkflowOptions", "TriggerWorkflowRunRequest", "UpdateTenantInviteRequest", "User", + "UserEventCondition", "UserLoginRequest", "UserRegisterRequest", "UserTenantMembershipsList", "UserTenantPublic", + "V1TaskStatus", "Worker", + "Worker", + "WorkerContext", "WorkerLabelComparator", "WorkerList", + "WorkerStartOptions", + "WorkerStatus", "Workflow", + "Workflow", + "WorkflowConfig", "WorkflowDeploymentConfig", "WorkflowList", "WorkflowRun", + "WorkflowRunEventType", "WorkflowRunList", "WorkflowRunStatus", "WorkflowRunTriggeredBy", @@ -238,43 +281,6 @@ __all__ = [ "WorkflowVersion", "WorkflowVersionDefinition", "WorkflowVersionMeta", - "ConcurrencyLimitStrategy", - "CreateWorkflowVersionOpts", - "RateLimitDuration", - "StickyStrategy", - "DedupeViolationErr", - "ScheduleTriggerWorkflowOptions", - "TriggerWorkflowOptions", - "PushEventOptions", - "StepRunEventType", - "WorkflowRunEventType", - "Context", - "WorkerContext", - "ClientConfig", - "Hatchet", - "workflow", - "Worker", - "WorkerStartOptions", - "WorkerStatus", - "ConcurrencyExpression", - "Workflow", - "WorkflowConfig", - "Task", - "EmptyModel", - "Condition", - "OrGroup", "or_", - "SleepCondition", - "UserEventCondition", - "ParentCondition", - "DurableContext", - "RegisterDurableEventRequest", - "TaskDefaults", - "BulkCancelReplayOpts", - "RunFilter", - "V1TaskStatus", - "OTelAttribute", - "OpenTelemetryConfig", - "ClientTLSConfig", - "DefaultFilter", + "workflow", ] diff --git a/sdks/python/hatchet_sdk/clients/admin.py b/sdks/python/hatchet_sdk/clients/admin.py index f97525f73..dcc2e8606 100644 --- a/sdks/python/hatchet_sdk/clients/admin.py +++ b/sdks/python/hatchet_sdk/clients/admin.py @@ -1,7 +1,8 @@ import asyncio import json +from collections.abc import Generator from datetime import datetime -from typing import Generator, TypeVar, Union, cast +from typing import TypeVar, cast import grpc from google.protobuf import timestamp_pb2 @@ -16,6 +17,7 @@ from hatchet_sdk.contracts import workflows_pb2 as v0_workflow_protos from hatchet_sdk.contracts.v1 import workflows_pb2 as workflow_protos from hatchet_sdk.contracts.v1.workflows_pb2_grpc import AdminServiceStub from hatchet_sdk.contracts.workflows_pb2_grpc import WorkflowServiceStub +from hatchet_sdk.exceptions import DedupeViolationError from hatchet_sdk.features.runs import RunsClient from hatchet_sdk.metadata import get_metadata from hatchet_sdk.rate_limit import RateLimitDuration @@ -59,12 +61,6 @@ class WorkflowRunTriggerConfig(BaseModel): key: str | None = None -class DedupeViolationErr(Exception): - """Raised by the Hatchet library to indicate that a workflow has already been run with this deduplication 
value.""" - - pass - - class AdminClient: def __init__( self, @@ -113,7 +109,7 @@ class AdminClient: try: return json.dumps(v).encode("utf-8") except json.JSONDecodeError as e: - raise ValueError(f"Error encoding payload: {e}") + raise ValueError("Error encoding payload") from e def _prepare_workflow_request( self, @@ -124,7 +120,7 @@ class AdminClient: try: payload_data = json.dumps(input) except json.JSONDecodeError as e: - raise ValueError(f"Error encoding payload: {e}") + raise ValueError("Error encoding payload") from e _options = self.TriggerWorkflowRequest.model_validate(options.model_dump()) @@ -148,18 +144,17 @@ class AdminClient: seconds = int(t) nanos = int(t % 1 * 1e9) return timestamp_pb2.Timestamp(seconds=seconds, nanos=nanos) - elif isinstance(schedule, timestamp_pb2.Timestamp): + if isinstance(schedule, timestamp_pb2.Timestamp): return schedule - else: - raise ValueError( - "Invalid schedule type. Must be datetime or timestamp_pb2.Timestamp." - ) + raise ValueError( + "Invalid schedule type. Must be datetime or timestamp_pb2.Timestamp." + ) def _prepare_schedule_workflow_request( self, name: str, - schedules: list[Union[datetime, timestamp_pb2.Timestamp]], - input: JSONSerializableMapping = {}, + schedules: list[datetime | timestamp_pb2.Timestamp], + input: JSONSerializableMapping | None = None, options: ScheduleTriggerWorkflowOptions = ScheduleTriggerWorkflowOptions(), ) -> v0_workflow_protos.ScheduleWorkflowRequest: return v0_workflow_protos.ScheduleWorkflowRequest( @@ -194,8 +189,8 @@ class AdminClient: async def aio_schedule_workflow( self, name: str, - schedules: list[Union[datetime, timestamp_pb2.Timestamp]], - input: JSONSerializableMapping = {}, + schedules: list[datetime | timestamp_pb2.Timestamp], + input: JSONSerializableMapping | None = None, options: ScheduleTriggerWorkflowOptions = ScheduleTriggerWorkflowOptions(), ) -> v0_workflow_protos.WorkflowVersion: return await asyncio.to_thread( @@ -245,8 +240,8 @@ class AdminClient: def schedule_workflow( self, name: str, - schedules: list[Union[datetime, timestamp_pb2.Timestamp]], - input: JSONSerializableMapping = {}, + schedules: list[datetime | timestamp_pb2.Timestamp], + input: JSONSerializableMapping | None = None, options: ScheduleTriggerWorkflowOptions = ScheduleTriggerWorkflowOptions(), ) -> v0_workflow_protos.WorkflowVersion: try: @@ -269,7 +264,7 @@ class AdminClient: ) except (grpc.RpcError, grpc.aio.AioRpcError) as e: if e.code() == grpc.StatusCode.ALREADY_EXISTS: - raise DedupeViolationErr(e.details()) + raise DedupeViolationError(e.details()) from e raise e @@ -336,7 +331,7 @@ class AdminClient: ) except (grpc.RpcError, grpc.aio.AioRpcError) as e: if e.code() == grpc.StatusCode.ALREADY_EXISTS: - raise DedupeViolationErr(e.details()) + raise DedupeViolationError(e.details()) from e raise e return WorkflowRunRef( @@ -369,7 +364,7 @@ class AdminClient: ) except (grpc.RpcError, grpc.aio.AioRpcError) as e: if e.code() == grpc.StatusCode.ALREADY_EXISTS: - raise DedupeViolationErr(e.details()) + raise DedupeViolationError(e.details()) from e raise e diff --git a/sdks/python/hatchet_sdk/clients/dispatcher/action_listener.py b/sdks/python/hatchet_sdk/clients/dispatcher/action_listener.py index f78291c81..854d903c2 100644 --- a/sdks/python/hatchet_sdk/clients/dispatcher/action_listener.py +++ b/sdks/python/hatchet_sdk/clients/dispatcher/action_listener.py @@ -1,7 +1,8 @@ import asyncio import json import time -from typing import TYPE_CHECKING, AsyncGenerator, cast +from collections.abc import AsyncGenerator +from 
typing import TYPE_CHECKING, cast import grpc import grpc.aio @@ -302,7 +303,7 @@ class ActionListener: ) self.run_heartbeat = False raise Exception("retry_exhausted") - elif self.retries >= 1: + if self.retries >= 1: # logger.info # if we are retrying, we wait for a bit. this should eventually be replaced with exp backoff + jitter await exp_backoff_sleep( @@ -369,4 +370,4 @@ class ActionListener: return cast(WorkerUnsubscribeRequest, req) except grpc.RpcError as e: - raise Exception(f"Failed to unsubscribe: {e}") + raise Exception("Failed to unsubscribe") from e diff --git a/sdks/python/hatchet_sdk/clients/dispatcher/dispatcher.py b/sdks/python/hatchet_sdk/clients/dispatcher/dispatcher.py index 1bae1dd52..0a5744fc4 100644 --- a/sdks/python/hatchet_sdk/clients/dispatcher/dispatcher.py +++ b/sdks/python/hatchet_sdk/clients/dispatcher/dispatcher.py @@ -93,10 +93,7 @@ class DispatcherClient: ) except Exception as e: # for step action events, send a failure event when we cannot send the completed event - if ( - event_type == STEP_EVENT_TYPE_COMPLETED - or event_type == STEP_EVENT_TYPE_FAILED - ): + if event_type in (STEP_EVENT_TYPE_COMPLETED, STEP_EVENT_TYPE_FAILED): await self._try_send_step_action_event( action, STEP_EVENT_TYPE_FAILED, diff --git a/sdks/python/hatchet_sdk/clients/event_ts.py b/sdks/python/hatchet_sdk/clients/event_ts.py index c5dc556ba..ab9a1aecd 100644 --- a/sdks/python/hatchet_sdk/clients/event_ts.py +++ b/sdks/python/hatchet_sdk/clients/event_ts.py @@ -1,5 +1,6 @@ import asyncio -from typing import Callable, Generic, TypeVar, cast, overload +from collections.abc import Callable +from typing import Generic, TypeVar, cast, overload import grpc.aio from grpc._cython import cygrpc # type: ignore[attr-defined] diff --git a/sdks/python/hatchet_sdk/clients/events.py b/sdks/python/hatchet_sdk/clients/events.py index e9b062e3f..6ae4c24f4 100644 --- a/sdks/python/hatchet_sdk/clients/events.py +++ b/sdks/python/hatchet_sdk/clients/events.py @@ -1,7 +1,7 @@ import asyncio import datetime import json -from typing import List, cast +from typing import cast from google.protobuf import timestamp_pb2 from pydantic import BaseModel, Field @@ -88,7 +88,7 @@ class EventClient(BaseRestClient): self, events: list[BulkPushEventWithMetadata], options: BulkPushEventOptions = BulkPushEventOptions(), - ) -> List[Event]: + ) -> list[Event]: return await asyncio.to_thread(self.bulk_push, events=events, options=options) ## IMPORTANT: Keep this method's signature in sync with the wrapper in the OTel instrumentor @@ -105,12 +105,12 @@ class EventClient(BaseRestClient): try: meta_bytes = json.dumps(options.additional_metadata) except Exception as e: - raise ValueError(f"Error encoding meta: {e}") + raise ValueError("Error encoding meta") from e try: payload_str = json.dumps(payload) except (TypeError, ValueError) as e: - raise ValueError(f"Error encoding payload: {e}") + raise ValueError("Error encoding payload") from e request = PushEventRequest( key=namespaced_event_key, @@ -139,12 +139,12 @@ class EventClient(BaseRestClient): try: meta_str = json.dumps(meta) except Exception as e: - raise ValueError(f"Error encoding meta: {e}") + raise ValueError("Error encoding meta") from e try: serialized_payload = json.dumps(payload) except (TypeError, ValueError) as e: - raise ValueError(f"Error serializing payload: {e}") + raise ValueError("Error serializing payload") from e return PushEventRequest( key=event_key, @@ -159,9 +159,9 @@ class EventClient(BaseRestClient): @tenacity_retry def bulk_push( self, - 
events: List[BulkPushEventWithMetadata], + events: list[BulkPushEventWithMetadata], options: BulkPushEventOptions = BulkPushEventOptions(), - ) -> List[Event]: + ) -> list[Event]: namespace = options.namespace or self.namespace bulk_request = BulkPushEventRequest( @@ -190,7 +190,7 @@ class EventClient(BaseRestClient): self.events_service_client.PutLog(request, metadata=get_metadata(self.token)) @tenacity_retry - def stream(self, data: str | bytes, step_run_id: str) -> None: + def stream(self, data: str | bytes, step_run_id: str, index: int) -> None: if isinstance(data, str): data_bytes = data.encode("utf-8") elif isinstance(data, bytes): @@ -202,11 +202,15 @@ class EventClient(BaseRestClient): stepRunId=step_run_id, createdAt=proto_timestamp_now(), message=data_bytes, + eventIndex=index, ) - self.events_service_client.PutStreamEvent( - request, metadata=get_metadata(self.token) - ) + try: + self.events_service_client.PutStreamEvent( + request, metadata=get_metadata(self.token) + ) + except Exception: + raise async def aio_list( self, diff --git a/sdks/python/hatchet_sdk/clients/listeners/durable_event_listener.py b/sdks/python/hatchet_sdk/clients/listeners/durable_event_listener.py index 871df35dd..421c12e6b 100644 --- a/sdks/python/hatchet_sdk/clients/listeners/durable_event_listener.py +++ b/sdks/python/hatchet_sdk/clients/listeners/durable_event_listener.py @@ -8,6 +8,7 @@ from pydantic import BaseModel, ConfigDict from hatchet_sdk.clients.listeners.pooled_listener import PooledListener from hatchet_sdk.clients.rest.tenacity_utils import tenacity_retry +from hatchet_sdk.config import ClientConfig from hatchet_sdk.connection import new_conn from hatchet_sdk.contracts.v1.dispatcher_pb2 import ( DurableEvent, @@ -32,6 +33,7 @@ class RegisterDurableEventRequest(BaseModel): task_id: str signal_key: str conditions: list[SleepCondition | UserEventCondition] + config: ClientConfig def to_proto(self) -> RegisterDurableEventRequestProto: return RegisterDurableEventRequestProto( @@ -39,12 +41,12 @@ class RegisterDurableEventRequest(BaseModel): signal_key=self.signal_key, conditions=DurableEventListenerConditions( sleep_conditions=[ - c.to_proto() + c.to_proto(self.config) for c in self.conditions if isinstance(c, SleepCondition) ], user_event_conditions=[ - c.to_proto() + c.to_proto(self.config) for c in self.conditions if isinstance(c, UserEventCondition) ], diff --git a/sdks/python/hatchet_sdk/clients/listeners/pooled_listener.py b/sdks/python/hatchet_sdk/clients/listeners/pooled_listener.py index 55df16512..69d3fa0eb 100644 --- a/sdks/python/hatchet_sdk/clients/listeners/pooled_listener.py +++ b/sdks/python/hatchet_sdk/clients/listeners/pooled_listener.py @@ -252,10 +252,10 @@ class PooledListener(Generic[R, T, L], ABC): metadata=get_metadata(self.token), ) - except grpc.RpcError as e: + except grpc.RpcError as e: # noqa: PERF203 if e.code() == grpc.StatusCode.UNAVAILABLE: retries = retries + 1 else: - raise ValueError(f"gRPC error: {e}") + raise ValueError("gRPC error") from e raise ValueError("Failed to connect to listener") diff --git a/sdks/python/hatchet_sdk/clients/listeners/run_event_listener.py b/sdks/python/hatchet_sdk/clients/listeners/run_event_listener.py index 1b3380136..3cbd4cc10 100644 --- a/sdks/python/hatchet_sdk/clients/listeners/run_event_listener.py +++ b/sdks/python/hatchet_sdk/clients/listeners/run_event_listener.py @@ -1,8 +1,9 @@ import asyncio +from collections.abc import AsyncGenerator, Callable, Generator from enum import Enum from queue import Empty, Queue from 
threading import Thread -from typing import Any, AsyncGenerator, Callable, Generator, Literal, TypeVar, cast +from typing import Any, Literal, TypeVar, cast import grpc from pydantic import BaseModel @@ -129,8 +130,7 @@ class RunEventListener: thread.join() def __iter__(self) -> Generator[StepRunEvent, None, None]: - for item in self.async_to_sync_thread(self.__aiter__()): - yield item + yield from self.async_to_sync_thread(self.__aiter__()) async def _generator(self) -> AsyncGenerator[StepRunEvent, None]: while True: @@ -216,7 +216,7 @@ class RunEventListener: metadata=get_metadata(self.config.token), ), ) - elif self.additional_meta_kv is not None: + if self.additional_meta_kv is not None: return cast( AsyncGenerator[WorkflowEvent, None], self.client.SubscribeToWorkflowEvents( @@ -227,14 +227,13 @@ class RunEventListener: metadata=get_metadata(self.config.token), ), ) - else: - raise Exception("no listener method provided") + raise Exception("no listener method provided") - except grpc.RpcError as e: + except grpc.RpcError as e: # noqa: PERF203 if e.code() == grpc.StatusCode.UNAVAILABLE: retries = retries + 1 else: - raise ValueError(f"gRPC error: {e}") + raise ValueError("gRPC error") from e raise Exception("Failed to subscribe to workflow events") diff --git a/sdks/python/hatchet_sdk/clients/listeners/workflow_listener.py b/sdks/python/hatchet_sdk/clients/listeners/workflow_listener.py index a08a22fd0..0ea995b0e 100644 --- a/sdks/python/hatchet_sdk/clients/listeners/workflow_listener.py +++ b/sdks/python/hatchet_sdk/clients/listeners/workflow_listener.py @@ -1,5 +1,6 @@ import json -from typing import Any, AsyncIterator, cast +from collections.abc import AsyncIterator +from typing import Any, cast import grpc import grpc.aio @@ -11,6 +12,11 @@ from hatchet_sdk.contracts.dispatcher_pb2 import ( WorkflowRunEvent, ) from hatchet_sdk.contracts.dispatcher_pb2_grpc import DispatcherStub +from hatchet_sdk.exceptions import ( + DedupeViolationError, + FailedTaskRunExceptionGroup, + TaskRunError, +) DEDUPE_MESSAGE = "DUPLICATE_WORKFLOW_RUN" @@ -27,16 +33,18 @@ class PooledWorkflowRunListener( return response.workflowRunId async def aio_result(self, id: str) -> dict[str, Any]: - from hatchet_sdk.clients.admin import DedupeViolationErr - event = await self.subscribe(id) errors = [result.error for result in event.results if result.error] + workflow_run_id = event.workflowRunId if errors: if DEDUPE_MESSAGE in errors[0]: - raise DedupeViolationErr(errors[0]) - else: - raise Exception(f"Workflow Errors: {errors}") + raise DedupeViolationError(errors[0]) + + raise FailedTaskRunExceptionGroup( + f"Workflow run {workflow_run_id} failed.", + [TaskRunError.deserialize(e) for e in errors], + ) return { result.stepReadableId: json.loads(result.output) diff --git a/sdks/python/hatchet_sdk/clients/rest/api_response.py b/sdks/python/hatchet_sdk/clients/rest/api_response.py index ca801da0b..7cec36058 100644 --- a/sdks/python/hatchet_sdk/clients/rest/api_response.py +++ b/sdks/python/hatchet_sdk/clients/rest/api_response.py @@ -2,7 +2,8 @@ from __future__ import annotations -from typing import Generic, Mapping, Optional, TypeVar +from collections.abc import Mapping +from typing import Generic, TypeVar from pydantic import BaseModel, Field, StrictBytes, StrictInt @@ -15,7 +16,7 @@ class ApiResponse(BaseModel, Generic[T]): """ status_code: StrictInt = Field(description="HTTP status code") - headers: Optional[Mapping[str, str]] = Field(None, description="HTTP headers") + headers: Mapping[str, str] | None = 
Field(None, description="HTTP headers") data: T = Field(description="Deserialized data given the data type") raw_data: StrictBytes = Field(description="Raw data (HTTP response body)") diff --git a/sdks/python/hatchet_sdk/clients/rest/tenacity_utils.py b/sdks/python/hatchet_sdk/clients/rest/tenacity_utils.py index c90f73526..4b687b0cb 100644 --- a/sdks/python/hatchet_sdk/clients/rest/tenacity_utils.py +++ b/sdks/python/hatchet_sdk/clients/rest/tenacity_utils.py @@ -1,4 +1,5 @@ -from typing import Callable, ParamSpec, TypeVar +from collections.abc import Callable +from typing import ParamSpec, TypeVar import grpc import tenacity @@ -28,12 +29,9 @@ def tenacity_alert_retry(retry_state: tenacity.RetryCallState) -> None: def tenacity_should_retry(ex: BaseException) -> bool: - if isinstance(ex, (grpc.aio.AioRpcError, grpc.RpcError)): - if ex.code() in [ + if isinstance(ex, grpc.aio.AioRpcError | grpc.RpcError): + return ex.code() not in [ grpc.StatusCode.UNIMPLEMENTED, grpc.StatusCode.NOT_FOUND, - ]: - return False - return True - else: - return False + ] + return False diff --git a/sdks/python/hatchet_sdk/config.py b/sdks/python/hatchet_sdk/config.py index 1a70abb90..cefa65a57 100644 --- a/sdks/python/hatchet_sdk/config.py +++ b/sdks/python/hatchet_sdk/config.py @@ -82,6 +82,8 @@ class ClientConfig(BaseSettings): enable_force_kill_sync_threads: bool = False enable_thread_pool_monitoring: bool = False + terminate_worker_after_num_tasks: int | None = None + @model_validator(mode="after") def validate_token_and_tenant(self) -> "ClientConfig": if not self.token: diff --git a/sdks/python/hatchet_sdk/connection.py b/sdks/python/hatchet_sdk/connection.py index 74fe46c80..a250bf97f 100644 --- a/sdks/python/hatchet_sdk/connection.py +++ b/sdks/python/hatchet_sdk/connection.py @@ -22,7 +22,8 @@ def new_conn(config: ClientConfig, aio: bool) -> grpc.Channel | grpc.aio.Channel root: bytes | None = None if config.tls_config.root_ca_file: - root = open(config.tls_config.root_ca_file, "rb").read() + with open(config.tls_config.root_ca_file, "rb") as f: + root = f.read() credentials = grpc.ssl_channel_credentials(root_certificates=root) elif config.tls_config.strategy == "mtls": @@ -30,9 +31,14 @@ def new_conn(config: ClientConfig, aio: bool) -> grpc.Channel | grpc.aio.Channel assert config.tls_config.key_file assert config.tls_config.cert_file - root = open(config.tls_config.root_ca_file, "rb").read() - private_key = open(config.tls_config.key_file, "rb").read() - certificate_chain = open(config.tls_config.cert_file, "rb").read() + with open(config.tls_config.root_ca_file, "rb") as f: + root = f.read() + + with open(config.tls_config.key_file, "rb") as f: + private_key = f.read() + + with open(config.tls_config.cert_file, "rb") as f: + certificate_chain = f.read() credentials = grpc.ssl_channel_credentials( root_certificates=root, diff --git a/sdks/python/hatchet_sdk/context/context.py b/sdks/python/hatchet_sdk/context/context.py index 7a2ba8636..b4a74cbe6 100644 --- a/sdks/python/hatchet_sdk/context/context.py +++ b/sdks/python/hatchet_sdk/context/context.py @@ -1,6 +1,5 @@ +import asyncio import json -import traceback -from concurrent.futures import Future, ThreadPoolExecutor from datetime import timedelta from typing import TYPE_CHECKING, Any, cast from warnings import warn @@ -21,6 +20,7 @@ from hatchet_sdk.logger import logger from hatchet_sdk.utils.timedelta_to_expression import Duration, timedelta_to_expr from hatchet_sdk.utils.typing import JSONSerializableMapping from hatchet_sdk.waits import 
SleepCondition, UserEventCondition +from hatchet_sdk.worker.runner.utils.capture_logs import AsyncLogSender, LogRecord if TYPE_CHECKING: from hatchet_sdk.runnables.task import Task @@ -38,6 +38,7 @@ class Context: worker: WorkerContext, runs_client: RunsClient, lifespan_context: Any | None, + log_sender: AsyncLogSender, ): self.worker = worker @@ -53,24 +54,41 @@ class Context: self.runs_client = runs_client self.durable_event_listener = durable_event_listener - # FIXME: this limits the number of concurrent log requests to 1, which means we can do about - # 100 log lines per second but this depends on network. - self.logger_thread_pool = ThreadPoolExecutor(max_workers=1) - self.stream_event_thread_pool = ThreadPoolExecutor(max_workers=1) - self.input = self.data.input self.filter_payload = self.data.filter_payload + self.log_sender = log_sender self._lifespan_context = lifespan_context + self.stream_index = 0 + + def _increment_stream_index(self) -> int: + index = self.stream_index + self.stream_index += 1 + + return index + def was_skipped(self, task: "Task[TWorkflowInput, R]") -> bool: - return self.data.parents.get(task.name, {}).get("skipped", False) + """ + Check if a given task was skipped. You can read about skipping in [the docs](https://docs.hatchet.run/home/conditional-workflows#skip_if). + + :param task: The task to check the status of (skipped or not). + :return: True if the task was skipped, False otherwise. + """ + return self.data.parents.get(task.name, {}).get("skipped", False) is True @property def trigger_data(self) -> JSONSerializableMapping: return self.data.triggers def task_output(self, task: "Task[TWorkflowInput, R]") -> "R": + """ + Get the output of a parent task in a DAG. + + :param task: The task whose output you want to retrieve. + :return: The output of the parent task, validated against the task's validators. + :raises ValueError: If the task was skipped or if the step output for the task is not found. + """ from hatchet_sdk.runnables.types import R if self.was_skipped(task): @@ -78,8 +96,8 @@ class Context: try: parent_step_data = cast(R, self.data.parents[task.name]) - except KeyError: - raise ValueError(f"Step output for '{task.name}' not found") + except KeyError as e: + raise ValueError(f"Step output for '{task.name}' not found") from e if parent_step_data and (v := task.validators.step_output): return cast(R, v.model_validate(parent_step_data)) @@ -90,6 +108,7 @@ class Context: warn( "`aio_task_output` is deprecated. Use `task_output` instead.", DeprecationWarning, + stacklevel=2, ) if task.is_async_function: @@ -101,48 +120,82 @@ class Context: @property def was_triggered_by_event(self) -> bool: + """ + A property that indicates whether the workflow was triggered by an event. + + :return: True if the workflow was triggered by an event, False otherwise. + """ return self.data.triggered_by == "event" @property def workflow_input(self) -> JSONSerializableMapping: + """ + The input to the workflow, as a dictionary. It's recommended to use the `input` parameter to the task (the first argument passed into the task at runtime) instead of this property. + + :return: The input to the workflow. + """ return self.input @property def lifespan(self) -> Any: + """ + The worker lifespan, if it exists. You can read about lifespans in [the docs](https://docs.hatchet.run/home/lifespans). 
+ + **Note: You'll need to cast the return type of this property to the type returned by your lifespan generator.** + """ return self._lifespan_context @property def workflow_run_id(self) -> str: + """ + The id of the current workflow run. + + :return: The id of the current workflow run. + """ return self.action.workflow_run_id def _set_cancellation_flag(self) -> None: self.exit_flag = True def cancel(self) -> None: + """ + Cancel the current task run. This will call the Hatchet API to cancel the step run and set the exit flag to True. + + :return: None + """ logger.debug("cancelling step...") self.runs_client.cancel(self.step_run_id) self._set_cancellation_flag() async def aio_cancel(self) -> None: + """ + Cancel the current task run. This will call the Hatchet API to cancel the step run and set the exit flag to True. + + :return: None + """ logger.debug("cancelling step...") await self.runs_client.aio_cancel(self.step_run_id) self._set_cancellation_flag() - # done returns true if the context has been cancelled def done(self) -> bool: - return self.exit_flag + """ + Check if the current task run has been cancelled. - def _log(self, line: str) -> tuple[bool, Exception | None]: - try: - self.event_client.log(message=line, step_run_id=self.step_run_id) - return True, None - except Exception as e: - # we don't want to raise an exception here, as it will kill the log thread - return False, e + :return: True if the task run has been cancelled, False otherwise. + """ + return self.exit_flag def log( self, line: str | JSONSerializableMapping, raise_on_error: bool = False ) -> None: + """ + Log a line to the Hatchet API. This will send the log line to the Hatchet API and return immediately. + + :param line: The line to log. Can be a string or a JSON serializable mapping. + :param raise_on_error: If True, will raise an exception if the log fails. Defaults to False. + :return: None + """ + if self.step_run_id == "": return @@ -152,43 +205,51 @@ class Context: except Exception: line = str(line) - future = self.logger_thread_pool.submit(self._log, line) - - def handle_result(future: Future[tuple[bool, Exception | None]]) -> None: - success, exception = future.result() - - if not success and exception: - if raise_on_error: - raise exception - else: - thread_trace = "".join( - traceback.format_exception( - type(exception), exception, exception.__traceback__ - ) - ) - call_site_trace = "".join(traceback.format_stack()) - logger.error( - f"Error in log thread: {exception}\n{thread_trace}\nCalled from:\n{call_site_trace}" - ) - - future.add_done_callback(handle_result) + logger.info(line) + self.log_sender.publish(LogRecord(message=line, step_run_id=self.step_run_id)) def release_slot(self) -> None: + """ + Manually release the slot for the current step run to free up a slot on the worker. Note that this is an advanced feature and should be used with caution. + + :return: None + """ return self.dispatcher_client.release_slot(self.step_run_id) - def _put_stream(self, data: str | bytes) -> None: + def put_stream(self, data: str | bytes) -> None: + """ + Put a stream event to the Hatchet API. This will send the data to the Hatchet API and return immediately. You can then subscribe to the stream from a separate consumer. + + :param data: The data to send to the Hatchet API. Can be a string or bytes. 
+ :return: None + """ try: - self.event_client.stream(data=data, step_run_id=self.step_run_id) + ix = self._increment_stream_index() + + self.event_client.stream( + data=data, + step_run_id=self.step_run_id, + index=ix, + ) except Exception as e: logger.error(f"Error putting stream event: {e}") - def put_stream(self, data: str | bytes) -> None: - if self.step_run_id == "": - return + async def aio_put_stream(self, data: str | bytes) -> None: + """ + Put a stream event to the Hatchet API. This will send the data to the Hatchet API and return immediately. You can then subscribe to the stream from a separate consumer. - self.stream_event_thread_pool.submit(self._put_stream, data) + :param data: The data to send to the Hatchet API. Can be a string or bytes. + :return: None + """ + await asyncio.to_thread(self.put_stream, data) def refresh_timeout(self, increment_by: str | timedelta) -> None: + """ + Refresh the timeout for the current task run. You can read about refreshing timeouts in [the docs](https://docs.hatchet.run/home/timeouts#refreshing-timeouts). + + :param increment_by: The amount of time to increment the timeout by. Can be a string (e.g. "5m") or a timedelta object. + :return: None + """ if isinstance(increment_by, timedelta): increment_by = timedelta_to_expr(increment_by) @@ -201,10 +262,30 @@ class Context: @property def retry_count(self) -> int: + """ + The retry count of the current task run, which corresponds to the number of times the task has been retried. + + :return: The retry count of the current task run. + """ return self.action.retry_count + @property + def attempt_number(self) -> int: + """ + The attempt number of the current task run, which corresponds to the number of times the task has been attempted, including the initial attempt. This is one more than the retry count. + + :return: The attempt number of the current task run. + """ + + return self.retry_count + 1 + @property def additional_metadata(self) -> JSONSerializableMapping | None: + """ + The additional metadata sent with the current task run. + + :return: The additional metadata sent with the current task run, or None if no additional metadata was sent. + """ return self.action.additional_metadata @property @@ -217,27 +298,54 @@ class Context: @property def parent_workflow_run_id(self) -> str | None: + """ + The parent workflow run id of the current task run, if it exists. This is useful for knowing which workflow run spawned this run as a child. + + :return: The parent workflow run id of the current task run, or None if it does not exist. + """ return self.action.parent_workflow_run_id @property def priority(self) -> int | None: + """ + The priority that the current task was run with. + + :return: The priority of the current task run, or None if no priority was set. + """ return self.action.priority @property def workflow_id(self) -> str | None: + """ + The id of the workflow that this task belongs to. + + :return: The id of the workflow that this task belongs to. + """ + return self.action.workflow_id @property def workflow_version_id(self) -> str | None: + """ + The id of the workflow version that this task belongs to. + + :return: The id of the workflow version that this task belongs to. + """ + return self.action.workflow_version_id @property def task_run_errors(self) -> dict[str, str]: + """ + A helper intended to be used in an on-failure step to retrieve the errors that occurred in upstream task runs. + + :return: A dictionary mapping task names to their error messages. 
+ """ errors = self.data.step_run_errors if not errors: logger.error( - "No step run errors found. `context.step_run_errors` is intended to be run in an on-failure step, and will only work on engine versions more recent than v0.53.10" + "No step run errors found. `context.task_run_errors` is intended to be run in an on-failure step, and will only work on engine versions more recent than v0.53.10" ) return errors @@ -246,6 +354,12 @@ class Context: self, task: "Task[TWorkflowInput, R]", ) -> str | None: + """ + A helper intended to be used in an on-failure step to retrieve the error that occurred in a specific upstream task run. + + :param task: The task whose error you want to retrieve. + :return: The error message of the task run, or None if no error occurred. + """ errors = self.data.step_run_errors return errors.get(task.name) @@ -255,6 +369,15 @@ class DurableContext(Context): async def aio_wait_for( self, signal_key: str, *conditions: SleepCondition | UserEventCondition ) -> dict[str, Any]: + """ + Durably wait for either a sleep or an event. + + :param signal_key: The key to use for the durable event. This is used to identify the event in the Hatchet API. + :param *conditions: The conditions to wait for. Can be a SleepCondition or UserEventCondition. + + :return: A dictionary containing the results of the wait. + :raises ValueError: If the durable event listener is not available. + """ if self.durable_event_listener is None: raise ValueError("Durable event listener is not available") @@ -264,6 +387,7 @@ class DurableContext(Context): task_id=task_id, signal_key=signal_key, conditions=list(conditions), + config=self.runs_client.client_config, ) self.durable_event_listener.register_durable_event(request) diff --git a/sdks/python/hatchet_sdk/context/worker_context.py b/sdks/python/hatchet_sdk/context/worker_context.py index 770ae0975..7bbbca690 100644 --- a/sdks/python/hatchet_sdk/context/worker_context.py +++ b/sdks/python/hatchet_sdk/context/worker_context.py @@ -2,11 +2,11 @@ from hatchet_sdk.clients.dispatcher.dispatcher import DispatcherClient class WorkerContext: - _worker_id: str | None = None - _registered_workflow_names: list[str] = [] - _labels: dict[str, str | int] = {} - def __init__(self, labels: dict[str, str | int], client: DispatcherClient): + self._worker_id: str | None = None + self._registered_workflow_names: list[str] = [] + self._labels: dict[str, str | int] = {} + self._labels = labels self.client = client @@ -23,6 +23,3 @@ class WorkerContext: def id(self) -> str | None: return self._worker_id - - # def has_workflow(self, workflow_name: str): - # return workflow_name in self._registered_workflow_names diff --git a/sdks/python/hatchet_sdk/contracts/dispatcher_pb2.py b/sdks/python/hatchet_sdk/contracts/dispatcher_pb2.py index b8e9acd35..e4f96b907 100644 --- a/sdks/python/hatchet_sdk/contracts/dispatcher_pb2.py +++ b/sdks/python/hatchet_sdk/contracts/dispatcher_pb2.py @@ -15,7 +15,7 @@ _sym_db = _symbol_database.Default() from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x10\x64ispatcher.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"V\n\x0cWorkerLabels\x12\x15\n\x08strValue\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x15\n\x08intValue\x18\x02 \x01(\x05H\x01\x88\x01\x01\x42\x0b\n\t_strValueB\x0b\n\t_intValue\"\xc8\x01\n\x0bRuntimeInfo\x12\x17\n\nsdkVersion\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x1c\n\x08language\x18\x02 
\x01(\x0e\x32\x05.SDKSH\x01\x88\x01\x01\x12\x1c\n\x0flanguageVersion\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x0f\n\x02os\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x12\n\x05\x65xtra\x18\x05 \x01(\tH\x04\x88\x01\x01\x42\r\n\x0b_sdkVersionB\x0b\n\t_languageB\x12\n\x10_languageVersionB\x05\n\x03_osB\x08\n\x06_extra\"\xc0\x02\n\x15WorkerRegisterRequest\x12\x12\n\nworkerName\x18\x01 \x01(\t\x12\x0f\n\x07\x61\x63tions\x18\x02 \x03(\t\x12\x10\n\x08services\x18\x03 \x03(\t\x12\x14\n\x07maxRuns\x18\x04 \x01(\x05H\x00\x88\x01\x01\x12\x32\n\x06labels\x18\x05 \x03(\x0b\x32\".WorkerRegisterRequest.LabelsEntry\x12\x16\n\twebhookId\x18\x06 \x01(\tH\x01\x88\x01\x01\x12&\n\x0bruntimeInfo\x18\x07 \x01(\x0b\x32\x0c.RuntimeInfoH\x02\x88\x01\x01\x1a<\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1c\n\x05value\x18\x02 \x01(\x0b\x32\r.WorkerLabels:\x02\x38\x01\x42\n\n\x08_maxRunsB\x0c\n\n_webhookIdB\x0e\n\x0c_runtimeInfo\"P\n\x16WorkerRegisterResponse\x12\x10\n\x08tenantId\x18\x01 \x01(\t\x12\x10\n\x08workerId\x18\x02 \x01(\t\x12\x12\n\nworkerName\x18\x03 \x01(\t\"\xa3\x01\n\x19UpsertWorkerLabelsRequest\x12\x10\n\x08workerId\x18\x01 \x01(\t\x12\x36\n\x06labels\x18\x02 \x03(\x0b\x32&.UpsertWorkerLabelsRequest.LabelsEntry\x1a<\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1c\n\x05value\x18\x02 \x01(\x0b\x32\r.WorkerLabels:\x02\x38\x01\"@\n\x1aUpsertWorkerLabelsResponse\x12\x10\n\x08tenantId\x18\x01 \x01(\t\x12\x10\n\x08workerId\x18\x02 \x01(\t\"\xf6\x04\n\x0e\x41ssignedAction\x12\x10\n\x08tenantId\x18\x01 \x01(\t\x12\x15\n\rworkflowRunId\x18\x02 \x01(\t\x12\x18\n\x10getGroupKeyRunId\x18\x03 \x01(\t\x12\r\n\x05jobId\x18\x04 \x01(\t\x12\x0f\n\x07jobName\x18\x05 \x01(\t\x12\x10\n\x08jobRunId\x18\x06 \x01(\t\x12\x0e\n\x06stepId\x18\x07 \x01(\t\x12\x11\n\tstepRunId\x18\x08 \x01(\t\x12\x10\n\x08\x61\x63tionId\x18\t \x01(\t\x12\x1f\n\nactionType\x18\n \x01(\x0e\x32\x0b.ActionType\x12\x15\n\ractionPayload\x18\x0b \x01(\t\x12\x10\n\x08stepName\x18\x0c \x01(\t\x12\x12\n\nretryCount\x18\r \x01(\x05\x12 \n\x13\x61\x64\x64itional_metadata\x18\x0e \x01(\tH\x00\x88\x01\x01\x12!\n\x14\x63hild_workflow_index\x18\x0f \x01(\x05H\x01\x88\x01\x01\x12\x1f\n\x12\x63hild_workflow_key\x18\x10 \x01(\tH\x02\x88\x01\x01\x12#\n\x16parent_workflow_run_id\x18\x11 \x01(\tH\x03\x88\x01\x01\x12\x10\n\x08priority\x18\x12 \x01(\x05\x12\x17\n\nworkflowId\x18\x13 \x01(\tH\x04\x88\x01\x01\x12\x1e\n\x11workflowVersionId\x18\x14 \x01(\tH\x05\x88\x01\x01\x42\x16\n\x14_additional_metadataB\x17\n\x15_child_workflow_indexB\x15\n\x13_child_workflow_keyB\x19\n\x17_parent_workflow_run_idB\r\n\x0b_workflowIdB\x14\n\x12_workflowVersionId\"\'\n\x13WorkerListenRequest\x12\x10\n\x08workerId\x18\x01 \x01(\t\",\n\x18WorkerUnsubscribeRequest\x12\x10\n\x08workerId\x18\x01 \x01(\t\"?\n\x19WorkerUnsubscribeResponse\x12\x10\n\x08tenantId\x18\x01 \x01(\t\x12\x10\n\x08workerId\x18\x02 \x01(\t\"\xe1\x01\n\x13GroupKeyActionEvent\x12\x10\n\x08workerId\x18\x01 \x01(\t\x12\x15\n\rworkflowRunId\x18\x02 \x01(\t\x12\x18\n\x10getGroupKeyRunId\x18\x03 \x01(\t\x12\x10\n\x08\x61\x63tionId\x18\x04 \x01(\t\x12\x32\n\x0e\x65ventTimestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\teventType\x18\x06 \x01(\x0e\x32\x18.GroupKeyActionEventType\x12\x14\n\x0c\x65ventPayload\x18\x07 \x01(\t\"\xc4\x02\n\x0fStepActionEvent\x12\x10\n\x08workerId\x18\x01 \x01(\t\x12\r\n\x05jobId\x18\x02 \x01(\t\x12\x10\n\x08jobRunId\x18\x03 \x01(\t\x12\x0e\n\x06stepId\x18\x04 \x01(\t\x12\x11\n\tstepRunId\x18\x05 \x01(\t\x12\x10\n\x08\x61\x63tionId\x18\x06 
\x01(\t\x12\x32\n\x0e\x65ventTimestamp\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\'\n\teventType\x18\x08 \x01(\x0e\x32\x14.StepActionEventType\x12\x14\n\x0c\x65ventPayload\x18\t \x01(\t\x12\x17\n\nretryCount\x18\n \x01(\x05H\x00\x88\x01\x01\x12\x1b\n\x0eshouldNotRetry\x18\x0b \x01(\x08H\x01\x88\x01\x01\x42\r\n\x0b_retryCountB\x11\n\x0f_shouldNotRetry\"9\n\x13\x41\x63tionEventResponse\x12\x10\n\x08tenantId\x18\x01 \x01(\t\x12\x10\n\x08workerId\x18\x02 \x01(\t\"\xc0\x01\n SubscribeToWorkflowEventsRequest\x12\x1a\n\rworkflowRunId\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x1e\n\x11\x61\x64\x64itionalMetaKey\x18\x02 \x01(\tH\x01\x88\x01\x01\x12 \n\x13\x61\x64\x64itionalMetaValue\x18\x03 \x01(\tH\x02\x88\x01\x01\x42\x10\n\x0e_workflowRunIdB\x14\n\x12_additionalMetaKeyB\x16\n\x14_additionalMetaValue\"7\n\x1eSubscribeToWorkflowRunsRequest\x12\x15\n\rworkflowRunId\x18\x01 \x01(\t\"\xb2\x02\n\rWorkflowEvent\x12\x15\n\rworkflowRunId\x18\x01 \x01(\t\x12#\n\x0cresourceType\x18\x02 \x01(\x0e\x32\r.ResourceType\x12%\n\teventType\x18\x03 \x01(\x0e\x32\x12.ResourceEventType\x12\x12\n\nresourceId\x18\x04 \x01(\t\x12\x32\n\x0e\x65ventTimestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x14\n\x0c\x65ventPayload\x18\x06 \x01(\t\x12\x0e\n\x06hangup\x18\x07 \x01(\x08\x12\x18\n\x0bstepRetries\x18\x08 \x01(\x05H\x00\x88\x01\x01\x12\x17\n\nretryCount\x18\t \x01(\x05H\x01\x88\x01\x01\x42\x0e\n\x0c_stepRetriesB\r\n\x0b_retryCount\"\xa8\x01\n\x10WorkflowRunEvent\x12\x15\n\rworkflowRunId\x18\x01 \x01(\t\x12(\n\teventType\x18\x02 \x01(\x0e\x32\x15.WorkflowRunEventType\x12\x32\n\x0e\x65ventTimestamp\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x1f\n\x07results\x18\x04 \x03(\x0b\x32\x0e.StepRunResult\"\x8a\x01\n\rStepRunResult\x12\x11\n\tstepRunId\x18\x01 \x01(\t\x12\x16\n\x0estepReadableId\x18\x02 \x01(\t\x12\x10\n\x08jobRunId\x18\x03 \x01(\t\x12\x12\n\x05\x65rror\x18\x04 \x01(\tH\x00\x88\x01\x01\x12\x13\n\x06output\x18\x05 \x01(\tH\x01\x88\x01\x01\x42\x08\n\x06_errorB\t\n\x07_output\"W\n\rOverridesData\x12\x11\n\tstepRunId\x18\x01 \x01(\t\x12\x0c\n\x04path\x18\x02 \x01(\t\x12\r\n\x05value\x18\x03 \x01(\t\x12\x16\n\x0e\x63\x61llerFilename\x18\x04 \x01(\t\"\x17\n\x15OverridesDataResponse\"U\n\x10HeartbeatRequest\x12\x10\n\x08workerId\x18\x01 \x01(\t\x12/\n\x0bheartbeatAt\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\x13\n\x11HeartbeatResponse\"F\n\x15RefreshTimeoutRequest\x12\x11\n\tstepRunId\x18\x01 \x01(\t\x12\x1a\n\x12incrementTimeoutBy\x18\x02 \x01(\t\"G\n\x16RefreshTimeoutResponse\x12-\n\ttimeoutAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\'\n\x12ReleaseSlotRequest\x12\x11\n\tstepRunId\x18\x01 \x01(\t\"\x15\n\x13ReleaseSlotResponse*7\n\x04SDKS\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x06\n\x02GO\x10\x01\x12\n\n\x06PYTHON\x10\x02\x12\x0e\n\nTYPESCRIPT\x10\x03*N\n\nActionType\x12\x12\n\x0eSTART_STEP_RUN\x10\x00\x12\x13\n\x0f\x43\x41NCEL_STEP_RUN\x10\x01\x12\x17\n\x13START_GET_GROUP_KEY\x10\x02*\xa2\x01\n\x17GroupKeyActionEventType\x12 \n\x1cGROUP_KEY_EVENT_TYPE_UNKNOWN\x10\x00\x12 \n\x1cGROUP_KEY_EVENT_TYPE_STARTED\x10\x01\x12\"\n\x1eGROUP_KEY_EVENT_TYPE_COMPLETED\x10\x02\x12\x1f\n\x1bGROUP_KEY_EVENT_TYPE_FAILED\x10\x03*\xac\x01\n\x13StepActionEventType\x12\x1b\n\x17STEP_EVENT_TYPE_UNKNOWN\x10\x00\x12\x1b\n\x17STEP_EVENT_TYPE_STARTED\x10\x01\x12\x1d\n\x19STEP_EVENT_TYPE_COMPLETED\x10\x02\x12\x1a\n\x16STEP_EVENT_TYPE_FAILED\x10\x03\x12 
\n\x1cSTEP_EVENT_TYPE_ACKNOWLEDGED\x10\x04*e\n\x0cResourceType\x12\x19\n\x15RESOURCE_TYPE_UNKNOWN\x10\x00\x12\x1a\n\x16RESOURCE_TYPE_STEP_RUN\x10\x01\x12\x1e\n\x1aRESOURCE_TYPE_WORKFLOW_RUN\x10\x02*\xfe\x01\n\x11ResourceEventType\x12\x1f\n\x1bRESOURCE_EVENT_TYPE_UNKNOWN\x10\x00\x12\x1f\n\x1bRESOURCE_EVENT_TYPE_STARTED\x10\x01\x12!\n\x1dRESOURCE_EVENT_TYPE_COMPLETED\x10\x02\x12\x1e\n\x1aRESOURCE_EVENT_TYPE_FAILED\x10\x03\x12!\n\x1dRESOURCE_EVENT_TYPE_CANCELLED\x10\x04\x12!\n\x1dRESOURCE_EVENT_TYPE_TIMED_OUT\x10\x05\x12\x1e\n\x1aRESOURCE_EVENT_TYPE_STREAM\x10\x06*<\n\x14WorkflowRunEventType\x12$\n WORKFLOW_RUN_EVENT_TYPE_FINISHED\x10\x00\x32\xf8\x06\n\nDispatcher\x12=\n\x08Register\x12\x16.WorkerRegisterRequest\x1a\x17.WorkerRegisterResponse\"\x00\x12\x33\n\x06Listen\x12\x14.WorkerListenRequest\x1a\x0f.AssignedAction\"\x00\x30\x01\x12\x35\n\x08ListenV2\x12\x14.WorkerListenRequest\x1a\x0f.AssignedAction\"\x00\x30\x01\x12\x34\n\tHeartbeat\x12\x11.HeartbeatRequest\x1a\x12.HeartbeatResponse\"\x00\x12R\n\x19SubscribeToWorkflowEvents\x12!.SubscribeToWorkflowEventsRequest\x1a\x0e.WorkflowEvent\"\x00\x30\x01\x12S\n\x17SubscribeToWorkflowRuns\x12\x1f.SubscribeToWorkflowRunsRequest\x1a\x11.WorkflowRunEvent\"\x00(\x01\x30\x01\x12?\n\x13SendStepActionEvent\x12\x10.StepActionEvent\x1a\x14.ActionEventResponse\"\x00\x12G\n\x17SendGroupKeyActionEvent\x12\x14.GroupKeyActionEvent\x1a\x14.ActionEventResponse\"\x00\x12<\n\x10PutOverridesData\x12\x0e.OverridesData\x1a\x16.OverridesDataResponse\"\x00\x12\x46\n\x0bUnsubscribe\x12\x19.WorkerUnsubscribeRequest\x1a\x1a.WorkerUnsubscribeResponse\"\x00\x12\x43\n\x0eRefreshTimeout\x12\x16.RefreshTimeoutRequest\x1a\x17.RefreshTimeoutResponse\"\x00\x12:\n\x0bReleaseSlot\x12\x13.ReleaseSlotRequest\x1a\x14.ReleaseSlotResponse\"\x00\x12O\n\x12UpsertWorkerLabels\x12\x1a.UpsertWorkerLabelsRequest\x1a\x1b.UpsertWorkerLabelsResponse\"\x00\x42GZEgithub.com/hatchet-dev/hatchet/internal/services/dispatcher/contractsb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x10\x64ispatcher.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"V\n\x0cWorkerLabels\x12\x15\n\x08strValue\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x15\n\x08intValue\x18\x02 \x01(\x05H\x01\x88\x01\x01\x42\x0b\n\t_strValueB\x0b\n\t_intValue\"\xc8\x01\n\x0bRuntimeInfo\x12\x17\n\nsdkVersion\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x1c\n\x08language\x18\x02 \x01(\x0e\x32\x05.SDKSH\x01\x88\x01\x01\x12\x1c\n\x0flanguageVersion\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x0f\n\x02os\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x12\n\x05\x65xtra\x18\x05 \x01(\tH\x04\x88\x01\x01\x42\r\n\x0b_sdkVersionB\x0b\n\t_languageB\x12\n\x10_languageVersionB\x05\n\x03_osB\x08\n\x06_extra\"\xc0\x02\n\x15WorkerRegisterRequest\x12\x12\n\nworkerName\x18\x01 \x01(\t\x12\x0f\n\x07\x61\x63tions\x18\x02 \x03(\t\x12\x10\n\x08services\x18\x03 \x03(\t\x12\x14\n\x07maxRuns\x18\x04 \x01(\x05H\x00\x88\x01\x01\x12\x32\n\x06labels\x18\x05 \x03(\x0b\x32\".WorkerRegisterRequest.LabelsEntry\x12\x16\n\twebhookId\x18\x06 \x01(\tH\x01\x88\x01\x01\x12&\n\x0bruntimeInfo\x18\x07 \x01(\x0b\x32\x0c.RuntimeInfoH\x02\x88\x01\x01\x1a<\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1c\n\x05value\x18\x02 \x01(\x0b\x32\r.WorkerLabels:\x02\x38\x01\x42\n\n\x08_maxRunsB\x0c\n\n_webhookIdB\x0e\n\x0c_runtimeInfo\"P\n\x16WorkerRegisterResponse\x12\x10\n\x08tenantId\x18\x01 \x01(\t\x12\x10\n\x08workerId\x18\x02 \x01(\t\x12\x12\n\nworkerName\x18\x03 \x01(\t\"\xa3\x01\n\x19UpsertWorkerLabelsRequest\x12\x10\n\x08workerId\x18\x01 \x01(\t\x12\x36\n\x06labels\x18\x02 
\x03(\x0b\x32&.UpsertWorkerLabelsRequest.LabelsEntry\x1a<\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1c\n\x05value\x18\x02 \x01(\x0b\x32\r.WorkerLabels:\x02\x38\x01\"@\n\x1aUpsertWorkerLabelsResponse\x12\x10\n\x08tenantId\x18\x01 \x01(\t\x12\x10\n\x08workerId\x18\x02 \x01(\t\"\xf6\x04\n\x0e\x41ssignedAction\x12\x10\n\x08tenantId\x18\x01 \x01(\t\x12\x15\n\rworkflowRunId\x18\x02 \x01(\t\x12\x18\n\x10getGroupKeyRunId\x18\x03 \x01(\t\x12\r\n\x05jobId\x18\x04 \x01(\t\x12\x0f\n\x07jobName\x18\x05 \x01(\t\x12\x10\n\x08jobRunId\x18\x06 \x01(\t\x12\x0e\n\x06stepId\x18\x07 \x01(\t\x12\x11\n\tstepRunId\x18\x08 \x01(\t\x12\x10\n\x08\x61\x63tionId\x18\t \x01(\t\x12\x1f\n\nactionType\x18\n \x01(\x0e\x32\x0b.ActionType\x12\x15\n\ractionPayload\x18\x0b \x01(\t\x12\x10\n\x08stepName\x18\x0c \x01(\t\x12\x12\n\nretryCount\x18\r \x01(\x05\x12 \n\x13\x61\x64\x64itional_metadata\x18\x0e \x01(\tH\x00\x88\x01\x01\x12!\n\x14\x63hild_workflow_index\x18\x0f \x01(\x05H\x01\x88\x01\x01\x12\x1f\n\x12\x63hild_workflow_key\x18\x10 \x01(\tH\x02\x88\x01\x01\x12#\n\x16parent_workflow_run_id\x18\x11 \x01(\tH\x03\x88\x01\x01\x12\x10\n\x08priority\x18\x12 \x01(\x05\x12\x17\n\nworkflowId\x18\x13 \x01(\tH\x04\x88\x01\x01\x12\x1e\n\x11workflowVersionId\x18\x14 \x01(\tH\x05\x88\x01\x01\x42\x16\n\x14_additional_metadataB\x17\n\x15_child_workflow_indexB\x15\n\x13_child_workflow_keyB\x19\n\x17_parent_workflow_run_idB\r\n\x0b_workflowIdB\x14\n\x12_workflowVersionId\"\'\n\x13WorkerListenRequest\x12\x10\n\x08workerId\x18\x01 \x01(\t\",\n\x18WorkerUnsubscribeRequest\x12\x10\n\x08workerId\x18\x01 \x01(\t\"?\n\x19WorkerUnsubscribeResponse\x12\x10\n\x08tenantId\x18\x01 \x01(\t\x12\x10\n\x08workerId\x18\x02 \x01(\t\"\xe1\x01\n\x13GroupKeyActionEvent\x12\x10\n\x08workerId\x18\x01 \x01(\t\x12\x15\n\rworkflowRunId\x18\x02 \x01(\t\x12\x18\n\x10getGroupKeyRunId\x18\x03 \x01(\t\x12\x10\n\x08\x61\x63tionId\x18\x04 \x01(\t\x12\x32\n\x0e\x65ventTimestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\teventType\x18\x06 \x01(\x0e\x32\x18.GroupKeyActionEventType\x12\x14\n\x0c\x65ventPayload\x18\x07 \x01(\t\"\xc4\x02\n\x0fStepActionEvent\x12\x10\n\x08workerId\x18\x01 \x01(\t\x12\r\n\x05jobId\x18\x02 \x01(\t\x12\x10\n\x08jobRunId\x18\x03 \x01(\t\x12\x0e\n\x06stepId\x18\x04 \x01(\t\x12\x11\n\tstepRunId\x18\x05 \x01(\t\x12\x10\n\x08\x61\x63tionId\x18\x06 \x01(\t\x12\x32\n\x0e\x65ventTimestamp\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\'\n\teventType\x18\x08 \x01(\x0e\x32\x14.StepActionEventType\x12\x14\n\x0c\x65ventPayload\x18\t \x01(\t\x12\x17\n\nretryCount\x18\n \x01(\x05H\x00\x88\x01\x01\x12\x1b\n\x0eshouldNotRetry\x18\x0b \x01(\x08H\x01\x88\x01\x01\x42\r\n\x0b_retryCountB\x11\n\x0f_shouldNotRetry\"9\n\x13\x41\x63tionEventResponse\x12\x10\n\x08tenantId\x18\x01 \x01(\t\x12\x10\n\x08workerId\x18\x02 \x01(\t\"\xc0\x01\n SubscribeToWorkflowEventsRequest\x12\x1a\n\rworkflowRunId\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x1e\n\x11\x61\x64\x64itionalMetaKey\x18\x02 \x01(\tH\x01\x88\x01\x01\x12 \n\x13\x61\x64\x64itionalMetaValue\x18\x03 \x01(\tH\x02\x88\x01\x01\x42\x10\n\x0e_workflowRunIdB\x14\n\x12_additionalMetaKeyB\x16\n\x14_additionalMetaValue\"7\n\x1eSubscribeToWorkflowRunsRequest\x12\x15\n\rworkflowRunId\x18\x01 \x01(\t\"\xda\x02\n\rWorkflowEvent\x12\x15\n\rworkflowRunId\x18\x01 \x01(\t\x12#\n\x0cresourceType\x18\x02 \x01(\x0e\x32\r.ResourceType\x12%\n\teventType\x18\x03 \x01(\x0e\x32\x12.ResourceEventType\x12\x12\n\nresourceId\x18\x04 \x01(\t\x12\x32\n\x0e\x65ventTimestamp\x18\x05 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x14\n\x0c\x65ventPayload\x18\x06 \x01(\t\x12\x0e\n\x06hangup\x18\x07 \x01(\x08\x12\x18\n\x0bstepRetries\x18\x08 \x01(\x05H\x00\x88\x01\x01\x12\x17\n\nretryCount\x18\t \x01(\x05H\x01\x88\x01\x01\x12\x17\n\neventIndex\x18\n \x01(\x03H\x02\x88\x01\x01\x42\x0e\n\x0c_stepRetriesB\r\n\x0b_retryCountB\r\n\x0b_eventIndex\"\xa8\x01\n\x10WorkflowRunEvent\x12\x15\n\rworkflowRunId\x18\x01 \x01(\t\x12(\n\teventType\x18\x02 \x01(\x0e\x32\x15.WorkflowRunEventType\x12\x32\n\x0e\x65ventTimestamp\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x1f\n\x07results\x18\x04 \x03(\x0b\x32\x0e.StepRunResult\"\x8a\x01\n\rStepRunResult\x12\x11\n\tstepRunId\x18\x01 \x01(\t\x12\x16\n\x0estepReadableId\x18\x02 \x01(\t\x12\x10\n\x08jobRunId\x18\x03 \x01(\t\x12\x12\n\x05\x65rror\x18\x04 \x01(\tH\x00\x88\x01\x01\x12\x13\n\x06output\x18\x05 \x01(\tH\x01\x88\x01\x01\x42\x08\n\x06_errorB\t\n\x07_output\"W\n\rOverridesData\x12\x11\n\tstepRunId\x18\x01 \x01(\t\x12\x0c\n\x04path\x18\x02 \x01(\t\x12\r\n\x05value\x18\x03 \x01(\t\x12\x16\n\x0e\x63\x61llerFilename\x18\x04 \x01(\t\"\x17\n\x15OverridesDataResponse\"U\n\x10HeartbeatRequest\x12\x10\n\x08workerId\x18\x01 \x01(\t\x12/\n\x0bheartbeatAt\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\x13\n\x11HeartbeatResponse\"F\n\x15RefreshTimeoutRequest\x12\x11\n\tstepRunId\x18\x01 \x01(\t\x12\x1a\n\x12incrementTimeoutBy\x18\x02 \x01(\t\"G\n\x16RefreshTimeoutResponse\x12-\n\ttimeoutAt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\'\n\x12ReleaseSlotRequest\x12\x11\n\tstepRunId\x18\x01 \x01(\t\"\x15\n\x13ReleaseSlotResponse*7\n\x04SDKS\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x06\n\x02GO\x10\x01\x12\n\n\x06PYTHON\x10\x02\x12\x0e\n\nTYPESCRIPT\x10\x03*N\n\nActionType\x12\x12\n\x0eSTART_STEP_RUN\x10\x00\x12\x13\n\x0f\x43\x41NCEL_STEP_RUN\x10\x01\x12\x17\n\x13START_GET_GROUP_KEY\x10\x02*\xa2\x01\n\x17GroupKeyActionEventType\x12 \n\x1cGROUP_KEY_EVENT_TYPE_UNKNOWN\x10\x00\x12 \n\x1cGROUP_KEY_EVENT_TYPE_STARTED\x10\x01\x12\"\n\x1eGROUP_KEY_EVENT_TYPE_COMPLETED\x10\x02\x12\x1f\n\x1bGROUP_KEY_EVENT_TYPE_FAILED\x10\x03*\xac\x01\n\x13StepActionEventType\x12\x1b\n\x17STEP_EVENT_TYPE_UNKNOWN\x10\x00\x12\x1b\n\x17STEP_EVENT_TYPE_STARTED\x10\x01\x12\x1d\n\x19STEP_EVENT_TYPE_COMPLETED\x10\x02\x12\x1a\n\x16STEP_EVENT_TYPE_FAILED\x10\x03\x12 \n\x1cSTEP_EVENT_TYPE_ACKNOWLEDGED\x10\x04*e\n\x0cResourceType\x12\x19\n\x15RESOURCE_TYPE_UNKNOWN\x10\x00\x12\x1a\n\x16RESOURCE_TYPE_STEP_RUN\x10\x01\x12\x1e\n\x1aRESOURCE_TYPE_WORKFLOW_RUN\x10\x02*\xfe\x01\n\x11ResourceEventType\x12\x1f\n\x1bRESOURCE_EVENT_TYPE_UNKNOWN\x10\x00\x12\x1f\n\x1bRESOURCE_EVENT_TYPE_STARTED\x10\x01\x12!\n\x1dRESOURCE_EVENT_TYPE_COMPLETED\x10\x02\x12\x1e\n\x1aRESOURCE_EVENT_TYPE_FAILED\x10\x03\x12!\n\x1dRESOURCE_EVENT_TYPE_CANCELLED\x10\x04\x12!\n\x1dRESOURCE_EVENT_TYPE_TIMED_OUT\x10\x05\x12\x1e\n\x1aRESOURCE_EVENT_TYPE_STREAM\x10\x06*<\n\x14WorkflowRunEventType\x12$\n 
WORKFLOW_RUN_EVENT_TYPE_FINISHED\x10\x00\x32\xf8\x06\n\nDispatcher\x12=\n\x08Register\x12\x16.WorkerRegisterRequest\x1a\x17.WorkerRegisterResponse\"\x00\x12\x33\n\x06Listen\x12\x14.WorkerListenRequest\x1a\x0f.AssignedAction\"\x00\x30\x01\x12\x35\n\x08ListenV2\x12\x14.WorkerListenRequest\x1a\x0f.AssignedAction\"\x00\x30\x01\x12\x34\n\tHeartbeat\x12\x11.HeartbeatRequest\x1a\x12.HeartbeatResponse\"\x00\x12R\n\x19SubscribeToWorkflowEvents\x12!.SubscribeToWorkflowEventsRequest\x1a\x0e.WorkflowEvent\"\x00\x30\x01\x12S\n\x17SubscribeToWorkflowRuns\x12\x1f.SubscribeToWorkflowRunsRequest\x1a\x11.WorkflowRunEvent\"\x00(\x01\x30\x01\x12?\n\x13SendStepActionEvent\x12\x10.StepActionEvent\x1a\x14.ActionEventResponse\"\x00\x12G\n\x17SendGroupKeyActionEvent\x12\x14.GroupKeyActionEvent\x1a\x14.ActionEventResponse\"\x00\x12<\n\x10PutOverridesData\x12\x0e.OverridesData\x1a\x16.OverridesDataResponse\"\x00\x12\x46\n\x0bUnsubscribe\x12\x19.WorkerUnsubscribeRequest\x1a\x1a.WorkerUnsubscribeResponse\"\x00\x12\x43\n\x0eRefreshTimeout\x12\x16.RefreshTimeoutRequest\x1a\x17.RefreshTimeoutResponse\"\x00\x12:\n\x0bReleaseSlot\x12\x13.ReleaseSlotRequest\x1a\x14.ReleaseSlotResponse\"\x00\x12O\n\x12UpsertWorkerLabels\x12\x1a.UpsertWorkerLabelsRequest\x1a\x1b.UpsertWorkerLabelsResponse\"\x00\x42GZEgithub.com/hatchet-dev/hatchet/internal/services/dispatcher/contractsb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -27,20 +27,20 @@ if not _descriptor._USE_C_DESCRIPTORS: _globals['_WORKERREGISTERREQUEST_LABELSENTRY']._serialized_options = b'8\001' _globals['_UPSERTWORKERLABELSREQUEST_LABELSENTRY']._loaded_options = None _globals['_UPSERTWORKERLABELSREQUEST_LABELSENTRY']._serialized_options = b'8\001' - _globals['_SDKS']._serialized_start=3684 - _globals['_SDKS']._serialized_end=3739 - _globals['_ACTIONTYPE']._serialized_start=3741 - _globals['_ACTIONTYPE']._serialized_end=3819 - _globals['_GROUPKEYACTIONEVENTTYPE']._serialized_start=3822 - _globals['_GROUPKEYACTIONEVENTTYPE']._serialized_end=3984 - _globals['_STEPACTIONEVENTTYPE']._serialized_start=3987 - _globals['_STEPACTIONEVENTTYPE']._serialized_end=4159 - _globals['_RESOURCETYPE']._serialized_start=4161 - _globals['_RESOURCETYPE']._serialized_end=4262 - _globals['_RESOURCEEVENTTYPE']._serialized_start=4265 - _globals['_RESOURCEEVENTTYPE']._serialized_end=4519 - _globals['_WORKFLOWRUNEVENTTYPE']._serialized_start=4521 - _globals['_WORKFLOWRUNEVENTTYPE']._serialized_end=4581 + _globals['_SDKS']._serialized_start=3724 + _globals['_SDKS']._serialized_end=3779 + _globals['_ACTIONTYPE']._serialized_start=3781 + _globals['_ACTIONTYPE']._serialized_end=3859 + _globals['_GROUPKEYACTIONEVENTTYPE']._serialized_start=3862 + _globals['_GROUPKEYACTIONEVENTTYPE']._serialized_end=4024 + _globals['_STEPACTIONEVENTTYPE']._serialized_start=4027 + _globals['_STEPACTIONEVENTTYPE']._serialized_end=4199 + _globals['_RESOURCETYPE']._serialized_start=4201 + _globals['_RESOURCETYPE']._serialized_end=4302 + _globals['_RESOURCEEVENTTYPE']._serialized_start=4305 + _globals['_RESOURCEEVENTTYPE']._serialized_end=4559 + _globals['_WORKFLOWRUNEVENTTYPE']._serialized_start=4561 + _globals['_WORKFLOWRUNEVENTTYPE']._serialized_end=4621 _globals['_WORKERLABELS']._serialized_start=53 _globals['_WORKERLABELS']._serialized_end=139 _globals['_RUNTIMEINFO']._serialized_start=142 @@ -76,27 +76,27 @@ if not _descriptor._USE_C_DESCRIPTORS: _globals['_SUBSCRIBETOWORKFLOWRUNSREQUEST']._serialized_start=2575 
_globals['_SUBSCRIBETOWORKFLOWRUNSREQUEST']._serialized_end=2630 _globals['_WORKFLOWEVENT']._serialized_start=2633 - _globals['_WORKFLOWEVENT']._serialized_end=2939 - _globals['_WORKFLOWRUNEVENT']._serialized_start=2942 - _globals['_WORKFLOWRUNEVENT']._serialized_end=3110 - _globals['_STEPRUNRESULT']._serialized_start=3113 - _globals['_STEPRUNRESULT']._serialized_end=3251 - _globals['_OVERRIDESDATA']._serialized_start=3253 - _globals['_OVERRIDESDATA']._serialized_end=3340 - _globals['_OVERRIDESDATARESPONSE']._serialized_start=3342 - _globals['_OVERRIDESDATARESPONSE']._serialized_end=3365 - _globals['_HEARTBEATREQUEST']._serialized_start=3367 - _globals['_HEARTBEATREQUEST']._serialized_end=3452 - _globals['_HEARTBEATRESPONSE']._serialized_start=3454 - _globals['_HEARTBEATRESPONSE']._serialized_end=3473 - _globals['_REFRESHTIMEOUTREQUEST']._serialized_start=3475 - _globals['_REFRESHTIMEOUTREQUEST']._serialized_end=3545 - _globals['_REFRESHTIMEOUTRESPONSE']._serialized_start=3547 - _globals['_REFRESHTIMEOUTRESPONSE']._serialized_end=3618 - _globals['_RELEASESLOTREQUEST']._serialized_start=3620 - _globals['_RELEASESLOTREQUEST']._serialized_end=3659 - _globals['_RELEASESLOTRESPONSE']._serialized_start=3661 - _globals['_RELEASESLOTRESPONSE']._serialized_end=3682 - _globals['_DISPATCHER']._serialized_start=4584 - _globals['_DISPATCHER']._serialized_end=5472 + _globals['_WORKFLOWEVENT']._serialized_end=2979 + _globals['_WORKFLOWRUNEVENT']._serialized_start=2982 + _globals['_WORKFLOWRUNEVENT']._serialized_end=3150 + _globals['_STEPRUNRESULT']._serialized_start=3153 + _globals['_STEPRUNRESULT']._serialized_end=3291 + _globals['_OVERRIDESDATA']._serialized_start=3293 + _globals['_OVERRIDESDATA']._serialized_end=3380 + _globals['_OVERRIDESDATARESPONSE']._serialized_start=3382 + _globals['_OVERRIDESDATARESPONSE']._serialized_end=3405 + _globals['_HEARTBEATREQUEST']._serialized_start=3407 + _globals['_HEARTBEATREQUEST']._serialized_end=3492 + _globals['_HEARTBEATRESPONSE']._serialized_start=3494 + _globals['_HEARTBEATRESPONSE']._serialized_end=3513 + _globals['_REFRESHTIMEOUTREQUEST']._serialized_start=3515 + _globals['_REFRESHTIMEOUTREQUEST']._serialized_end=3585 + _globals['_REFRESHTIMEOUTRESPONSE']._serialized_start=3587 + _globals['_REFRESHTIMEOUTRESPONSE']._serialized_end=3658 + _globals['_RELEASESLOTREQUEST']._serialized_start=3660 + _globals['_RELEASESLOTREQUEST']._serialized_end=3699 + _globals['_RELEASESLOTRESPONSE']._serialized_start=3701 + _globals['_RELEASESLOTRESPONSE']._serialized_end=3722 + _globals['_DISPATCHER']._serialized_start=4624 + _globals['_DISPATCHER']._serialized_end=5512 # @@protoc_insertion_point(module_scope) diff --git a/sdks/python/hatchet_sdk/contracts/dispatcher_pb2.pyi b/sdks/python/hatchet_sdk/contracts/dispatcher_pb2.pyi index e59654169..abf3b7085 100644 --- a/sdks/python/hatchet_sdk/contracts/dispatcher_pb2.pyi +++ b/sdks/python/hatchet_sdk/contracts/dispatcher_pb2.pyi @@ -295,7 +295,7 @@ class SubscribeToWorkflowRunsRequest(_message.Message): def __init__(self, workflowRunId: _Optional[str] = ...) -> None: ... 
class WorkflowEvent(_message.Message): - __slots__ = ("workflowRunId", "resourceType", "eventType", "resourceId", "eventTimestamp", "eventPayload", "hangup", "stepRetries", "retryCount") + __slots__ = ("workflowRunId", "resourceType", "eventType", "resourceId", "eventTimestamp", "eventPayload", "hangup", "stepRetries", "retryCount", "eventIndex") WORKFLOWRUNID_FIELD_NUMBER: _ClassVar[int] RESOURCETYPE_FIELD_NUMBER: _ClassVar[int] EVENTTYPE_FIELD_NUMBER: _ClassVar[int] @@ -305,6 +305,7 @@ class WorkflowEvent(_message.Message): HANGUP_FIELD_NUMBER: _ClassVar[int] STEPRETRIES_FIELD_NUMBER: _ClassVar[int] RETRYCOUNT_FIELD_NUMBER: _ClassVar[int] + EVENTINDEX_FIELD_NUMBER: _ClassVar[int] workflowRunId: str resourceType: ResourceType eventType: ResourceEventType @@ -314,7 +315,8 @@ class WorkflowEvent(_message.Message): hangup: bool stepRetries: int retryCount: int - def __init__(self, workflowRunId: _Optional[str] = ..., resourceType: _Optional[_Union[ResourceType, str]] = ..., eventType: _Optional[_Union[ResourceEventType, str]] = ..., resourceId: _Optional[str] = ..., eventTimestamp: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., eventPayload: _Optional[str] = ..., hangup: bool = ..., stepRetries: _Optional[int] = ..., retryCount: _Optional[int] = ...) -> None: ... + eventIndex: int + def __init__(self, workflowRunId: _Optional[str] = ..., resourceType: _Optional[_Union[ResourceType, str]] = ..., eventType: _Optional[_Union[ResourceEventType, str]] = ..., resourceId: _Optional[str] = ..., eventTimestamp: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., eventPayload: _Optional[str] = ..., hangup: bool = ..., stepRetries: _Optional[int] = ..., retryCount: _Optional[int] = ..., eventIndex: _Optional[int] = ...) -> None: ... class WorkflowRunEvent(_message.Message): __slots__ = ("workflowRunId", "eventType", "eventTimestamp", "results") diff --git a/sdks/python/hatchet_sdk/contracts/events_pb2.py b/sdks/python/hatchet_sdk/contracts/events_pb2.py index e4d318f4e..08611bdfc 100644 --- a/sdks/python/hatchet_sdk/contracts/events_pb2.py +++ b/sdks/python/hatchet_sdk/contracts/events_pb2.py @@ -15,7 +15,7 @@ _sym_db = _symbol_database.Default() from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0c\x65vents.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\xd2\x01\n\x05\x45vent\x12\x10\n\x08tenantId\x18\x01 \x01(\t\x12\x0f\n\x07\x65ventId\x18\x02 \x01(\t\x12\x0b\n\x03key\x18\x03 \x01(\t\x12\x0f\n\x07payload\x18\x04 \x01(\t\x12\x32\n\x0e\x65ventTimestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x1f\n\x12\x61\x64\x64itionalMetadata\x18\x06 \x01(\tH\x00\x88\x01\x01\x12\x12\n\x05scope\x18\x07 \x01(\tH\x01\x88\x01\x01\x42\x15\n\x13_additionalMetadataB\x08\n\x06_scope\" \n\x06\x45vents\x12\x16\n\x06\x65vents\x18\x01 \x03(\x0b\x32\x06.Event\"\xc2\x01\n\rPutLogRequest\x12\x11\n\tstepRunId\x18\x01 \x01(\t\x12-\n\tcreatedAt\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0f\n\x07message\x18\x03 \x01(\t\x12\x12\n\x05level\x18\x04 \x01(\tH\x00\x88\x01\x01\x12\x10\n\x08metadata\x18\x05 \x01(\t\x12\x1b\n\x0etaskRetryCount\x18\x06 \x01(\x05H\x01\x88\x01\x01\x42\x08\n\x06_levelB\x11\n\x0f_taskRetryCount\"\x10\n\x0ePutLogResponse\"|\n\x15PutStreamEventRequest\x12\x11\n\tstepRunId\x18\x01 \x01(\t\x12-\n\tcreatedAt\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0f\n\x07message\x18\x03 \x01(\x0c\x12\x10\n\x08metadata\x18\x05 
\x01(\t\"\x18\n\x16PutStreamEventResponse\"9\n\x14\x42ulkPushEventRequest\x12!\n\x06\x65vents\x18\x01 \x03(\x0b\x32\x11.PushEventRequest\"\xde\x01\n\x10PushEventRequest\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x0f\n\x07payload\x18\x02 \x01(\t\x12\x32\n\x0e\x65ventTimestamp\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x1f\n\x12\x61\x64\x64itionalMetadata\x18\x04 \x01(\tH\x00\x88\x01\x01\x12\x15\n\x08priority\x18\x05 \x01(\x05H\x01\x88\x01\x01\x12\x12\n\x05scope\x18\x06 \x01(\tH\x02\x88\x01\x01\x42\x15\n\x13_additionalMetadataB\x0b\n\t_priorityB\x08\n\x06_scope\"%\n\x12ReplayEventRequest\x12\x0f\n\x07\x65ventId\x18\x01 \x01(\t2\x88\x02\n\rEventsService\x12#\n\x04Push\x12\x11.PushEventRequest\x1a\x06.Event\"\x00\x12,\n\x08\x42ulkPush\x12\x15.BulkPushEventRequest\x1a\x07.Events\"\x00\x12\x32\n\x11ReplaySingleEvent\x12\x13.ReplayEventRequest\x1a\x06.Event\"\x00\x12+\n\x06PutLog\x12\x0e.PutLogRequest\x1a\x0f.PutLogResponse\"\x00\x12\x43\n\x0ePutStreamEvent\x12\x16.PutStreamEventRequest\x1a\x17.PutStreamEventResponse\"\x00\x42\x45ZCgithub.com/hatchet-dev/hatchet/internal/services/ingestor/contractsb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0c\x65vents.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\xd2\x01\n\x05\x45vent\x12\x10\n\x08tenantId\x18\x01 \x01(\t\x12\x0f\n\x07\x65ventId\x18\x02 \x01(\t\x12\x0b\n\x03key\x18\x03 \x01(\t\x12\x0f\n\x07payload\x18\x04 \x01(\t\x12\x32\n\x0e\x65ventTimestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x1f\n\x12\x61\x64\x64itionalMetadata\x18\x06 \x01(\tH\x00\x88\x01\x01\x12\x12\n\x05scope\x18\x07 \x01(\tH\x01\x88\x01\x01\x42\x15\n\x13_additionalMetadataB\x08\n\x06_scope\" \n\x06\x45vents\x12\x16\n\x06\x65vents\x18\x01 \x03(\x0b\x32\x06.Event\"\xc2\x01\n\rPutLogRequest\x12\x11\n\tstepRunId\x18\x01 \x01(\t\x12-\n\tcreatedAt\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0f\n\x07message\x18\x03 \x01(\t\x12\x12\n\x05level\x18\x04 \x01(\tH\x00\x88\x01\x01\x12\x10\n\x08metadata\x18\x05 \x01(\t\x12\x1b\n\x0etaskRetryCount\x18\x06 \x01(\x05H\x01\x88\x01\x01\x42\x08\n\x06_levelB\x11\n\x0f_taskRetryCount\"\x10\n\x0ePutLogResponse\"\xa4\x01\n\x15PutStreamEventRequest\x12\x11\n\tstepRunId\x18\x01 \x01(\t\x12-\n\tcreatedAt\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0f\n\x07message\x18\x03 \x01(\x0c\x12\x10\n\x08metadata\x18\x05 \x01(\t\x12\x17\n\neventIndex\x18\x06 \x01(\x03H\x00\x88\x01\x01\x42\r\n\x0b_eventIndex\"\x18\n\x16PutStreamEventResponse\"9\n\x14\x42ulkPushEventRequest\x12!\n\x06\x65vents\x18\x01 \x03(\x0b\x32\x11.PushEventRequest\"\xde\x01\n\x10PushEventRequest\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x0f\n\x07payload\x18\x02 \x01(\t\x12\x32\n\x0e\x65ventTimestamp\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x1f\n\x12\x61\x64\x64itionalMetadata\x18\x04 \x01(\tH\x00\x88\x01\x01\x12\x15\n\x08priority\x18\x05 \x01(\x05H\x01\x88\x01\x01\x12\x12\n\x05scope\x18\x06 \x01(\tH\x02\x88\x01\x01\x42\x15\n\x13_additionalMetadataB\x0b\n\t_priorityB\x08\n\x06_scope\"%\n\x12ReplayEventRequest\x12\x0f\n\x07\x65ventId\x18\x01 
\x01(\t2\x88\x02\n\rEventsService\x12#\n\x04Push\x12\x11.PushEventRequest\x1a\x06.Event\"\x00\x12,\n\x08\x42ulkPush\x12\x15.BulkPushEventRequest\x1a\x07.Events\"\x00\x12\x32\n\x11ReplaySingleEvent\x12\x13.ReplayEventRequest\x1a\x06.Event\"\x00\x12+\n\x06PutLog\x12\x0e.PutLogRequest\x1a\x0f.PutLogResponse\"\x00\x12\x43\n\x0ePutStreamEvent\x12\x16.PutStreamEventRequest\x1a\x17.PutStreamEventResponse\"\x00\x42\x45ZCgithub.com/hatchet-dev/hatchet/internal/services/ingestor/contractsb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -31,16 +31,16 @@ if not _descriptor._USE_C_DESCRIPTORS: _globals['_PUTLOGREQUEST']._serialized_end=491 _globals['_PUTLOGRESPONSE']._serialized_start=493 _globals['_PUTLOGRESPONSE']._serialized_end=509 - _globals['_PUTSTREAMEVENTREQUEST']._serialized_start=511 - _globals['_PUTSTREAMEVENTREQUEST']._serialized_end=635 - _globals['_PUTSTREAMEVENTRESPONSE']._serialized_start=637 - _globals['_PUTSTREAMEVENTRESPONSE']._serialized_end=661 - _globals['_BULKPUSHEVENTREQUEST']._serialized_start=663 - _globals['_BULKPUSHEVENTREQUEST']._serialized_end=720 - _globals['_PUSHEVENTREQUEST']._serialized_start=723 - _globals['_PUSHEVENTREQUEST']._serialized_end=945 - _globals['_REPLAYEVENTREQUEST']._serialized_start=947 - _globals['_REPLAYEVENTREQUEST']._serialized_end=984 - _globals['_EVENTSSERVICE']._serialized_start=987 - _globals['_EVENTSSERVICE']._serialized_end=1251 + _globals['_PUTSTREAMEVENTREQUEST']._serialized_start=512 + _globals['_PUTSTREAMEVENTREQUEST']._serialized_end=676 + _globals['_PUTSTREAMEVENTRESPONSE']._serialized_start=678 + _globals['_PUTSTREAMEVENTRESPONSE']._serialized_end=702 + _globals['_BULKPUSHEVENTREQUEST']._serialized_start=704 + _globals['_BULKPUSHEVENTREQUEST']._serialized_end=761 + _globals['_PUSHEVENTREQUEST']._serialized_start=764 + _globals['_PUSHEVENTREQUEST']._serialized_end=986 + _globals['_REPLAYEVENTREQUEST']._serialized_start=988 + _globals['_REPLAYEVENTREQUEST']._serialized_end=1025 + _globals['_EVENTSSERVICE']._serialized_start=1028 + _globals['_EVENTSSERVICE']._serialized_end=1292 # @@protoc_insertion_point(module_scope) diff --git a/sdks/python/hatchet_sdk/contracts/events_pb2.pyi b/sdks/python/hatchet_sdk/contracts/events_pb2.pyi index 9661e64d9..dd545ffad 100644 --- a/sdks/python/hatchet_sdk/contracts/events_pb2.pyi +++ b/sdks/python/hatchet_sdk/contracts/events_pb2.pyi @@ -51,16 +51,18 @@ class PutLogResponse(_message.Message): def __init__(self) -> None: ... class PutStreamEventRequest(_message.Message): - __slots__ = ("stepRunId", "createdAt", "message", "metadata") + __slots__ = ("stepRunId", "createdAt", "message", "metadata", "eventIndex") STEPRUNID_FIELD_NUMBER: _ClassVar[int] CREATEDAT_FIELD_NUMBER: _ClassVar[int] MESSAGE_FIELD_NUMBER: _ClassVar[int] METADATA_FIELD_NUMBER: _ClassVar[int] + EVENTINDEX_FIELD_NUMBER: _ClassVar[int] stepRunId: str createdAt: _timestamp_pb2.Timestamp message: bytes metadata: str - def __init__(self, stepRunId: _Optional[str] = ..., createdAt: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., message: _Optional[bytes] = ..., metadata: _Optional[str] = ...) -> None: ... + eventIndex: int + def __init__(self, stepRunId: _Optional[str] = ..., createdAt: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., message: _Optional[bytes] = ..., metadata: _Optional[str] = ..., eventIndex: _Optional[int] = ...) -> None: ... 
class PutStreamEventResponse(_message.Message): __slots__ = () diff --git a/sdks/python/hatchet_sdk/contracts/v1/workflows_pb2.py b/sdks/python/hatchet_sdk/contracts/v1/workflows_pb2.py index cdf6c89fd..0ebf222bf 100644 --- a/sdks/python/hatchet_sdk/contracts/v1/workflows_pb2.py +++ b/sdks/python/hatchet_sdk/contracts/v1/workflows_pb2.py @@ -16,7 +16,7 @@ from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__ from hatchet_sdk.contracts.v1.shared import condition_pb2 as v1_dot_shared_dot_condition__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x12v1/workflows.proto\x12\x02v1\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x19v1/shared/condition.proto\"Z\n\x12\x43\x61ncelTasksRequest\x12\x13\n\x0b\x65xternalIds\x18\x01 \x03(\t\x12$\n\x06\x66ilter\x18\x02 \x01(\x0b\x32\x0f.v1.TasksFilterH\x00\x88\x01\x01\x42\t\n\x07_filter\"Z\n\x12ReplayTasksRequest\x12\x13\n\x0b\x65xternalIds\x18\x01 \x03(\t\x12$\n\x06\x66ilter\x18\x02 \x01(\x0b\x32\x0f.v1.TasksFilterH\x00\x88\x01\x01\x42\t\n\x07_filter\"\xb7\x01\n\x0bTasksFilter\x12\x10\n\x08statuses\x18\x01 \x03(\t\x12)\n\x05since\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12.\n\x05until\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x88\x01\x01\x12\x14\n\x0cworkflow_ids\x18\x04 \x03(\t\x12\x1b\n\x13\x61\x64\x64itional_metadata\x18\x05 \x03(\tB\x08\n\x06_until\".\n\x13\x43\x61ncelTasksResponse\x12\x17\n\x0f\x63\x61ncelled_tasks\x18\x01 \x03(\t\"-\n\x13ReplayTasksResponse\x12\x16\n\x0ereplayed_tasks\x18\x01 \x03(\t\"\x82\x01\n\x19TriggerWorkflowRunRequest\x12\x15\n\rworkflow_name\x18\x01 \x01(\t\x12\r\n\x05input\x18\x02 \x01(\x0c\x12\x1b\n\x13\x61\x64\x64itional_metadata\x18\x03 \x01(\x0c\x12\x15\n\x08priority\x18\x04 \x01(\x05H\x00\x88\x01\x01\x42\x0b\n\t_priority\"1\n\x1aTriggerWorkflowRunResponse\x12\x13\n\x0b\x65xternal_id\x18\x01 \x01(\t\"\xf6\x03\n\x1c\x43reateWorkflowVersionRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x0f\n\x07version\x18\x03 \x01(\t\x12\x16\n\x0e\x65vent_triggers\x18\x04 \x03(\t\x12\x15\n\rcron_triggers\x18\x05 \x03(\t\x12!\n\x05tasks\x18\x06 \x03(\x0b\x32\x12.v1.CreateTaskOpts\x12$\n\x0b\x63oncurrency\x18\x07 \x01(\x0b\x32\x0f.v1.Concurrency\x12\x17\n\ncron_input\x18\x08 \x01(\tH\x00\x88\x01\x01\x12\x30\n\x0fon_failure_task\x18\t \x01(\x0b\x32\x12.v1.CreateTaskOptsH\x01\x88\x01\x01\x12\'\n\x06sticky\x18\n \x01(\x0e\x32\x12.v1.StickyStrategyH\x02\x88\x01\x01\x12\x1d\n\x10\x64\x65\x66\x61ult_priority\x18\x0b \x01(\x05H\x03\x88\x01\x01\x12(\n\x0f\x63oncurrency_arr\x18\x0c \x03(\x0b\x32\x0f.v1.Concurrency\x12*\n\x0f\x64\x65\x66\x61ult_filters\x18\r \x03(\x0b\x32\x11.v1.DefaultFilterB\r\n\x0b_cron_inputB\x12\n\x10_on_failure_taskB\t\n\x07_stickyB\x13\n\x11_default_priority\"T\n\rDefaultFilter\x12\x12\n\nexpression\x18\x01 \x01(\t\x12\r\n\x05scope\x18\x02 \x01(\t\x12\x14\n\x07payload\x18\x03 \x01(\tH\x00\x88\x01\x01\x42\n\n\x08_payload\"\x93\x01\n\x0b\x43oncurrency\x12\x12\n\nexpression\x18\x01 \x01(\t\x12\x15\n\x08max_runs\x18\x02 \x01(\x05H\x00\x88\x01\x01\x12\x39\n\x0elimit_strategy\x18\x03 \x01(\x0e\x32\x1c.v1.ConcurrencyLimitStrategyH\x01\x88\x01\x01\x42\x0b\n\t_max_runsB\x11\n\x0f_limit_strategy\"\xe4\x01\n\x13\x44\x65siredWorkerLabels\x12\x15\n\x08strValue\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x15\n\x08intValue\x18\x02 \x01(\x05H\x01\x88\x01\x01\x12\x15\n\x08required\x18\x03 \x01(\x08H\x02\x88\x01\x01\x12\x32\n\ncomparator\x18\x04 \x01(\x0e\x32\x19.v1.WorkerLabelComparatorH\x03\x88\x01\x01\x12\x13\n\x06weight\x18\x05 
\x01(\x05H\x04\x88\x01\x01\x42\x0b\n\t_strValueB\x0b\n\t_intValueB\x0b\n\t_requiredB\r\n\x0b_comparatorB\t\n\x07_weight\"\xb1\x04\n\x0e\x43reateTaskOpts\x12\x13\n\x0breadable_id\x18\x01 \x01(\t\x12\x0e\n\x06\x61\x63tion\x18\x02 \x01(\t\x12\x0f\n\x07timeout\x18\x03 \x01(\t\x12\x0e\n\x06inputs\x18\x04 \x01(\t\x12\x0f\n\x07parents\x18\x05 \x03(\t\x12\x0f\n\x07retries\x18\x06 \x01(\x05\x12,\n\x0brate_limits\x18\x07 \x03(\x0b\x32\x17.v1.CreateTaskRateLimit\x12;\n\rworker_labels\x18\x08 \x03(\x0b\x32$.v1.CreateTaskOpts.WorkerLabelsEntry\x12\x1b\n\x0e\x62\x61\x63koff_factor\x18\t \x01(\x02H\x00\x88\x01\x01\x12 \n\x13\x62\x61\x63koff_max_seconds\x18\n \x01(\x05H\x01\x88\x01\x01\x12$\n\x0b\x63oncurrency\x18\x0b \x03(\x0b\x32\x0f.v1.Concurrency\x12+\n\nconditions\x18\x0c \x01(\x0b\x32\x12.v1.TaskConditionsH\x02\x88\x01\x01\x12\x1d\n\x10schedule_timeout\x18\r \x01(\tH\x03\x88\x01\x01\x1aL\n\x11WorkerLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12&\n\x05value\x18\x02 \x01(\x0b\x32\x17.v1.DesiredWorkerLabels:\x02\x38\x01\x42\x11\n\x0f_backoff_factorB\x16\n\x14_backoff_max_secondsB\r\n\x0b_conditionsB\x13\n\x11_schedule_timeout\"\xfd\x01\n\x13\x43reateTaskRateLimit\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x12\n\x05units\x18\x02 \x01(\x05H\x00\x88\x01\x01\x12\x15\n\x08key_expr\x18\x03 \x01(\tH\x01\x88\x01\x01\x12\x17\n\nunits_expr\x18\x04 \x01(\tH\x02\x88\x01\x01\x12\x1e\n\x11limit_values_expr\x18\x05 \x01(\tH\x03\x88\x01\x01\x12,\n\x08\x64uration\x18\x06 \x01(\x0e\x32\x15.v1.RateLimitDurationH\x04\x88\x01\x01\x42\x08\n\x06_unitsB\x0b\n\t_key_exprB\r\n\x0b_units_exprB\x14\n\x12_limit_values_exprB\x0b\n\t_duration\"@\n\x1d\x43reateWorkflowVersionResponse\x12\n\n\x02id\x18\x01 \x01(\t\x12\x13\n\x0bworkflow_id\x18\x02 \x01(\t*$\n\x0eStickyStrategy\x12\x08\n\x04SOFT\x10\x00\x12\x08\n\x04HARD\x10\x01*]\n\x11RateLimitDuration\x12\n\n\x06SECOND\x10\x00\x12\n\n\x06MINUTE\x10\x01\x12\x08\n\x04HOUR\x10\x02\x12\x07\n\x03\x44\x41Y\x10\x03\x12\x08\n\x04WEEK\x10\x04\x12\t\n\x05MONTH\x10\x05\x12\x08\n\x04YEAR\x10\x06*\x7f\n\x18\x43oncurrencyLimitStrategy\x12\x16\n\x12\x43\x41NCEL_IN_PROGRESS\x10\x00\x12\x0f\n\x0b\x44ROP_NEWEST\x10\x01\x12\x10\n\x0cQUEUE_NEWEST\x10\x02\x12\x15\n\x11GROUP_ROUND_ROBIN\x10\x03\x12\x11\n\rCANCEL_NEWEST\x10\x04*\x85\x01\n\x15WorkerLabelComparator\x12\t\n\x05\x45QUAL\x10\x00\x12\r\n\tNOT_EQUAL\x10\x01\x12\x10\n\x0cGREATER_THAN\x10\x02\x12\x19\n\x15GREATER_THAN_OR_EQUAL\x10\x03\x12\r\n\tLESS_THAN\x10\x04\x12\x16\n\x12LESS_THAN_OR_EQUAL\x10\x05\x32\xb7\x02\n\x0c\x41\x64minService\x12R\n\x0bPutWorkflow\x12 .v1.CreateWorkflowVersionRequest\x1a!.v1.CreateWorkflowVersionResponse\x12>\n\x0b\x43\x61ncelTasks\x12\x16.v1.CancelTasksRequest\x1a\x17.v1.CancelTasksResponse\x12>\n\x0bReplayTasks\x12\x16.v1.ReplayTasksRequest\x1a\x17.v1.ReplayTasksResponse\x12S\n\x12TriggerWorkflowRun\x12\x1d.v1.TriggerWorkflowRunRequest\x1a\x1e.v1.TriggerWorkflowRunResponseBBZ@github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1b\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x12v1/workflows.proto\x12\x02v1\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x19v1/shared/condition.proto\"Z\n\x12\x43\x61ncelTasksRequest\x12\x13\n\x0b\x65xternalIds\x18\x01 \x03(\t\x12$\n\x06\x66ilter\x18\x02 \x01(\x0b\x32\x0f.v1.TasksFilterH\x00\x88\x01\x01\x42\t\n\x07_filter\"Z\n\x12ReplayTasksRequest\x12\x13\n\x0b\x65xternalIds\x18\x01 \x03(\t\x12$\n\x06\x66ilter\x18\x02 \x01(\x0b\x32\x0f.v1.TasksFilterH\x00\x88\x01\x01\x42\t\n\x07_filter\"\xb7\x01\n\x0bTasksFilter\x12\x10\n\x08statuses\x18\x01 
\x03(\t\x12)\n\x05since\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12.\n\x05until\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x88\x01\x01\x12\x14\n\x0cworkflow_ids\x18\x04 \x03(\t\x12\x1b\n\x13\x61\x64\x64itional_metadata\x18\x05 \x03(\tB\x08\n\x06_until\".\n\x13\x43\x61ncelTasksResponse\x12\x17\n\x0f\x63\x61ncelled_tasks\x18\x01 \x03(\t\"-\n\x13ReplayTasksResponse\x12\x16\n\x0ereplayed_tasks\x18\x01 \x03(\t\"\x82\x01\n\x19TriggerWorkflowRunRequest\x12\x15\n\rworkflow_name\x18\x01 \x01(\t\x12\r\n\x05input\x18\x02 \x01(\x0c\x12\x1b\n\x13\x61\x64\x64itional_metadata\x18\x03 \x01(\x0c\x12\x15\n\x08priority\x18\x04 \x01(\x05H\x00\x88\x01\x01\x42\x0b\n\t_priority\"1\n\x1aTriggerWorkflowRunResponse\x12\x13\n\x0b\x65xternal_id\x18\x01 \x01(\t\"\xf6\x03\n\x1c\x43reateWorkflowVersionRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x0f\n\x07version\x18\x03 \x01(\t\x12\x16\n\x0e\x65vent_triggers\x18\x04 \x03(\t\x12\x15\n\rcron_triggers\x18\x05 \x03(\t\x12!\n\x05tasks\x18\x06 \x03(\x0b\x32\x12.v1.CreateTaskOpts\x12$\n\x0b\x63oncurrency\x18\x07 \x01(\x0b\x32\x0f.v1.Concurrency\x12\x17\n\ncron_input\x18\x08 \x01(\tH\x00\x88\x01\x01\x12\x30\n\x0fon_failure_task\x18\t \x01(\x0b\x32\x12.v1.CreateTaskOptsH\x01\x88\x01\x01\x12\'\n\x06sticky\x18\n \x01(\x0e\x32\x12.v1.StickyStrategyH\x02\x88\x01\x01\x12\x1d\n\x10\x64\x65\x66\x61ult_priority\x18\x0b \x01(\x05H\x03\x88\x01\x01\x12(\n\x0f\x63oncurrency_arr\x18\x0c \x03(\x0b\x32\x0f.v1.Concurrency\x12*\n\x0f\x64\x65\x66\x61ult_filters\x18\r \x03(\x0b\x32\x11.v1.DefaultFilterB\r\n\x0b_cron_inputB\x12\n\x10_on_failure_taskB\t\n\x07_stickyB\x13\n\x11_default_priority\"T\n\rDefaultFilter\x12\x12\n\nexpression\x18\x01 \x01(\t\x12\r\n\x05scope\x18\x02 \x01(\t\x12\x14\n\x07payload\x18\x03 \x01(\x0cH\x00\x88\x01\x01\x42\n\n\x08_payload\"\x93\x01\n\x0b\x43oncurrency\x12\x12\n\nexpression\x18\x01 \x01(\t\x12\x15\n\x08max_runs\x18\x02 \x01(\x05H\x00\x88\x01\x01\x12\x39\n\x0elimit_strategy\x18\x03 \x01(\x0e\x32\x1c.v1.ConcurrencyLimitStrategyH\x01\x88\x01\x01\x42\x0b\n\t_max_runsB\x11\n\x0f_limit_strategy\"\xe4\x01\n\x13\x44\x65siredWorkerLabels\x12\x15\n\x08strValue\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x15\n\x08intValue\x18\x02 \x01(\x05H\x01\x88\x01\x01\x12\x15\n\x08required\x18\x03 \x01(\x08H\x02\x88\x01\x01\x12\x32\n\ncomparator\x18\x04 \x01(\x0e\x32\x19.v1.WorkerLabelComparatorH\x03\x88\x01\x01\x12\x13\n\x06weight\x18\x05 \x01(\x05H\x04\x88\x01\x01\x42\x0b\n\t_strValueB\x0b\n\t_intValueB\x0b\n\t_requiredB\r\n\x0b_comparatorB\t\n\x07_weight\"\xb1\x04\n\x0e\x43reateTaskOpts\x12\x13\n\x0breadable_id\x18\x01 \x01(\t\x12\x0e\n\x06\x61\x63tion\x18\x02 \x01(\t\x12\x0f\n\x07timeout\x18\x03 \x01(\t\x12\x0e\n\x06inputs\x18\x04 \x01(\t\x12\x0f\n\x07parents\x18\x05 \x03(\t\x12\x0f\n\x07retries\x18\x06 \x01(\x05\x12,\n\x0brate_limits\x18\x07 \x03(\x0b\x32\x17.v1.CreateTaskRateLimit\x12;\n\rworker_labels\x18\x08 \x03(\x0b\x32$.v1.CreateTaskOpts.WorkerLabelsEntry\x12\x1b\n\x0e\x62\x61\x63koff_factor\x18\t \x01(\x02H\x00\x88\x01\x01\x12 \n\x13\x62\x61\x63koff_max_seconds\x18\n \x01(\x05H\x01\x88\x01\x01\x12$\n\x0b\x63oncurrency\x18\x0b \x03(\x0b\x32\x0f.v1.Concurrency\x12+\n\nconditions\x18\x0c \x01(\x0b\x32\x12.v1.TaskConditionsH\x02\x88\x01\x01\x12\x1d\n\x10schedule_timeout\x18\r \x01(\tH\x03\x88\x01\x01\x1aL\n\x11WorkerLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12&\n\x05value\x18\x02 
\x01(\x0b\x32\x17.v1.DesiredWorkerLabels:\x02\x38\x01\x42\x11\n\x0f_backoff_factorB\x16\n\x14_backoff_max_secondsB\r\n\x0b_conditionsB\x13\n\x11_schedule_timeout\"\xfd\x01\n\x13\x43reateTaskRateLimit\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x12\n\x05units\x18\x02 \x01(\x05H\x00\x88\x01\x01\x12\x15\n\x08key_expr\x18\x03 \x01(\tH\x01\x88\x01\x01\x12\x17\n\nunits_expr\x18\x04 \x01(\tH\x02\x88\x01\x01\x12\x1e\n\x11limit_values_expr\x18\x05 \x01(\tH\x03\x88\x01\x01\x12,\n\x08\x64uration\x18\x06 \x01(\x0e\x32\x15.v1.RateLimitDurationH\x04\x88\x01\x01\x42\x08\n\x06_unitsB\x0b\n\t_key_exprB\r\n\x0b_units_exprB\x14\n\x12_limit_values_exprB\x0b\n\t_duration\"@\n\x1d\x43reateWorkflowVersionResponse\x12\n\n\x02id\x18\x01 \x01(\t\x12\x13\n\x0bworkflow_id\x18\x02 \x01(\t*$\n\x0eStickyStrategy\x12\x08\n\x04SOFT\x10\x00\x12\x08\n\x04HARD\x10\x01*]\n\x11RateLimitDuration\x12\n\n\x06SECOND\x10\x00\x12\n\n\x06MINUTE\x10\x01\x12\x08\n\x04HOUR\x10\x02\x12\x07\n\x03\x44\x41Y\x10\x03\x12\x08\n\x04WEEK\x10\x04\x12\t\n\x05MONTH\x10\x05\x12\x08\n\x04YEAR\x10\x06*\x7f\n\x18\x43oncurrencyLimitStrategy\x12\x16\n\x12\x43\x41NCEL_IN_PROGRESS\x10\x00\x12\x0f\n\x0b\x44ROP_NEWEST\x10\x01\x12\x10\n\x0cQUEUE_NEWEST\x10\x02\x12\x15\n\x11GROUP_ROUND_ROBIN\x10\x03\x12\x11\n\rCANCEL_NEWEST\x10\x04*\x85\x01\n\x15WorkerLabelComparator\x12\t\n\x05\x45QUAL\x10\x00\x12\r\n\tNOT_EQUAL\x10\x01\x12\x10\n\x0cGREATER_THAN\x10\x02\x12\x19\n\x15GREATER_THAN_OR_EQUAL\x10\x03\x12\r\n\tLESS_THAN\x10\x04\x12\x16\n\x12LESS_THAN_OR_EQUAL\x10\x05\x32\xb7\x02\n\x0c\x41\x64minService\x12R\n\x0bPutWorkflow\x12 .v1.CreateWorkflowVersionRequest\x1a!.v1.CreateWorkflowVersionResponse\x12>\n\x0b\x43\x61ncelTasks\x12\x16.v1.CancelTasksRequest\x1a\x17.v1.CancelTasksResponse\x12>\n\x0bReplayTasks\x12\x16.v1.ReplayTasksRequest\x1a\x17.v1.ReplayTasksResponse\x12S\n\x12TriggerWorkflowRun\x12\x1d.v1.TriggerWorkflowRunRequest\x1a\x1e.v1.TriggerWorkflowRunResponseBBZ@github.com/hatchet-dev/hatchet/internal/services/shared/proto/v1b\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) diff --git a/sdks/python/hatchet_sdk/contracts/v1/workflows_pb2.pyi b/sdks/python/hatchet_sdk/contracts/v1/workflows_pb2.pyi index ab6675a9c..2401257c8 100644 --- a/sdks/python/hatchet_sdk/contracts/v1/workflows_pb2.pyi +++ b/sdks/python/hatchet_sdk/contracts/v1/workflows_pb2.pyi @@ -157,8 +157,8 @@ class DefaultFilter(_message.Message): PAYLOAD_FIELD_NUMBER: _ClassVar[int] expression: str scope: str - payload: str - def __init__(self, expression: _Optional[str] = ..., scope: _Optional[str] = ..., payload: _Optional[str] = ...) -> None: ... + payload: bytes + def __init__(self, expression: _Optional[str] = ..., scope: _Optional[str] = ..., payload: _Optional[bytes] = ...) -> None: ... 
class Concurrency(_message.Message): __slots__ = ("expression", "max_runs", "limit_strategy") diff --git a/sdks/python/hatchet_sdk/exceptions.py b/sdks/python/hatchet_sdk/exceptions.py index 87952e378..44969930c 100644 --- a/sdks/python/hatchet_sdk/exceptions.py +++ b/sdks/python/hatchet_sdk/exceptions.py @@ -1,2 +1,100 @@ -class NonRetryableException(Exception): +import traceback + + +class NonRetryableException(Exception): # noqa: N818 + pass + + +class DedupeViolationError(Exception): + """Raised by the Hatchet library to indicate that a workflow has already been run with this deduplication value.""" + + +class TaskRunError(Exception): + def __init__( + self, + exc: str, + exc_type: str, + trace: str, + ) -> None: + self.exc = exc + self.exc_type = exc_type + self.trace = trace + + def __str__(self) -> str: + return self.serialize() + + def __repr__(self) -> str: + return str(self) + + def serialize(self) -> str: + if not self.exc_type or not self.exc: + return "" + + return ( + self.exc_type.replace(": ", ":::") + + ": " + + self.exc.replace("\n", "\\\n") + + "\n" + + self.trace + ) + + @classmethod + def deserialize(cls, serialized: str) -> "TaskRunError": + if not serialized: + return cls( + exc="", + exc_type="", + trace="", + ) + + try: + header, trace = serialized.split("\n", 1) + exc_type, exc = header.split(": ", 1) + except ValueError: + ## If we get here, we saw an error that was not serialized how we expected, + ## but was also not empty. So we return it as-is and use `HatchetError` as the type. + return cls( + exc=serialized, + exc_type="HatchetError", + trace="", + ) + + exc_type = exc_type.replace(":::", ": ") + exc = exc.replace("\\\n", "\n") + + return cls( + exc=exc, + exc_type=exc_type, + trace=trace, + ) + + @classmethod + def from_exception(cls, exc: Exception) -> "TaskRunError": + return cls( + exc=str(exc), + exc_type=type(exc).__name__, + trace="".join( + traceback.format_exception(type(exc), exc, exc.__traceback__) + ), + ) + + +class FailedTaskRunExceptionGroup(Exception): # noqa: N818 + def __init__(self, message: str, exceptions: list[TaskRunError]): + self.message = message + self.exceptions = exceptions + + super().__init__(message) + + def __str__(self) -> str: + result = [self.message.strip()] + + for i, exc in enumerate(self.exceptions, 1): + result.append(f"\n--- Exception {i} ---") + result.append(str(exc)) + + return "\n".join(result) + + +class LoopAlreadyRunningError(Exception): pass diff --git a/sdks/python/hatchet_sdk/features/cron.py b/sdks/python/hatchet_sdk/features/cron.py index b2a63845b..1b7bdcfaa 100644 --- a/sdks/python/hatchet_sdk/features/cron.py +++ b/sdks/python/hatchet_sdk/features/cron.py @@ -106,8 +106,8 @@ class CronClient(BaseRestClient): create_cron_workflow_trigger_request=CreateCronWorkflowTriggerRequest( cronName=cron_name, cronExpression=validated_input.expression, - input=dict(validated_input.input), - additionalMetadata=dict(validated_input.additional_metadata), + input=validated_input.input, + additionalMetadata=validated_input.additional_metadata, priority=priority, ), ) diff --git a/sdks/python/hatchet_sdk/features/filters.py b/sdks/python/hatchet_sdk/features/filters.py index d99df7d24..3c0463738 100644 --- a/sdks/python/hatchet_sdk/features/filters.py +++ b/sdks/python/hatchet_sdk/features/filters.py @@ -111,7 +111,7 @@ class FiltersClient(BaseRestClient): workflow_id: str, expression: str, scope: str, - payload: JSONSerializableMapping = {}, + payload: JSONSerializableMapping | None = None, ) -> V1Filter: """ Create a 
new filter. @@ -130,7 +130,7 @@ class FiltersClient(BaseRestClient): workflowId=workflow_id, expression=expression, scope=scope, - payload=dict(payload), + payload=payload, ), ) @@ -139,7 +139,7 @@ class FiltersClient(BaseRestClient): workflow_id: str, expression: str, scope: str, - payload: JSONSerializableMapping = {}, + payload: JSONSerializableMapping | None = None, ) -> V1Filter: """ Create a new filter. diff --git a/sdks/python/hatchet_sdk/features/runs.py b/sdks/python/hatchet_sdk/features/runs.py index 15a8b1470..0384d8e2d 100644 --- a/sdks/python/hatchet_sdk/features/runs.py +++ b/sdks/python/hatchet_sdk/features/runs.py @@ -247,7 +247,7 @@ class RunsClient(BaseRestClient): self, workflow_name: str, input: JSONSerializableMapping, - additional_metadata: JSONSerializableMapping = {}, + additional_metadata: JSONSerializableMapping | None = None, priority: int | None = None, ) -> V1WorkflowRunDetails: """ @@ -267,8 +267,8 @@ class RunsClient(BaseRestClient): tenant=self.client_config.tenant_id, v1_trigger_workflow_run_request=V1TriggerWorkflowRunRequest( workflowName=self.client_config.apply_namespace(workflow_name), - input=dict(input), - additionalMetadata=dict(additional_metadata), + input=input, + additionalMetadata=additional_metadata, priority=priority, ), ) @@ -277,7 +277,7 @@ class RunsClient(BaseRestClient): self, workflow_name: str, input: JSONSerializableMapping, - additional_metadata: JSONSerializableMapping = {}, + additional_metadata: JSONSerializableMapping | None = None, priority: int | None = None, ) -> V1WorkflowRunDetails: """ diff --git a/sdks/python/hatchet_sdk/features/scheduled.py b/sdks/python/hatchet_sdk/features/scheduled.py index 39b41b258..2eddcc808 100644 --- a/sdks/python/hatchet_sdk/features/scheduled.py +++ b/sdks/python/hatchet_sdk/features/scheduled.py @@ -1,6 +1,5 @@ import asyncio import datetime -from typing import Optional from hatchet_sdk.clients.rest.api.workflow_api import WorkflowApi from hatchet_sdk.clients.rest.api.workflow_run_api import WorkflowRunApi @@ -62,8 +61,8 @@ class ScheduledClient(BaseRestClient): workflow=self.client_config.apply_namespace(workflow_name), schedule_workflow_run_request=ScheduleWorkflowRunRequest( triggerAt=trigger_at, - input=dict(input), - additionalMetadata=dict(additional_metadata), + input=input, + additionalMetadata=additional_metadata, ), ) @@ -124,9 +123,9 @@ class ScheduledClient(BaseRestClient): workflow_id: str | None = None, parent_workflow_run_id: str | None = None, statuses: list[ScheduledRunStatus] | None = None, - additional_metadata: Optional[JSONSerializableMapping] = None, - order_by_field: Optional[ScheduledWorkflowsOrderByField] = None, - order_by_direction: Optional[WorkflowRunOrderByDirection] = None, + additional_metadata: JSONSerializableMapping | None = None, + order_by_field: ScheduledWorkflowsOrderByField | None = None, + order_by_direction: WorkflowRunOrderByDirection | None = None, ) -> ScheduledWorkflowsList: """ Retrieves a list of scheduled workflows based on provided filters. 
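
The `TaskRunError` helper added to `exceptions.py` earlier in this diff flattens an exception into a single string: a header line of the form `<type>: <message>` (with any `": "` inside the type name escaped as `":::"` and embedded newlines in the message escaped with a backslash), followed by the formatted traceback. `deserialize` reverses this, and anything that does not match that shape is preserved verbatim under the generic `HatchetError` type. The snippet below is a hypothetical round-trip check, not part of the diff, assuming only the module layout shown above:

```python
from hatchet_sdk.exceptions import TaskRunError

try:
    raise ValueError("invalid cron expression")
except ValueError as exc:
    err = TaskRunError.from_exception(exc)

wire = err.serialize()
# First line is "<type>: <message>"; the remaining lines are the traceback.
assert wire.splitlines()[0] == "ValueError: invalid cron expression"

restored = TaskRunError.deserialize(wire)
assert restored.exc_type == "ValueError"
assert restored.exc == "invalid cron expression"

# Payloads that do not match the expected shape fall back to "HatchetError".
fallback = TaskRunError.deserialize("something went wrong")
assert fallback.exc_type == "HatchetError"
assert fallback.exc == "something went wrong"
```
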
@@ -161,9 +160,9 @@ class ScheduledClient(BaseRestClient): workflow_id: str | None = None, parent_workflow_run_id: str | None = None, statuses: list[ScheduledRunStatus] | None = None, - additional_metadata: Optional[JSONSerializableMapping] = None, - order_by_field: Optional[ScheduledWorkflowsOrderByField] = None, - order_by_direction: Optional[WorkflowRunOrderByDirection] = None, + additional_metadata: JSONSerializableMapping | None = None, + order_by_field: ScheduledWorkflowsOrderByField | None = None, + order_by_direction: WorkflowRunOrderByDirection | None = None, ) -> ScheduledWorkflowsList: """ Retrieves a list of scheduled workflows based on provided filters. diff --git a/sdks/python/hatchet_sdk/hatchet.py b/sdks/python/hatchet_sdk/hatchet.py index 15d7772c0..b450ba00e 100644 --- a/sdks/python/hatchet_sdk/hatchet.py +++ b/sdks/python/hatchet_sdk/hatchet.py @@ -1,8 +1,9 @@ import asyncio import logging +from collections.abc import Callable from datetime import timedelta from functools import cached_property -from typing import Any, Callable, Type, Union, cast, overload +from typing import Any, cast, overload from hatchet_sdk import Context, DurableContext from hatchet_sdk.client import Client @@ -181,8 +182,8 @@ class Hatchet: name: str, slots: int = 100, durable_slots: int = 1_000, - labels: dict[str, Union[str, int]] = {}, - workflows: list[BaseWorkflow[Any]] = [], + labels: dict[str, str | int] | None = None, + workflows: list[BaseWorkflow[Any]] | None = None, lifespan: LifespanFn | None = None, ) -> Worker: """ @@ -227,14 +228,14 @@ class Hatchet: name: str, description: str | None = None, input_validator: None = None, - on_events: list[str] = [], - on_crons: list[str] = [], + on_events: list[str] | None = None, + on_crons: list[str] | None = None, version: str | None = None, sticky: StickyStrategy | None = None, default_priority: int = 1, concurrency: ConcurrencyExpression | list[ConcurrencyExpression] | None = None, task_defaults: TaskDefaults = TaskDefaults(), - default_filters: list[DefaultFilter] = [], + default_filters: list[DefaultFilter] | None = None, ) -> Workflow[EmptyModel]: ... @overload @@ -243,15 +244,15 @@ class Hatchet: *, name: str, description: str | None = None, - input_validator: Type[TWorkflowInput], - on_events: list[str] = [], - on_crons: list[str] = [], + input_validator: type[TWorkflowInput], + on_events: list[str] | None = None, + on_crons: list[str] | None = None, version: str | None = None, sticky: StickyStrategy | None = None, default_priority: int = 1, concurrency: ConcurrencyExpression | list[ConcurrencyExpression] | None = None, task_defaults: TaskDefaults = TaskDefaults(), - default_filters: list[DefaultFilter] = [], + default_filters: list[DefaultFilter] | None = None, ) -> Workflow[TWorkflowInput]: ... 
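
A recurring change in `hatchet.py` (and in `filters.py`, `runs.py`, and `scheduled.py` above) is replacing mutable default arguments such as `on_events: list[str] = []` with `list[str] | None = None` and normalizing with `or []` / `or {}` inside the body. Mutable defaults are evaluated once at definition time and shared across calls, which can silently leak state between invocations. The sketch below is illustrative only (the function names are not from the SDK) and shows the failure mode plus the normalization pattern used throughout this diff:

```python
# Buggy: the default list is created once and shared by every call.
def register_buggy(events: list[str] = []) -> list[str]:
    events.append("user:created")
    return events

assert register_buggy() == ["user:created"]
assert register_buggy() == ["user:created", "user:created"]  # state leaked across calls

# Fixed, mirroring the pattern in this diff: default to None, normalize inside.
def register_fixed(events: list[str] | None = None) -> list[str]:
    events = events or []
    events.append("user:created")
    return events

assert register_fixed() == ["user:created"]
assert register_fixed() == ["user:created"]  # a fresh list on every call
```
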
def workflow( @@ -259,15 +260,15 @@ class Hatchet: *, name: str, description: str | None = None, - input_validator: Type[TWorkflowInput] | None = None, - on_events: list[str] = [], - on_crons: list[str] = [], + input_validator: type[TWorkflowInput] | None = None, + on_events: list[str] | None = None, + on_crons: list[str] | None = None, version: str | None = None, sticky: StickyStrategy | None = None, default_priority: int = 1, concurrency: ConcurrencyExpression | list[ConcurrencyExpression] | None = None, task_defaults: TaskDefaults = TaskDefaults(), - default_filters: list[DefaultFilter] = [], + default_filters: list[DefaultFilter] | None = None, ) -> Workflow[EmptyModel] | Workflow[TWorkflowInput]: """ Define a Hatchet workflow, which can then declare `task`s and be `run`, `schedule`d, and so on. @@ -302,15 +303,15 @@ class Hatchet: name=name, version=version, description=description, - on_events=on_events, - on_crons=on_crons, + on_events=on_events or [], + on_crons=on_crons or [], sticky=sticky, concurrency=concurrency, input_validator=input_validator - or cast(Type[TWorkflowInput], EmptyModel), + or cast(type[TWorkflowInput], EmptyModel), task_defaults=task_defaults, default_priority=default_priority, - default_filters=default_filters, + default_filters=default_filters or [], ), self, ) @@ -322,8 +323,8 @@ class Hatchet: name: str | None = None, description: str | None = None, input_validator: None = None, - on_events: list[str] = [], - on_crons: list[str] = [], + on_events: list[str] | None = None, + on_crons: list[str] | None = None, version: str | None = None, sticky: StickyStrategy | None = None, default_priority: int = 1, @@ -331,11 +332,11 @@ class Hatchet: schedule_timeout: Duration = timedelta(minutes=5), execution_timeout: Duration = timedelta(seconds=60), retries: int = 0, - rate_limits: list[RateLimit] = [], - desired_worker_labels: dict[str, DesiredWorkerLabel] = {}, + rate_limits: list[RateLimit] | None = None, + desired_worker_labels: dict[str, DesiredWorkerLabel] | None = None, backoff_factor: float | None = None, backoff_max_seconds: int | None = None, - default_filters: list[DefaultFilter] = [], + default_filters: list[DefaultFilter] | None = None, ) -> Callable[ [Callable[[EmptyModel, Context], R | CoroutineLike[R]]], Standalone[EmptyModel, R], @@ -347,9 +348,9 @@ class Hatchet: *, name: str | None = None, description: str | None = None, - input_validator: Type[TWorkflowInput], - on_events: list[str] = [], - on_crons: list[str] = [], + input_validator: type[TWorkflowInput], + on_events: list[str] | None = None, + on_crons: list[str] | None = None, version: str | None = None, sticky: StickyStrategy | None = None, default_priority: int = 1, @@ -357,11 +358,11 @@ class Hatchet: schedule_timeout: Duration = timedelta(minutes=5), execution_timeout: Duration = timedelta(seconds=60), retries: int = 0, - rate_limits: list[RateLimit] = [], - desired_worker_labels: dict[str, DesiredWorkerLabel] = {}, + rate_limits: list[RateLimit] | None = None, + desired_worker_labels: dict[str, DesiredWorkerLabel] | None = None, backoff_factor: float | None = None, backoff_max_seconds: int | None = None, - default_filters: list[DefaultFilter] = [], + default_filters: list[DefaultFilter] | None = None, ) -> Callable[ [Callable[[TWorkflowInput, Context], R | CoroutineLike[R]]], Standalone[TWorkflowInput, R], @@ -372,9 +373,9 @@ class Hatchet: *, name: str | None = None, description: str | None = None, - input_validator: Type[TWorkflowInput] | None = None, - on_events: list[str] = [], - 
on_crons: list[str] = [], + input_validator: type[TWorkflowInput] | None = None, + on_events: list[str] | None = None, + on_crons: list[str] | None = None, version: str | None = None, sticky: StickyStrategy | None = None, default_priority: int = 1, @@ -382,11 +383,11 @@ class Hatchet: schedule_timeout: Duration = timedelta(minutes=5), execution_timeout: Duration = timedelta(seconds=60), retries: int = 0, - rate_limits: list[RateLimit] = [], - desired_worker_labels: dict[str, DesiredWorkerLabel] = {}, + rate_limits: list[RateLimit] | None = None, + desired_worker_labels: dict[str, DesiredWorkerLabel] | None = None, backoff_factor: float | None = None, backoff_max_seconds: int | None = None, - default_filters: list[DefaultFilter] = [], + default_filters: list[DefaultFilter] | None = None, ) -> ( Callable[ [Callable[[EmptyModel, Context], R | CoroutineLike[R]]], @@ -447,13 +448,13 @@ class Hatchet: name=inferred_name, version=version, description=description, - on_events=on_events, - on_crons=on_crons, + on_events=on_events or [], + on_crons=on_crons or [], sticky=sticky, default_priority=default_priority, input_validator=input_validator - or cast(Type[TWorkflowInput], EmptyModel), - default_filters=default_filters, + or cast(type[TWorkflowInput], EmptyModel), + default_filters=default_filters or [], ), self, ) @@ -471,8 +472,8 @@ class Hatchet: execution_timeout=execution_timeout, parents=[], retries=retries, - rate_limits=rate_limits, - desired_worker_labels=desired_worker_labels, + rate_limits=rate_limits or [], + desired_worker_labels=desired_worker_labels or {}, backoff_factor=backoff_factor, backoff_max_seconds=backoff_max_seconds, concurrency=_concurrency, @@ -494,8 +495,8 @@ class Hatchet: name: str | None = None, description: str | None = None, input_validator: None = None, - on_events: list[str] = [], - on_crons: list[str] = [], + on_events: list[str] | None = None, + on_crons: list[str] | None = None, version: str | None = None, sticky: StickyStrategy | None = None, default_priority: int = 1, @@ -503,11 +504,11 @@ class Hatchet: schedule_timeout: Duration = timedelta(minutes=5), execution_timeout: Duration = timedelta(seconds=60), retries: int = 0, - rate_limits: list[RateLimit] = [], - desired_worker_labels: dict[str, DesiredWorkerLabel] = {}, + rate_limits: list[RateLimit] | None = None, + desired_worker_labels: dict[str, DesiredWorkerLabel] | None = None, backoff_factor: float | None = None, backoff_max_seconds: int | None = None, - default_filters: list[DefaultFilter] = [], + default_filters: list[DefaultFilter] | None = None, ) -> Callable[ [Callable[[EmptyModel, DurableContext], R | CoroutineLike[R]]], Standalone[EmptyModel, R], @@ -519,9 +520,9 @@ class Hatchet: *, name: str | None = None, description: str | None = None, - input_validator: Type[TWorkflowInput], - on_events: list[str] = [], - on_crons: list[str] = [], + input_validator: type[TWorkflowInput], + on_events: list[str] | None = None, + on_crons: list[str] | None = None, version: str | None = None, sticky: StickyStrategy | None = None, default_priority: int = 1, @@ -529,11 +530,11 @@ class Hatchet: schedule_timeout: Duration = timedelta(minutes=5), execution_timeout: Duration = timedelta(seconds=60), retries: int = 0, - rate_limits: list[RateLimit] = [], - desired_worker_labels: dict[str, DesiredWorkerLabel] = {}, + rate_limits: list[RateLimit] | None = None, + desired_worker_labels: dict[str, DesiredWorkerLabel] | None = None, backoff_factor: float | None = None, backoff_max_seconds: int | None = None, - 
default_filters: list[DefaultFilter] = [], + default_filters: list[DefaultFilter] | None = None, ) -> Callable[ [Callable[[TWorkflowInput, DurableContext], R | CoroutineLike[R]]], Standalone[TWorkflowInput, R], @@ -544,9 +545,9 @@ class Hatchet: *, name: str | None = None, description: str | None = None, - input_validator: Type[TWorkflowInput] | None = None, - on_events: list[str] = [], - on_crons: list[str] = [], + input_validator: type[TWorkflowInput] | None = None, + on_events: list[str] | None = None, + on_crons: list[str] | None = None, version: str | None = None, sticky: StickyStrategy | None = None, default_priority: int = 1, @@ -554,11 +555,11 @@ class Hatchet: schedule_timeout: Duration = timedelta(minutes=5), execution_timeout: Duration = timedelta(seconds=60), retries: int = 0, - rate_limits: list[RateLimit] = [], - desired_worker_labels: dict[str, DesiredWorkerLabel] = {}, + rate_limits: list[RateLimit] | None = None, + desired_worker_labels: dict[str, DesiredWorkerLabel] | None = None, backoff_factor: float | None = None, backoff_max_seconds: int | None = None, - default_filters: list[DefaultFilter] = [], + default_filters: list[DefaultFilter] | None = None, ) -> ( Callable[ [Callable[[EmptyModel, DurableContext], R | CoroutineLike[R]]], @@ -618,13 +619,13 @@ class Hatchet: name=inferred_name, version=version, description=description, - on_events=on_events, - on_crons=on_crons, + on_events=on_events or [], + on_crons=on_crons or [], sticky=sticky, input_validator=input_validator - or cast(Type[TWorkflowInput], EmptyModel), + or cast(type[TWorkflowInput], EmptyModel), default_priority=default_priority, - default_filters=default_filters, + default_filters=default_filters or [], ), self, ) @@ -642,8 +643,8 @@ class Hatchet: execution_timeout=execution_timeout, parents=[], retries=retries, - rate_limits=rate_limits, - desired_worker_labels=desired_worker_labels, + rate_limits=rate_limits or [], + desired_worker_labels=desired_worker_labels or {}, backoff_factor=backoff_factor, backoff_max_seconds=backoff_max_seconds, concurrency=_concurrency, diff --git a/sdks/python/hatchet_sdk/opentelemetry/instrumentor.py b/sdks/python/hatchet_sdk/opentelemetry/instrumentor.py index 8c5c47c48..601b3e5ae 100644 --- a/sdks/python/hatchet_sdk/opentelemetry/instrumentor.py +++ b/sdks/python/hatchet_sdk/opentelemetry/instrumentor.py @@ -1,6 +1,7 @@ import json +from collections.abc import Callable, Collection, Coroutine from importlib.metadata import version -from typing import Any, Callable, Collection, Coroutine, Union, cast +from typing import Any, cast from hatchet_sdk.contracts import workflows_pb2 as v0_workflow_protos from hatchet_sdk.utils.typing import JSONSerializableMapping @@ -23,10 +24,10 @@ try: TraceContextTextMapPropagator, ) from wrapt import wrap_function_wrapper # type: ignore[import-untyped] -except (RuntimeError, ImportError, ModuleNotFoundError): +except (RuntimeError, ImportError, ModuleNotFoundError) as e: raise ModuleNotFoundError( "To use the HatchetInstrumentor, you must install Hatchet's `otel` extra using (e.g.) 
`pip install hatchet-sdk[otel]`" - ) + ) from e import inspect from datetime import datetime @@ -204,7 +205,7 @@ class HatchetInstrumentor(BaseInstrumentor): # type: ignore[misc] super().__init__() def instrumentation_dependencies(self) -> Collection[str]: - return tuple() + return () def _instrument(self, **kwargs: InstrumentKwargs) -> None: self._tracer = get_tracer(__name__, hatchet_sdk_version, self.tracer_provider) @@ -394,11 +395,11 @@ class HatchetInstrumentor(BaseInstrumentor): # type: ignore[misc] options = PushEventOptions( **options.model_dump(exclude={"additional_metadata"}), additional_metadata=_inject_traceparent_into_metadata( - dict(options.additional_metadata), + options.additional_metadata, ), ) - return wrapped(event_key, dict(payload), options) + return wrapped(event_key, payload, options) ## IMPORTANT: Keep these types in sync with the wrapped method's signature def _wrap_bulk_push_event( @@ -432,7 +433,7 @@ class HatchetInstrumentor(BaseInstrumentor): # type: ignore[misc] BulkPushEventWithMetadata( **event.model_dump(exclude={"additional_metadata"}), additional_metadata=_inject_traceparent_into_metadata( - dict(event.additional_metadata), + event.additional_metadata, ), ) for event in bulk_events @@ -494,7 +495,7 @@ class HatchetInstrumentor(BaseInstrumentor): # type: ignore[misc] options = TriggerWorkflowOptions( **options.model_dump(exclude={"additional_metadata"}), additional_metadata=_inject_traceparent_into_metadata( - dict(options.additional_metadata), + options.additional_metadata, ), ) @@ -551,19 +552,18 @@ class HatchetInstrumentor(BaseInstrumentor): # type: ignore[misc] options = TriggerWorkflowOptions( **options.model_dump(exclude={"additional_metadata"}), additional_metadata=_inject_traceparent_into_metadata( - dict(options.additional_metadata), + options.additional_metadata, ), ) return await wrapped(workflow_name, payload, options) - def _ts_to_iso(self, ts: Union[datetime, timestamp_pb2.Timestamp]) -> str: + def _ts_to_iso(self, ts: datetime | timestamp_pb2.Timestamp) -> str: if isinstance(ts, datetime): return ts.isoformat() - elif isinstance(ts, timestamp_pb2.Timestamp): + if isinstance(ts, timestamp_pb2.Timestamp): return ts.ToJsonString() - else: - raise TypeError(f"Unsupported type for timestamp conversion: {type(ts)}") + raise TypeError(f"Unsupported type for timestamp conversion: {type(ts)}") ## IMPORTANT: Keep these types in sync with the wrapped method's signature def _wrap_schedule_workflow( @@ -571,7 +571,7 @@ class HatchetInstrumentor(BaseInstrumentor): # type: ignore[misc] wrapped: Callable[ [ str, - list[Union[datetime, timestamp_pb2.Timestamp]], + list[datetime | timestamp_pb2.Timestamp], JSONSerializableMapping, ScheduleTriggerWorkflowOptions, ], @@ -580,14 +580,14 @@ class HatchetInstrumentor(BaseInstrumentor): # type: ignore[misc] instance: AdminClient, args: tuple[ str, - list[Union[datetime, timestamp_pb2.Timestamp]], + list[datetime | timestamp_pb2.Timestamp], JSONSerializableMapping, ScheduleTriggerWorkflowOptions, ], kwargs: dict[ str, str - | list[Union[datetime, timestamp_pb2.Timestamp]] + | list[datetime | timestamp_pb2.Timestamp] | JSONSerializableMapping | ScheduleTriggerWorkflowOptions, ], @@ -595,7 +595,7 @@ class HatchetInstrumentor(BaseInstrumentor): # type: ignore[misc] params = self.extract_bound_args(wrapped, args, kwargs) workflow_name = cast(str, params[0]) - schedules = cast(list[Union[datetime, timestamp_pb2.Timestamp]], params[1]) + schedules = cast(list[datetime | timestamp_pb2.Timestamp], params[1]) input = 
cast(JSONSerializableMapping, params[2]) options = cast( ScheduleTriggerWorkflowOptions, @@ -633,7 +633,7 @@ class HatchetInstrumentor(BaseInstrumentor): # type: ignore[misc] options = ScheduleTriggerWorkflowOptions( **options.model_dump(exclude={"additional_metadata"}), additional_metadata=_inject_traceparent_into_metadata( - dict(options.additional_metadata), + options.additional_metadata, ), ) @@ -673,7 +673,7 @@ class HatchetInstrumentor(BaseInstrumentor): # type: ignore[misc] options=TriggerWorkflowOptions( **config.options.model_dump(exclude={"additional_metadata"}), additional_metadata=_inject_traceparent_into_metadata( - dict(config.options.additional_metadata), + config.options.additional_metadata, ), ), ) @@ -705,7 +705,7 @@ class HatchetInstrumentor(BaseInstrumentor): # type: ignore[misc] options=TriggerWorkflowOptions( **config.options.model_dump(exclude={"additional_metadata"}), additional_metadata=_inject_traceparent_into_metadata( - dict(config.options.additional_metadata), + config.options.additional_metadata, ), ), ) diff --git a/sdks/python/hatchet_sdk/runnables/action.py b/sdks/python/hatchet_sdk/runnables/action.py index a3b279274..0c961ec16 100644 --- a/sdks/python/hatchet_sdk/runnables/action.py +++ b/sdks/python/hatchet_sdk/runnables/action.py @@ -121,5 +121,4 @@ class Action(BaseModel): """ if self.action_type == ActionType.START_GET_GROUP_KEY: return f"{self.get_group_key_run_id}/{self.retry_count}" - else: - return f"{self.step_run_id}/{self.retry_count}" + return f"{self.step_run_id}/{self.retry_count}" diff --git a/sdks/python/hatchet_sdk/runnables/contextvars.py b/sdks/python/hatchet_sdk/runnables/contextvars.py index 80fea51ee..0b6f3e832 100644 --- a/sdks/python/hatchet_sdk/runnables/contextvars.py +++ b/sdks/python/hatchet_sdk/runnables/contextvars.py @@ -1,4 +1,5 @@ import asyncio +import threading from collections import Counter from contextvars import ContextVar @@ -15,3 +16,21 @@ ctx_worker_id: ContextVar[str | None] = ContextVar("ctx_worker_id", default=None workflow_spawn_indices = Counter[ActionKey]() spawn_index_lock = asyncio.Lock() + + +class TaskCounter: + def __init__(self) -> None: + self._count = 0 + self._lock = threading.Lock() + + def increment(self) -> int: + with self._lock: + self._count += 1 + return self._count + + @property + def value(self) -> int: + return self._count + + +task_count = TaskCounter() diff --git a/sdks/python/hatchet_sdk/runnables/task.py b/sdks/python/hatchet_sdk/runnables/task.py index 60ecc03eb..422134e6a 100644 --- a/sdks/python/hatchet_sdk/runnables/task.py +++ b/sdks/python/hatchet_sdk/runnables/task.py @@ -1,5 +1,5 @@ -from datetime import timedelta -from typing import TYPE_CHECKING, Any, Callable, Generic, Union, cast, get_type_hints +from collections.abc import Callable +from typing import TYPE_CHECKING, Any, Generic, cast, get_type_hints from hatchet_sdk.context.context import Context, DurableContext from hatchet_sdk.contracts.v1.shared.condition_pb2 import TaskConditions @@ -40,28 +40,30 @@ if TYPE_CHECKING: class Task(Generic[TWorkflowInput, R]): def __init__( self, - _fn: Union[ + _fn: ( Callable[[TWorkflowInput, Context], R | CoroutineLike[R]] - | Callable[[TWorkflowInput, Context], AwaitableLike[R]], - Callable[[TWorkflowInput, DurableContext], R | CoroutineLike[R]] - | Callable[[TWorkflowInput, DurableContext], AwaitableLike[R]], - ], + | Callable[[TWorkflowInput, Context], AwaitableLike[R]] + | ( + Callable[[TWorkflowInput, DurableContext], R | CoroutineLike[R]] + | Callable[[TWorkflowInput, 
DurableContext], AwaitableLike[R]] + ) + ), is_durable: bool, type: StepType, workflow: "Workflow[TWorkflowInput]", name: str, - execution_timeout: Duration = timedelta(seconds=60), - schedule_timeout: Duration = timedelta(minutes=5), - parents: "list[Task[TWorkflowInput, Any]]" = [], - retries: int = 0, - rate_limits: list[CreateTaskRateLimit] = [], - desired_worker_labels: dict[str, DesiredWorkerLabels] = {}, - backoff_factor: float | None = None, - backoff_max_seconds: int | None = None, - concurrency: list[ConcurrencyExpression] = [], - wait_for: list[Condition | OrGroup] = [], - skip_if: list[Condition | OrGroup] = [], - cancel_if: list[Condition | OrGroup] = [], + execution_timeout: Duration, + schedule_timeout: Duration, + parents: "list[Task[TWorkflowInput, Any]] | None", + retries: int, + rate_limits: list[CreateTaskRateLimit] | None, + desired_worker_labels: dict[str, DesiredWorkerLabels] | None, + backoff_factor: float | None, + backoff_max_seconds: int | None, + concurrency: list[ConcurrencyExpression] | None, + wait_for: list[Condition | OrGroup] | None, + skip_if: list[Condition | OrGroup] | None, + cancel_if: list[Condition | OrGroup] | None, ) -> None: self.is_durable = is_durable @@ -74,17 +76,17 @@ class Task(Generic[TWorkflowInput, R]): self.execution_timeout = execution_timeout self.schedule_timeout = schedule_timeout self.name = name - self.parents = parents + self.parents = parents or [] self.retries = retries - self.rate_limits = rate_limits - self.desired_worker_labels = desired_worker_labels + self.rate_limits = rate_limits or [] + self.desired_worker_labels = desired_worker_labels or {} self.backoff_factor = backoff_factor self.backoff_max_seconds = backoff_max_seconds - self.concurrency = concurrency + self.concurrency = concurrency or [] - self.wait_for = self._flatten_conditions(wait_for) - self.skip_if = self._flatten_conditions(skip_if) - self.cancel_if = self._flatten_conditions(cancel_if) + self.wait_for = self._flatten_conditions(wait_for or []) + self.skip_if = self._flatten_conditions(skip_if or []) + self.cancel_if = self._flatten_conditions(cancel_if or []) return_type = get_type_hints(_fn).get("return") @@ -179,13 +181,19 @@ class Task(Generic[TWorkflowInput, R]): raise ValueError("Conditions must have unique readable data keys.") user_events = [ - c.to_proto() for c in conditions if isinstance(c, UserEventCondition) + c.to_proto(self.workflow.client.config) + for c in conditions + if isinstance(c, UserEventCondition) ] parent_overrides = [ - c.to_proto() for c in conditions if isinstance(c, ParentCondition) + c.to_proto(self.workflow.client.config) + for c in conditions + if isinstance(c, ParentCondition) ] sleep_conditions = [ - c.to_proto() for c in conditions if isinstance(c, SleepCondition) + c.to_proto(self.workflow.client.config) + for c in conditions + if isinstance(c, SleepCondition) ] return TaskConditions( diff --git a/sdks/python/hatchet_sdk/runnables/types.py b/sdks/python/hatchet_sdk/runnables/types.py index a7f9b4d4a..ee0acdd9d 100644 --- a/sdks/python/hatchet_sdk/runnables/types.py +++ b/sdks/python/hatchet_sdk/runnables/types.py @@ -1,7 +1,8 @@ import asyncio import json +from collections.abc import Callable from enum import Enum -from typing import Any, Callable, ParamSpec, Type, TypeGuard, TypeVar, Union +from typing import Any, ParamSpec, TypeGuard, TypeVar from pydantic import BaseModel, ConfigDict, Field @@ -11,7 +12,7 @@ from hatchet_sdk.contracts.v1.workflows_pb2 import DefaultFilter as DefaultFilte from 
hatchet_sdk.utils.timedelta_to_expression import Duration from hatchet_sdk.utils.typing import AwaitableLike, JSONSerializableMapping -ValidTaskReturnType = Union[BaseModel, JSONSerializableMapping, None] +ValidTaskReturnType = BaseModel | JSONSerializableMapping | None R = TypeVar("R", bound=ValidTaskReturnType) P = ParamSpec("P") @@ -78,7 +79,7 @@ class DefaultFilter(BaseModel): return DefaultFilterProto( expression=self.expression, scope=self.scope, - payload=payload_json, + payload=payload_json.encode("utf-8"), ) @@ -92,7 +93,7 @@ class WorkflowConfig(BaseModel): on_crons: list[str] = Field(default_factory=list) sticky: StickyStrategy | None = None concurrency: ConcurrencyExpression | list[ConcurrencyExpression] | None = None - input_validator: Type[BaseModel] = EmptyModel + input_validator: type[BaseModel] = EmptyModel default_priority: int | None = None task_defaults: TaskDefaults = TaskDefaults() @@ -107,7 +108,7 @@ class StepType(str, Enum): AsyncFunc = Callable[[TWorkflowInput, Context], AwaitableLike[R]] SyncFunc = Callable[[TWorkflowInput, Context], R] -TaskFunc = Union[AsyncFunc[TWorkflowInput, R], SyncFunc[TWorkflowInput, R]] +TaskFunc = AsyncFunc[TWorkflowInput, R] | SyncFunc[TWorkflowInput, R] def is_async_fn( @@ -124,9 +125,9 @@ def is_sync_fn( DurableAsyncFunc = Callable[[TWorkflowInput, DurableContext], AwaitableLike[R]] DurableSyncFunc = Callable[[TWorkflowInput, DurableContext], R] -DurableTaskFunc = Union[ - DurableAsyncFunc[TWorkflowInput, R], DurableSyncFunc[TWorkflowInput, R] -] +DurableTaskFunc = ( + DurableAsyncFunc[TWorkflowInput, R] | DurableSyncFunc[TWorkflowInput, R] +) def is_durable_async_fn( diff --git a/sdks/python/hatchet_sdk/runnables/workflow.py b/sdks/python/hatchet_sdk/runnables/workflow.py index f266f50dc..9b463965f 100644 --- a/sdks/python/hatchet_sdk/runnables/workflow.py +++ b/sdks/python/hatchet_sdk/runnables/workflow.py @@ -1,7 +1,8 @@ import asyncio +from collections.abc import Callable from datetime import datetime, timedelta, timezone from functools import cached_property -from typing import TYPE_CHECKING, Any, Callable, Generic, TypeVar, cast, get_type_hints +from typing import TYPE_CHECKING, Any, Generic, TypeVar, cast, get_type_hints from google.protobuf import timestamp_pb2 from pydantic import BaseModel, model_validator @@ -11,6 +12,7 @@ from hatchet_sdk.clients.admin import ( TriggerWorkflowOptions, WorkflowRunTriggerConfig, ) +from hatchet_sdk.clients.listeners.run_event_listener import RunEventListener from hatchet_sdk.clients.rest.models.cron_workflows import CronWorkflows from hatchet_sdk.clients.rest.models.v1_filter import V1Filter from hatchet_sdk.clients.rest.models.v1_task_status import V1TaskStatus @@ -371,7 +373,7 @@ class BaseWorkflow(Generic[TWorkflowInput]): self, expression: str, scope: str, - payload: JSONSerializableMapping = {}, + payload: JSONSerializableMapping | None = None, ) -> V1Filter: """ Create a new filter. @@ -393,7 +395,7 @@ class BaseWorkflow(Generic[TWorkflowInput]): self, expression: str, scope: str, - payload: JSONSerializableMapping = {}, + payload: JSONSerializableMapping | None = None, ) -> V1Filter: """ Create a new filter. 
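
Consistent with the regenerated `workflows_pb2.pyi` stub earlier in this diff (where `DefaultFilter.payload` changes from `str` to `bytes`), `DefaultFilter.to_proto` in `runnables/types.py` now JSON-serializes the payload and UTF-8 encodes it before building the protobuf message. A rough sketch of the resulting shape, using the import alias shown in the diff; the expression, scope, and payload values are placeholders:

```python
import json

from hatchet_sdk.contracts.v1.workflows_pb2 import DefaultFilter as DefaultFilterProto

# Hypothetical filter payload; any JSON-serializable mapping works.
payload = {"region": "us-east-1", "priority": 3}

proto = DefaultFilterProto(
    expression="input.region == payload.region",
    scope="example-scope",
    payload=json.dumps(payload).encode("utf-8"),  # bytes, not str, after this change
)

assert isinstance(proto.payload, bytes)
assert json.loads(proto.payload.decode("utf-8")) == payload
```
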
@@ -458,7 +460,7 @@ class BaseWorkflow(Generic[TWorkflowInput]): cron_name: str, expression: str, input: TWorkflowInput = cast(TWorkflowInput, EmptyModel()), - additional_metadata: JSONSerializableMapping = {}, + additional_metadata: JSONSerializableMapping | None = None, priority: int | None = None, ) -> CronWorkflows: """ @@ -477,7 +479,7 @@ class BaseWorkflow(Generic[TWorkflowInput]): cron_name=cron_name, expression=expression, input=self._serialize_input(input), - additional_metadata=additional_metadata, + additional_metadata=additional_metadata or {}, priority=priority, ) @@ -486,7 +488,7 @@ class BaseWorkflow(Generic[TWorkflowInput]): cron_name: str, expression: str, input: TWorkflowInput = cast(TWorkflowInput, EmptyModel()), - additional_metadata: JSONSerializableMapping = {}, + additional_metadata: JSONSerializableMapping | None = None, priority: int | None = None, ) -> CronWorkflows: """ @@ -505,7 +507,7 @@ class BaseWorkflow(Generic[TWorkflowInput]): cron_name=cron_name, expression=expression, input=self._serialize_input(input), - additional_metadata=additional_metadata, + additional_metadata=additional_metadata or {}, priority=priority, ) @@ -620,7 +622,7 @@ class Workflow(BaseWorkflow[TWorkflowInput]): """ Run the workflow asynchronously and wait for it to complete. - This method triggers a workflow run, blocks until completion, and returns the final result. + This method triggers a workflow run, awaits until completion, and returns the final result. :param input: The input data for the workflow, must match the workflow's input type. :param options: Additional options for workflow execution like metadata and parent workflow ID. @@ -716,16 +718,16 @@ class Workflow(BaseWorkflow[TWorkflowInput]): name: str | None = None, schedule_timeout: Duration = timedelta(minutes=5), execution_timeout: Duration = timedelta(seconds=60), - parents: list[Task[TWorkflowInput, Any]] = [], + parents: list[Task[TWorkflowInput, Any]] | None = None, retries: int = 0, - rate_limits: list[RateLimit] = [], - desired_worker_labels: dict[str, DesiredWorkerLabel] = {}, + rate_limits: list[RateLimit] | None = None, + desired_worker_labels: dict[str, DesiredWorkerLabel] | None = None, backoff_factor: float | None = None, backoff_max_seconds: int | None = None, - concurrency: list[ConcurrencyExpression] = [], - wait_for: list[Condition | OrGroup] = [], - skip_if: list[Condition | OrGroup] = [], - cancel_if: list[Condition | OrGroup] = [], + concurrency: list[ConcurrencyExpression] | None = None, + wait_for: list[Condition | OrGroup] | None = None, + skip_if: list[Condition | OrGroup] | None = None, + cancel_if: list[Condition | OrGroup] | None = None, ) -> Callable[ [Callable[[TWorkflowInput, Context], R | CoroutineLike[R]]], Task[TWorkflowInput, R], @@ -784,10 +786,10 @@ class Workflow(BaseWorkflow[TWorkflowInput]): schedule_timeout=computed_params.schedule_timeout, parents=parents, retries=computed_params.retries, - rate_limits=[r.to_proto() for r in rate_limits], + rate_limits=[r.to_proto() for r in rate_limits or []], desired_worker_labels={ key: transform_desired_worker_label(d) - for key, d in desired_worker_labels.items() + for key, d in (desired_worker_labels or {}).items() }, backoff_factor=computed_params.backoff_factor, backoff_max_seconds=computed_params.backoff_max_seconds, @@ -808,16 +810,16 @@ class Workflow(BaseWorkflow[TWorkflowInput]): name: str | None = None, schedule_timeout: Duration = timedelta(minutes=5), execution_timeout: Duration = timedelta(seconds=60), - parents: 
list[Task[TWorkflowInput, Any]] = [], + parents: list[Task[TWorkflowInput, Any]] | None = None, retries: int = 0, - rate_limits: list[RateLimit] = [], - desired_worker_labels: dict[str, DesiredWorkerLabel] = {}, + rate_limits: list[RateLimit] | None = None, + desired_worker_labels: dict[str, DesiredWorkerLabel] | None = None, backoff_factor: float | None = None, backoff_max_seconds: int | None = None, - concurrency: list[ConcurrencyExpression] = [], - wait_for: list[Condition | OrGroup] = [], - skip_if: list[Condition | OrGroup] = [], - cancel_if: list[Condition | OrGroup] = [], + concurrency: list[ConcurrencyExpression] | None = None, + wait_for: list[Condition | OrGroup] | None = None, + skip_if: list[Condition | OrGroup] | None = None, + cancel_if: list[Condition | OrGroup] | None = None, ) -> Callable[ [Callable[[TWorkflowInput, DurableContext], R | CoroutineLike[R]]], Task[TWorkflowInput, R], @@ -880,10 +882,10 @@ class Workflow(BaseWorkflow[TWorkflowInput]): schedule_timeout=computed_params.schedule_timeout, parents=parents, retries=computed_params.retries, - rate_limits=[r.to_proto() for r in rate_limits], + rate_limits=[r.to_proto() for r in rate_limits or []], desired_worker_labels={ key: transform_desired_worker_label(d) - for key, d in desired_worker_labels.items() + for key, d in (desired_worker_labels or {}).items() }, backoff_factor=computed_params.backoff_factor, backoff_max_seconds=computed_params.backoff_max_seconds, @@ -905,10 +907,10 @@ class Workflow(BaseWorkflow[TWorkflowInput]): schedule_timeout: Duration = timedelta(minutes=5), execution_timeout: Duration = timedelta(seconds=60), retries: int = 0, - rate_limits: list[RateLimit] = [], + rate_limits: list[RateLimit] | None = None, backoff_factor: float | None = None, backoff_max_seconds: int | None = None, - concurrency: list[ConcurrencyExpression] = [], + concurrency: list[ConcurrencyExpression] | None = None, ) -> Callable[ [Callable[[TWorkflowInput, Context], R | CoroutineLike[R]]], Task[TWorkflowInput, R], @@ -947,10 +949,15 @@ class Workflow(BaseWorkflow[TWorkflowInput]): execution_timeout=execution_timeout, schedule_timeout=schedule_timeout, retries=retries, - rate_limits=[r.to_proto() for r in rate_limits], + rate_limits=[r.to_proto() for r in rate_limits or []], backoff_factor=backoff_factor, backoff_max_seconds=backoff_max_seconds, concurrency=concurrency, + desired_worker_labels=None, + parents=None, + wait_for=None, + skip_if=None, + cancel_if=None, ) if self._on_failure_task: @@ -968,10 +975,10 @@ class Workflow(BaseWorkflow[TWorkflowInput]): schedule_timeout: Duration = timedelta(minutes=5), execution_timeout: Duration = timedelta(seconds=60), retries: int = 0, - rate_limits: list[RateLimit] = [], + rate_limits: list[RateLimit] | None = None, backoff_factor: float | None = None, backoff_max_seconds: int | None = None, - concurrency: list[ConcurrencyExpression] = [], + concurrency: list[ConcurrencyExpression] | None = None, ) -> Callable[ [Callable[[TWorkflowInput, Context], R | CoroutineLike[R]]], Task[TWorkflowInput, R], @@ -1010,11 +1017,15 @@ class Workflow(BaseWorkflow[TWorkflowInput]): execution_timeout=execution_timeout, schedule_timeout=schedule_timeout, retries=retries, - rate_limits=[r.to_proto() for r in rate_limits], + rate_limits=[r.to_proto() for r in rate_limits or []], backoff_factor=backoff_factor, backoff_max_seconds=backoff_max_seconds, concurrency=concurrency, - parents=[], + parents=None, + desired_worker_labels=None, + wait_for=None, + skip_if=None, + cancel_if=None, ) if 
self._on_success_task: @@ -1087,6 +1098,9 @@ class TaskRunRef(Generic[TWorkflowInput, R]): return self._s._extract_result(result) + def stream(self) -> RunEventListener: + return self._wrr.stream() + class Standalone(BaseWorkflow[TWorkflowInput], Generic[TWorkflowInput, R]): def __init__( @@ -1123,13 +1137,14 @@ class Standalone(BaseWorkflow[TWorkflowInput], Generic[TWorkflowInput, R]): options: TriggerWorkflowOptions = TriggerWorkflowOptions(), ) -> R: """ - Synchronously trigger a workflow run without waiting for it to complete. - This method is useful for starting a workflow run and immediately returning a reference to the run without blocking while the workflow runs. + Run the workflow synchronously and wait for it to complete. + + This method triggers a workflow run, blocks until completion, and returns the extracted result. :param input: The input data for the workflow. :param options: Additional options for workflow execution. - :returns: A `WorkflowRunRef` object representing the reference to the workflow run. + :returns: The extracted result of the workflow execution. """ return self._extract_result(self._workflow.run(input, options)) @@ -1141,12 +1156,12 @@ class Standalone(BaseWorkflow[TWorkflowInput], Generic[TWorkflowInput, R]): """ Run the workflow asynchronously and wait for it to complete. - This method triggers a workflow run, blocks until completion, and returns the final result. + This method triggers a workflow run, awaits until completion, and returns the extracted result. :param input: The input data for the workflow, must match the workflow's input type. :param options: Additional options for workflow execution like metadata and parent workflow ID. - :returns: The result of the workflow execution as a dictionary. + :returns: The extracted result of the workflow execution. """ result = await self._workflow.aio_run(input, options) return self._extract_result(result) @@ -1157,14 +1172,14 @@ class Standalone(BaseWorkflow[TWorkflowInput], Generic[TWorkflowInput, R]): options: TriggerWorkflowOptions = TriggerWorkflowOptions(), ) -> TaskRunRef[TWorkflowInput, R]: """ - Run the workflow synchronously and wait for it to complete. + Trigger a workflow run without waiting for it to complete. - This method triggers a workflow run, blocks until completion, and returns the final result. + This method triggers a workflow run and immediately returns a reference to the run without blocking while the workflow runs. :param input: The input data for the workflow, must match the workflow's input type. :param options: Additional options for workflow execution like metadata and parent workflow ID. - :returns: The result of the workflow execution as a dictionary. + :returns: A `TaskRunRef` object representing the reference to the workflow run. """ ref = self._workflow.run_no_wait(input, options) @@ -1182,7 +1197,7 @@ class Standalone(BaseWorkflow[TWorkflowInput], Generic[TWorkflowInput, R]): :param input: The input data for the workflow. :param options: Additional options for workflow execution. - :returns: A `WorkflowRunRef` object representing the reference to the workflow run. + :returns: A `TaskRunRef` object representing the reference to the workflow run. 
""" ref = await self._workflow.aio_run_no_wait(input, options) diff --git a/sdks/python/hatchet_sdk/utils/proto_enums.py b/sdks/python/hatchet_sdk/utils/proto_enums.py index 6c41d0203..696bc9dc7 100644 --- a/sdks/python/hatchet_sdk/utils/proto_enums.py +++ b/sdks/python/hatchet_sdk/utils/proto_enums.py @@ -1,5 +1,5 @@ from enum import Enum -from typing import Type, TypeVar, overload +from typing import TypeVar, overload from google.protobuf.internal.enum_type_wrapper import EnumTypeWrapper @@ -26,19 +26,19 @@ def convert_python_enum_to_proto( @overload def convert_proto_enum_to_python( - value: TProtoEnumValue, python_enum_class: Type[TPythonEnum], proto_enum: TProtoEnum + value: TProtoEnumValue, python_enum_class: type[TPythonEnum], proto_enum: TProtoEnum ) -> TPythonEnum: ... @overload def convert_proto_enum_to_python( - value: None, python_enum_class: Type[TPythonEnum], proto_enum: TProtoEnum + value: None, python_enum_class: type[TPythonEnum], proto_enum: TProtoEnum ) -> None: ... def convert_proto_enum_to_python( value: TProtoEnumValue | None, - python_enum_class: Type[TPythonEnum], + python_enum_class: type[TPythonEnum], proto_enum: TProtoEnum, ) -> TPythonEnum | None: if value is None: diff --git a/sdks/python/hatchet_sdk/utils/timedelta_to_expression.py b/sdks/python/hatchet_sdk/utils/timedelta_to_expression.py index a7fde5765..f2cedddcb 100644 --- a/sdks/python/hatchet_sdk/utils/timedelta_to_expression.py +++ b/sdks/python/hatchet_sdk/utils/timedelta_to_expression.py @@ -18,7 +18,6 @@ def timedelta_to_expr(td: Duration) -> str: ## IMPORTANT: We only support hours, minutes, and seconds on the engine if seconds % HOUR == 0: return f"{seconds // HOUR}h" - elif seconds % MINUTE == 0: + if seconds % MINUTE == 0: return f"{seconds // MINUTE}m" - else: - return f"{seconds}s" + return f"{seconds}s" diff --git a/sdks/python/hatchet_sdk/utils/typing.py b/sdks/python/hatchet_sdk/utils/typing.py index b71fea520..5bf50fe99 100644 --- a/sdks/python/hatchet_sdk/utils/typing.py +++ b/sdks/python/hatchet_sdk/utils/typing.py @@ -1,20 +1,11 @@ import sys -from typing import ( - Any, - Awaitable, - Coroutine, - Generator, - Mapping, - Type, - TypeAlias, - TypeGuard, - TypeVar, -) +from collections.abc import Awaitable, Coroutine, Generator +from typing import Any, Literal, TypeAlias, TypeGuard, TypeVar from pydantic import BaseModel -def is_basemodel_subclass(model: Any) -> TypeGuard[Type[BaseModel]]: +def is_basemodel_subclass(model: Any) -> TypeGuard[type[BaseModel]]: try: return issubclass(model, BaseModel) except TypeError: @@ -22,18 +13,21 @@ def is_basemodel_subclass(model: Any) -> TypeGuard[Type[BaseModel]]: class TaskIOValidator(BaseModel): - workflow_input: Type[BaseModel] | None = None - step_output: Type[BaseModel] | None = None + workflow_input: type[BaseModel] | None = None + step_output: type[BaseModel] | None = None -JSONSerializableMapping = Mapping[str, Any] +JSONSerializableMapping = dict[str, Any] _T_co = TypeVar("_T_co", covariant=True) if sys.version_info >= (3, 12): - AwaitableLike: TypeAlias = Awaitable[_T_co] # noqa: Y047 - CoroutineLike: TypeAlias = Coroutine[Any, Any, _T_co] # noqa: Y047 + AwaitableLike: TypeAlias = Awaitable[_T_co] + CoroutineLike: TypeAlias = Coroutine[Any, Any, _T_co] else: AwaitableLike: TypeAlias = Generator[Any, None, _T_co] | Awaitable[_T_co] CoroutineLike: TypeAlias = Generator[Any, None, _T_co] | Coroutine[Any, Any, _T_co] + +STOP_LOOP_TYPE = Literal["STOP_LOOP"] +STOP_LOOP: STOP_LOOP_TYPE = "STOP_LOOP" # Sentinel object to stop the loop diff --git 
a/sdks/python/hatchet_sdk/waits.py b/sdks/python/hatchet_sdk/waits.py index 4399c0fe1..6657c15a3 100644 --- a/sdks/python/hatchet_sdk/waits.py +++ b/sdks/python/hatchet_sdk/waits.py @@ -6,6 +6,7 @@ from uuid import uuid4 from pydantic import BaseModel, Field +from hatchet_sdk.config import ClientConfig from hatchet_sdk.contracts.v1.shared.condition_pb2 import Action as ProtoAction from hatchet_sdk.contracts.v1.shared.condition_pb2 import ( BaseMatchCondition, @@ -53,7 +54,7 @@ class Condition(ABC): @abstractmethod def to_proto( - self, + self, config: ClientConfig ) -> UserEventMatchCondition | ParentOverrideMatchCondition | SleepMatchCondition: pass @@ -71,7 +72,7 @@ class SleepCondition(Condition): self.duration = duration - def to_proto(self) -> SleepMatchCondition: + def to_proto(self, config: ClientConfig) -> SleepMatchCondition: return SleepMatchCondition( base=self.base.to_proto(), sleep_for=timedelta_to_expr(self.duration), @@ -95,10 +96,10 @@ class UserEventCondition(Condition): self.event_key = event_key self.expression = expression - def to_proto(self) -> UserEventMatchCondition: + def to_proto(self, config: ClientConfig) -> UserEventMatchCondition: return UserEventMatchCondition( base=self.base.to_proto(), - user_event_key=self.event_key, + user_event_key=config.apply_namespace(self.event_key), ) @@ -124,7 +125,7 @@ class ParentCondition(Condition): self.parent = parent - def to_proto(self) -> ParentOverrideMatchCondition: + def to_proto(self, config: ClientConfig) -> ParentOverrideMatchCondition: return ParentOverrideMatchCondition( base=self.base.to_proto(), parent_readable_id=self.parent.name, diff --git a/sdks/python/hatchet_sdk/worker/action_listener_process.py b/sdks/python/hatchet_sdk/worker/action_listener_process.py index 5ae65cfd2..9540a3032 100644 --- a/sdks/python/hatchet_sdk/worker/action_listener_process.py +++ b/sdks/python/hatchet_sdk/worker/action_listener_process.py @@ -4,9 +4,10 @@ import signal import time from dataclasses import dataclass from multiprocessing import Queue -from typing import Any, Literal +from typing import Any import grpc +from grpc.aio import UnaryUnaryCall from hatchet_sdk.client import Client from hatchet_sdk.clients.dispatcher.action_listener import ( @@ -19,6 +20,9 @@ from hatchet_sdk.config import ClientConfig from hatchet_sdk.contracts.dispatcher_pb2 import ( GROUP_KEY_EVENT_TYPE_STARTED, STEP_EVENT_TYPE_STARTED, + ActionEventResponse, + GroupKeyActionEvent, + StepActionEvent, ) from hatchet_sdk.logger import logger from hatchet_sdk.runnables.action import Action, ActionType @@ -29,6 +33,7 @@ from hatchet_sdk.runnables.contextvars import ( ctx_workflow_run_id, ) from hatchet_sdk.utils.backoff import exp_backoff_sleep +from hatchet_sdk.utils.typing import STOP_LOOP, STOP_LOOP_TYPE ACTION_EVENT_RETRY_COUNT = 5 @@ -41,9 +46,6 @@ class ActionEvent: should_not_retry: bool -STOP_LOOP_TYPE = Literal["STOP_LOOP"] -STOP_LOOP: STOP_LOOP_TYPE = "STOP_LOOP" # Sentinel object to stop the loop - BLOCKED_THREAD_WARNING = "THE TIME TO START THE TASK RUN IS TOO LONG, THE EVENT LOOP MAY BE BLOCKED. See https://docs.hatchet.run/blog/warning-event-loop-blocked for details and debugging help." 
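
The `waits.py` change above threads a `ClientConfig` into every `Condition.to_proto` call so that `UserEventCondition` can apply the tenant's configured namespace to its event key before the condition reaches the engine. The sketch below is illustrative only: `FakeConfig`, the `"staging__"` namespace value, and the prefix-unless-present behavior of `apply_namespace` are assumptions standing in for the real client config, purely to show why the condition now needs access to it:

```python
from dataclasses import dataclass


@dataclass
class FakeConfig:
    namespace: str = "staging__"

    def apply_namespace(self, name: str) -> str:
        # Assumed behavior: prefix the namespace unless it is already present.
        return name if name.startswith(self.namespace) else f"{self.namespace}{name}"


config = FakeConfig()

# Before this change a condition serialized the raw key ("payment:completed");
# after it, the engine matches on the namespaced key instead.
assert config.apply_namespace("payment:completed") == "staging__payment:completed"
assert config.apply_namespace("staging__payment:completed") == "staging__payment:completed"
```
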
@@ -56,9 +58,9 @@ class WorkerActionListenerProcess: config: ClientConfig, action_queue: "Queue[Action]", event_queue: "Queue[ActionEvent | STOP_LOOP_TYPE]", - handle_kill: bool = True, - debug: bool = False, - labels: dict[str, str | int] = {}, + handle_kill: bool, + debug: bool, + labels: dict[str, str | int], ) -> None: self.name = name self.actions = actions @@ -75,6 +77,14 @@ class WorkerActionListenerProcess: self.action_loop_task: asyncio.Task[None] | None = None self.event_send_loop_task: asyncio.Task[None] | None = None self.running_step_runs: dict[str, float] = {} + self.step_action_events: set[ + asyncio.Task[UnaryUnaryCall[StepActionEvent, ActionEventResponse] | None] + ] = set() + self.group_key_action_events: set[ + asyncio.Task[ + UnaryUnaryCall[GroupKeyActionEvent, ActionEventResponse] | None + ] + ] = set() if self.debug: logger.setLevel(logging.DEBUG) @@ -144,20 +154,21 @@ class WorkerActionListenerProcess: break logger.debug(f"tx: event: {event.action.action_id}/{event.type}") - asyncio.create_task(self.send_event(event)) + t = asyncio.create_task(self.send_event(event)) + self.step_action_events.add(t) + t.add_done_callback(lambda t: self.step_action_events.discard(t)) async def start_blocked_main_loop(self) -> None: threshold = 1 while not self.killing: count = 0 - for _, start_time in self.running_step_runs.items(): + for start_time in self.running_step_runs.values(): diff = self.now() - start_time if diff > threshold: count += 1 if count > 0: logger.warning(f"{BLOCKED_THREAD_WARNING}: Waiting Steps {count}") - print(asyncio.current_task()) await asyncio.sleep(1) async def send_event(self, event: ActionEvent, retry_attempt: int = 1) -> None: @@ -187,7 +198,7 @@ class WorkerActionListenerProcess: self.now() ) - asyncio.create_task( + send_started_event_task = asyncio.create_task( self.dispatcher_client.send_step_action_event( event.action, event.type, @@ -195,14 +206,23 @@ class WorkerActionListenerProcess: event.should_not_retry, ) ) + + self.step_action_events.add(send_started_event_task) + send_started_event_task.add_done_callback( + lambda t: self.step_action_events.discard(t) + ) case ActionType.CANCEL_STEP_RUN: logger.debug("unimplemented event send") case ActionType.START_GET_GROUP_KEY: - asyncio.create_task( + get_group_key_task = asyncio.create_task( self.dispatcher_client.send_group_key_action_event( event.action, event.type, event.payload ) ) + self.group_key_action_events.add(get_group_key_task) + get_group_key_task.add_done_callback( + lambda t: self.group_key_action_events.discard(t) + ) case _: logger.error("unknown action type for event send") except Exception as e: @@ -317,7 +337,7 @@ def worker_action_listener_process(*args: Any, **kwargs: Any) -> None: process = WorkerActionListenerProcess(*args, **kwargs) await process.start() # Keep the process running - while not process.killing: + while not process.killing: # noqa: ASYNC110 await asyncio.sleep(0.1) asyncio.run(run()) diff --git a/sdks/python/hatchet_sdk/worker/runner/run_loop_manager.py b/sdks/python/hatchet_sdk/worker/runner/run_loop_manager.py index 8824e57fe..ecec2074f 100644 --- a/sdks/python/hatchet_sdk/worker/runner/run_loop_manager.py +++ b/sdks/python/hatchet_sdk/worker/runner/run_loop_manager.py @@ -1,19 +1,17 @@ import asyncio import logging from multiprocessing import Queue -from typing import Any, Literal, TypeVar +from typing import Any, TypeVar from hatchet_sdk.client import Client from hatchet_sdk.config import ClientConfig from hatchet_sdk.logger import logger from 
hatchet_sdk.runnables.action import Action from hatchet_sdk.runnables.task import Task +from hatchet_sdk.utils.typing import STOP_LOOP, STOP_LOOP_TYPE from hatchet_sdk.worker.action_listener_process import ActionEvent from hatchet_sdk.worker.runner.runner import Runner -from hatchet_sdk.worker.runner.utils.capture_logs import capture_logs - -STOP_LOOP_TYPE = Literal["STOP_LOOP"] -STOP_LOOP: STOP_LOOP_TYPE = "STOP_LOOP" +from hatchet_sdk.worker.runner.utils.capture_logs import AsyncLogSender, capture_logs T = TypeVar("T") @@ -28,10 +26,10 @@ class WorkerActionRunLoopManager: action_queue: "Queue[Action | STOP_LOOP_TYPE]", event_queue: "Queue[ActionEvent]", loop: asyncio.AbstractEventLoop, - handle_kill: bool = True, - debug: bool = False, - labels: dict[str, str | int] = {}, - lifespan_context: Any | None = None, + handle_kill: bool, + debug: bool, + labels: dict[str, str | int] | None, + lifespan_context: Any | None, ) -> None: self.name = name self.action_registry = action_registry @@ -52,15 +50,19 @@ class WorkerActionRunLoopManager: self.runner: Runner | None = None self.client = Client(config=self.config, debug=self.debug) + self.start_loop_manager_task: asyncio.Task[None] | None = None + self.log_sender = AsyncLogSender(self.client.event) + self.log_task = self.loop.create_task(self.log_sender.consume()) + self.start() def start(self) -> None: - k = self.loop.create_task(self.aio_start()) # noqa: F841 + self.start_loop_manager_task = self.loop.create_task(self.aio_start()) async def aio_start(self, retry_count: int = 1) -> None: await capture_logs( self.client.log_interceptor, - self.client.event, + self.log_sender, self._async_start, )() @@ -75,6 +77,7 @@ class WorkerActionRunLoopManager: self.killing = True self.action_queue.put(STOP_LOOP) + self.log_sender.publish(STOP_LOOP) async def wait_for_tasks(self) -> None: if self.runner: @@ -89,6 +92,7 @@ class WorkerActionRunLoopManager: self.action_registry, self.labels, self.lifespan_context, + self.log_sender, ) logger.debug(f"'{self.name}' waiting for {list(self.action_registry.keys())}") diff --git a/sdks/python/hatchet_sdk/worker/runner/runner.py b/sdks/python/hatchet_sdk/worker/runner/runner.py index 972afacb9..c57ad6f6d 100644 --- a/sdks/python/hatchet_sdk/worker/runner/runner.py +++ b/sdks/python/hatchet_sdk/worker/runner/runner.py @@ -1,14 +1,14 @@ import asyncio -import contextvars import ctypes import functools import json -import traceback +from collections.abc import Callable from concurrent.futures import ThreadPoolExecutor +from contextlib import suppress from enum import Enum from multiprocessing import Queue from threading import Thread, current_thread -from typing import Any, Callable, Dict, Literal, cast, overload +from typing import Any, Literal, cast, overload from pydantic import BaseModel @@ -30,7 +30,7 @@ from hatchet_sdk.contracts.dispatcher_pb2 import ( STEP_EVENT_TYPE_FAILED, STEP_EVENT_TYPE_STARTED, ) -from hatchet_sdk.exceptions import NonRetryableException +from hatchet_sdk.exceptions import NonRetryableException, TaskRunError from hatchet_sdk.features.runs import RunsClient from hatchet_sdk.logger import logger from hatchet_sdk.runnables.action import Action, ActionKey, ActionType @@ -40,12 +40,17 @@ from hatchet_sdk.runnables.contextvars import ( ctx_worker_id, ctx_workflow_run_id, spawn_index_lock, + task_count, workflow_spawn_indices, ) from hatchet_sdk.runnables.task import Task from hatchet_sdk.runnables.types import R, TWorkflowInput from hatchet_sdk.worker.action_listener_process import ActionEvent 
-from hatchet_sdk.worker.runner.utils.capture_logs import copy_context_vars +from hatchet_sdk.worker.runner.utils.capture_logs import ( + AsyncLogSender, + ContextVarToCopy, + copy_context_vars, +) class WorkerStatus(Enum): @@ -61,10 +66,11 @@ class Runner: event_queue: "Queue[ActionEvent]", config: ClientConfig, slots: int, - handle_kill: bool = True, - action_registry: dict[str, Task[TWorkflowInput, R]] = {}, - labels: dict[str, str | int] = {}, - lifespan_context: Any | None = None, + handle_kill: bool, + action_registry: dict[str, Task[TWorkflowInput, R]], + labels: dict[str, str | int] | None, + lifespan_context: Any | None, + log_sender: AsyncLogSender, ): # We store the config so we can dynamically create clients for the dispatcher client. self.config = config @@ -72,13 +78,14 @@ class Runner: self.slots = slots self.tasks: dict[ActionKey, asyncio.Task[Any]] = {} # Store run ids and futures self.contexts: dict[ActionKey, Context] = {} # Store run ids and contexts - self.action_registry = action_registry + self.action_registry = action_registry or {} self.event_queue = event_queue # The thread pool is used for synchronous functions which need to run concurrently self.thread_pool = ThreadPoolExecutor(max_workers=slots) - self.threads: Dict[ActionKey, Thread] = {} # Store run ids and threads + self.threads: dict[ActionKey, Thread] = {} # Store run ids and threads + self.running_tasks = set[asyncio.Task[Exception | None]]() self.killing = False self.handle_kill = handle_kill @@ -101,10 +108,11 @@ class Runner: self.durable_event_listener = DurableEventListener(self.config) self.worker_context = WorkerContext( - labels=labels, client=Client(config=config).dispatcher + labels=labels or {}, client=Client(config=config).dispatcher ) self.lifespan_context = lifespan_context + self.log_sender = log_sender if self.config.enable_thread_pool_monitoring: self.start_background_monitoring() @@ -116,67 +124,68 @@ class Runner: if self.worker_context.id() is None: self.worker_context._worker_id = action.worker_id + t: asyncio.Task[Exception | None] | None = None match action.action_type: case ActionType.START_STEP_RUN: log = f"run: start step: {action.action_id}/{action.step_run_id}" logger.info(log) - asyncio.create_task(self.handle_start_step_run(action)) + t = asyncio.create_task(self.handle_start_step_run(action)) case ActionType.CANCEL_STEP_RUN: log = f"cancel: step run: {action.action_id}/{action.step_run_id}/{action.retry_count}" logger.info(log) - asyncio.create_task(self.handle_cancel_action(action)) + t = asyncio.create_task(self.handle_cancel_action(action)) case ActionType.START_GET_GROUP_KEY: log = f"run: get group key: {action.action_id}/{action.get_group_key_run_id}" logger.info(log) - asyncio.create_task(self.handle_start_group_key_run(action)) + t = asyncio.create_task(self.handle_start_group_key_run(action)) case _: log = f"unknown action type: {action.action_type}" logger.error(log) + if t is not None: + self.running_tasks.add(t) + t.add_done_callback(lambda task: self.running_tasks.discard(task)) + def step_run_callback(self, action: Action) -> Callable[[asyncio.Task[Any]], None]: def inner_callback(task: asyncio.Task[Any]) -> None: self.cleanup_run_id(action.key) - errored = False - cancelled = task.cancelled() - output = None + if task.cancelled(): + return - # Get the output from the future try: - if not cancelled: - output = task.result() + output = task.result() except Exception as e: - errored = True - should_not_retry = isinstance(e, NonRetryableException) + exc = 
TaskRunError.from_exception(e) + # This except is coming from the application itself, so we want to send that to the Hatchet instance self.event_queue.put( ActionEvent( action=action, type=STEP_EVENT_TYPE_FAILED, - payload=str(pretty_format_exception(f"{e}", e)), + payload=exc.serialize(), should_not_retry=should_not_retry, ) ) logger.error( - f"failed step run: {action.action_id}/{action.step_run_id}" + f"failed step run: {action.action_id}/{action.step_run_id}\n{exc.serialize()}" ) - if not errored and not cancelled: - self.event_queue.put( - ActionEvent( - action=action, - type=STEP_EVENT_TYPE_COMPLETED, - payload=self.serialize_output(output), - should_not_retry=False, - ) - ) + return - logger.info( - f"finished step run: {action.action_id}/{action.step_run_id}" + self.event_queue.put( + ActionEvent( + action=action, + type=STEP_EVENT_TYPE_COMPLETED, + payload=self.serialize_output(output), + should_not_retry=False, ) + ) + + logger.info(f"finished step run: {action.action_id}/{action.step_run_id}") return inner_callback @@ -186,51 +195,46 @@ class Runner: def inner_callback(task: asyncio.Task[Any]) -> None: self.cleanup_run_id(action.key) - errored = False - cancelled = task.cancelled() - output = None + if task.cancelled(): + return - # Get the output from the future try: - if not cancelled: - output = task.result() + output = task.result() except Exception as e: - errored = True + exc = TaskRunError.from_exception(e) + self.event_queue.put( ActionEvent( action=action, type=GROUP_KEY_EVENT_TYPE_FAILED, - payload=str(pretty_format_exception(f"{e}", e)), + payload=exc.serialize(), should_not_retry=False, ) ) logger.error( - f"failed step run: {action.action_id}/{action.step_run_id}" + f"failed step run: {action.action_id}/{action.step_run_id}\n{exc.serialize()}" ) - if not errored and not cancelled: - self.event_queue.put( - ActionEvent( - action=action, - type=GROUP_KEY_EVENT_TYPE_COMPLETED, - payload=self.serialize_output(output), - should_not_retry=False, - ) - ) + return - logger.info( - f"finished step run: {action.action_id}/{action.step_run_id}" + self.event_queue.put( + ActionEvent( + action=action, + type=GROUP_KEY_EVENT_TYPE_COMPLETED, + payload=self.serialize_output(output), + should_not_retry=False, ) + ) + + logger.info(f"finished step run: {action.action_id}/{action.step_run_id}") return inner_callback def thread_action_func( self, ctx: Context, task: Task[TWorkflowInput, R], action: Action ) -> R: - if action.step_run_id: - self.threads[action.key] = current_thread() - elif action.get_group_key_run_id: + if action.step_run_id or action.get_group_key_run_id: self.threads[action.key] = current_thread() return task.call(ctx) @@ -250,28 +254,36 @@ class Runner: try: if task.is_async_function: return await task.aio_call(ctx) - else: - pfunc = functools.partial( - # we must copy the context vars to the new thread, as only asyncio natively supports - # contextvars - copy_context_vars, - contextvars.copy_context().items(), - self.thread_action_func, - ctx, - task, - action, - ) - - loop = asyncio.get_event_loop() - return await loop.run_in_executor(self.thread_pool, pfunc) - except Exception as e: - logger.error( - pretty_format_exception( - f"exception raised in action ({action.action_id}, retry={action.retry_count}):\n{e}", - e, - ) + pfunc = functools.partial( + # we must copy the context vars to the new thread, as only asyncio natively supports + # contextvars + copy_context_vars, + [ + ContextVarToCopy( + name="ctx_step_run_id", + value=action.step_run_id, + ), + 
ContextVarToCopy( + name="ctx_workflow_run_id", + value=action.workflow_run_id, + ), + ContextVarToCopy( + name="ctx_worker_id", + value=action.worker_id, + ), + ContextVarToCopy( + name="ctx_action_key", + value=action.key, + ), + ], + self.thread_action_func, + ctx, + task, + action, ) - raise e + + loop = asyncio.get_event_loop() + return await loop.run_in_executor(self.thread_pool, pfunc) finally: self.cleanup_run_id(action.key) @@ -295,7 +307,7 @@ class Runner: while True: await self.log_thread_pool_status() - for key in self.threads.keys(): + for key in self.threads: if key not in self.tasks: logger.debug(f"Potential zombie thread found for key {key}") @@ -350,6 +362,7 @@ class Runner: worker=self.worker_context, runs_client=self.runs_client, lifespan_context=self.lifespan_context, + log_sender=self.log_sender, ) ## IMPORTANT: Keep this method's signature in sync with the wrapper in the OTel instrumentor @@ -361,7 +374,8 @@ class Runner: if action_func: context = self.create_context( - action, True if action_func.is_durable else False + action, + True if action_func.is_durable else False, # noqa: SIM210 ) self.contexts[action.key] = context @@ -382,11 +396,12 @@ class Runner: task.add_done_callback(self.step_run_callback(action)) self.tasks[action.key] = task - try: + task_count.increment() + + ## FIXME: Handle cancelled exceptions and other special exceptions + ## that we don't want to suppress here + with suppress(Exception): await task - except Exception: - # do nothing, this should be caught in the callback - pass ## Once the step run completes, we need to remove the workflow spawn index ## so we don't leak memory @@ -444,7 +459,7 @@ class Runner: res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(ident), exc) if res == 0: raise ValueError("Invalid thread ID") - elif res != 1: + if res != 1: logger.error("PyThreadState_SetAsyncExc failed") # Call with exception set to 0 is needed to cleanup properly. 
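The recurring change in both the listener process and the runner above, storing each asyncio.create_task(...) result in a set (step_action_events, group_key_action_events, running_tasks) and discarding it in a done callback, exists because the event loop holds only weak references to tasks: a fire-and-forget task with no other strong reference can be garbage-collected before it finishes. A self-contained sketch of the same pattern; the names here are illustrative and not taken from the SDK:

    import asyncio

    background_tasks: set[asyncio.Task[None]] = set()

    async def send_event(payload: str) -> None:
        await asyncio.sleep(0.1)  # stand-in for the gRPC send
        print("sent", payload)

    def fire_and_forget(payload: str) -> None:
        task = asyncio.create_task(send_event(payload))
        # Keep a strong reference until the task completes, then drop it so
        # the set does not grow without bound.
        background_tasks.add(task)
        task.add_done_callback(background_tasks.discard)

    async def main() -> None:
        fire_and_forget("step-started")
        await asyncio.sleep(0.2)  # give the background send time to complete

    asyncio.run(main())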
@@ -505,8 +520,3 @@ class Runner: logger.info(f"waiting for {running} tasks to finish...") await asyncio.sleep(1) running = len(self.tasks.keys()) - - -def pretty_format_exception(message: str, e: Exception) -> str: - trace = "".join(traceback.format_exception(type(e), e, e.__traceback__)) - return f"{message}\n{trace}" diff --git a/sdks/python/hatchet_sdk/worker/runner/utils/capture_logs.py b/sdks/python/hatchet_sdk/worker/runner/utils/capture_logs.py index c9c2aed0a..d29880701 100644 --- a/sdks/python/hatchet_sdk/worker/runner/utils/capture_logs.py +++ b/sdks/python/hatchet_sdk/worker/runner/utils/capture_logs.py @@ -1,73 +1,114 @@ +import asyncio import functools import logging -from concurrent.futures import ThreadPoolExecutor -from contextvars import ContextVar +from collections.abc import Awaitable, Callable from io import StringIO -from typing import Any, Awaitable, Callable, ItemsView, ParamSpec, TypeVar +from typing import Literal, ParamSpec, TypeVar + +from pydantic import BaseModel from hatchet_sdk.clients.events import EventClient from hatchet_sdk.logger import logger -from hatchet_sdk.runnables.contextvars import ctx_step_run_id, ctx_workflow_run_id +from hatchet_sdk.runnables.contextvars import ( + ctx_action_key, + ctx_step_run_id, + ctx_worker_id, + ctx_workflow_run_id, +) +from hatchet_sdk.utils.typing import STOP_LOOP, STOP_LOOP_TYPE T = TypeVar("T") P = ParamSpec("P") +class ContextVarToCopy(BaseModel): + name: Literal[ + "ctx_workflow_run_id", "ctx_step_run_id", "ctx_action_key", "ctx_worker_id" + ] + value: str | None + + def copy_context_vars( - ctx_vars: ItemsView[ContextVar[Any], Any], + ctx_vars: list[ContextVarToCopy], func: Callable[P, T], *args: P.args, **kwargs: P.kwargs, ) -> T: - for var, value in ctx_vars: - var.set(value) + for var in ctx_vars: + if var.name == "ctx_workflow_run_id": + ctx_workflow_run_id.set(var.value) + elif var.name == "ctx_step_run_id": + ctx_step_run_id.set(var.value) + elif var.name == "ctx_action_key": + ctx_action_key.set(var.value) + elif var.name == "ctx_worker_id": + ctx_worker_id.set(var.value) + else: + raise ValueError(f"Unknown context variable name: {var.name}") + return func(*args, **kwargs) -class InjectingFilter(logging.Filter): - # For some reason, only the InjectingFilter has access to the contextvars method sr.get(), - # otherwise we would use emit within the CustomLogHandler - def filter(self, record: logging.LogRecord) -> bool: - ## TODO: Change how we do this to not assign to the log record - record.workflow_run_id = ctx_workflow_run_id.get() - record.step_run_id = ctx_step_run_id.get() - return True +class LogRecord(BaseModel): + message: str + step_run_id: str + + +class AsyncLogSender: + def __init__(self, event_client: EventClient): + self.event_client = event_client + self.q = asyncio.Queue[LogRecord | STOP_LOOP_TYPE](maxsize=1000) + + async def consume(self) -> None: + while True: + record = await self.q.get() + + if record == STOP_LOOP: + break + + try: + self.event_client.log( + message=record.message, step_run_id=record.step_run_id + ) + except Exception as e: + logger.error(f"Error logging: {e}") + + def publish(self, record: LogRecord | STOP_LOOP_TYPE) -> None: + try: + self.q.put_nowait(record) + except asyncio.QueueFull: + logger.warning("Log queue is full, dropping log message") class CustomLogHandler(logging.StreamHandler): # type: ignore[type-arg] - def __init__(self, event_client: EventClient, stream: StringIO | None = None): + def __init__(self, log_sender: AsyncLogSender, stream: StringIO): 
super().__init__(stream) - self.logger_thread_pool = ThreadPoolExecutor(max_workers=1) - self.event_client = event_client - def _log(self, line: str, step_run_id: str | None) -> None: - try: - if not step_run_id: - return - - self.event_client.log(message=line, step_run_id=step_run_id) - except Exception as e: - logger.error(f"Error logging: {e}") + self.log_sender = log_sender def emit(self, record: logging.LogRecord) -> None: super().emit(record) log_entry = self.format(record) + step_run_id = ctx_step_run_id.get() - ## TODO: Change how we do this to not assign to the log record - self.logger_thread_pool.submit(self._log, log_entry, record.step_run_id) # type: ignore + if not step_run_id: + return + + self.log_sender.publish(LogRecord(message=log_entry, step_run_id=step_run_id)) def capture_logs( - logger: logging.Logger, event_client: "EventClient", func: Callable[P, Awaitable[T]] + logger: logging.Logger, log_sender: AsyncLogSender, func: Callable[P, Awaitable[T]] ) -> Callable[P, Awaitable[T]]: @functools.wraps(func) async def wrapper(*args: P.args, **kwargs: P.kwargs) -> T: log_stream = StringIO() - custom_handler = CustomLogHandler(event_client, log_stream) + custom_handler = CustomLogHandler(log_sender, log_stream) custom_handler.setLevel(logging.INFO) - custom_handler.addFilter(InjectingFilter()) - logger.addHandler(custom_handler) + + if not any(h for h in logger.handlers if isinstance(h, CustomLogHandler)): + logger.addHandler(custom_handler) try: result = await func(*args, **kwargs) diff --git a/sdks/python/hatchet_sdk/worker/worker.py b/sdks/python/hatchet_sdk/worker/worker.py index c4f9cf97a..f6bc78636 100644 --- a/sdks/python/hatchet_sdk/worker/worker.py +++ b/sdks/python/hatchet_sdk/worker/worker.py @@ -5,13 +5,14 @@ import os import re import signal import sys -from contextlib import AsyncExitStack, asynccontextmanager +from collections.abc import AsyncGenerator, Callable +from contextlib import AsyncExitStack, asynccontextmanager, suppress from dataclasses import dataclass, field from enum import Enum from multiprocessing import Queue from multiprocessing.process import BaseProcess from types import FrameType -from typing import Any, AsyncGenerator, Callable, TypeVar, Union +from typing import Any, TypeVar from warnings import warn from aiohttp import web @@ -23,26 +24,22 @@ from pydantic import BaseModel from hatchet_sdk.client import Client from hatchet_sdk.config import ClientConfig from hatchet_sdk.contracts.v1.workflows_pb2 import CreateWorkflowVersionRequest +from hatchet_sdk.exceptions import LoopAlreadyRunningError from hatchet_sdk.logger import logger from hatchet_sdk.runnables.action import Action +from hatchet_sdk.runnables.contextvars import task_count from hatchet_sdk.runnables.task import Task from hatchet_sdk.runnables.workflow import BaseWorkflow +from hatchet_sdk.utils.typing import STOP_LOOP_TYPE from hatchet_sdk.worker.action_listener_process import ( ActionEvent, worker_action_listener_process, ) -from hatchet_sdk.worker.runner.run_loop_manager import ( - STOP_LOOP_TYPE, - WorkerActionRunLoopManager, -) +from hatchet_sdk.worker.runner.run_loop_manager import WorkerActionRunLoopManager T = TypeVar("T") -class LoopAlreadyRunningException(Exception): - pass - - class WorkerStatus(Enum): INITIALIZED = 1 STARTING = 2 @@ -60,7 +57,7 @@ class HealthCheckResponse(BaseModel): name: str slots: int actions: list[str] - labels: dict[str, Union[str, int]] + labels: dict[str, str | int] python_version: str @@ -75,10 +72,8 @@ async def _create_async_context_manager( 
try: yield finally: - try: + with suppress(StopAsyncIteration): await anext(gen) - except StopAsyncIteration: - pass class Worker: @@ -88,11 +83,11 @@ class Worker: config: ClientConfig, slots: int, durable_slots: int, - labels: dict[str, Union[str, int]] = {}, + labels: dict[str, str | int] | None = None, debug: bool = False, owned_loop: bool = True, handle_kill: bool = True, - workflows: list[BaseWorkflow[Any]] = [], + workflows: list[BaseWorkflow[Any]] | None = None, lifespan: LifespanFn | None = None, ) -> None: self.config = config @@ -100,7 +95,7 @@ class Worker: self.slots = slots self.durable_slots = durable_slots self.debug = debug - self.labels = labels + self.labels = labels or {} self.handle_kill = handle_kill self.owned_loop = owned_loop @@ -120,11 +115,11 @@ class Worker: self.ctx = multiprocessing.get_context("spawn") - self.action_queue: "Queue[Action | STOP_LOOP_TYPE]" = self.ctx.Queue() - self.event_queue: "Queue[ActionEvent]" = self.ctx.Queue() + self.action_queue: Queue[Action | STOP_LOOP_TYPE] = self.ctx.Queue() + self.event_queue: Queue[ActionEvent] = self.ctx.Queue() - self.durable_action_queue: "Queue[Action | STOP_LOOP_TYPE]" = self.ctx.Queue() - self.durable_event_queue: "Queue[ActionEvent]" = self.ctx.Queue() + self.durable_action_queue: Queue[Action | STOP_LOOP_TYPE] = self.ctx.Queue() + self.durable_event_queue: Queue[ActionEvent] = self.ctx.Queue() self.loop: asyncio.AbstractEventLoop | None @@ -143,7 +138,7 @@ class Worker: self.lifespan = lifespan self.lifespan_stack: AsyncExitStack | None = None - self.register_workflows(workflows) + self.register_workflows(workflows or []) def register_workflow_from_opts(self, opts: CreateWorkflowVersionRequest) -> None: try: @@ -187,7 +182,7 @@ class Worker: def _setup_loop(self) -> None: try: asyncio.get_running_loop() - raise LoopAlreadyRunningException( + raise LoopAlreadyRunningError( "An event loop is already running. This worker requires its own dedicated event loop. " "Make sure you're not using asyncio.run() or other loop-creating functions in the main thread." ) @@ -248,6 +243,7 @@ class Worker: warn( "Passing a custom event loop is deprecated and will be removed in the future. 
This option no longer has any effect", DeprecationWarning, + stacklevel=1, ) self._setup_loop() @@ -397,8 +393,16 @@ class Worker: if self.loop: self.loop.create_task(self.exit_gracefully()) break - else: - self._status = WorkerStatus.HEALTHY + + if ( + self.config.terminate_worker_after_num_tasks + and task_count.value >= self.config.terminate_worker_after_num_tasks + ): + if self.loop: + self.loop.create_task(self.exit_gracefully()) + break + + self._status = WorkerStatus.HEALTHY await asyncio.sleep(1) except Exception as e: logger.error(f"error checking listener health: {e}") diff --git a/sdks/python/hatchet_sdk/workflow_run.py b/sdks/python/hatchet_sdk/workflow_run.py index 65844f1e3..ae55b5b46 100644 --- a/sdks/python/hatchet_sdk/workflow_run.py +++ b/sdks/python/hatchet_sdk/workflow_run.py @@ -47,11 +47,13 @@ class WorkflowRunRef: while True: try: details = self.runs_client.get(self.workflow_run_id) - except Exception: + except Exception as e: retries += 1 if retries > 10: - raise ValueError(f"Workflow run {self.workflow_run_id} not found") + raise ValueError( + f"Workflow run {self.workflow_run_id} not found" + ) from e time.sleep(1) continue diff --git a/sdks/python/poetry.lock b/sdks/python/poetry.lock index e7d8c915a..f08b97578 100644 --- a/sdks/python/poetry.lock +++ b/sdks/python/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.0.0 and should not be changed by hand. [[package]] name = "aiohappyeyeballs" @@ -6,6 +6,7 @@ version = "2.6.1" description = "Happy Eyeballs for asyncio" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "aiohappyeyeballs-2.6.1-py3-none-any.whl", hash = "sha256:f349ba8f4b75cb25c99c5c2d84e997e485204d2902a9597802b0371f09331fb8"}, {file = "aiohappyeyeballs-2.6.1.tar.gz", hash = "sha256:c3f9d0113123803ccadfdf3f0faa505bc78e6a72d1cc4806cbd719826e943558"}, @@ -17,6 +18,7 @@ version = "3.12.13" description = "Async http client/server framework (asyncio)" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "aiohttp-3.12.13-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:5421af8f22a98f640261ee48aae3a37f0c41371e99412d55eaf2f8a46d5dad29"}, {file = "aiohttp-3.12.13-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0fcda86f6cb318ba36ed8f1396a6a4a3fd8f856f84d426584392083d10da4de0"}, @@ -125,6 +127,7 @@ version = "2.9.1" description = "Simple retry client for aiohttp" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "aiohttp_retry-2.9.1-py3-none-any.whl", hash = "sha256:66d2759d1921838256a05a3f80ad7e724936f083e35be5abb5e16eed6be6dc54"}, {file = "aiohttp_retry-2.9.1.tar.gz", hash = "sha256:8eb75e904ed4ee5c2ec242fefe85bf04240f685391c4879d8f541d6028ff01f1"}, @@ -139,6 +142,7 @@ version = "1.3.2" description = "aiosignal: a list of registered asynchronous callbacks" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "aiosignal-1.3.2-py2.py3-none-any.whl", hash = "sha256:45cde58e409a301715980c2b01d0c28bdde3770d8290b5eb2173759d9acb31a5"}, {file = "aiosignal-1.3.2.tar.gz", hash = "sha256:a8c255c66fafb1e499c9351d0bf32ff2d8a0321595ebac3b93713656d2436f54"}, @@ -153,6 +157,7 @@ version = "0.5.2" description = "Generator-based operators for asynchronous iteration" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "aiostream-0.5.2-py3-none-any.whl", hash = 
"sha256:054660370be9d37f6fe3ece3851009240416bd082e469fd90cc8673d3818cf71"}, {file = "aiostream-0.5.2.tar.gz", hash = "sha256:b71b519a2d66c38f0872403ab86417955b77352f08d9ad02ad46fc3926b389f4"}, @@ -167,6 +172,7 @@ version = "0.7.0" description = "Reusable constraint types to use with typing.Annotated" optional = false python-versions = ">=3.8" +groups = ["main", "docs"] files = [ {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, @@ -178,6 +184,7 @@ version = "4.9.0" description = "High level compatibility layer for multiple asynchronous event loop implementations" optional = false python-versions = ">=3.9" +groups = ["docs"] files = [ {file = "anyio-4.9.0-py3-none-any.whl", hash = "sha256:9f76d541cad6e36af7beb62e978876f3b41e3e04f2c1fbf0884604c0a9c4d93c"}, {file = "anyio-4.9.0.tar.gz", hash = "sha256:673c0c244e15788651a4ff38710fea9675823028a6f08a5eda409e0c9840a028"}, @@ -200,6 +207,8 @@ version = "5.0.1" description = "Timeout context manager for asyncio programs" optional = false python-versions = ">=3.8" +groups = ["main"] +markers = "python_version < \"3.11\"" files = [ {file = "async_timeout-5.0.1-py3-none-any.whl", hash = "sha256:39e3809566ff85354557ec2398b55e096c8364bacac9405a7a1fa429e77fe76c"}, {file = "async_timeout-5.0.1.tar.gz", hash = "sha256:d9321a7a3d5a6a5e187e824d2fa0793ce379a202935782d555d6e9d2735677d3"}, @@ -211,6 +220,7 @@ version = "25.3.0" description = "Classes Without Boilerplate" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "attrs-25.3.0-py3-none-any.whl", hash = "sha256:427318ce031701fea540783410126f03899a97ffc6f61596ad581ac2e40e3bc3"}, {file = "attrs-25.3.0.tar.gz", hash = "sha256:75d7cefc7fb576747b2c81b4442d4d4a1ce0900973527c011d1030fd3bf4af1b"}, @@ -230,6 +240,7 @@ version = "2.2.1" description = "Function decoration for backoff and retry" optional = false python-versions = ">=3.7,<4.0" +groups = ["docs"] files = [ {file = "backoff-2.2.1-py3-none-any.whl", hash = "sha256:63579f9a0628e06278f7e47b7d7d5b6ce20dc65c5e96a6f3ca99a6adca0396e8"}, {file = "backoff-2.2.1.tar.gz", hash = "sha256:03f829f5bb1923180821643f8753b0502c3b682293992485b0eef2807afa5cba"}, @@ -241,6 +252,7 @@ version = "4.13.4" description = "Screen-scraping library" optional = false python-versions = ">=3.7.0" +groups = ["docs"] files = [ {file = "beautifulsoup4-4.13.4-py3-none-any.whl", hash = "sha256:9bbbb14bfde9d79f38b8cd5f8c7c85f4b8f2523190ebed90e950a8dea4cb1c4b"}, {file = "beautifulsoup4-4.13.4.tar.gz", hash = "sha256:dbb3c4e1ceae6aefebdaf2423247260cd062430a410e38c66f2baa50a8437195"}, @@ -263,6 +275,7 @@ version = "24.10.0" description = "The uncompromising code formatter." 
optional = false python-versions = ">=3.9" +groups = ["lint"] files = [ {file = "black-24.10.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e6668650ea4b685440857138e5fe40cde4d652633b1bdffc62933d0db4ed9812"}, {file = "black-24.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1c536fcf674217e87b8cc3657b81809d3c085d7bf3ef262ead700da345bfa6ea"}, @@ -309,6 +322,7 @@ version = "0.0.2" description = "Dummy package for Beautiful Soup (beautifulsoup4)" optional = false python-versions = "*" +groups = ["docs"] files = [ {file = "bs4-0.0.2-py2.py3-none-any.whl", hash = "sha256:abf8742c0805ef7f662dce4b51cca104cffe52b835238afc169142ab9b3fbccc"}, {file = "bs4-0.0.2.tar.gz", hash = "sha256:a48685c58f50fe127722417bae83fe6badf500d54b55f7e39ffe43b798653925"}, @@ -323,6 +337,7 @@ version = "0.2.0" description = "Pure Python implementation of Google Common Expression Language" optional = false python-versions = "<4.0,>=3.8" +groups = ["main"] files = [ {file = "cel_python-0.2.0-py3-none-any.whl", hash = "sha256:478ff73def7b39d51e6982f95d937a57c2b088c491c578fe5cecdbd79f476f60"}, {file = "cel_python-0.2.0.tar.gz", hash = "sha256:75de72a5cf223ec690b236f0cc24da267219e667bd3e7f8f4f20595fcc1c0c0f"}, @@ -342,10 +357,12 @@ version = "2025.6.15" description = "Python package for providing Mozilla's CA Bundle." optional = false python-versions = ">=3.7" +groups = ["main", "docs"] files = [ {file = "certifi-2025.6.15-py3-none-any.whl", hash = "sha256:2e0c7ce7cb5d8f8634ca55d2ba7e6ec2689a2fd6537d8dec1296a477a4910057"}, {file = "certifi-2025.6.15.tar.gz", hash = "sha256:d747aa5a8b9bbbb1bb8c22bb13e22bd1f18e9796defa16bab421f7f7a317323b"}, ] +markers = {main = "extra == \"otel\""} [[package]] name = "charset-normalizer" @@ -353,6 +370,7 @@ version = "3.4.2" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." optional = false python-versions = ">=3.7" +groups = ["main", "docs"] files = [ {file = "charset_normalizer-3.4.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7c48ed483eb946e6c04ccbe02c6b4d1d48e51944b6db70f697e089c193404941"}, {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b2d318c11350e10662026ad0eb71bb51c7812fc8590825304ae0bdd4ac283acd"}, @@ -447,6 +465,7 @@ files = [ {file = "charset_normalizer-3.4.2-py3-none-any.whl", hash = "sha256:7f56930ab0abd1c45cd15be65cc741c28b1c9a34876ce8c17a2fa107810c0af0"}, {file = "charset_normalizer-3.4.2.tar.gz", hash = "sha256:5baececa9ecba31eff645232d59845c07aa030f0c81ee70184a90d35099a0e63"}, ] +markers = {main = "extra == \"otel\""} [[package]] name = "click" @@ -454,6 +473,7 @@ version = "8.2.1" description = "Composable command line interface toolkit" optional = false python-versions = ">=3.10" +groups = ["docs", "lint"] files = [ {file = "click-8.2.1-py3-none-any.whl", hash = "sha256:61a3265b914e850b85317d0b3109c7f8cd35a670f963866005d6ef1d5175a12b"}, {file = "click-8.2.1.tar.gz", hash = "sha256:27c491cc05d968d271d5a1db13e3b5a184636d9d930f148c50b038f0d0646202"}, @@ -468,10 +488,12 @@ version = "0.4.6" description = "Cross-platform colored terminal text." 
optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +groups = ["docs", "lint", "test"] files = [ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] +markers = {lint = "platform_system == \"Windows\"", test = "sys_platform == \"win32\""} [[package]] name = "distro" @@ -479,6 +501,7 @@ version = "1.9.0" description = "Distro - an OS platform information API" optional = false python-versions = ">=3.6" +groups = ["docs"] files = [ {file = "distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2"}, {file = "distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed"}, @@ -490,6 +513,7 @@ version = "0.0.12" description = "Parse Python docstrings in reST, Google and Numpydoc format" optional = false python-versions = "<4.0,>=3.7" +groups = ["docs"] files = [ {file = "docstring_parser_fork-0.0.12-py3-none-any.whl", hash = "sha256:55d7cbbc8b367655efd64372b9a0b33a49bae930a8ddd5cdc4c6112312e28a87"}, {file = "docstring_parser_fork-0.0.12.tar.gz", hash = "sha256:b44c5e0be64ae80f395385f01497d381bd094a57221fd9ff020987d06857b2a0"}, @@ -501,6 +525,8 @@ version = "1.3.0" description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" +groups = ["docs", "test"] +markers = "python_version < \"3.11\"" files = [ {file = "exceptiongroup-1.3.0-py3-none-any.whl", hash = "sha256:4d111e6e0c13d0644cad6ddaa7ed0261a0b36971f6d23e7ec9b4b9097da78a10"}, {file = "exceptiongroup-1.3.0.tar.gz", hash = "sha256:b241f5885f560bc56a59ee63ca4c6a8bfa46ae4ad651af316d4e81817bb9fd88"}, @@ -512,12 +538,28 @@ typing-extensions = {version = ">=4.6.0", markers = "python_version < \"3.13\""} [package.extras] test = ["pytest (>=6)"] +[[package]] +name = "execnet" +version = "2.1.1" +description = "execnet: rapid multi-Python deployment" +optional = false +python-versions = ">=3.8" +groups = ["test"] +files = [ + {file = "execnet-2.1.1-py3-none-any.whl", hash = "sha256:26dee51f1b80cebd6d0ca8e74dd8745419761d3bef34163928cbebbdc4749fdc"}, + {file = "execnet-2.1.1.tar.gz", hash = "sha256:5189b52c6121c24feae288166ab41b32549c7e2348652736540b9e6e7d4e72e3"}, +] + +[package.extras] +testing = ["hatch", "pre-commit", "pytest", "tox"] + [[package]] name = "frozenlist" version = "1.7.0" description = "A list-like structure which implements collections.abc.MutableSequence" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "frozenlist-1.7.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cc4df77d638aa2ed703b878dd093725b72a824c3c546c076e8fdf276f78ee84a"}, {file = "frozenlist-1.7.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:716a9973a2cc963160394f701964fe25012600f3d311f60c790400b00e568b61"}, @@ -631,6 +673,7 @@ version = "2.1.0" description = "Copy your docs directly to the gh-pages branch." 
optional = false python-versions = "*" +groups = ["docs"] files = [ {file = "ghp-import-2.1.0.tar.gz", hash = "sha256:9c535c4c61193c2df8871222567d7fd7e5014d835f97dc7b7439069e2413d343"}, {file = "ghp_import-2.1.0-py3-none-any.whl", hash = "sha256:8337dd7b50877f163d4c0289bc1f1c7f127550241988d568c1db512c4324a619"}, @@ -648,10 +691,12 @@ version = "1.70.0" description = "Common protobufs used in Google APIs" optional = false python-versions = ">=3.7" +groups = ["main", "docs"] files = [ {file = "googleapis_common_protos-1.70.0-py3-none-any.whl", hash = "sha256:b8bfcca8c25a2bb253e0e0b0adaf8c00773e5e6af6fd92397576680b807e0fd8"}, {file = "googleapis_common_protos-1.70.0.tar.gz", hash = "sha256:0e1b44e0ea153e6594f9f394fef15193a68aaaea2d843f83e2742717ca753257"}, ] +markers = {main = "extra == \"otel\""} [package.dependencies] protobuf = ">=3.20.2,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<7.0.0" @@ -665,6 +710,7 @@ version = "1.7.3" description = "Signatures for entire Python programs. Extract the structure, the frame, the skeleton of your project, to generate API documentation or find breaking changes in your API." optional = false python-versions = ">=3.9" +groups = ["docs"] files = [ {file = "griffe-1.7.3-py3-none-any.whl", hash = "sha256:c6b3ee30c2f0f17f30bcdef5068d6ab7a2a4f1b8bf1a3e74b56fffd21e1c5f75"}, {file = "griffe-1.7.3.tar.gz", hash = "sha256:52ee893c6a3a968b639ace8015bec9d36594961e156e23315c8e8e51401fa50b"}, @@ -679,6 +725,7 @@ version = "1.53.0.6" description = "Mypy stubs for gRPC" optional = false python-versions = ">=3.6" +groups = ["lint"] files = [ {file = "grpc_stubs-1.53.0.6-py3-none-any.whl", hash = "sha256:3ffc5a6b5bd84ac46f3d84e2434e97936c1262b47b71b462bdedc43caaf227e1"}, {file = "grpc_stubs-1.53.0.6.tar.gz", hash = "sha256:70a0840747bd73c2c82fe819699bbf4fcf6d59bd0ed27a4713a240e0c697e1ff"}, @@ -693,6 +740,7 @@ version = "1.73.0" description = "HTTP/2-based RPC framework" optional = false python-versions = ">=3.9" +groups = ["main", "docs", "lint"] files = [ {file = "grpcio-1.73.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:d050197eeed50f858ef6c51ab09514856f957dba7b1f7812698260fc9cc417f6"}, {file = "grpcio-1.73.0-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:ebb8d5f4b0200916fb292a964a4d41210de92aba9007e33d8551d85800ea16cb"}, @@ -756,6 +804,7 @@ version = "1.71.0" description = "Protobuf code generator for gRPC" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "grpcio_tools-1.71.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:f4ad7f0d756546902597053d70b3af2606fbd70d7972876cd75c1e241d22ae00"}, {file = "grpcio_tools-1.71.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:64bdb291df61cf570b5256777ad5fe2b1db6d67bc46e55dc56a0a862722ae329"}, @@ -821,6 +870,7 @@ version = "0.16.0" description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" optional = false python-versions = ">=3.8" +groups = ["docs"] files = [ {file = "h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86"}, {file = "h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1"}, @@ -832,6 +882,7 @@ version = "1.0.9" description = "A minimal low-level HTTP client." 
optional = false python-versions = ">=3.8" +groups = ["docs"] files = [ {file = "httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55"}, {file = "httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8"}, @@ -853,6 +904,7 @@ version = "0.28.1" description = "The next generation HTTP client." optional = false python-versions = ">=3.8" +groups = ["docs"] files = [ {file = "httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad"}, {file = "httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc"}, @@ -877,6 +929,7 @@ version = "3.10" description = "Internationalized Domain Names in Applications (IDNA)" optional = false python-versions = ">=3.6" +groups = ["main", "docs"] files = [ {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, @@ -891,10 +944,12 @@ version = "8.7.0" description = "Read metadata from Python packages" optional = false python-versions = ">=3.9" +groups = ["main", "docs"] files = [ {file = "importlib_metadata-8.7.0-py3-none-any.whl", hash = "sha256:e5dd1551894c77868a30651cef00984d50e1002d06942a7101d34870c5f02afd"}, {file = "importlib_metadata-8.7.0.tar.gz", hash = "sha256:d13b81ad223b890aa16c5471f2ac3056cf76c5f10f82d6f9292f0b415f389000"}, ] +markers = {main = "extra == \"otel\""} [package.dependencies] zipp = ">=3.20" @@ -914,6 +969,7 @@ version = "2.1.0" description = "brain-dead simple config-ini parsing" optional = false python-versions = ">=3.8" +groups = ["test"] files = [ {file = "iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760"}, {file = "iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7"}, @@ -925,6 +981,7 @@ version = "5.13.2" description = "A Python utility / library to sort Python imports." optional = false python-versions = ">=3.8.0" +groups = ["lint"] files = [ {file = "isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6"}, {file = "isort-5.13.2.tar.gz", hash = "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109"}, @@ -939,6 +996,7 @@ version = "3.1.6" description = "A very fast and expressive template engine." optional = false python-versions = ">=3.7" +groups = ["docs"] files = [ {file = "jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67"}, {file = "jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d"}, @@ -956,6 +1014,7 @@ version = "0.10.0" description = "Fast iterable JSON parser." 
optional = false python-versions = ">=3.9" +groups = ["docs"] files = [ {file = "jiter-0.10.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:cd2fb72b02478f06a900a5782de2ef47e0396b3e1f7d5aba30daeb1fce66f303"}, {file = "jiter-0.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:32bb468e3af278f095d3fa5b90314728a6916d89ba3d0ffb726dd9bf7367285e"}, @@ -1042,6 +1101,7 @@ version = "1.0.1" description = "JSON Matching Expressions" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "jmespath-1.0.1-py3-none-any.whl", hash = "sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980"}, {file = "jmespath-1.0.1.tar.gz", hash = "sha256:90261b206d6defd58fdd5e85f478bf633a2901798906be2ad389150c5c60edbe"}, @@ -1053,6 +1113,7 @@ version = "3.0.0b3" description = "A client library for accessing langfuse" optional = false python-versions = "<4.0,>=3.9" +groups = ["docs"] files = [ {file = "langfuse-3.0.0b3-py3-none-any.whl", hash = "sha256:7e748eb83e9b6aa003f7b7072e6ac24ab43ea93af4f5381a10930de489f37e46"}, {file = "langfuse-3.0.0b3.tar.gz", hash = "sha256:8d9c42bf15cf378f1bf65ad8d3b52408fb439861c153196df6680dfca2f30a6e"}, @@ -1079,6 +1140,7 @@ version = "0.12.0" description = "a modern parsing library" optional = false python-versions = "*" +groups = ["main"] files = [ {file = "lark-0.12.0-py2.py3-none-any.whl", hash = "sha256:ed1d891cbcf5151ead1c1d14663bf542443e579e63a76ae175b01b899bd854ca"}, {file = "lark-0.12.0.tar.gz", hash = "sha256:7da76fcfddadabbbbfd949bbae221efd33938451d90b1fefbbc423c3cccf48ef"}, @@ -1095,6 +1157,7 @@ version = "3.8" description = "Python implementation of John Gruber's Markdown." optional = false python-versions = ">=3.9" +groups = ["docs"] files = [ {file = "markdown-3.8-py3-none-any.whl", hash = "sha256:794a929b79c5af141ef5ab0f2f642d0f7b1872981250230e72682346f7cc90dc"}, {file = "markdown-3.8.tar.gz", hash = "sha256:7df81e63f0df5c4b24b7d156eb81e4690595239b7d70937d0409f1b0de319c6f"}, @@ -1110,6 +1173,7 @@ version = "1.1.0" description = "Convert HTML to markdown." optional = false python-versions = "*" +groups = ["docs"] files = [ {file = "markdownify-1.1.0-py3-none-any.whl", hash = "sha256:32a5a08e9af02c8a6528942224c91b933b4bd2c7d078f9012943776fc313eeef"}, {file = "markdownify-1.1.0.tar.gz", hash = "sha256:449c0bbbf1401c5112379619524f33b63490a8fa479456d41de9dc9e37560ebd"}, @@ -1125,6 +1189,7 @@ version = "3.0.2" description = "Safely add untrusted strings to HTML/XML markup." optional = false python-versions = ">=3.9" +groups = ["docs"] files = [ {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8"}, {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158"}, @@ -1195,6 +1260,7 @@ version = "1.3.4" description = "A deep merge function for 🐍." optional = false python-versions = ">=3.6" +groups = ["docs"] files = [ {file = "mergedeep-1.3.4-py3-none-any.whl", hash = "sha256:70775750742b25c0d8f36c55aed03d24c3384d17c951b3175d898bd778ef0307"}, {file = "mergedeep-1.3.4.tar.gz", hash = "sha256:0096d52e9dad9939c3d975a774666af186eda617e6ca84df4c94dec30004f2a8"}, @@ -1206,6 +1272,7 @@ version = "1.6.1" description = "Project documentation with Markdown." 
optional = false python-versions = ">=3.8" +groups = ["docs"] files = [ {file = "mkdocs-1.6.1-py3-none-any.whl", hash = "sha256:db91759624d1647f3f34aa0c3f327dd2601beae39a366d6e064c03468d35c20e"}, {file = "mkdocs-1.6.1.tar.gz", hash = "sha256:7b432f01d928c084353ab39c57282f29f92136665bdd6abf7c1ec8d822ef86f2"}, @@ -1236,6 +1303,7 @@ version = "1.4.2" description = "Automatically link across pages in MkDocs." optional = false python-versions = ">=3.9" +groups = ["docs"] files = [ {file = "mkdocs_autorefs-1.4.2-py3-none-any.whl", hash = "sha256:83d6d777b66ec3c372a1aad4ae0cf77c243ba5bcda5bf0c6b8a2c5e7a3d89f13"}, {file = "mkdocs_autorefs-1.4.2.tar.gz", hash = "sha256:e2ebe1abd2b67d597ed19378c0fff84d73d1dbce411fce7a7cc6f161888b6749"}, @@ -1252,6 +1320,7 @@ version = "0.2.0" description = "MkDocs extension that lists all dependencies according to a mkdocs.yml file" optional = false python-versions = ">=3.8" +groups = ["docs"] files = [ {file = "mkdocs_get_deps-0.2.0-py3-none-any.whl", hash = "sha256:2bf11d0b133e77a0dd036abeeb06dec8775e46efa526dc70667d8863eefc6134"}, {file = "mkdocs_get_deps-0.2.0.tar.gz", hash = "sha256:162b3d129c7fad9b19abfdcb9c1458a651628e4b1dea628ac68790fb3061c60c"}, @@ -1268,6 +1337,7 @@ version = "0.29.1" description = "Automatic documentation from sources, for MkDocs." optional = false python-versions = ">=3.9" +groups = ["docs"] files = [ {file = "mkdocstrings-0.29.1-py3-none-any.whl", hash = "sha256:37a9736134934eea89cbd055a513d40a020d87dfcae9e3052c2a6b8cd4af09b6"}, {file = "mkdocstrings-0.29.1.tar.gz", hash = "sha256:8722f8f8c5cd75da56671e0a0c1bbed1df9946c0cef74794d6141b34011abd42"}, @@ -1293,6 +1363,7 @@ version = "1.16.12" description = "A Python handler for mkdocstrings." optional = false python-versions = ">=3.9" +groups = ["docs"] files = [ {file = "mkdocstrings_python-1.16.12-py3-none-any.whl", hash = "sha256:22ded3a63b3d823d57457a70ff9860d5a4de9e8b1e482876fc9baabaf6f5f374"}, {file = "mkdocstrings_python-1.16.12.tar.gz", hash = "sha256:9b9eaa066e0024342d433e332a41095c4e429937024945fea511afe58f63175d"}, @@ -1310,6 +1381,7 @@ version = "6.4.4" description = "multidict implementation" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "multidict-6.4.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:8adee3ac041145ffe4488ea73fa0a622b464cc25340d98be76924d0cda8545ff"}, {file = "multidict-6.4.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b61e98c3e2a861035aaccd207da585bdcacef65fe01d7a0d07478efac005e028"}, @@ -1426,6 +1498,7 @@ version = "1.16.1" description = "Optional static typing for Python" optional = false python-versions = ">=3.9" +groups = ["lint"] files = [ {file = "mypy-1.16.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b4f0fed1022a63c6fec38f28b7fc77fca47fd490445c69d0a66266c59dd0b88a"}, {file = "mypy-1.16.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:86042bbf9f5a05ea000d3203cf87aa9d0ccf9a01f73f71c58979eb9249f46d72"}, @@ -1480,6 +1553,7 @@ version = "1.1.0" description = "Type system extensions for programs checked with the mypy type checker." 
optional = false python-versions = ">=3.8" +groups = ["lint"] files = [ {file = "mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505"}, {file = "mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558"}, @@ -1491,6 +1565,7 @@ version = "1.87.0" description = "The official Python library for the openai API" optional = false python-versions = ">=3.8" +groups = ["docs"] files = [ {file = "openai-1.87.0-py3-none-any.whl", hash = "sha256:f9bcae02ac4fff6522276eee85d33047335cfb692b863bd8261353ce4ada5692"}, {file = "openai-1.87.0.tar.gz", hash = "sha256:5c69764171e0db9ef993e7a4d8a01fd8ff1026b66f8bdd005b9461782b6e7dfc"}, @@ -1517,10 +1592,12 @@ version = "1.34.1" description = "OpenTelemetry Python API" optional = false python-versions = ">=3.9" +groups = ["main", "docs"] files = [ {file = "opentelemetry_api-1.34.1-py3-none-any.whl", hash = "sha256:b7df4cb0830d5a6c29ad0c0691dbae874d8daefa934b8b1d642de48323d32a8c"}, {file = "opentelemetry_api-1.34.1.tar.gz", hash = "sha256:64f0bd06d42824843731d05beea88d4d4b6ae59f9fe347ff7dfa2cc14233bbb3"}, ] +markers = {main = "extra == \"otel\""} [package.dependencies] importlib-metadata = ">=6.0,<8.8.0" @@ -1532,6 +1609,8 @@ version = "0.55b1" description = "OpenTelemetry Python Distro" optional = true python-versions = ">=3.9" +groups = ["main"] +markers = "extra == \"otel\"" files = [ {file = "opentelemetry_distro-0.55b1-py3-none-any.whl", hash = "sha256:6b9dc9bf78b221206096f964e9cdf9bbba4d703725e1115de4b8c83cad1e45cc"}, {file = "opentelemetry_distro-0.55b1.tar.gz", hash = "sha256:da442bf137ab48f531b87d2ec80a19eada53b54c153ad96f0689f946a8d9bcd3"}, @@ -1551,10 +1630,12 @@ version = "1.34.1" description = "OpenTelemetry Collector Exporters" optional = false python-versions = ">=3.9" +groups = ["main", "docs"] files = [ {file = "opentelemetry_exporter_otlp-1.34.1-py3-none-any.whl", hash = "sha256:f4a453e9cde7f6362fd4a090d8acf7881d1dc585540c7b65cbd63e36644238d4"}, {file = "opentelemetry_exporter_otlp-1.34.1.tar.gz", hash = "sha256:71c9ad342d665d9e4235898d205db17c5764cd7a69acb8a5dcd6d5e04c4c9988"}, ] +markers = {main = "extra == \"otel\""} [package.dependencies] opentelemetry-exporter-otlp-proto-grpc = "1.34.1" @@ -1566,10 +1647,12 @@ version = "1.34.1" description = "OpenTelemetry Protobuf encoding" optional = false python-versions = ">=3.9" +groups = ["main", "docs"] files = [ {file = "opentelemetry_exporter_otlp_proto_common-1.34.1-py3-none-any.whl", hash = "sha256:8e2019284bf24d3deebbb6c59c71e6eef3307cd88eff8c633e061abba33f7e87"}, {file = "opentelemetry_exporter_otlp_proto_common-1.34.1.tar.gz", hash = "sha256:b59a20a927facd5eac06edaf87a07e49f9e4a13db487b7d8a52b37cb87710f8b"}, ] +markers = {main = "extra == \"otel\""} [package.dependencies] opentelemetry-proto = "1.34.1" @@ -1580,10 +1663,12 @@ version = "1.34.1" description = "OpenTelemetry Collector Protobuf over gRPC Exporter" optional = false python-versions = ">=3.9" +groups = ["main", "docs"] files = [ {file = "opentelemetry_exporter_otlp_proto_grpc-1.34.1-py3-none-any.whl", hash = "sha256:04bb8b732b02295be79f8a86a4ad28fae3d4ddb07307a98c7aa6f331de18cca6"}, {file = "opentelemetry_exporter_otlp_proto_grpc-1.34.1.tar.gz", hash = "sha256:7c841b90caa3aafcfc4fee58487a6c71743c34c6dc1787089d8b0578bbd794dd"}, ] +markers = {main = "extra == \"otel\""} [package.dependencies] googleapis-common-protos = ">=1.52,<2.0" @@ -1603,10 +1688,12 @@ version = "1.34.1" description = "OpenTelemetry Collector 
Protobuf over HTTP Exporter" optional = false python-versions = ">=3.9" +groups = ["main", "docs"] files = [ {file = "opentelemetry_exporter_otlp_proto_http-1.34.1-py3-none-any.whl", hash = "sha256:5251f00ca85872ce50d871f6d3cc89fe203b94c3c14c964bbdc3883366c705d8"}, {file = "opentelemetry_exporter_otlp_proto_http-1.34.1.tar.gz", hash = "sha256:aaac36fdce46a8191e604dcf632e1f9380c7d5b356b27b3e0edb5610d9be28ad"}, ] +markers = {main = "extra == \"otel\""} [package.dependencies] googleapis-common-protos = ">=1.52,<2.0" @@ -1623,6 +1710,8 @@ version = "0.55b1" description = "Instrumentation Tools & Auto Instrumentation for OpenTelemetry Python" optional = true python-versions = ">=3.9" +groups = ["main"] +markers = "extra == \"otel\"" files = [ {file = "opentelemetry_instrumentation-0.55b1-py3-none-any.whl", hash = "sha256:cbb1496b42bc394e01bc63701b10e69094e8564e281de063e4328d122cc7a97e"}, {file = "opentelemetry_instrumentation-0.55b1.tar.gz", hash = "sha256:2dc50aa207b9bfa16f70a1a0571e011e737a9917408934675b89ef4d5718c87b"}, @@ -1640,10 +1729,12 @@ version = "1.34.1" description = "OpenTelemetry Python Proto" optional = false python-versions = ">=3.9" +groups = ["main", "docs"] files = [ {file = "opentelemetry_proto-1.34.1-py3-none-any.whl", hash = "sha256:eb4bb5ac27f2562df2d6857fc557b3a481b5e298bc04f94cc68041f00cebcbd2"}, {file = "opentelemetry_proto-1.34.1.tar.gz", hash = "sha256:16286214e405c211fc774187f3e4bbb1351290b8dfb88e8948af209ce85b719e"}, ] +markers = {main = "extra == \"otel\""} [package.dependencies] protobuf = ">=5.0,<6.0" @@ -1654,10 +1745,12 @@ version = "1.34.1" description = "OpenTelemetry Python SDK" optional = false python-versions = ">=3.9" +groups = ["main", "docs"] files = [ {file = "opentelemetry_sdk-1.34.1-py3-none-any.whl", hash = "sha256:308effad4059562f1d92163c61c8141df649da24ce361827812c40abb2a1e96e"}, {file = "opentelemetry_sdk-1.34.1.tar.gz", hash = "sha256:8091db0d763fcd6098d4781bbc80ff0971f94e260739aa6afe6fd379cdf3aa4d"}, ] +markers = {main = "extra == \"otel\""} [package.dependencies] opentelemetry-api = "1.34.1" @@ -1670,10 +1763,12 @@ version = "0.55b1" description = "OpenTelemetry Semantic Conventions" optional = false python-versions = ">=3.9" +groups = ["main", "docs"] files = [ {file = "opentelemetry_semantic_conventions-0.55b1-py3-none-any.whl", hash = "sha256:5da81dfdf7d52e3d37f8fe88d5e771e191de924cfff5f550ab0b8f7b2409baed"}, {file = "opentelemetry_semantic_conventions-0.55b1.tar.gz", hash = "sha256:ef95b1f009159c28d7a7849f5cbc71c4c34c845bb514d66adfdf1b3fff3598b3"}, ] +markers = {main = "extra == \"otel\""} [package.dependencies] opentelemetry-api = "1.34.1" @@ -1685,10 +1780,12 @@ version = "24.2" description = "Core utilities for Python packages" optional = false python-versions = ">=3.8" +groups = ["main", "docs", "lint", "test"] files = [ {file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"}, {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"}, ] +markers = {main = "extra == \"otel\""} [[package]] name = "pathspec" @@ -1696,6 +1793,7 @@ version = "0.12.1" description = "Utility library for gitignore style pattern matching of file paths." 
optional = false python-versions = ">=3.8" +groups = ["docs", "lint"] files = [ {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"}, {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"}, @@ -1707,6 +1805,7 @@ version = "4.3.8" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." optional = false python-versions = ">=3.9" +groups = ["docs", "lint"] files = [ {file = "platformdirs-4.3.8-py3-none-any.whl", hash = "sha256:ff7059bb7eb1179e2685604f4aaf157cfd9535242bd23742eadc3c13542139b4"}, {file = "platformdirs-4.3.8.tar.gz", hash = "sha256:3d512d96e16bcb959a814c9f348431070822a6496326a4be0911c40b5a74c2bc"}, @@ -1723,6 +1822,7 @@ version = "1.6.0" description = "plugin and hook calling mechanisms for python" optional = false python-versions = ">=3.9" +groups = ["test"] files = [ {file = "pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746"}, {file = "pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3"}, @@ -1738,6 +1838,7 @@ version = "0.21.1" description = "Python client for the Prometheus monitoring system." optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "prometheus_client-0.21.1-py3-none-any.whl", hash = "sha256:594b45c410d6f4f8888940fe80b5cc2521b305a1fafe1c58609ef715a001f301"}, {file = "prometheus_client-0.21.1.tar.gz", hash = "sha256:252505a722ac04b0456be05c05f75f45d760c2911ffc45f2a06bcaed9f3ae3fb"}, @@ -1752,6 +1853,7 @@ version = "0.3.2" description = "Accelerated property cache" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "propcache-0.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:22d9962a358aedbb7a2e36187ff273adeaab9743373a272976d2e348d08c7770"}, {file = "propcache-0.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0d0fda578d1dc3f77b6b5a5dce3b9ad69a8250a891760a548df850a5e8da87f3"}, @@ -1859,6 +1961,7 @@ version = "5.29.5" description = "" optional = false python-versions = ">=3.8" +groups = ["main", "docs"] files = [ {file = "protobuf-5.29.5-cp310-abi3-win32.whl", hash = "sha256:3f1c6468a2cfd102ff4703976138844f78ebd1fb45f49011afc5139e9e283079"}, {file = "protobuf-5.29.5-cp310-abi3-win_amd64.whl", hash = "sha256:3f76e3a3675b4a4d867b52e4a5f5b78a2ef9565549d4037e06cf7b0942b1d3fc"}, @@ -1879,6 +1982,7 @@ version = "6.1.1" description = "Cross-platform lib for process and system monitoring in Python." 
optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" +groups = ["lint"] files = [ {file = "psutil-6.1.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:9ccc4316f24409159897799b83004cb1e24f9819b0dcf9c0b68bdcb6cefee6a8"}, {file = "psutil-6.1.1-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:ca9609c77ea3b8481ab005da74ed894035936223422dc591d6772b147421f777"}, @@ -1909,6 +2013,7 @@ version = "3.2.9" description = "PostgreSQL database adapter for Python" optional = false python-versions = ">=3.8" +groups = ["test"] files = [ {file = "psycopg-3.2.9-py3-none-any.whl", hash = "sha256:01a8dadccdaac2123c916208c96e06631641c0566b22005493f09663c7a8d3b6"}, {file = "psycopg-3.2.9.tar.gz", hash = "sha256:2fbb46fcd17bc81f993f28c47f1ebea38d66ae97cc2dbc3cad73b37cefbff700"}, @@ -1933,6 +2038,7 @@ version = "3.2.6" description = "Connection Pool for Psycopg" optional = false python-versions = ">=3.8" +groups = ["test"] files = [ {file = "psycopg_pool-3.2.6-py3-none-any.whl", hash = "sha256:5887318a9f6af906d041a0b1dc1c60f8f0dda8340c2572b74e10907b51ed5da7"}, {file = "psycopg_pool-3.2.6.tar.gz", hash = "sha256:0f92a7817719517212fbfe2fd58b8c35c1850cdd2a80d36b581ba2085d9148e5"}, @@ -1947,6 +2053,7 @@ version = "2.11.7" description = "Data validation using Python type hints" optional = false python-versions = ">=3.9" +groups = ["main", "docs"] files = [ {file = "pydantic-2.11.7-py3-none-any.whl", hash = "sha256:dde5df002701f6de26248661f6835bbe296a47bf73990135c7d07ce741b9623b"}, {file = "pydantic-2.11.7.tar.gz", hash = "sha256:d989c3c6cb79469287b1569f7447a17848c998458d49ebe294e975b9baf0f0db"}, @@ -1968,6 +2075,7 @@ version = "2.33.2" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.9" +groups = ["main", "docs"] files = [ {file = "pydantic_core-2.33.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2b3d326aaef0c0399d9afffeb6367d5e26ddc24d351dbc9c636840ac355dc5d8"}, {file = "pydantic_core-2.33.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e5b2671f05ba48b94cb90ce55d8bdcaaedb8ba00cc5359f6810fc918713983d"}, @@ -2079,6 +2187,7 @@ version = "2.9.1" description = "Settings management using Pydantic" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "pydantic_settings-2.9.1-py3-none-any.whl", hash = "sha256:59b4f431b1defb26fe620c71a7d3968a710d719f5f4cdbbdb7926edeb770f6ef"}, {file = "pydantic_settings-2.9.1.tar.gz", hash = "sha256:c509bf79d27563add44e8446233359004ed85066cd096d8b510f715e6ef5d268"}, @@ -2102,6 +2211,7 @@ version = "0.6.6" description = "A Python docstring linter that checks arguments, returns, yields, and raises sections" optional = false python-versions = ">=3.9" +groups = ["docs"] files = [ {file = "pydoclint-0.6.6-py2.py3-none-any.whl", hash = "sha256:7ce8ed36f60f9201bf1c1edacb32c55eb051af80fdd7304480c6419ee0ced43c"}, {file = "pydoclint-0.6.6.tar.gz", hash = "sha256:22862a8494d05cdf22574d6533f4c47933c0ae1674b0f8b961d6ef42536eaa69"}, @@ -2121,6 +2231,7 @@ version = "2.19.1" description = "Pygments is a syntax highlighting package written in Python." optional = false python-versions = ">=3.8" +groups = ["test"] files = [ {file = "pygments-2.19.1-py3-none-any.whl", hash = "sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c"}, {file = "pygments-2.19.1.tar.gz", hash = "sha256:61c16d2a8576dc0649d9f39e089b5f02bcd27fba10d8fb4dcc28173f7a45151f"}, @@ -2135,6 +2246,7 @@ version = "10.15" description = "Extension pack for Python Markdown." 
 optional = false
 python-versions = ">=3.8"
+groups = ["docs"]
 files = [
     {file = "pymdown_extensions-10.15-py3-none-any.whl", hash = "sha256:46e99bb272612b0de3b7e7caf6da8dd5f4ca5212c0b273feb9304e236c484e5f"},
     {file = "pymdown_extensions-10.15.tar.gz", hash = "sha256:0e5994e32155f4b03504f939e501b981d306daf7ec2aa1cd2eb6bd300784f8f7"},
@@ -2153,6 +2265,7 @@ version = "8.4.0"
 description = "pytest: simple powerful testing with Python"
 optional = false
 python-versions = ">=3.9"
+groups = ["test"]
 files = [
     {file = "pytest-8.4.0-py3-none-any.whl", hash = "sha256:f40f825768ad76c0977cbacdf1fd37c6f7a468e460ea6a0636078f8972d4517e"},
     {file = "pytest-8.4.0.tar.gz", hash = "sha256:14d920b48472ea0dbf68e45b96cd1ffda4705f33307dcc86c676c1b5104838a6"},
@@ -2176,6 +2289,7 @@ version = "0.25.3"
 description = "Pytest support for asyncio"
 optional = false
 python-versions = ">=3.9"
+groups = ["test"]
 files = [
     {file = "pytest_asyncio-0.25.3-py3-none-any.whl", hash = "sha256:9e89518e0f9bd08928f97a3482fdc4e244df17529460bc038291ccaf8f85c7c3"},
     {file = "pytest_asyncio-0.25.3.tar.gz", hash = "sha256:fc1da2cf9f125ada7e710b4ddad05518d4cee187ae9412e9ac9271003497f07a"},
@@ -2194,6 +2308,7 @@ version = "1.1.5"
 description = "pytest plugin that allows you to add environment variables."
 optional = false
 python-versions = ">=3.8"
+groups = ["test"]
 files = [
     {file = "pytest_env-1.1.5-py3-none-any.whl", hash = "sha256:ce90cf8772878515c24b31cd97c7fa1f4481cd68d588419fd45f10ecaee6bc30"},
     {file = "pytest_env-1.1.5.tar.gz", hash = "sha256:91209840aa0e43385073ac464a554ad2947cc2fd663a9debf88d03b01e0cc1cf"},
@@ -2212,6 +2327,7 @@ version = "1.7.0"
 description = "Adds the ability to retry flaky tests in CI environments"
 optional = false
 python-versions = ">=3.9"
+groups = ["test"]
 files = [
     {file = "pytest_retry-1.7.0-py3-none-any.whl", hash = "sha256:a2dac85b79a4e2375943f1429479c65beb6c69553e7dae6b8332be47a60954f4"},
     {file = "pytest_retry-1.7.0.tar.gz", hash = "sha256:f8d52339f01e949df47c11ba9ee8d5b362f5824dff580d3870ec9ae0057df80f"},
@@ -2223,12 +2339,34 @@ pytest = ">=7.0.0"
 [package.extras]
 dev = ["black", "flake8", "isort", "mypy"]
 
+[[package]]
+name = "pytest-xdist"
+version = "3.7.0"
+description = "pytest xdist plugin for distributed testing, most importantly across multiple CPUs"
+optional = false
+python-versions = ">=3.9"
+groups = ["test"]
+files = [
+    {file = "pytest_xdist-3.7.0-py3-none-any.whl", hash = "sha256:7d3fbd255998265052435eb9daa4e99b62e6fb9cfb6efd1f858d4d8c0c7f0ca0"},
+    {file = "pytest_xdist-3.7.0.tar.gz", hash = "sha256:f9248c99a7c15b7d2f90715df93610353a485827bc06eefb6566d23f6400f126"},
+]
+
+[package.dependencies]
+execnet = ">=2.1"
+pytest = ">=7.0.0"
+
+[package.extras]
+psutil = ["psutil (>=3.0)"]
+setproctitle = ["setproctitle"]
+testing = ["filelock"]
+
 [[package]]
 name = "python-dateutil"
 version = "2.9.0.post0"
 description = "Extensions to the standard Python datetime module"
 optional = false
 python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
+groups = ["main", "docs"]
 files = [
     {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"},
     {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"},
@@ -2243,6 +2381,7 @@ version = "1.1.0"
 description = "Read key-value pairs from a .env file and set them as environment variables"
 optional = false
 python-versions = ">=3.9"
+groups = ["main"]
 files = [
     {file = "python_dotenv-1.1.0-py3-none-any.whl", hash = "sha256:d7c01d9e2293916c18baf562d95698754b0dbbb5e74d457c45d4f6561fb9d55d"},
     {file = "python_dotenv-1.1.0.tar.gz", hash = "sha256:41f90bc6f5f177fb41f53e87666db362025010eb28f60a01c9143bfa33a2b2d5"},
@@ -2257,6 +2396,7 @@ version = "6.0.2"
 description = "YAML parser and emitter for Python"
 optional = false
 python-versions = ">=3.8"
+groups = ["main", "docs"]
 files = [
     {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"},
     {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"},
@@ -2319,6 +2459,7 @@ version = "1.1"
 description = "A custom YAML tag for referencing environment variables in YAML files."
 optional = false
 python-versions = ">=3.9"
+groups = ["docs"]
 files = [
     {file = "pyyaml_env_tag-1.1-py3-none-any.whl", hash = "sha256:17109e1a528561e32f026364712fee1264bc2ea6715120891174ed1b980d2e04"},
     {file = "pyyaml_env_tag-1.1.tar.gz", hash = "sha256:2eb38b75a2d21ee0475d6d97ec19c63287a7e140231e4214969d0eac923cd7ff"},
@@ -2333,10 +2474,12 @@ version = "2.32.4"
 description = "Python HTTP for Humans."
 optional = false
 python-versions = ">=3.8"
+groups = ["main", "docs"]
 files = [
     {file = "requests-2.32.4-py3-none-any.whl", hash = "sha256:27babd3cda2a6d50b30443204ee89830707d396671944c998b5975b031ac2b2c"},
     {file = "requests-2.32.4.tar.gz", hash = "sha256:27d0316682c8a29834d3264820024b62a36942083d52caf2f14c0591336d3422"},
 ]
+markers = {main = "extra == \"otel\""}
 
 [package.dependencies]
 certifi = ">=2017.4.17"
@@ -2354,6 +2497,7 @@ version = "0.9.10"
 description = "An extremely fast Python linter and code formatter, written in Rust."
 optional = false
 python-versions = ">=3.7"
+groups = ["lint"]
 files = [
     {file = "ruff-0.9.10-py3-none-linux_armv6l.whl", hash = "sha256:eb4d25532cfd9fe461acc83498361ec2e2252795b4f40b17e80692814329e42d"},
     {file = "ruff-0.9.10-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:188a6638dab1aa9bb6228a7302387b2c9954e455fb25d6b4470cb0641d16759d"},
@@ -2381,6 +2525,7 @@ version = "80.9.0"
 description = "Easily download, build, install, upgrade, and uninstall Python packages"
 optional = false
 python-versions = ">=3.9"
+groups = ["main"]
 files = [
     {file = "setuptools-80.9.0-py3-none-any.whl", hash = "sha256:062d34222ad13e0cc312a4c02d73f059e86a4acbfbdea8f8f76b28c99f306922"},
     {file = "setuptools-80.9.0.tar.gz", hash = "sha256:f36b47402ecde768dbfafc46e8e4207b4360c654f1f3bb84475f0a28628fb19c"},
@@ -2401,6 +2546,7 @@ version = "1.17.0"
 description = "Python 2 and 3 compatibility utilities"
 optional = false
 python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
+groups = ["main", "docs"]
 files = [
     {file = "six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274"},
     {file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"},
@@ -2412,6 +2558,7 @@ version = "1.3.1"
 description = "Sniff out which async library your code is running under"
 optional = false
 python-versions = ">=3.7"
+groups = ["docs"]
 files = [
     {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"},
     {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"},
@@ -2423,6 +2570,7 @@ version = "2.7"
 description = "A modern CSS selector implementation for Beautiful Soup."
 optional = false
 python-versions = ">=3.8"
+groups = ["docs"]
 files = [
     {file = "soupsieve-2.7-py3-none-any.whl", hash = "sha256:6e60cc5c1ffaf1cebcc12e8188320b72071e922c2e897f737cadce79ad5d30c4"},
     {file = "soupsieve-2.7.tar.gz", hash = "sha256:ad282f9b6926286d2ead4750552c8a6142bc4c783fd66b0293547c8fe6ae126a"},
@@ -2434,6 +2582,7 @@ version = "9.1.2"
 description = "Retry code until it succeeds"
 optional = false
 python-versions = ">=3.9"
+groups = ["main"]
 files = [
     {file = "tenacity-9.1.2-py3-none-any.whl", hash = "sha256:f77bf36710d8b73a50b2dd155c97b870017ad21afe6ab300326b0371b3b05138"},
     {file = "tenacity-9.1.2.tar.gz", hash = "sha256:1169d376c297e7de388d18b4481760d478b0e99a777cad3a9c86e556f4b697cb"},
@@ -2449,6 +2598,8 @@ version = "2.2.1"
 description = "A lil' TOML parser"
 optional = false
 python-versions = ">=3.8"
+groups = ["docs", "lint", "test"]
+markers = "python_version < \"3.11\""
 files = [
     {file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"},
     {file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"},
@@ -2490,6 +2641,7 @@ version = "4.67.1"
 description = "Fast, Extensible Progress Meter"
 optional = false
 python-versions = ">=3.7"
+groups = ["docs"]
 files = [
     {file = "tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2"},
     {file = "tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2"},
@@ -2511,6 +2663,7 @@ version = "4.12.0.20250516"
 description = "Typing stubs for beautifulsoup4"
 optional = false
 python-versions = ">=3.9"
+groups = ["docs"]
 files = [
     {file = "types_beautifulsoup4-4.12.0.20250516-py3-none-any.whl", hash = "sha256:5923399d4a1ba9cc8f0096fe334cc732e130269541d66261bb42ab039c0376ee"},
     {file = "types_beautifulsoup4-4.12.0.20250516.tar.gz", hash = "sha256:aa19dd73b33b70d6296adf92da8ab8a0c945c507e6fb7d5db553415cc77b417e"},
@@ -2525,6 +2678,7 @@ version = "1.1.11.20250516"
 description = "Typing stubs for html5lib"
 optional = false
 python-versions = ">=3.9"
+groups = ["docs"]
 files = [
     {file = "types_html5lib-1.1.11.20250516-py3-none-any.whl", hash = "sha256:5e407b14b1bd2b9b1107cbd1e2e19d4a0c46d60febd231c7ab7313d7405663c1"},
     {file = "types_html5lib-1.1.11.20250516.tar.gz", hash = "sha256:65043a6718c97f7d52567cc0cdf41efbfc33b1f92c6c0c5e19f60a7ec69ae720"},
@@ -2536,6 +2690,7 @@ version = "5.29.1.20250403"
 description = "Typing stubs for protobuf"
 optional = false
 python-versions = ">=3.9"
+groups = ["lint"]
 files = [
     {file = "types_protobuf-5.29.1.20250403-py3-none-any.whl", hash = "sha256:c71de04106a2d54e5b2173d0a422058fae0ef2d058d70cf369fb797bf61ffa59"},
     {file = "types_protobuf-5.29.1.20250403.tar.gz", hash = "sha256:7ff44f15022119c9d7558ce16e78b2d485bf7040b4fadced4dd069bb5faf77a2"},
@@ -2547,6 +2702,7 @@ version = "6.1.0.20241221"
 description = "Typing stubs for psutil"
 optional = false
 python-versions = ">=3.8"
+groups = ["lint"]
 files = [
     {file = "types_psutil-6.1.0.20241221-py3-none-any.whl", hash = "sha256:8498dbe13285a9ba7d4b2fa934c569cc380efc74e3dacdb34ae16d2cdf389ec3"},
     {file = "types_psutil-6.1.0.20241221.tar.gz", hash = "sha256:600f5a36bd5e0eb8887f0e3f3ff2cf154d90690ad8123c8a707bba4ab94d3185"},
@@ -2558,6 +2714,7 @@ version = "2.9.0.20250516"
 description = "Typing stubs for python-dateutil"
 optional = false
 python-versions = ">=3.9"
+groups = ["main"]
 files = [
     {file = "types_python_dateutil-2.9.0.20250516-py3-none-any.whl", hash = "sha256:2b2b3f57f9c6a61fba26a9c0ffb9ea5681c9b83e69cd897c6b5f668d9c0cab93"},
     {file = "types_python_dateutil-2.9.0.20250516.tar.gz", hash = "sha256:13e80d6c9c47df23ad773d54b2826bd52dbbb41be87c3f339381c1700ad21ee5"},
@@ -2569,6 +2726,7 @@ version = "6.0.12.20250516"
 description = "Typing stubs for PyYAML"
 optional = false
 python-versions = ">=3.9"
+groups = ["main"]
 files = [
     {file = "types_pyyaml-6.0.12.20250516-py3-none-any.whl", hash = "sha256:8478208feaeb53a34cb5d970c56a7cd76b72659442e733e268a94dc72b2d0530"},
     {file = "types_pyyaml-6.0.12.20250516.tar.gz", hash = "sha256:9f21a70216fc0fa1b216a8176db5f9e0af6eb35d2f2932acb87689d03a5bf6ba"},
@@ -2580,6 +2738,7 @@ version = "2.32.4.20250611"
 description = "Typing stubs for requests"
 optional = false
 python-versions = ">=3.9"
+groups = ["lint"]
 files = [
     {file = "types_requests-2.32.4.20250611-py3-none-any.whl", hash = "sha256:ad2fe5d3b0cb3c2c902c8815a70e7fb2302c4b8c1f77bdcd738192cdb3878072"},
     {file = "types_requests-2.32.4.20250611.tar.gz", hash = "sha256:741c8777ed6425830bf51e54d6abe245f79b4dcb9019f1622b773463946bf826"},
@@ -2594,6 +2753,7 @@ version = "4.14.0"
 description = "Backported and Experimental Type Hints for Python 3.9+"
 optional = false
 python-versions = ">=3.9"
+groups = ["main", "docs", "lint", "test"]
 files = [
     {file = "typing_extensions-4.14.0-py3-none-any.whl", hash = "sha256:a1514509136dd0b477638fc68d6a91497af5076466ad0fa6c338e44e359944af"},
     {file = "typing_extensions-4.14.0.tar.gz", hash = "sha256:8676b788e32f02ab42d9e7c61324048ae4c6d844a399eebace3d4979d75ceef4"},
@@ -2605,6 +2765,7 @@ version = "0.4.1"
 description = "Runtime typing introspection tools"
 optional = false
 python-versions = ">=3.9"
+groups = ["main", "docs"]
 files = [
     {file = "typing_inspection-0.4.1-py3-none-any.whl", hash = "sha256:389055682238f53b04f7badcb49b989835495a96700ced5dab2d8feae4b26f51"},
     {file = "typing_inspection-0.4.1.tar.gz", hash = "sha256:6ae134cc0203c33377d43188d4064e9b357dba58cff3185f22924610e70a9d28"},
@@ -2619,6 +2780,8 @@ version = "2025.2"
 description = "Provider of IANA time zone data"
 optional = false
 python-versions = ">=2"
+groups = ["test"]
+markers = "sys_platform == \"win32\""
 files = [
     {file = "tzdata-2025.2-py2.py3-none-any.whl", hash = "sha256:1a403fada01ff9221ca8044d701868fa132215d84beb92242d9acd2147f667a8"},
     {file = "tzdata-2025.2.tar.gz", hash = "sha256:b60a638fcc0daffadf82fe0f57e53d06bdec2f36c4df66280ae79bce6bd6f2b9"},
@@ -2630,6 +2793,7 @@ version = "2.4.0"
 description = "HTTP library with thread-safe connection pooling, file post, and more."
 optional = false
 python-versions = ">=3.9"
+groups = ["main", "docs", "lint"]
 files = [
     {file = "urllib3-2.4.0-py3-none-any.whl", hash = "sha256:4e16665048960a0900c702d4a66415956a584919c03361cac9f1df5c5dd7e813"},
     {file = "urllib3-2.4.0.tar.gz", hash = "sha256:414bc6535b787febd7567804cc015fee39daab8ad86268f1310a9250697de466"},
@@ -2647,6 +2811,7 @@ version = "6.0.0"
 description = "Filesystem events monitoring"
 optional = false
 python-versions = ">=3.9"
+groups = ["docs"]
 files = [
     {file = "watchdog-6.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d1cdb490583ebd691c012b3d6dae011000fe42edb7a82ece80965b42abd61f26"},
     {file = "watchdog-6.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bc64ab3bdb6a04d69d4023b29422170b74681784ffb9463ed4870cf2f3e66112"},
@@ -2689,6 +2854,7 @@ version = "1.17.2"
 description = "Module for decorators, wrappers and monkey patching."
 optional = false
 python-versions = ">=3.8"
+groups = ["main", "docs"]
 files = [
     {file = "wrapt-1.17.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3d57c572081fed831ad2d26fd430d565b76aa277ed1d30ff4d40670b1c0dd984"},
     {file = "wrapt-1.17.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b5e251054542ae57ac7f3fba5d10bfff615b6c2fb09abeb37d2f1463f841ae22"},
@@ -2770,6 +2936,7 @@ files = [
     {file = "wrapt-1.17.2-py3-none-any.whl", hash = "sha256:b18f2d1533a71f069c7f82d524a52599053d4c7166e9dd374ae2136b7f40f7c8"},
     {file = "wrapt-1.17.2.tar.gz", hash = "sha256:41388e9d4d1522446fe79d3213196bd9e3b301a336965b9e27ca2788ebd122f3"},
 ]
+markers = {main = "extra == \"otel\""}
 
 [[package]]
 name = "yarl"
@@ -2777,6 +2944,7 @@ version = "1.20.1"
 description = "Yet another URL library"
 optional = false
 python-versions = ">=3.9"
+groups = ["main"]
 files = [
     {file = "yarl-1.20.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:6032e6da6abd41e4acda34d75a816012717000fa6839f37124a47fcefc49bec4"},
     {file = "yarl-1.20.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2c7b34d804b8cf9b214f05015c4fee2ebe7ed05cf581e7192c06555c71f4446a"},
@@ -2895,10 +3063,12 @@ version = "3.23.0"
 description = "Backport of pathlib-compatible object wrapper for zip files"
 optional = false
 python-versions = ">=3.9"
+groups = ["main", "docs"]
 files = [
     {file = "zipp-3.23.0-py3-none-any.whl", hash = "sha256:071652d6115ed432f5ce1d34c336c0adfd6a884660d1e9712a256d3d3bd4b14e"},
     {file = "zipp-3.23.0.tar.gz", hash = "sha256:a07157588a12518c9d4034df3fbbee09c814741a33ff63c05fa29d26a2404166"},
 ]
+markers = {main = "extra == \"otel\""}
 
 [package.extras]
 check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"]
@@ -2912,6 +3082,6 @@ type = ["pytest-mypy"]
 otel = ["opentelemetry-api", "opentelemetry-distro", "opentelemetry-exporter-otlp", "opentelemetry-exporter-otlp-proto-http", "opentelemetry-instrumentation", "opentelemetry-sdk"]
 
 [metadata]
-lock-version = "2.0"
+lock-version = "2.1"
 python-versions = "^3.10"
-content-hash = "749734ef3c074760e407d02cc0262316ba1b508036fdab5f53353fc54a30d8d9"
+content-hash = "831588fde18dba39cf49e84ab47d7ec616702a8591984cba854ef8e4bcb00d3b"
diff --git a/sdks/python/pyproject.toml b/sdks/python/pyproject.toml
index 27ac2a1b0..506a73837 100644
--- a/sdks/python/pyproject.toml
+++ b/sdks/python/pyproject.toml
@@ -52,6 +52,7 @@ pytest-asyncio = "^0.25.3"
 pytest-env = "^1.1.5"
 pytest-retry = "^1.7.0"
 psycopg = { extras = ["pool"], version = "^3.2.6" }
+pytest-xdist = "^3.7.0"
 
 
 [tool.poetry.group.docs.dependencies]
@@ -62,7 +63,6 @@ markdownify = "^1.1.0"
 openai = "^1.75.0"
 bs4 = "^0.0.2"
 types-beautifulsoup4 = "^4.12.0.20250204"
-langfuse = "3.0.0b3"
 
 [tool.poetry.extras]
 otel = [
@@ -137,6 +137,78 @@ exclude = [
 ]
 
 target-version = "py310"
+[tool.ruff.lint]
+select = [
+    # Pyflakes
+    "F",
+    # pycodestyle errors
+    "E",
+    # pycodestyle warnings
+    "W",
+    # isort
+    "I",
+    # pep8-naming
+    "N",
+    # pyupgrade (modern Python idioms)
+    "UP",
+    # flake8-async (async/await)
+    "ASYNC",
+    # flake8-bugbear (likely bugs)
+    "B",
+    # flake8-simplify (code simplification)
+    "SIM",
+    # flake8-comprehensions (list/dict comprehensions)
+    "C4",
+    # flake8-pie (misc improvements)
+    "PIE",
+    # flake8-return (return statement improvements)
+    "RET",
+    # flake8-unused-arguments
+    "ARG",
+    # flake8-logging-format
+    "G",
+    # Ruff-specific rules
+    "RUF",
+    # Datetime (timezone) rules
+    "DTZ",
+    "FIX",
+    ## Performance-related rules
+    "PERF",
+]
+
+ignore = [
+    # Allow long lines
+    "E501",
+    # Allow complex variable names
+    "N806",
+    # Don't enforce specific exception types initially
+    "B008",
+    # Allow unused function arguments
+    "ARG001",
+    "ARG002",
+    # Allow empty returns
+    "RET503",
+
+    ## FIXME (ironic) - re-enable this later
+    "FIX",
+
+    "RET506",
+]
+exclude = [
+    "examples/*",
+    "docs/*",
+    "hatchet_sdk/clients/rest/api/*",
+    "hatchet_sdk/clients/rest/models/*",
+    "hatchet_sdk/contracts",
+    "hatchet_sdk/clients/rest/api_client.py",
+    "hatchet_sdk/clients/rest/configuration.py",
+    "hatchet_sdk/clients/rest/exceptions.py",
+    "hatchet_sdk/clients/rest/rest.py",
+    "hatchet_sdk/v0/*",
+    "site/*",
+    "tests/*",
+]
+
 
 [tool.poetry.scripts]
 api = "examples.api.api:main"
diff --git a/sdks/python/tests/test_rest_api.py b/sdks/python/tests/test_rest_api.py
index 607e1cc76..b9dfd9187 100644
--- a/sdks/python/tests/test_rest_api.py
+++ b/sdks/python/tests/test_rest_api.py
@@ -35,20 +35,27 @@ async def test_get_run(hatchet: Hatchet) -> None:
 
 @pytest.mark.asyncio(loop_scope="session")
 async def test_list_workflows(hatchet: Hatchet) -> None:
-    workflows = await hatchet.workflows.aio_list(
-        workflow_name=dag_workflow.config.name, limit=1, offset=0
-    )
+    workflows = await hatchet.workflows.aio_list(workflow_name=dag_workflow.config.name)
 
     assert workflows.rows
-    assert len(workflows.rows) == 1
+    assert len(workflows.rows) >= 1
 
-    workflow = workflows.rows[0]
+    relevant_wf = next(
+        iter(
+            [
+                wf
+                for wf in workflows.rows
+                if wf.name == hatchet.config.apply_namespace(dag_workflow.config.name)
+            ]
+        ),
+        None,
+    )
 
-    """Using endswith because of namespacing in CI"""
-    assert workflow.name.endswith(dag_workflow.config.name)
+    assert relevant_wf is not None
 
-    fetched_workflow = await hatchet.workflows.aio_get(workflow.metadata.id)
+    fetched_workflow = await hatchet.workflows.aio_get(relevant_wf.metadata.id)
 
-    """Using endswith because of namespacing in CI"""
-    assert fetched_workflow.name.endswith(dag_workflow.config.name)
-    assert fetched_workflow.metadata.id == workflow.metadata.id
+    assert fetched_workflow.name == hatchet.config.apply_namespace(
+        dag_workflow.config.name
+    )
+    assert fetched_workflow.metadata.id == relevant_wf.metadata.id
diff --git a/sdks/python/tests/worker_fixture.py b/sdks/python/tests/worker_fixture.py
index cf94ed706..744de2593 100644
--- a/sdks/python/tests/worker_fixture.py
+++ b/sdks/python/tests/worker_fixture.py
@@ -2,10 +2,10 @@ import logging
 import os
 import subprocess
 import time
+from collections.abc import Callable, Generator
 from contextlib import contextmanager
 from io import BytesIO
 from threading import Thread
-from typing import Callable, Generator
 
 import psutil
 import requests
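A brief illustrative sketch (not part of the diff above; the function names are hypothetical): the rule families newly selected in [tool.ruff.lint] (UP, DTZ, C4/SIM/PERF, and friends) generally push code toward idioms like these, and the worker_fixture.py import move from typing to collections.abc is exactly the kind of change UP035 asks for.

# Illustrative only -- idioms the newly selected ruff rules favour; none of this code is in the diff.
from collections.abc import Callable, Generator  # UP035: prefer collections.abc over typing
from datetime import datetime, timezone


def timestamp(clock: Callable[[], datetime] | None = None) -> str:
    # DTZ005: build timezone-aware datetimes instead of naive datetime.now()
    now = clock() if clock else datetime.now(timezone.utc)
    return now.isoformat()


def squares(limit: int) -> Generator[int, None, None]:
    # C4 / SIM / PERF: prefer comprehensions and generators over hand-rolled loops
    yield from (i * i for i in range(limit))

Separately, pytest-xdist joining the test group suggests the suite is meant to run under pytest's -n option (for example -n auto), which would explain why test_list_workflows now filters for its own namespaced workflow rather than assuming exactly one row comes back.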