[Python] Refactor: v2.0.0 Prep (#3165)

* refactor: overloads for run methods, deprecate _no_wait flavors

* refactor: same thing for run_many flavors

* fix: use gather_max_concurrency for gathering run results

* refactor: deprecate a bunch of stuff on the context and core hatchet client

* refactor: runs client deprecations

* refactor: add deprecation warning to go duration string durations

* refactor: durable tasks must be async

* chore: changelog

* fix: copilot comments

* fix: couple more

* chore: rm `debug=True` from all the examples

* chore: more debug params

* fix: more deprecations

* fix: more warnings

* fix: non-utc timezones

* chore: deprecate more internal stuff

* fix: a bunch more internal-only stuff, remove non-v2 listener logic

* fix: test

* chore: make a bunch more things internal

* feat: priority enum

* refactor: top-level `types` directory

* refactor: start reworking labels

* fix: some type checker issues

* fix: rm transform method in favor of instance method

* fix: internal worker label types

* fix: more types

* refactor: finish labels

* fix: labels

* chore: gen

* fix: rm internal glue pydantic model

* fix: removed `owned_loop`, register workflows on worker start instead of init

* fix: deprecate ctx getter in favor of property

* refactor: more label cleanup, prepare to remove worker context

* fix: more deprecations

* refactor: get rid of pydantic in a few places where we don't need validation

* refactor: plan to remove `BulkPushEventOptions`

* chore: changelog

* chore: changelog

* refactor: trigger types

* fix: pydantic model default

* fix: instrumentor types

* refactor: add `seen_at` to event

* refactor: remove some more protobuf types

* fix: rm unneeded ts_to_iso

* refactor: clean up more examples

* fix: more warnings

* chore: gen

* chore: more warnings

* fix: one more

* fix: warning, namespace

* fix: linters

* fix: double import

* fix: ugh, cursor

* fix: clean up a bunch of suboptimal tests

* fix: overload signatures

* chore: gen

* chore: revert opts change

* chore: one more revert

* feat: start reworking option passing to remove pydantic models

* refactor: worker opt

* fix: type cleanup

* refactor: keep working out signature details

* fix: changelog

* fix: deprecate some streaming methods

* fix: linters

* fix: rebase

* chore: rm some unused stuff

* chore: rm more unused stuff

* fix: rm more uses of `options`

* fix: more deprecation warnings

* fix: instrumentor wrapping

* fix: add test for instrumentor signature

* chore: deprecate upsert labels on the worker context thingy

* fix: deprecate more stuff on the worker context

* feat: add `worker_labels_dict` property

* fix: label types for workers

* chore: update changelog

* fix: version

* refactor: durable_eviction -> eviction_policy

* fix: lint

* fix: instrumentor not passing options properly

* fix: un-remove

* fix: priority

* chore: version

* fix: improve warning log
This commit is contained in:
matt
2026-04-03 16:30:56 -04:00
committed by GitHub
parent 6c832c614f
commit a6650ab84c
263 changed files with 3411 additions and 2315 deletions
+1 -4
View File
@@ -1,6 +1,3 @@
from examples.affinity_workers.worker import affinity_worker_workflow
from hatchet_sdk import TriggerWorkflowOptions
affinity_worker_workflow.run(
options=TriggerWorkflowOptions(additional_metadata={"hello": "moon"}),
)
affinity_worker_workflow.run(additional_metadata={"hello": "moon"})
+9 -8
View File
@@ -1,7 +1,7 @@
from hatchet_sdk import Context, EmptyModel, Hatchet, WorkerLabelComparator
from hatchet_sdk import Context, EmptyModel, Hatchet, WorkerLabel, WorkerLabelComparator
from hatchet_sdk.labels import DesiredWorkerLabel
hatchet = Hatchet(debug=True)
hatchet = Hatchet()
# > AffinityWorkflow
@@ -10,26 +10,27 @@ affinity_worker_workflow = hatchet.workflow(name="AffinityWorkflow")
@affinity_worker_workflow.task(
desired_worker_labels={
"model": DesiredWorkerLabel(value="fancy-ai-model-v2", weight=10),
"memory": DesiredWorkerLabel(
desired_worker_labels=[
DesiredWorkerLabel(key="model", value="fancy-ai-model-v2", weight=10),
DesiredWorkerLabel(
key="memory",
value=256,
required=True,
comparator=WorkerLabelComparator.LESS_THAN,
),
},
],
)
# > AffinityTask
async def step(input: EmptyModel, ctx: Context) -> dict[str, str | None]:
if ctx.worker.labels().get("model") != "fancy-ai-model-v2":
if ctx.worker_labels.get("model") != "fancy-ai-model-v2":
ctx.worker.upsert_labels({"model": "unset"})
# DO WORK TO EVICT OLD MODEL / LOAD NEW MODEL
ctx.worker.upsert_labels({"model": "fancy-ai-model-v2"})
return {"worker": ctx.worker.id()}
return {"worker": ctx.worker_id}
+1 -1
View File
@@ -1,6 +1,6 @@
from hatchet_sdk import Hatchet
hatchet = Hatchet(debug=True)
hatchet = Hatchet()
def main() -> None:
+1 -1
View File
@@ -2,7 +2,7 @@ import asyncio
from hatchet_sdk import Hatchet
hatchet = Hatchet(debug=True)
hatchet = Hatchet()
async def main() -> None:
@@ -7,14 +7,14 @@ from examples.blocked_async.blocking_example_worker import (
non_blocking_sync,
)
non_blocking_sync.run_no_wait()
non_blocking_async.run_no_wait()
non_blocking_sync.run(wait_for_result=False)
non_blocking_async.run(wait_for_result=False)
time.sleep(1)
blocking.run_no_wait()
blocking.run(wait_for_result=False)
time.sleep(1)
non_blocking_sync.run_no_wait()
non_blocking_sync.run(wait_for_result=False)
+1 -4
View File
@@ -1,6 +1,3 @@
from examples.blocked_async.worker import blocked_worker_workflow
from hatchet_sdk import TriggerWorkflowOptions
blocked_worker_workflow.run(
options=TriggerWorkflowOptions(additional_metadata={"hello": "moon"}),
)
blocked_worker_workflow.run(additional_metadata={"hello": "moon"})
+1 -1
View File
@@ -4,7 +4,7 @@ from datetime import timedelta
from hatchet_sdk import Context, EmptyModel, Hatchet
hatchet = Hatchet(debug=True)
hatchet = Hatchet()
# WARNING: this is an example of what NOT to do
# This workflow is intentionally blocking the main thread
@@ -11,7 +11,7 @@ class StepOutput(BaseModel):
should_cancel: bool
hatchet = Hatchet(debug=True)
hatchet = Hatchet()
payload_initial_cancel_bug_workflow = hatchet.workflow(
name="payload-initial-cancel-test",
+4 -7
View File
@@ -2,7 +2,6 @@ import asyncio
from examples.bulk_fanout.worker import ParentInput, bulk_parent_wf
from hatchet_sdk import Hatchet
from hatchet_sdk.clients.admin import TriggerWorkflowOptions
hatchet = Hatchet()
@@ -12,12 +11,10 @@ async def main() -> None:
workflows=[
bulk_parent_wf.create_bulk_run_item(
input=ParentInput(n=i),
options=TriggerWorkflowOptions(
additional_metadata={
"bulk-trigger": i,
"hello-{i}": "earth-{i}",
}
),
additional_metadata={
"bulk-trigger": i,
"hello-{i}": "earth-{i}",
},
)
for i in range(20)
],
+1 -2
View File
@@ -3,7 +3,6 @@ import random
from examples.bulk_fanout.worker import ParentInput, bulk_parent_wf
from hatchet_sdk import Hatchet
from hatchet_sdk.clients.admin import TriggerWorkflowOptions
async def main() -> None:
@@ -22,7 +21,7 @@ async def main() -> None:
# and can have an arbitrary property name.
bulk_parent_wf.run(
input=ParentInput(n=2),
options=TriggerWorkflowOptions(additional_metadata={streamKey: streamVal}),
additional_metadata={streamKey: streamVal},
)
# Stream all events for the additional meta key value
+1 -2
View File
@@ -1,7 +1,6 @@
from examples.bulk_fanout.worker import ParentInput, bulk_parent_wf
from hatchet_sdk import TriggerWorkflowOptions
bulk_parent_wf.run(
ParentInput(n=999),
TriggerWorkflowOptions(additional_metadata={"no-dedupe": "world"}),
additional_metadata={"no-dedupe": "world"},
)
+2 -3
View File
@@ -4,9 +4,8 @@ from typing import Any
from pydantic import BaseModel
from hatchet_sdk import Context, Hatchet
from hatchet_sdk.clients.admin import TriggerWorkflowOptions
hatchet = Hatchet(debug=True)
hatchet = Hatchet()
class ParentInput(BaseModel):
@@ -29,7 +28,7 @@ async def spawn(input: ParentInput, ctx: Context) -> dict[str, list[dict[str, An
bulk_child_wf.create_bulk_run_item(
input=ChildInput(a=str(i)),
key=f"child{i}",
options=TriggerWorkflowOptions(additional_metadata={"hello": "earth"}),
additional_metadata={"hello": "earth"},
)
for i in range(input.n)
]
+1 -1
View File
@@ -1,6 +1,6 @@
from hatchet_sdk import Context, EmptyModel, Hatchet
hatchet = Hatchet(debug=True)
hatchet = Hatchet()
@hatchet.task()
+1 -1
View File
@@ -2,7 +2,7 @@ import time
from examples.cancellation.worker import cancellation_workflow, hatchet
id = cancellation_workflow.run_no_wait()
id = cancellation_workflow.run(wait_for_result=False)
time.sleep(5)
+1 -1
View File
@@ -3,7 +3,7 @@ import time
from hatchet_sdk import Context, EmptyModel, Hatchet
hatchet = Hatchet(debug=True)
hatchet = Hatchet()
cancellation_workflow = hatchet.workflow(name="CancelWorkflow")
+1 -1
View File
@@ -5,7 +5,7 @@ from hatchet_sdk.context.context import Context
from hatchet_sdk.hatchet import Hatchet
from hatchet_sdk.runnables.types import EmptyModel
hatchet = Hatchet(debug=True)
hatchet = Hatchet()
# > Running a Task from within a Task
+1 -1
View File
@@ -4,7 +4,7 @@ from pydantic import BaseModel
from hatchet_sdk import Context, Hatchet
hatchet = Hatchet(debug=True)
hatchet = Hatchet()
class SimpleInput(BaseModel):
@@ -9,7 +9,7 @@ from hatchet_sdk import (
Hatchet,
)
hatchet = Hatchet(debug=True)
hatchet = Hatchet()
class WorkflowInput(BaseModel):
@@ -9,7 +9,7 @@ from hatchet_sdk import (
Hatchet,
)
hatchet = Hatchet(debug=True)
hatchet = Hatchet()
class WorkflowInput(BaseModel):
+1 -1
View File
@@ -10,7 +10,7 @@ from hatchet_sdk import (
Hatchet,
)
hatchet = Hatchet(debug=True)
hatchet = Hatchet()
# > Workflow
@@ -9,7 +9,7 @@ from hatchet_sdk import (
Hatchet,
)
hatchet = Hatchet(debug=True)
hatchet = Hatchet()
# > Concurrency Strategy With Key
@@ -2,7 +2,7 @@ import random
from hatchet_sdk import Hatchet
hatchet = Hatchet(debug=True)
hatchet = Hatchet()
# Create a list of events with desired distribution
events = ["1"] * 10000 + ["0"] * 100
@@ -10,7 +10,7 @@ from hatchet_sdk import (
Hatchet,
)
hatchet = Hatchet(debug=True)
hatchet = Hatchet()
class LoadRRInput(BaseModel):
@@ -9,7 +9,7 @@ from hatchet_sdk import (
Hatchet,
)
hatchet = Hatchet(debug=True)
hatchet = Hatchet()
SLEEP_TIME = 2
DIGIT_MAX_RUNS = 8
@@ -9,7 +9,7 @@ from hatchet_sdk import (
Hatchet,
)
hatchet = Hatchet(debug=True)
hatchet = Hatchet()
SLEEP_TIME = 2
DIGIT_MAX_RUNS = 8
+1 -1
View File
@@ -2,7 +2,7 @@ import time
from examples.conditions.worker import hatchet, task_condition_workflow
task_condition_workflow.run_no_wait()
task_condition_workflow.run(wait_for_result=False)
time.sleep(5)
+1 -1
View File
@@ -15,7 +15,7 @@ from hatchet_sdk import (
or_,
)
hatchet = Hatchet(debug=True)
hatchet = Hatchet()
class StepOutput(BaseModel):
+1 -1
View File
@@ -1,6 +1,6 @@
from hatchet_sdk import Context, EmptyModel, Hatchet
hatchet = Hatchet(debug=True)
hatchet = Hatchet()
# > Workflow Definition Cron Trigger
+1 -1
View File
@@ -15,7 +15,7 @@ class RandomSum(BaseModel):
sum: int
hatchet = Hatchet(debug=True)
hatchet = Hatchet()
# > Define a DAG
dag_workflow = hatchet.workflow(name="DAGWorkflow")
+1 -1
View File
@@ -17,7 +17,7 @@ class Output:
hatchet = Hatchet(debug=True)
hatchet = Hatchet()
# > Task using dataclasses
+3 -5
View File
@@ -2,10 +2,10 @@ import asyncio
from datetime import timedelta
from typing import Any
from hatchet_sdk import Context, EmptyModel, Hatchet, TriggerWorkflowOptions
from hatchet_sdk import Context, EmptyModel, Hatchet
from hatchet_sdk.exceptions import DedupeViolationError
hatchet = Hatchet(debug=True)
hatchet = Hatchet()
dedupe_parent_wf = hatchet.workflow(name="DedupeParent")
dedupe_child_wf = hatchet.workflow(name="DedupeChild")
@@ -21,9 +21,7 @@ async def spawn(input: EmptyModel, ctx: Context) -> dict[str, list[Any]]:
try:
results.append(
dedupe_child_wf.aio_run(
options=TriggerWorkflowOptions(
additional_metadata={"dedupe": "test"}, key=f"child{i}"
),
additional_metadata={"dedupe": "test"},
)
)
except DedupeViolationError as e:
+1 -1
View File
@@ -4,7 +4,7 @@ from pydantic import BaseModel
from hatchet_sdk import Context, Hatchet
hatchet = Hatchet(debug=True)
hatchet = Hatchet()
class PrinterInput(BaseModel):
@@ -6,7 +6,7 @@ from pydantic import BaseModel
from hatchet_sdk import Context, Depends, DurableContext, EmptyModel, Hatchet
hatchet = Hatchet(debug=False)
hatchet = Hatchet()
SYNC_DEPENDENCY_VALUE = "sync_dependency_value"
ASYNC_DEPENDENCY_VALUE = "async_dependency_value"
+1 -1
View File
@@ -7,7 +7,7 @@ class Output(BaseModel):
message: str
hatchet = Hatchet(debug=True)
hatchet = Hatchet()
@hatchet.task(input_validator=dict)
+2 -2
View File
@@ -9,8 +9,8 @@ from examples.durable.worker import (
AwaitedEvent,
)
durable_workflow.run_no_wait()
ephemeral_workflow.run_no_wait()
durable_workflow.run(wait_for_result=False)
ephemeral_workflow.run(wait_for_result=False)
print("Sleeping")
time.sleep(SLEEP_TIME + 2)
+1 -1
View File
@@ -17,7 +17,7 @@ from hatchet_sdk import (
)
from hatchet_sdk.exceptions import NonDeterminismError
hatchet = Hatchet(debug=True)
hatchet = Hatchet()
dag_child_workflow = hatchet.workflow(name="dag-child-workflow")
+2 -2
View File
@@ -7,8 +7,8 @@ from examples.durable_event.worker import (
hatchet,
)
durable_event_task.run_no_wait()
durable_event_task_with_filter.run_no_wait()
durable_event_task.run(wait_for_result=False)
durable_event_task_with_filter.run(wait_for_result=False)
print("Sleeping")
time.sleep(2)
+1 -1
View File
@@ -1,6 +1,6 @@
from hatchet_sdk import DurableContext, EmptyModel, Hatchet, UserEventCondition
hatchet = Hatchet(debug=True)
hatchet = Hatchet()
EVENT_KEY = "user:update"
+1 -1
View File
@@ -1,4 +1,4 @@
from examples.durable_eviction.worker import evictable_sleep
ref = evictable_sleep.run_no_wait()
ref = evictable_sleep.run(wait_for_result=False)
print(f"Triggered evictable_sleep: workflow_run_id={ref.workflow_run_id}")
+1 -1
View File
@@ -8,7 +8,7 @@ from hatchet_sdk import Context, DurableContext, EmptyModel, Hatchet, UserEventC
from hatchet_sdk.runnables.eviction import EvictionPolicy
from pydantic import BaseModel
hatchet = Hatchet(debug=True)
hatchet = Hatchet()
EVICTION_TTL_SECONDS = 5
+1 -1
View File
@@ -1,3 +1,3 @@
from examples.durable_sleep.worker import durable_sleep_task
durable_sleep_task.run_no_wait()
durable_sleep_task.run(wait_for_result=False)
+1 -1
View File
@@ -2,7 +2,7 @@ from datetime import timedelta
from hatchet_sdk import DurableContext, EmptyModel, Hatchet
hatchet = Hatchet(debug=True)
hatchet = Hatchet()
# > Durable Sleep
+2 -4
View File
@@ -1,4 +1,4 @@
from hatchet_sdk import Hatchet, PushEventOptions
from hatchet_sdk import Hatchet
from hatchet_sdk.clients.events import BulkPushEventWithMetadata
hatchet = Hatchet()
@@ -10,9 +10,7 @@ hatchet.event.push("user:create", {"should_skip": False})
hatchet.event.push(
"user:create",
{"userId": "1234", "should_skip": False},
options=PushEventOptions(
additional_metadata={"source": "api"} # Arbitrary key-value pair
),
additional_metadata={"source": "api"}, # Arbitrary key-value pair
)
# > Bulk event push
+3 -7
View File
@@ -1,5 +1,5 @@
from examples.events.worker import EVENT_KEY, event_workflow
from hatchet_sdk import Hatchet, PushEventOptions
from hatchet_sdk import Hatchet
hatchet = Hatchet()
@@ -21,9 +21,7 @@ hatchet.event.push(
payload={
"should_skip": True,
},
options=PushEventOptions(
scope="foobarbaz",
),
scope="foobarbaz",
)
# > Trigger a run
@@ -32,7 +30,5 @@ hatchet.event.push(
payload={
"should_skip": False,
},
options=PushEventOptions(
scope="foobarbaz",
),
scope="foobarbaz",
)
+1 -2
View File
@@ -3,7 +3,6 @@ import random
from examples.fanout.worker import ParentInput, parent_wf
from hatchet_sdk import Hatchet
from hatchet_sdk.clients.admin import TriggerWorkflowOptions
async def main() -> None:
@@ -24,7 +23,7 @@ async def main() -> None:
parent_wf.run(
ParentInput(n=2),
options=TriggerWorkflowOptions(additional_metadata={streamKey: streamVal}),
additional_metadata={streamKey: streamVal},
)
# Stream all events for the additional meta key value
+1 -2
View File
@@ -2,7 +2,6 @@ import random
from examples.fanout.worker import ParentInput, parent_wf
from hatchet_sdk import Hatchet
from hatchet_sdk.clients.admin import TriggerWorkflowOptions
def main() -> None:
@@ -23,7 +22,7 @@ def main() -> None:
parent_wf.run(
ParentInput(n=2),
options=TriggerWorkflowOptions(additional_metadata={streamKey: streamVal}),
additional_metadata={streamKey: streamVal},
)
# Stream all events for the additional meta key value
+1 -2
View File
@@ -3,7 +3,6 @@ from typing import Any
from examples.fanout.worker import ChildInput, ParentInput, child_wf, parent_wf
from hatchet_sdk import Hatchet
from hatchet_sdk.clients.admin import TriggerWorkflowOptions
hatchet = Hatchet()
@@ -11,7 +10,7 @@ hatchet = Hatchet()
async def main() -> None:
await parent_wf.aio_run(
ParentInput(n=2),
options=TriggerWorkflowOptions(additional_metadata={"hello": "moon"}),
additional_metadata={"hello": "moon"},
)
+4 -5
View File
@@ -3,9 +3,9 @@ from typing import Any
from pydantic import BaseModel
from hatchet_sdk import Context, Hatchet, TriggerWorkflowOptions
from hatchet_sdk import Context, Hatchet
hatchet = Hatchet(debug=True)
hatchet = Hatchet()
# > FanoutParent
@@ -29,9 +29,8 @@ async def spawn(input: ParentInput, ctx: Context) -> dict[str, Any]:
[
child_wf.create_bulk_run_item(
input=ChildInput(a=str(i)),
options=TriggerWorkflowOptions(
additional_metadata={"hello": "earth"}, key=f"child{i}"
),
additional_metadata={"hello": "earth"},
key=f"child{i}",
)
for i in range(input.n)
],
+2 -2
View File
@@ -1,7 +1,7 @@
import asyncio
from examples.fanout_sync.worker import ParentInput, sync_fanout_parent
from hatchet_sdk import Hatchet, TriggerWorkflowOptions
from hatchet_sdk import Hatchet
hatchet = Hatchet()
@@ -9,7 +9,7 @@ hatchet = Hatchet()
async def main() -> None:
sync_fanout_parent.run(
ParentInput(n=2),
options=TriggerWorkflowOptions(additional_metadata={"hello": "moon"}),
additional_metadata={"hello": "moon"},
)
+3 -3
View File
@@ -3,9 +3,9 @@ from typing import Any
from pydantic import BaseModel
from hatchet_sdk import Context, Hatchet, TriggerWorkflowOptions
from hatchet_sdk import Context, Hatchet
hatchet = Hatchet(debug=True)
hatchet = Hatchet()
class ParentInput(BaseModel):
@@ -31,7 +31,7 @@ def spawn(input: ParentInput, ctx: Context) -> dict[str, list[dict[str, Any]]]:
sync_fanout_child.create_bulk_run_item(
input=ChildInput(a=str(i)),
key=f"child{i}",
options=TriggerWorkflowOptions(additional_metadata={"hello": "earth"}),
additional_metadata={"hello": "earth"},
)
for i in range(input.n)
],
+3 -2
View File
@@ -84,8 +84,9 @@ async def post__create_user__hatchet() -> User:
async with Session() as db:
user = await create_user(db)
await send_welcome_email_task_hatchet.aio_run_no_wait(
WelcomeEmailInput(user_id=user.id)
await send_welcome_email_task_hatchet.aio_run(
WelcomeEmailInput(user_id=user.id),
wait_for_result=False,
)
return user
+1 -1
View File
@@ -7,7 +7,7 @@ from pydantic import BaseModel
from hatchet_sdk import Context, EmptyModel, Hatchet
hatchet = Hatchet(debug=True)
hatchet = Hatchet()
class Lifespan(BaseModel):
+1 -1
View File
@@ -7,7 +7,7 @@ from pydantic import BaseModel, ConfigDict
from hatchet_sdk import Context, EmptyModel, Hatchet
hatchet = Hatchet(debug=True)
hatchet = Hatchet()
# > Use the lifespan in a task
-1
View File
@@ -10,7 +10,6 @@ logging.basicConfig(level=logging.INFO)
root_logger = logging.getLogger()
hatchet = Hatchet(
debug=True,
config=ClientConfig(
logger=root_logger,
),
+1 -1
View File
@@ -1,3 +1,3 @@
from examples.non_retryable.worker import non_retryable_workflow
non_retryable_workflow.run_no_wait()
non_retryable_workflow.run(wait_for_result=False)
+1 -1
View File
@@ -1,7 +1,7 @@
from hatchet_sdk import Context, EmptyModel, Hatchet
from hatchet_sdk.exceptions import NonRetryableException
hatchet = Hatchet(debug=True)
hatchet = Hatchet()
non_retryable_workflow = hatchet.workflow(name="NonRetryableWorkflow")
+1 -1
View File
@@ -1,3 +1,3 @@
from examples.on_failure.worker import on_failure_wf_with_details
on_failure_wf_with_details.run_no_wait()
on_failure_wf_with_details.run(wait_for_result=False)
+1 -1
View File
@@ -4,7 +4,7 @@ from datetime import timedelta
from hatchet_sdk import Context, EmptyModel, Hatchet
from hatchet_sdk.exceptions import TaskRunError
hatchet = Hatchet(debug=False)
hatchet = Hatchet()
ERROR_TEXT = "step1 failed"
+1 -1
View File
@@ -1,3 +1,3 @@
from examples.on_success.worker import on_success_workflow
on_success_workflow.run_no_wait()
on_success_workflow.run(wait_for_result=False)
+1 -1
View File
@@ -1,6 +1,6 @@
from hatchet_sdk import Context, EmptyModel, Hatchet
hatchet = Hatchet(debug=True)
hatchet = Hatchet()
on_success_workflow = hatchet.workflow(name="OnSuccessWorkflow")
@@ -1,3 +1,3 @@
from hatchet_sdk import Hatchet
hatchet = Hatchet(debug=True)
hatchet = Hatchet()
@@ -1,7 +1,6 @@
from opentelemetry.trace import get_tracer
from examples.opentelemetry_instrumentation.worker import otel_workflow
from hatchet_sdk.clients.admin import TriggerWorkflowOptions
from hatchet_sdk.opentelemetry.instrumentor import HatchetInstrumentor
HatchetInstrumentor().instrument()
@@ -15,12 +14,10 @@ def main() -> None:
# so the worker-side spans become children of this trigger span.
with tracer.start_as_current_span("trigger_otel_data_pipeline"):
result = otel_workflow.run(
options=TriggerWorkflowOptions(
additional_metadata={
"source": "otel-example",
"pipeline": "data-ingest",
},
),
additional_metadata={
"source": "otel-example",
"pipeline": "data-ingest",
},
)
print(f"Workflow result: {result}")
@@ -3,8 +3,9 @@ import asyncio
from examples.opentelemetry_instrumentation.client import hatchet
from examples.opentelemetry_instrumentation.tracer import trace_provider
from examples.opentelemetry_instrumentation.worker import otel_workflow
from hatchet_sdk.clients.admin import TriggerWorkflowOptions
from hatchet_sdk.clients.events import BulkPushEventWithMetadata, PushEventOptions
from hatchet_sdk import (
BulkPushEventWithMetadata,
)
from hatchet_sdk.opentelemetry.instrumentor import HatchetInstrumentor
instrumentor = HatchetInstrumentor(tracer_provider=trace_provider)
@@ -14,17 +15,13 @@ tracer = trace_provider.get_tracer(__name__)
ADDITIONAL_METADATA = {"hello": "world"}
def create_push_options() -> PushEventOptions:
return PushEventOptions(additional_metadata=ADDITIONAL_METADATA)
def push_event() -> None:
print("\npush_event")
with tracer.start_as_current_span("push_event"):
hatchet.event.push(
"otel:event",
{"test": "test"},
options=create_push_options(),
additional_metadata=ADDITIONAL_METADATA,
)
@@ -32,7 +29,9 @@ async def async_push_event() -> None:
print("\nasync_push_event")
with tracer.start_as_current_span("async_push_event"):
await hatchet.event.aio_push(
"otel:event", {"test": "test"}, options=create_push_options()
"otel:event",
{"test": "test"},
additional_metadata=ADDITIONAL_METADATA,
)
@@ -78,7 +77,7 @@ def run_workflow() -> None:
print("\nrun_workflow")
with tracer.start_as_current_span("run_workflow"):
otel_workflow.run(
options=TriggerWorkflowOptions(additional_metadata=ADDITIONAL_METADATA),
additional_metadata=ADDITIONAL_METADATA,
)
@@ -86,7 +85,7 @@ async def async_run_workflow() -> None:
print("\nasync_run_workflow")
with tracer.start_as_current_span("async_run_workflow"):
await otel_workflow.aio_run(
options=TriggerWorkflowOptions(additional_metadata=ADDITIONAL_METADATA),
additional_metadata=ADDITIONAL_METADATA,
)
@@ -96,14 +95,10 @@ def run_workflows() -> None:
otel_workflow.run_many(
[
otel_workflow.create_bulk_run_item(
options=TriggerWorkflowOptions(
additional_metadata=ADDITIONAL_METADATA
)
additional_metadata=ADDITIONAL_METADATA,
),
otel_workflow.create_bulk_run_item(
options=TriggerWorkflowOptions(
additional_metadata=ADDITIONAL_METADATA
)
additional_metadata=ADDITIONAL_METADATA,
),
],
)
@@ -115,14 +110,10 @@ async def async_run_workflows() -> None:
await otel_workflow.aio_run_many(
[
otel_workflow.create_bulk_run_item(
options=TriggerWorkflowOptions(
additional_metadata=ADDITIONAL_METADATA
)
additional_metadata=ADDITIONAL_METADATA,
),
otel_workflow.create_bulk_run_item(
options=TriggerWorkflowOptions(
additional_metadata=ADDITIONAL_METADATA
)
additional_metadata=ADDITIONAL_METADATA,
),
],
)
+20 -14
View File
@@ -11,7 +11,13 @@ import pytest_asyncio
from pydantic import BaseModel
from examples.priority.worker import DEFAULT_PRIORITY, SLEEP_TIME, priority_workflow
from hatchet_sdk import Hatchet, ScheduleTriggerWorkflowOptions, TriggerWorkflowOptions
from hatchet_sdk import (
Hatchet,
ScheduleTriggerWorkflowOptions,
TriggerWorkflowOptions,
Priority as PriorityEnum,
ScheduleWorkflowOptions,
)
from hatchet_sdk.clients.rest.models.v1_task_status import V1TaskStatus
Priority = Literal["low", "medium", "high", "default"]
@@ -23,14 +29,14 @@ class RunPriorityStartedAt(BaseModel):
finished_at: datetime
def priority_to_int(priority: Priority) -> int:
def priority_to_enum(priority: Priority) -> PriorityEnum:
match priority:
case "high":
return 3
return PriorityEnum.HIGH
case "medium":
return 2
return PriorityEnum.MEDIUM
case "low":
return 1
return PriorityEnum.LOW
case "default":
return DEFAULT_PRIORITY
case _:
@@ -39,13 +45,13 @@ def priority_to_int(priority: Priority) -> int:
@pytest_asyncio.fixture(loop_scope="session", scope="function")
async def dummy_runs() -> None:
priority: Priority = "high"
priority = PriorityEnum.HIGH
await priority_workflow.aio_run_many_no_wait(
[
priority_workflow.create_bulk_run_item(
options=TriggerWorkflowOptions(
priority=(priority_to_int(priority)),
priority=priority,
additional_metadata={
"priority": priority,
"key": ix,
@@ -84,7 +90,7 @@ async def test_priority(
[
priority_workflow.create_bulk_run_item(
options=TriggerWorkflowOptions(
priority=(priority_to_int(priority := choice(choices))),
priority=(priority_to_enum(priority := choice(choices))),
additional_metadata={
"priority": priority,
"key": ix,
@@ -138,7 +144,7 @@ async def test_priority(
nxt = runs_ids_started_ats[i + 1]
"""Run start times should be in order of priority"""
assert priority_to_int(curr.priority) >= priority_to_int(nxt.priority)
assert priority_to_enum(curr.priority) >= priority_to_enum(nxt.priority)
"""Runs should proceed one at a time"""
assert curr.finished_at <= nxt.finished_at
@@ -172,8 +178,8 @@ async def test_priority_via_scheduling(
*[
priority_workflow.aio_schedule(
run_at=run_at,
options=ScheduleTriggerWorkflowOptions(
priority=(priority_to_int(priority := choice(choices))),
options=ScheduleWorkflowOptions(
priority=(priority_to_enum(priority := choice(choices))),
additional_metadata={
"priority": priority,
"key": ix,
@@ -235,7 +241,7 @@ async def test_priority_via_scheduling(
nxt = runs_ids_started_ats[i + 1]
"""Run start times should be in order of priority"""
assert priority_to_int(curr.priority) >= priority_to_int(nxt.priority)
assert priority_to_enum(curr.priority) >= priority_to_enum(nxt.priority)
"""Runs should proceed one at a time"""
assert curr.finished_at <= nxt.finished_at
@@ -266,7 +272,7 @@ async def crons(
"priority": (priority := choice(choices)),
"key": str(i),
},
priority=(priority_to_int(priority)),
priority=(priority_to_enum(priority)),
)
for i in range(n)
]
@@ -351,7 +357,7 @@ async def test_priority_via_cron(
nxt = runs_ids_started_ats[i + 1]
"""Run start times should be in order of priority"""
assert priority_to_int(curr.priority) >= priority_to_int(nxt.priority)
assert priority_to_enum(curr.priority) >= priority_to_enum(nxt.priority)
"""Runs should proceed one at a time"""
assert curr.finished_at <= nxt.finished_at
+24 -28
View File
@@ -1,51 +1,47 @@
from datetime import datetime, timedelta, timezone
from examples.priority.worker import priority_workflow
from hatchet_sdk import ScheduleTriggerWorkflowOptions, TriggerWorkflowOptions
from hatchet_sdk import Priority
priority_workflow.run_no_wait()
priority_workflow.run(wait_for_result=False)
# > Runtime priority
low_prio = priority_workflow.run_no_wait(
options=TriggerWorkflowOptions(
## 👀 Adding priority and key to metadata to show them in the dashboard
priority=1,
additional_metadata={"priority": "low", "key": 1},
)
low_prio = priority_workflow.run(
## 👀 Adding priority and key to metadata to show them in the dashboard
priority=Priority.LOW,
additional_metadata={"priority": "low", "key": 1},
wait_for_result=False,
)
high_prio = priority_workflow.run_no_wait(
options=TriggerWorkflowOptions(
## 👀 Adding priority and key to metadata to show them in the dashboard
priority=3,
additional_metadata={"priority": "high", "key": 1},
)
high_prio = priority_workflow.run(
## 👀 Adding priority and key to metadata to show them in the dashboard
priority=Priority.HIGH,
additional_metadata={"priority": "high", "key": 1},
wait_for_result=False,
)
# > Scheduled priority
schedule = priority_workflow.schedule(
run_at=datetime.now(tz=timezone.utc) + timedelta(minutes=1),
options=ScheduleTriggerWorkflowOptions(priority=3),
priority=Priority.HIGH,
)
cron = priority_workflow.create_cron(
cron_name="my-scheduled-cron",
expression="0 * * * *",
priority=3,
priority=Priority.HIGH,
)
# > Default priority
low_prio = priority_workflow.run_no_wait(
options=TriggerWorkflowOptions(
## 👀 Adding priority and key to metadata to show them in the dashboard
priority=1,
additional_metadata={"priority": "low", "key": 2},
)
low_prio = priority_workflow.run(
## 👀 Adding priority and key to metadata to show them in the dashboard
priority=Priority.LOW,
additional_metadata={"priority": "low", "key": 2},
wait_for_result=False,
)
high_prio = priority_workflow.run_no_wait(
options=TriggerWorkflowOptions(
## 👀 Adding priority and key to metadata to show them in the dashboard
priority=3,
additional_metadata={"priority": "high", "key": 2},
)
high_prio = priority_workflow.run(
## 👀 Adding priority and key to metadata to show them in the dashboard
priority=Priority.HIGH,
additional_metadata={"priority": "high", "key": 2},
wait_for_result=False,
)
+3 -3
View File
@@ -1,11 +1,11 @@
import time
from hatchet_sdk import Context, EmptyModel, Hatchet
from hatchet_sdk import Context, EmptyModel, Hatchet, Priority
hatchet = Hatchet(debug=True)
hatchet = Hatchet()
# > Default priority
DEFAULT_PRIORITY = 1
DEFAULT_PRIORITY = Priority.LOW
SLEEP_TIME = 0.25
priority_workflow = hatchet.workflow(
+1 -1
View File
@@ -3,7 +3,7 @@ from pydantic import BaseModel
from hatchet_sdk import Context, Hatchet
from hatchet_sdk.rate_limit import RateLimit
hatchet = Hatchet(debug=True)
hatchet = Hatchet()
class DynamicRateLimitInput(BaseModel):
+1 -1
View File
@@ -1,7 +1,7 @@
from examples.rate_limit.worker import rate_limit_workflow
from hatchet_sdk.hatchet import Hatchet
hatchet = Hatchet(debug=True)
hatchet = Hatchet()
rate_limit_workflow.run()
rate_limit_workflow.run()
+1 -1
View File
@@ -3,7 +3,7 @@ from pydantic import BaseModel
from hatchet_sdk import Context, Hatchet
from hatchet_sdk.rate_limit import RateLimit, RateLimitDuration
hatchet = Hatchet(debug=True)
hatchet = Hatchet()
# > Workflow
+1 -1
View File
@@ -1,6 +1,6 @@
from hatchet_sdk import Context, EmptyModel, Hatchet
hatchet = Hatchet(debug=True)
hatchet = Hatchet()
simple_workflow = hatchet.workflow(name="SimpleRetryWorkflow")
backoff_workflow = hatchet.workflow(name="BackoffWorkflow")
+1 -1
View File
@@ -19,7 +19,7 @@ class RandomSum(BaseModel):
sum: int
hatchet = Hatchet(debug=True)
hatchet = Hatchet()
run_detail_test_workflow = hatchet.workflow(
name="RunDetailTest", input_validator=MockInput
+2 -2
View File
@@ -1,9 +1,9 @@
import argparse
from hatchet_sdk import Context, EmptyModel, Hatchet
from hatchet_sdk import Context, EmptyModel, Hatchet, WorkerLabel
from pydantic import BaseModel
hatchet = Hatchet(debug=True)
hatchet = Hatchet()
class AffinityResult(BaseModel):
+1 -1
View File
@@ -9,7 +9,7 @@ from pydantic import BaseModel, PlainSerializer, ValidationInfo, model_validator
from hatchet_sdk import Context, EmptyModel, Hatchet
from hatchet_sdk.serde import is_in_hatchet_serialization_context
hatchet = Hatchet(debug=True)
hatchet = Hatchet()
def serializor(input_str: str, info: ValidationInfo) -> str:
-155
View File
@@ -1,155 +0,0 @@
# > Simple
import argparse
import asyncio
import signal
import threading
import time
import traceback
from typing import Any
from datetime import datetime, timezone
from pathlib import Path
from hatchet_sdk import Context, EmptyModel, Hatchet
hatchet = Hatchet(debug=True)
FAILURE_LOG = Path(__file__).parent / "failures.log"
# Track the current worker so we can clean up on Ctrl+C
_current_worker = None
_current_thread = None
# poetry run python ./simple/worker_test.py --suffix new
def log_failure(phase: str, error: Exception) -> None:
    """Log a failure loudly to the console and append it to the failures log.

    Args:
        phase: Human-readable label for the operation that failed
            (e.g. "worker connect").
        error: The exception that was raised; its traceback is included.
    """
    timestamp = datetime.now(timezone.utc).isoformat()
    tb = traceback.format_exception(type(error), error, error.__traceback__)
    tb_str = "".join(tb)
    msg = f"[{timestamp}] FAILURE during {phase}: {error}\n{tb_str}"
    # Loud console output.
    # NOTE(review): print() writes to stdout, not stderr as the original
    # comment claimed — confirm whether stderr was actually intended.
    print(f"\n{'!' * 60}", flush=True)
    print(f"!!! FAILURE: {phase} !!!", flush=True)
    print(msg, flush=True)
    print(f"{'!' * 60}\n", flush=True)
    # Append to log file; pin the encoding so the log is stable across platforms.
    with open(FAILURE_LOG, "a", encoding="utf-8") as f:
        f.write(msg)
        f.write("-" * 60 + "\n")
@hatchet.task()
def simple(input: EmptyModel, ctx: Context) -> dict[str, str]:
    """Minimal non-durable task used to exercise worker connect/trigger cycles."""
    print("Executing simple task!")
    return {"result": "Hello, world!"}
@hatchet.durable_task()
async def simple_durable(input: EmptyModel, ctx: Context) -> dict[str, str]:
    """Minimal durable task counterpart to `simple` (async, as durable tasks require)."""
    print("Executing durable task!")
    return {"result": "Hello from durable!"}
def _force_stop_worker(worker: Any, thread: threading.Thread) -> None:
    """Forcefully terminate the worker and its child processes.

    Last-resort teardown: reaches into private Worker internals
    (`_terminate_processes`, `_close_queues`), so it may break across SDK
    versions. Prefer `stop_worker`, which tries a graceful shutdown first.
    """
    worker.killing = True
    worker._terminate_processes()
    worker._close_queues()
    # Stop the worker's event loop from this (different) thread.
    if worker.loop and worker.loop.is_running():
        worker.loop.call_soon_threadsafe(worker.loop.stop)
    thread.join(timeout=5)
def start_worker(suffix: str = "") -> tuple[Any, threading.Thread]:
    """Create and start a worker in a background thread.

    Args:
        suffix: Optional suffix appended to the worker name (e.g. 'old', 'new').

    Returns:
        The worker instance and the daemon thread running it.
    """
    name = f"test-worker-{suffix}" if suffix else "test-worker"
    worker = hatchet.worker(
        name,
        workflows=[simple, simple_durable],
        slots=10,
    )
    worker.handle_kill = False  # Prevent sys.exit on shutdown
    # Restore default signal handlers so Ctrl+C raises KeyboardInterrupt
    signal.signal(signal.SIGINT, signal.default_int_handler)
    signal.signal(signal.SIGTERM, signal.SIG_DFL)
    thread = threading.Thread(target=worker.start, daemon=True)
    thread.start()
    # Give the worker a moment to initialize
    # NOTE(review): a fixed 2s sleep assumes registration completes in time —
    # confirm this is sufficient on slower environments.
    time.sleep(2)
    print("Worker connected.")
    return worker, thread
def stop_worker(worker: Any, thread: threading.Thread) -> None:
    """Stop the worker gracefully, escalating to a forced shutdown on timeout."""
    try:
        # Schedule the graceful shutdown coroutine on the worker's own loop.
        if worker.loop and worker.loop.is_running():
            asyncio.run_coroutine_threadsafe(worker.exit_gracefully(), worker.loop)
        thread.join(timeout=10)
        # The thread did not exit in time: hard-stop it.
        if thread.is_alive():
            _force_stop_worker(worker, thread)
        print("Worker disconnected.")
    except Exception as e:
        log_failure("worker disconnect", e)
def main() -> None:
    """Chaos loop: connect a worker, trigger tasks for ~5s, disconnect, repeat.

    Every connect/trigger/disconnect failure is recorded via `log_failure`
    and the loop continues, so transient infrastructure errors are surfaced
    without killing the test. Ctrl+C force-stops any live worker and exits.
    """
    global _current_worker, _current_thread

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--suffix",
        default="",
        help="Suffix to append to the worker name (e.g. 'old' or 'new')",
    )
    args = parser.parse_args()

    try:
        while True:
            # --- Connect the worker ---
            print("\n=== Connecting worker ===")
            try:
                worker, thread = start_worker(args.suffix)
                _current_worker, _current_thread = worker, thread
            except Exception as e:
                log_failure("worker connect", e)
                time.sleep(5)
                continue

            # --- Trigger tasks every 1 second for 5 seconds ---
            for tick in range(5):
                time.sleep(1)
                print(f"\n--- Triggering tasks (tick {tick + 1}/5) ---")
                try:
                    # run(wait_for_result=False) replaces the deprecated
                    # run_no_wait() (see SDK changelog for 1.31.0).
                    ref = simple.run(wait_for_result=False)
                    print(f"Task triggered: {ref}")
                except Exception as e:
                    log_failure(f"task trigger (tick {tick + 1}/5)", e)

                try:
                    ref = simple_durable.run(wait_for_result=False)
                    print(f"Durable task triggered: {ref}")
                except Exception as e:
                    log_failure(f"durable task trigger (tick {tick + 1}/5)", e)

            # --- Disconnect the worker ---
            print("\n=== Disconnecting worker ===")
            stop_worker(worker, thread)
            _current_worker, _current_thread = None, None
    except KeyboardInterrupt:
        print("\n\nCtrl+C received, shutting down...")
        if _current_worker and _current_thread:
            _force_stop_worker(_current_worker, _current_thread)
        print("Bye!")


if __name__ == "__main__":
    main()
-153
View File
@@ -1,153 +0,0 @@
# This is a worker script that will introduce chaos to test
# complex deployments and migrations.
import argparse
import asyncio
import signal
import threading
import time
import traceback
from datetime import datetime, timezone
from pathlib import Path
from hatchet_sdk import Context, EmptyModel, Hatchet
hatchet = Hatchet(debug=True)
FAILURE_LOG = Path(__file__).parent / "failures.log"
# Track the current worker so we can clean up on Ctrl+C
_current_worker = None
_current_thread = None
# poetry run python ./simple/worker_test.py --suffix new
def log_failure(phase: str, error: Exception) -> None:
    """Log a failure loudly to stderr and append to the failures log file.

    Args:
        phase: Human-readable label for the operation that failed.
        error: The exception that was raised; its traceback is included.
    """
    timestamp = datetime.now(timezone.utc).isoformat()
    tb = traceback.format_exception(type(error), error, error.__traceback__)
    tb_str = "".join(tb)
    msg = f"[{timestamp}] FAILURE during {phase}: {error}\n{tb_str}"
    # Loud stderr output
    # NOTE(review): print() actually writes to stdout — confirm whether
    # stderr (file=sys.stderr) was intended.
    print(f"\n{'!' * 60}", flush=True)
    print(f"!!! FAILURE: {phase} !!!", flush=True)
    print(msg, flush=True)
    print(f"{'!' * 60}\n", flush=True)
    # Append to log file
    with open(FAILURE_LOG, "a") as f:
        f.write(msg)
        f.write("-" * 60 + "\n")
@hatchet.task()
def simple(input: EmptyModel, ctx: Context) -> dict[str, str]:
    """Minimal non-durable task used to exercise worker connect/trigger cycles."""
    print("Executing simple task!")
    return {"result": "Hello, world!"}
@hatchet.durable_task()
async def simple_durable(input: EmptyModel, ctx: Context) -> dict[str, str]:
    """Minimal durable task counterpart to `simple` (async, as durable tasks require)."""
    print("Executing durable task!")
    return {"result": "Hello from durable!"}
def _force_stop_worker(worker, thread: threading.Thread) -> None:
    """Forcefully terminate the worker and its child processes.

    Last-resort teardown: reaches into private Worker internals
    (`_terminate_processes`, `_close_queues`), so it may break across SDK
    versions. Prefer `stop_worker`, which tries a graceful shutdown first.
    """
    worker.killing = True
    worker._terminate_processes()
    worker._close_queues()
    # Stop the worker's event loop from this (different) thread.
    if worker.loop and worker.loop.is_running():
        worker.loop.call_soon_threadsafe(worker.loop.stop)
    thread.join(timeout=5)
def start_worker(suffix: str = "") -> tuple:
    """Create and start a worker in a background thread.

    Args:
        suffix: Optional suffix appended to the worker name (e.g. 'old', 'new').

    Returns:
        A (worker, thread) pair: the worker instance and the daemon thread
        running it.
    """
    name = f"test-worker-{suffix}" if suffix else "test-worker"
    worker = hatchet.worker(
        name,
        workflows=[simple, simple_durable],
        slots=10,
    )
    worker.handle_kill = False  # Prevent sys.exit on shutdown
    # Restore default signal handlers so Ctrl+C raises KeyboardInterrupt
    signal.signal(signal.SIGINT, signal.default_int_handler)
    signal.signal(signal.SIGTERM, signal.SIG_DFL)
    thread = threading.Thread(target=worker.start, daemon=True)
    thread.start()
    # Give the worker a moment to initialize
    # NOTE(review): a fixed 2s sleep assumes registration completes in time —
    # confirm this is sufficient on slower environments.
    time.sleep(2)
    print("Worker connected.")
    return worker, thread
def stop_worker(worker, thread: threading.Thread) -> None:
    """Stop the worker gracefully, escalating to a forced shutdown on timeout."""
    try:
        # Schedule the graceful shutdown coroutine on the worker's own loop.
        if worker.loop and worker.loop.is_running():
            asyncio.run_coroutine_threadsafe(worker.exit_gracefully(), worker.loop)
        thread.join(timeout=10)
        # The thread did not exit in time: hard-stop it.
        if thread.is_alive():
            _force_stop_worker(worker, thread)
        print("Worker disconnected.")
    except Exception as e:
        log_failure("worker disconnect", e)
def main() -> None:
    """Chaos loop: connect a worker, trigger tasks for ~5s, disconnect, repeat.

    Every connect/trigger/disconnect failure is recorded via `log_failure`
    and the loop continues, so transient infrastructure errors are surfaced
    without killing the test. Ctrl+C force-stops any live worker and exits.
    """
    global _current_worker, _current_thread

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--suffix",
        default="",
        help="Suffix to append to the worker name (e.g. 'old' or 'new')",
    )
    args = parser.parse_args()

    try:
        while True:
            # --- Connect the worker ---
            print("\n=== Connecting worker ===")
            try:
                worker, thread = start_worker(args.suffix)
                _current_worker, _current_thread = worker, thread
            except Exception as e:
                log_failure("worker connect", e)
                time.sleep(5)
                continue

            # --- Trigger tasks every 1 second for 5 seconds ---
            for tick in range(5):
                time.sleep(1)
                print(f"\n--- Triggering tasks (tick {tick + 1}/5) ---")
                try:
                    # run(wait_for_result=False) replaces the deprecated
                    # run_no_wait() (see SDK changelog for 1.31.0).
                    ref = simple.run(wait_for_result=False)
                    print(f"Task triggered: {ref}")
                except Exception as e:
                    log_failure(f"task trigger (tick {tick + 1}/5)", e)

                try:
                    ref = simple_durable.run(wait_for_result=False)
                    print(f"Durable task triggered: {ref}")
                except Exception as e:
                    log_failure(f"durable task trigger (tick {tick + 1}/5)", e)

            # --- Disconnect the worker ---
            print("\n=== Disconnecting worker ===")
            stop_worker(worker, thread)
            _current_worker, _current_thread = None, None
    except KeyboardInterrupt:
        print("\n\nCtrl+C received, shutting down...")
        if _current_worker and _current_thread:
            _force_stop_worker(_current_worker, _current_thread)
        print("Bye!")


if __name__ == "__main__":
    main()
@@ -1,9 +1,6 @@
from examples.simple.worker import simple
from hatchet_sdk import TriggerWorkflowOptions
# > Trigger with metadata
simple.run(
options=TriggerWorkflowOptions(
additional_metadata={"source": "api"} # Arbitrary key-value pair
)
additional_metadata={"source": "api"}, # Arbitrary key-value pair
)
+1 -1
View File
@@ -1,7 +1,7 @@
# > Simple
from hatchet_sdk import Context, DurableContext, EmptyModel, Hatchet
hatchet = Hatchet(debug=True)
hatchet = Hatchet()
@hatchet.task()
+1 -2
View File
@@ -1,6 +1,5 @@
from examples.sticky_workers.worker import sticky_workflow
from hatchet_sdk import TriggerWorkflowOptions
sticky_workflow.run(
options=TriggerWorkflowOptions(additional_metadata={"hello": "moon"}),
additional_metadata={"hello": "moon"},
)
+8 -8
View File
@@ -3,10 +3,9 @@ from hatchet_sdk import (
EmptyModel,
Hatchet,
StickyStrategy,
TriggerWorkflowOptions,
)
hatchet = Hatchet(debug=True)
hatchet = Hatchet()
# > StickyWorker
@@ -20,12 +19,12 @@ sticky_workflow = hatchet.workflow(
@sticky_workflow.task()
def step1a(input: EmptyModel, ctx: Context) -> dict[str, str | None]:
return {"worker": ctx.worker.id()}
return {"worker": ctx.worker_id}
@sticky_workflow.task()
def step1b(input: EmptyModel, ctx: Context) -> dict[str, str | None]:
return {"worker": ctx.worker.id()}
return {"worker": ctx.worker_id}
@@ -38,18 +37,19 @@ sticky_child_workflow = hatchet.workflow(
@sticky_workflow.task(parents=[step1a, step1b])
async def step2(input: EmptyModel, ctx: Context) -> dict[str, str | None]:
ref = await sticky_child_workflow.aio_run_no_wait(
options=TriggerWorkflowOptions(sticky=True)
ref = await sticky_child_workflow.aio_run(
sticky=True,
wait_for_result=False,
)
await ref.aio_result()
return {"worker": ctx.worker.id()}
return {"worker": ctx.worker_id}
@sticky_child_workflow.task()
def child(input: EmptyModel, ctx: Context) -> dict[str, str | None]:
return {"worker": ctx.worker.id()}
return {"worker": ctx.worker_id}
+1 -1
View File
@@ -6,7 +6,7 @@ from hatchet_sdk.clients.listeners.run_event_listener import StepRunEventType
async def main() -> None:
# > Consume
ref = await stream_task.aio_run_no_wait()
ref = await stream_task.aio_run(wait_for_result=False)
async for chunk in hatchet.runs.subscribe_to_stream(ref.workflow_run_id):
print(chunk, flush=True, end="")
+1 -1
View File
@@ -13,7 +13,7 @@ app = FastAPI()
@app.get("/stream")
async def stream() -> StreamingResponse:
ref = await stream_task.aio_run_no_wait()
ref = await stream_task.aio_run(wait_for_result=False)
return StreamingResponse(
hatchet.runs.subscribe_to_stream(ref.workflow_run_id), media_type="text/plain"
+1 -1
View File
@@ -3,7 +3,7 @@ from typing import Generator
from hatchet_sdk import Context, EmptyModel, Hatchet
hatchet = Hatchet(debug=False)
hatchet = Hatchet()
# > Streaming
+1 -1
View File
@@ -3,7 +3,7 @@ from datetime import timedelta
from hatchet_sdk import Context, EmptyModel, Hatchet, TaskDefaults
hatchet = Hatchet(debug=True)
hatchet = Hatchet()
# > ScheduleTimeout
timeout_wf = hatchet.workflow(
+4 -2
View File
@@ -23,10 +23,12 @@ async def say_hello(input: HelloInput, ctx: Context) -> HelloOutput:
async def main() -> None:
# > Sync
ref = say_hello.run_no_wait(input=HelloInput(name="World"))
ref = say_hello.run(input=HelloInput(name="World"), wait_for_result=False)
# > Async
ref = await say_hello.aio_run_no_wait(input=HelloInput(name="Async World"))
ref = await say_hello.aio_run(
input=HelloInput(name="Async World"), wait_for_result=False
)
# > Result Sync
result = ref.result()
+1 -1
View File
@@ -4,7 +4,7 @@ from pydantic import BaseModel
from hatchet_sdk import Context, DefaultFilter, Hatchet
hatchet = Hatchet(debug=True)
hatchet = Hatchet()
class WebhookInputWithScope(BaseModel):
+1 -1
View File
@@ -2,7 +2,7 @@ from pydantic import BaseModel
from hatchet_sdk import Context, Hatchet
hatchet = Hatchet(debug=True)
hatchet = Hatchet()
class WebhookInput(BaseModel):
+1 -1
View File
@@ -86,7 +86,7 @@ from examples.opentelemetry_instrumentation.worker import (
)
from hatchet_sdk import Hatchet
hatchet = Hatchet(debug=True)
hatchet = Hatchet()
def main() -> None:
@@ -1,42 +0,0 @@
import asyncio
from contextlib import suppress
from hatchet_sdk import Context, EmptyModel, Hatchet
hatchet = Hatchet(debug=True)
existing_loop_worker = hatchet.workflow(name="WorkerExistingLoopWorkflow")
@existing_loop_worker.task()
async def task(input: EmptyModel, ctx: Context) -> dict[str, str]:
    """Long-running (10s) task for the worker-on-existing-event-loop example."""
    print("started")
    await asyncio.sleep(10)
    print("finished")
    return {"result": "returned result"}
async def async_main() -> None:
    """Start a worker on the current event loop, run the workflow once, then idle.

    The worker is always shut down gracefully in the `finally` block, even if
    startup or the run fails.
    """
    worker = None
    try:
        worker = hatchet.worker(
            "test-worker", slots=1, workflows=[existing_loop_worker]
        )
        worker.start()

        # run(wait_for_result=False) replaces the deprecated run_no_wait()
        # (see SDK changelog for 1.31.0).
        ref = existing_loop_worker.run(wait_for_result=False)
        print(await ref.aio_result())

        # Keep the loop alive so the worker continues serving runs.
        while True:
            await asyncio.sleep(1)
    finally:
        if worker:
            await worker.exit_gracefully()
def main() -> None:
    """Entry point: drive async_main on a fresh event loop until Ctrl+C."""
    try:
        asyncio.run(async_main())
    except KeyboardInterrupt:
        # Swallow Ctrl+C so the script exits quietly, matching suppress().
        pass


if __name__ == "__main__":
    main()
@@ -2,7 +2,7 @@
from hatchet_sdk import Hatchet
hatchet = Hatchet(debug=True)
hatchet = Hatchet()
wf_one = hatchet.workflow(name="wf_one")
wf_two = hatchet.workflow(name="wf_two")
@@ -10,6 +10,7 @@ from typing import Any
from hatchet_sdk import Context, Hatchet
# Create large payload (100KB)
def create_large_payload() -> dict[str, str]:
payload: dict[str, str] = {}
@@ -50,7 +51,7 @@ def emit_events(hatchet: Hatchet, total_events: int, events_per_second: int) ->
def main() -> None:
# Namespace is set via environment variable HATCHET_CLIENT_NAMESPACE
hatchet = Hatchet(debug=False)
hatchet = Hatchet()
# Get compression state from environment (default to 'enabled')
compression_state = os.getenv("COMPRESSION_STATE", "enabled")
@@ -98,7 +99,9 @@ def main() -> None:
os.kill(os.getpid(), signal.SIGTERM)
# Start timer to stop worker after test duration
stop_timer = threading.Timer(wait_time, lambda: os.kill(os.getpid(), signal.SIGTERM))
stop_timer = threading.Timer(
wait_time, lambda: os.kill(os.getpid(), signal.SIGTERM)
)
stop_timer.daemon = True
stop_timer.start()
+34
View File
@@ -5,6 +5,40 @@ All notable changes to Hatchet's Python SDK will be documented in this changelog
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [1.31.0] - 2026-04-03
### Added
- Adds `wait_for_result` parameter to `run()`, `aio_run()`, `run_many()`, and `aio_run_many()` on both `Workflow` and `Standalone`. Passing `wait_for_result=False` replaces the `run_no_wait` / `aio_run_no_wait` / `run_many_no_wait` / `aio_run_many_no_wait` methods.
- Exports `WorkerLabel` from the top-level `hatchet_sdk` package, alongside the existing `DesiredWorkerLabel` and `WorkerLabelComparator`.
- Exports `Priority`, `ConcurrencyExpression`, `ConcurrencyLimitStrategy`, `RateLimit`, `RateLimitDuration`, `StickyStrategy`, `SlotType`, `BulkPushEventOptions`, `BulkPushEventWithMetadata`, `PushEventOptions`, and `WorkflowRunTriggerConfig` from the top-level `hatchet_sdk` package.
- Adds a `current_context` property to `Hatchet` as a replacement for the deprecated `get_current_context()` method.
### Changed
- `worker_labels` and `desired_worker_labels` are now stored internally as `list[WorkerLabel]` / `list[DesiredWorkerLabel]` and converted to the protobuf representation at the last moment, rather than eagerly at construction time.
- Adds top-level parameters to the `run`, `schedule`, and related methods so that options can be passed directly, instead of requiring an import of an options class such as `TriggerWorkflowOptions`, which was not very Pythonic.
### Deprecated
- `run_no_wait()`, `aio_run_no_wait()`, `run_many_no_wait()`, and `aio_run_many_no_wait()` are deprecated in favor of `run(wait_for_result=False)`, `aio_run(wait_for_result=False)`, `run_many(wait_for_result=False)`, and `aio_run_many(wait_for_result=False)` respectively.
- Passing duration parameters (e.g. `schedule_timeout`, `execution_timeout`) as strings is deprecated. Use `timedelta` objects instead.
- Non-async durable tasks are deprecated. Please convert durable task functions to async.
- Passing `desired_worker_labels` as a `dict` to task decorators (`@workflow.task`, `@hatchet.task`, etc.) is deprecated. Use a `list[DesiredWorkerLabel]` with the `key` field set instead.
- Passing `desired_worker_label` as a `dict` to `TriggerWorkflowOptions` is deprecated. Use a `list[DesiredWorkerLabel]` with the `key` field set instead.
- Passing `priority` as an `int` to task and workflow decorators is deprecated. Use `Priority.LOW`, `Priority.MEDIUM`, or `Priority.HIGH` instead.
- Passing `comparator` as an `int` to `DesiredWorkerLabel` is deprecated. Use `WorkerLabelComparator` enum values instead.
- The `debug` parameter on `Hatchet()` is deprecated. Set debug mode via the `HATCHET_CLIENT_DEBUG` environment variable instead.
- The `client` parameter on `Hatchet()` is deprecated and will be removed in v2.0.0.
- `Hatchet.get_current_context()` is deprecated. Use the `Hatchet.current_context` property instead.
- `Context.step_run_id` is deprecated. Use `Context.task_run_id` instead.
- `Context.workflow_input` and `Context.input` are deprecated. Use the input argument passed directly to the task function instead.
- `Context.aio_task_output()` is deprecated. Use `Context.task_output()` instead.
- `Context.done` is deprecated. Use `Context.is_cancelled` instead.
- `Context.fetch_task_run_error()` is deprecated. Use `Context.get_task_run_error()` instead.
- Deprecates a number of internal properties and methods on the `Worker` and `Context` that are not intended for public use. These will be removed in v2.0.0.
- Accessing `ctx.worker` is now deprecated. Use the various properties on the context directly, such as `ctx.worker_id` instead of `ctx.worker.id()`.
## [1.30.0] - 2026-03-30
### Changed
+1 -3
View File
@@ -14,9 +14,7 @@ from tests.worker_fixture import hatchet_worker
@pytest_asyncio.fixture(scope="session", loop_scope="session")
async def hatchet() -> AsyncGenerator[Hatchet, None]:
yield Hatchet(
debug=True,
)
yield Hatchet()
@pytest_asyncio.fixture(scope="session", loop_scope="session")
@@ -1,6 +1,3 @@
from examples.affinity_workers.worker import affinity_worker_workflow
from hatchet_sdk import TriggerWorkflowOptions
affinity_worker_workflow.run(
options=TriggerWorkflowOptions(additional_metadata={"hello": "moon"}),
)
affinity_worker_workflow.run(additional_metadata={"hello": "moon"})
@@ -1,7 +1,7 @@
from hatchet_sdk import Context, EmptyModel, Hatchet, WorkerLabelComparator
from hatchet_sdk import Context, EmptyModel, Hatchet, WorkerLabel, WorkerLabelComparator
from hatchet_sdk.labels import DesiredWorkerLabel
hatchet = Hatchet(debug=True)
hatchet = Hatchet()
# > AffinityWorkflow
@@ -10,14 +10,15 @@ affinity_worker_workflow = hatchet.workflow(name="AffinityWorkflow")
@affinity_worker_workflow.task(
desired_worker_labels={
"model": DesiredWorkerLabel(value="fancy-ai-model-v2", weight=10),
"memory": DesiredWorkerLabel(
desired_worker_labels=[
DesiredWorkerLabel(key="model", value="fancy-ai-model-v2", weight=10),
DesiredWorkerLabel(
key="memory",
value=256,
required=True,
comparator=WorkerLabelComparator.LESS_THAN,
),
},
],
)
# !!
@@ -25,12 +26,12 @@ affinity_worker_workflow = hatchet.workflow(name="AffinityWorkflow")
# > AffinityTask
async def step(input: EmptyModel, ctx: Context) -> dict[str, str | None]:
if ctx.worker.labels().get("model") != "fancy-ai-model-v2":
if ctx.worker_labels.get("model") != "fancy-ai-model-v2":
ctx.worker.upsert_labels({"model": "unset"})
# DO WORK TO EVICT OLD MODEL / LOAD NEW MODEL
ctx.worker.upsert_labels({"model": "fancy-ai-model-v2"})
return {"worker": ctx.worker.id()}
return {"worker": ctx.worker_id}
# !!
+1 -1
View File
@@ -1,6 +1,6 @@
from hatchet_sdk import Hatchet
hatchet = Hatchet(debug=True)
hatchet = Hatchet()
def main() -> None:
+1 -1
View File
@@ -2,7 +2,7 @@ import asyncio
from hatchet_sdk import Hatchet
hatchet = Hatchet(debug=True)
hatchet = Hatchet()
async def main() -> None:
@@ -7,15 +7,15 @@ from examples.blocked_async.blocking_example_worker import (
non_blocking_sync,
)
non_blocking_sync.run_no_wait()
non_blocking_async.run_no_wait()
non_blocking_sync.run(wait_for_result=False)
non_blocking_async.run(wait_for_result=False)
time.sleep(1)
blocking.run_no_wait()
blocking.run(wait_for_result=False)
time.sleep(1)
non_blocking_sync.run_no_wait()
non_blocking_sync.run(wait_for_result=False)
# !!
@@ -1,6 +1,3 @@
from examples.blocked_async.worker import blocked_worker_workflow
from hatchet_sdk import TriggerWorkflowOptions
blocked_worker_workflow.run(
options=TriggerWorkflowOptions(additional_metadata={"hello": "moon"}),
)
blocked_worker_workflow.run(additional_metadata={"hello": "moon"})
+1 -1
View File
@@ -4,7 +4,7 @@ from datetime import timedelta
from hatchet_sdk import Context, EmptyModel, Hatchet
hatchet = Hatchet(debug=True)
hatchet = Hatchet()
# WARNING: this is an example of what NOT to do
# This workflow is intentionally blocking the main thread
@@ -10,7 +10,7 @@ from examples.bug_tests.payload_bug_on_replay.worker import (
step1,
step2,
)
from hatchet_sdk import EmptyModel, Hatchet, TriggerWorkflowOptions, V1TaskStatus
from hatchet_sdk import EmptyModel, Hatchet, V1TaskStatus
@pytest.mark.asyncio(loop_scope="session")
@@ -23,11 +23,10 @@ async def test_payload_replay_bug(hatchet: Hatchet) -> None:
test_run_id = str(uuid4())
ref = await payload_initial_cancel_bug_workflow.aio_run_no_wait(
ref = await payload_initial_cancel_bug_workflow.aio_run(
input=Input(random_number=42),
options=TriggerWorkflowOptions(
additional_metadata={"test_run_id": test_run_id}
),
additional_metadata={"test_run_id": test_run_id},
wait_for_result=False,
)
result = await ref.aio_result()

Some files were not shown because too many files have changed in this diff Show More