[Python]: Fixing logging bugs, fixing duped sleep key bug (#2040)

* feat: add flag to disable log capture

* fix: sleep bug with duped key

* fix: allow formatters to be passed through

* feat: support filters too

* fix: cruft

* chore: gen

* feat: changelog

* fix: lint

* [Python] Fix: Don't retry gRPC requests on 4xx (#2024)

* fix: don't retry on 4xx

* chore: ver

* fix: sleep conditions with index

* fix: bug in sleep conditions

* chore: gen
This commit is contained in:
matt
2025-07-23 15:15:31 -04:00
committed by GitHub
parent 5e2037c7ff
commit cca0999eea
30 changed files with 321 additions and 75 deletions
+30 -1
View File
@@ -2,7 +2,12 @@ import asyncio
import pytest
from examples.durable.worker import EVENT_KEY, SLEEP_TIME, durable_workflow
from examples.durable.worker import (
EVENT_KEY,
SLEEP_TIME,
durable_workflow,
wait_for_sleep_twice,
)
from hatchet_sdk import Hatchet
@@ -43,3 +48,27 @@ async def test_durable(hatchet: Hatchet) -> None:
assert wait_group_1["key"] == "CREATE"
assert "sleep" in wait_group_1["event_id"]
assert "event" in wait_group_2["event_id"]
wait_for_multi_sleep = result["wait_for_multi_sleep"]
assert wait_for_multi_sleep["runtime"] > 3 * SLEEP_TIME
@pytest.mark.asyncio(loop_scope="session")
async def test_durable_sleep_cancel_replay(hatchet: Hatchet) -> None:
    """Cancel a durable sleep part-way through, replay the run, and check
    that the replayed attempt finishes in less than a full SLEEP_TIME.
    """
    first_sleep = await wait_for_sleep_twice.aio_run_no_wait()

    # Let the task sleep for half its duration before cancelling it.
    await asyncio.sleep(SLEEP_TIME / 2)
    await hatchet.runs.aio_cancel(first_sleep.workflow_run_id)

    # Wait for the cancelled run to settle before replaying it.
    await first_sleep.aio_result()

    await hatchet.runs.aio_replay(
        first_sleep.workflow_run_id,
    )

    second_sleep_result = await first_sleep.aio_result()

    # We've already slept for a little bit by the time the task is cancelled,
    # so the replayed run should take less than a full SLEEP_TIME.
    # (The original used a bare triple-quoted string here, which is an
    # expression statement, not a comment — replaced with a real comment.)
    assert second_sleep_result["runtime"] < SLEEP_TIME
+37 -1
View File
@@ -1,3 +1,4 @@
import asyncio
import time
from datetime import timedelta
from uuid import uuid4
@@ -105,14 +106,49 @@ async def wait_for_or_group_2(
}
@durable_workflow.durable_task()
async def wait_for_multi_sleep(
    _i: EmptyModel, ctx: DurableContext
) -> dict[str, int]:
    """Durably sleep three times back-to-back and report total elapsed time.

    NOTE(review): issuing several sleeps from one task presumably exercises
    the duplicated-sleep-key fix this commit ships — confirm against the SDK.

    Returns:
        {"runtime": <whole seconds elapsed across all three sleeps>}
    """
    start = time.time()

    for _ in range(3):
        await ctx.aio_sleep_for(
            timedelta(seconds=SLEEP_TIME),
        )

    # Annotation narrowed from dict[str, str | int] to dict[str, int]:
    # the only value ever returned is an int, matching wait_for_sleep_twice.
    return {
        "runtime": int(time.time() - start),
    }
@ephemeral_workflow.task()
def ephemeral_task_2(input: EmptyModel, ctx: Context) -> None:
    """Minimal non-durable task registered on the ephemeral workflow."""
    print("Running non-durable task")
@hatchet.durable_task()
async def wait_for_sleep_twice(
    input: EmptyModel, ctx: DurableContext
) -> dict[str, int]:
    """Durably sleep once and report the elapsed time in whole seconds.

    If the run is cancelled mid-sleep, the cancellation is swallowed and a
    sentinel runtime of -1 is returned instead of raising; the cancel/replay
    test uses this to tell a cancelled first attempt apart from a completed
    replay.

    Returns:
        {"runtime": <seconds elapsed>} on success, {"runtime": -1} on cancel.
    """
    start = time.time()

    # Keep the try body minimal: only the awaitable can be cancelled, so the
    # timer setup and the success return live outside the handler.
    try:
        await ctx.aio_sleep_for(
            timedelta(seconds=SLEEP_TIME),
        )
    except asyncio.CancelledError:
        # Deliberate best-effort swallow: the caller receives a result
        # instead of a CancelledError; -1 marks the cancelled attempt.
        return {"runtime": -1}

    return {
        "runtime": int(time.time() - start),
    }
def main() -> None:
    """Start a worker hosting the durable example workflows and tasks."""
    worker = hatchet.worker(
        "durable-worker",
        workflows=[durable_workflow, ephemeral_workflow, wait_for_sleep_twice],
    )

    worker.start()
-2
View File
@@ -62,8 +62,6 @@ async def process2(input: ChildInput, ctx: Context) -> dict[str, str]:
# !!
child_wf.create_bulk_run_item()
def main() -> None:
worker = hatchet.worker("fanout-worker", slots=40, workflows=[parent_wf, child_wf])
+2 -1
View File
@@ -15,7 +15,7 @@ from examples.concurrency_workflow_level.worker import (
from examples.conditions.worker import task_condition_workflow
from examples.dag.worker import dag_workflow
from examples.dedupe.worker import dedupe_child_wf, dedupe_parent_wf
from examples.durable.worker import durable_workflow
from examples.durable.worker import durable_workflow, wait_for_sleep_twice
from examples.events.worker import event_workflow
from examples.fanout.worker import child_wf, parent_wf
from examples.fanout_sync.worker import sync_fanout_child, sync_fanout_parent
@@ -67,6 +67,7 @@ def main() -> None:
bulk_replay_test_2,
bulk_replay_test_3,
return_exceptions_task,
wait_for_sleep_twice,
],
lifespan=lifespan,
)