Fe overhaul docs (#1640)

* api changes

* doc changes

* move docs

* generated

* generate

* pkg

* backmerge main

* revert to main

* revert main

* race?

* remove go tests
Gabe Ruttner
2025-04-30 17:10:09 -04:00
committed by GitHub
parent 799b5d0dcf
commit 8e80faf2d6
1503 changed files with 36645 additions and 1235 deletions


@@ -4,7 +4,7 @@ from hatchet_sdk.labels import DesiredWorkerLabel
hatchet = Hatchet(debug=True)
-# AffinityWorkflow
+# > AffinityWorkflow
affinity_worker_workflow = hatchet.workflow(name="AffinityWorkflow")
@@ -20,10 +20,10 @@ affinity_worker_workflow = hatchet.workflow(name="AffinityWorkflow")
},
)
-# ‼️
+# !!
-# AffinityTask
+# > AffinityTask
async def step(input: EmptyModel, ctx: Context) -> dict[str, str | None]:
if ctx.worker.labels().get("model") != "fancy-ai-model-v2":
ctx.worker.upsert_labels({"model": "unset"})
@@ -33,12 +33,12 @@ async def step(input: EmptyModel, ctx: Context) -> dict[str, str | None]:
return {"worker": ctx.worker.id()}
-# ‼️
+# !!
def main() -> None:
-# AffinityWorker
+# > AffinityWorker
worker = hatchet.worker(
"affinity-worker",
slots=10,
@@ -51,7 +51,7 @@ def main() -> None:
worker.start()
-# ‼️
+# !!
if __name__ == "__main__":
main()
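The hunk above cuts off before the `desired_worker_labels` configuration it references. A minimal sketch of what the full task decorator might look like, assuming the `DesiredWorkerLabel` options imported at the top of the file and the workflow/context objects defined earlier (label names and values here are illustrative):

```python
from hatchet_sdk.labels import DesiredWorkerLabel

# Hypothetical completion of the truncated decorator: prefer workers whose
# "model" label matches (weight-based), and hard-require a "memory" label.
@affinity_worker_workflow.task(
    desired_worker_labels={
        "model": DesiredWorkerLabel(value="fancy-ai-model-v2", weight=10),
        "memory": DesiredWorkerLabel(value=256, required=True),
    },
)
async def step(input: EmptyModel, ctx: Context) -> dict[str, str | None]:
    return {"worker": ctx.worker.id()}
```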


@@ -21,7 +21,7 @@ bulk_parent_wf = hatchet.workflow(name="BulkFanoutParent", input_validator=Paren
bulk_child_wf = hatchet.workflow(name="BulkFanoutChild", input_validator=ChildInput)
-# BulkFanoutParent
+# > BulkFanoutParent
@bulk_parent_wf.task(execution_timeout=timedelta(minutes=5))
async def spawn(input: ParentInput, ctx: Context) -> dict[str, list[dict[str, Any]]]:
# 👀 Create each workflow run to spawn
@@ -40,7 +40,7 @@ async def spawn(input: ParentInput, ctx: Context) -> dict[str, list[dict[str, An
return {"results": spawn_results}
-# ‼️
+# !!
@bulk_child_wf.task()


@@ -1,4 +1,4 @@
-# Setup
+# > Setup
from datetime import datetime, timedelta
@@ -14,11 +14,11 @@ workflow = workflows.rows[0]
# !!
-# List runs
+# > List runs
workflow_runs = hatchet.runs.list(workflow_ids=[workflow.metadata.id])
# !!
-# Cancel by run ids
+# > Cancel by run ids
workflow_run_ids = [workflow_run.metadata.id for workflow_run in workflow_runs.rows]
bulk_cancel_by_ids = BulkCancelReplayOpts(ids=workflow_run_ids)
@@ -26,7 +26,7 @@ bulk_cancel_by_ids = BulkCancelReplayOpts(ids=workflow_run_ids)
hatchet.runs.bulk_cancel(bulk_cancel_by_ids)
# !!
-# Cancel by filters
+# > Cancel by filters
bulk_cancel_by_filters = BulkCancelReplayOpts(
filters=RunFilter(


@@ -1,4 +1,4 @@
-# Setup
+# > Setup
from datetime import datetime, timedelta
@@ -14,11 +14,11 @@ workflow = workflows.rows[0]
# !!
-# List runs
+# > List runs
workflow_runs = hatchet.runs.list(workflow_ids=[workflow.metadata.id])
# !!
-# Replay by run ids
+# > Replay by run ids
workflow_run_ids = [workflow_run.metadata.id for workflow_run in workflow_runs.rows]
bulk_replay_by_ids = BulkCancelReplayOpts(ids=workflow_run_ids)
@@ -26,7 +26,7 @@ bulk_replay_by_ids = BulkCancelReplayOpts(ids=workflow_run_ids)
hatchet.runs.bulk_replay(bulk_replay_by_ids)
# !!
-# Replay by filters
+# > Replay by filters
bulk_replay_by_filters = BulkCancelReplayOpts(
filters=RunFilter(
since=datetime.today() - timedelta(days=1),


@@ -8,7 +8,7 @@ hatchet = Hatchet(debug=True)
cancellation_workflow = hatchet.workflow(name="CancelWorkflow")
-# Self-cancelling task
+# > Self-cancelling task
@cancellation_workflow.task()
async def self_cancel(input: EmptyModel, ctx: Context) -> dict[str, str]:
await asyncio.sleep(2)
@@ -24,7 +24,7 @@ async def self_cancel(input: EmptyModel, ctx: Context) -> dict[str, str]:
# !!
-# Checking exit flag
+# > Checking exit flag
@cancellation_workflow.task()
def check_flag(input: EmptyModel, ctx: Context) -> dict[str, str]:
for i in range(3):


@@ -1,6 +1,6 @@
import asyncio
-# Running a Task
+# > Running a Task
from examples.child.worker import SimpleInput, child_task
child_task.run(SimpleInput(message="Hello, World!"))
@@ -8,7 +8,7 @@ child_task.run(SimpleInput(message="Hello, World!"))
async def main() -> None:
-# Bulk Run a Task
+# > Bulk Run a Task
greetings = ["Hello, World!", "Hello, Moon!", "Hello, Mars!"]
results = await child_task.aio_run_many(
@@ -25,7 +25,7 @@ async def main() -> None:
print(results)
# !!
-# Running Multiple Tasks
+# > Running Multiple Tasks
result1 = child_task.aio_run(SimpleInput(message="Hello, World!"))
result2 = child_task.aio_run(SimpleInput(message="Hello, Moon!"))


@@ -8,7 +8,7 @@ from hatchet_sdk.runnables.types import EmptyModel
hatchet = Hatchet(debug=True)
-# Running a Task from within a Task
+# > Running a Task from within a Task
@hatchet.task(name="SpawnTask")
async def spawn(input: EmptyModel, ctx: Context) -> dict[str, Any]:
# Simply run the task with the input we received
@@ -19,4 +19,4 @@ async def spawn(input: EmptyModel, ctx: Context) -> dict[str, Any]:
return {"results": result}
-# ‼️
+# !!


@@ -2,13 +2,13 @@
import asyncio
-# Running a Task
+# > Running a Task
from examples.child.worker import SimpleInput, child_task
child_task.run(SimpleInput(message="Hello, World!"))
# !!
-# Schedule a Task
+# > Schedule a Task
from datetime import datetime, timedelta
child_task.schedule(
@@ -18,13 +18,13 @@ child_task.schedule(
async def main() -> None:
-# Running a Task AIO
+# > Running a Task AIO
result = await child_task.aio_run(SimpleInput(message="Hello, World!"))
# !!
print(result)
-# Running Multiple Tasks
+# > Running Multiple Tasks
result1 = child_task.aio_run(SimpleInput(message="Hello, World!"))
result2 = child_task.aio_run(SimpleInput(message="Hello, Moon!"))


@@ -1,4 +1,4 @@
-# Simple
+# > Simple
from pydantic import BaseModel
@@ -24,7 +24,7 @@ def step1(input: SimpleInput, ctx: Context) -> SimpleOutput:
return SimpleOutput(transformed_message=input.message.upper())
-# ‼️
+# !!
def main() -> None:


@@ -13,7 +13,7 @@ from hatchet_sdk import (
hatchet = Hatchet(debug=True)
-# Workflow
+# > Workflow
class WorkflowInput(BaseModel):
run: int
group_key: str
@@ -29,7 +29,7 @@ concurrency_limit_workflow = hatchet.workflow(
input_validator=WorkflowInput,
)
-# ‼️
+# !!
@concurrency_limit_workflow.task()


@@ -12,7 +12,7 @@ from hatchet_sdk import (
hatchet = Hatchet(debug=True)
-# Concurrency Strategy With Key
+# > Concurrency Strategy With Key
class WorkflowInput(BaseModel):
group: str
@@ -26,7 +26,7 @@ concurrency_limit_rr_workflow = hatchet.workflow(
),
input_validator=WorkflowInput,
)
-# ‼️
+# !!
@concurrency_limit_rr_workflow.task()


@@ -16,7 +16,7 @@ DIGIT_MAX_RUNS = 8
NAME_MAX_RUNS = 3
-# Concurrency Strategy With Key
+# > Concurrency Strategy With Key
class WorkflowInput(BaseModel):
name: str
digit: str
@@ -26,7 +26,7 @@ concurrency_multiple_keys_workflow = hatchet.workflow(
name="ConcurrencyWorkflowManyKeys",
input_validator=WorkflowInput,
)
-# ‼️
+# !!
@concurrency_multiple_keys_workflow.task(


@@ -16,7 +16,7 @@ DIGIT_MAX_RUNS = 8
NAME_MAX_RUNS = 3
-# Multiple Concurrency Keys
+# > Multiple Concurrency Keys
class WorkflowInput(BaseModel):
name: str
digit: str
@@ -38,7 +38,7 @@ concurrency_workflow_level_workflow = hatchet.workflow(
),
],
)
-# ‼️
+# !!
@concurrency_workflow_level_workflow.task()


@@ -14,7 +14,7 @@ async def create_cron() -> None:
name="CronWorkflow", input_validator=DynamicCronInput
)
-# Create
+# > Create
cron_trigger = await dynamic_cron_workflow.aio_create_cron(
cron_name="customer-a-daily-report",
expression="0 12 * * *",
@@ -27,14 +27,14 @@ async def create_cron() -> None:
cron_trigger.metadata.id # the id of the cron trigger
# !!
-# List
+# > List
await hatchet.cron.aio_list()
# !!
-# Get
+# > Get
cron_trigger = await hatchet.cron.aio_get(cron_id=cron_trigger.metadata.id)
# !!
-# Delete
+# > Delete
await hatchet.cron.aio_delete(cron_id=cron_trigger.metadata.id)
# !!


@@ -13,7 +13,7 @@ dynamic_cron_workflow = hatchet.workflow(
name="CronWorkflow", input_validator=DynamicCronInput
)
-# Create
+# > Create
cron_trigger = dynamic_cron_workflow.create_cron(
cron_name="customer-a-daily-report",
expression="0 12 * * *",
@@ -27,14 +27,14 @@ cron_trigger = dynamic_cron_workflow.create_cron(
id = cron_trigger.metadata.id # the id of the cron trigger
# !!
-# List
+# > List
cron_triggers = hatchet.cron.list()
# !!
-# Get
+# > Get
cron_trigger = hatchet.cron.get(cron_id=cron_trigger.metadata.id)
# !!
-# Delete
+# > Delete
hatchet.cron.delete(cron_id=cron_trigger.metadata.id)
# !!


@@ -3,7 +3,7 @@ from hatchet_sdk import Context, EmptyModel, Hatchet
hatchet = Hatchet(debug=True)
-# Workflow Definition Cron Trigger
+# > Workflow Definition Cron Trigger
# Adding a cron trigger to a workflow is as simple
# as adding a `cron expression` to the `on_cron`
# prop of the workflow definition
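The hunk ends before the workflow definition the comment describes. A minimal sketch of a declarative cron trigger, assuming the workflow accepts a list of cron expressions via an `on_crons` parameter (the parameter name is an assumption; only the comment survives in the diff):

```python
from hatchet_sdk import Hatchet

hatchet = Hatchet(debug=True)

# Hypothetical declarative cron trigger: the workflow fires at noon every
# day (standard five-field cron syntax).
cron_workflow = hatchet.workflow(
    name="CronWorkflow",
    on_crons=["0 12 * * *"],
)
```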


@@ -4,7 +4,7 @@ from hatchet_sdk import Context, DurableContext, EmptyModel, Hatchet, UserEventC
hatchet = Hatchet(debug=True)
-# Create a durable workflow
+# > Create a durable workflow
durable_workflow = hatchet.workflow(name="DurableWorkflow")
# !!
@@ -12,7 +12,7 @@ durable_workflow = hatchet.workflow(name="DurableWorkflow")
ephemeral_workflow = hatchet.workflow(name="EphemeralWorkflow")
-# Add durable task
+# > Add durable task
EVENT_KEY = "durable-example:event"
SLEEP_TIME = 5


@@ -5,7 +5,7 @@ hatchet = Hatchet(debug=True)
EVENT_KEY = "user:update"
-# Durable Event
+# > Durable Event
@hatchet.durable_task(name="DurableEventTask")
async def durable_event_task(input: EmptyModel, ctx: DurableContext) -> None:
res = await ctx.aio_wait_for(
@@ -23,7 +23,7 @@ async def durable_event_task(input: EmptyModel, ctx: DurableContext) -> None:
async def durable_event_task_with_filter(
input: EmptyModel, ctx: DurableContext
) -> None:
-# Durable Event With Filter
+# > Durable Event With Filter
res = await ctx.aio_wait_for(
"event",
UserEventCondition(


@@ -5,7 +5,7 @@ from hatchet_sdk import DurableContext, EmptyModel, Hatchet
hatchet = Hatchet(debug=True)
-# Durable Sleep
+# > Durable Sleep
@hatchet.durable_task(name="DurableSleepTask")
async def durable_sleep_task(input: EmptyModel, ctx: DurableContext) -> None:
res = await ctx.aio_sleep_for(timedelta(seconds=5))


@@ -2,6 +2,6 @@ from hatchet_sdk import Hatchet
hatchet = Hatchet()
-# Event trigger
+# > Event trigger
hatchet.event.push("user:create", {})
-# ‼️
+# !!


@@ -2,9 +2,9 @@ from hatchet_sdk import Context, EmptyModel, Hatchet
hatchet = Hatchet()
-# Event trigger
+# > Event trigger
event_workflow = hatchet.workflow(name="EventWorkflow", on_events=["user:create"])
-# ‼️
+# !!
@event_workflow.task()


@@ -8,7 +8,7 @@ from hatchet_sdk import Context, Hatchet, TriggerWorkflowOptions
hatchet = Hatchet(debug=True)
-# FanoutParent
+# > FanoutParent
class ParentInput(BaseModel):
n: int = 100
@@ -42,10 +42,10 @@ async def spawn(input: ParentInput, ctx: Context) -> dict[str, Any]:
return {"results": result}
-# ‼️
+# !!
-# FanoutChild
+# > FanoutChild
@child_wf.task()
def process(input: ChildInput, ctx: Context) -> dict[str, str]:
print(f"child process {input.a}")
@@ -60,7 +60,7 @@ def process2(input: ChildInput, ctx: Context) -> dict[str, str]:
return {"status2": a + "2"}
-# ‼️
+# !!
child_wf.create_bulk_run_item()
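Both fanout hunks truncate before the bulk-spawn call itself. A minimal sketch of how the parent might fan out children, assuming `create_bulk_run_item` accepts an `input` argument and `aio_run_many` executes the batch (both method names appear in the diff; the exact signatures and the `parent_wf` handle are assumptions):

```python
from typing import Any

# Hypothetical body for the truncated spawn task: build one bulk-run item
# per child, then execute the whole batch concurrently.
@parent_wf.task()
async def spawn(input: ParentInput, ctx: Context) -> dict[str, Any]:
    child_runs = [
        child_wf.create_bulk_run_item(input=ChildInput(a=str(i)))
        for i in range(input.n)
    ]
    results = await child_wf.aio_run_many(child_runs)
    return {"results": results}
```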


@@ -0,0 +1,4 @@
from hatchet_sdk import Hatchet
# Initialize Hatchet client
hatchet = Hatchet()


@@ -1,4 +1,4 @@
-# Lifespan
+# > Lifespan
from typing import AsyncGenerator, cast
@@ -30,7 +30,7 @@ def main() -> None:
worker.start()
-# ‼️
+# !!
if __name__ == "__main__":
main()


@@ -9,7 +9,7 @@ from hatchet_sdk import Context, EmptyModel, Hatchet
hatchet = Hatchet(debug=True)
-# Use the lifespan in a task
+# > Use the lifespan in a task
class TaskOutput(BaseModel):
num_rows: int
external_ids: list[UUID]
@@ -59,7 +59,7 @@ async def async_lifespan_task(input: EmptyModel, ctx: Context) -> TaskOutput:
)
-# Define a lifespan
+# > Define a lifespan
class Lifespan(BaseModel):
model_config = ConfigDict(arbitrary_types_allowed=True)


@@ -1,4 +1,4 @@
-# RootLogger
+# > RootLogger
import logging
@@ -16,4 +16,4 @@ hatchet = Hatchet(
),
)
-# ‼️
+# !!


@@ -1,4 +1,4 @@
-# LoggingWorkflow
+# > LoggingWorkflow
import logging
import time
@@ -24,9 +24,9 @@ def root_logger(input: EmptyModel, ctx: Context) -> dict[str, str]:
return {"status": "success"}
-# ‼️
+# !!
-# ContextLogger
+# > ContextLogger
@logging_workflow.task()
@@ -40,4 +40,4 @@ def context_logger(input: EmptyModel, ctx: Context) -> dict[str, str]:
return {"status": "success"}
-# ‼️
+# !!


@@ -4,7 +4,7 @@ from hatchet_sdk import Context, EmptyModel, Hatchet
hatchet = Hatchet()
-# SlotRelease
+# > SlotRelease
slot_release_workflow = hatchet.workflow(name="SlotReleaseWorkflow")
@@ -21,4 +21,4 @@ def step1(input: EmptyModel, ctx: Context) -> dict[str, str]:
return {"status": "success"}
-# ‼️
+# !!


@@ -15,7 +15,7 @@ async def process_image(image_url: str, filters: List[str]) -> Dict[str, Any]:
return {"url": image_url, "size": 100, "format": "png"}
-# Before (Mergent)
+# > Before (Mergent)
async def process_image_task(request: Any) -> Dict[str, Any]:
image_url = request.json["image_url"]
filters = request.json["filters"]
@@ -30,7 +30,7 @@ async def process_image_task(request: Any) -> Dict[str, Any]:
# !!
-# After (Hatchet)
+# > After (Hatchet)
class ImageProcessInput(BaseModel):
image_url: str
filters: List[str]
@@ -68,7 +68,7 @@ async def image_processor(input: ImageProcessInput, ctx: Context) -> ImageProces
async def run() -> None:
-# Running a task (Mergent)
+# > Running a task (Mergent)
headers: Mapping[str, str] = {
"Authorization": "Bearer <token>",
"Content-Type": "application/json",
@@ -98,7 +98,7 @@ async def run() -> None:
print(f"Error: {e}")
# !!
-# Running a task (Hatchet)
+# > Running a task (Hatchet)
result = await image_processor.aio_run(
ImageProcessInput(image_url="https://example.com/image.png", filters=["blur"])
)
@@ -109,7 +109,7 @@ async def run() -> None:
async def schedule() -> None:
-# Scheduling tasks (Mergent)
+# > Scheduling tasks (Mergent)
options = {
# same options as before
"json": {
@@ -121,7 +121,7 @@ async def schedule() -> None:
print(options)
-# Scheduling tasks (Hatchet)
+# > Scheduling tasks (Hatchet)
# Schedule the task to run at a specific time
run_at = datetime.now() + timedelta(days=1)
await image_processor.aio_schedule(


@@ -6,7 +6,7 @@ hatchet = Hatchet(debug=True)
non_retryable_workflow = hatchet.workflow(name="NonRetryableWorkflow")
-# Non-retryable task
+# > Non-retryable task
@non_retryable_workflow.task(retries=1)
def should_not_retry(input: EmptyModel, ctx: Context) -> None:
raise NonRetryableException("This task should not retry")


@@ -7,7 +7,7 @@ hatchet = Hatchet(debug=True)
ERROR_TEXT = "step1 failed"
-# OnFailure Step
+# > OnFailure Step
# This workflow will fail because the step will throw an error
# we define an onFailure step to handle this case
@@ -32,10 +32,10 @@ def on_failure(input: EmptyModel, ctx: Context) -> dict[str, str]:
return {"status": "success"}
-# ‼️
+# !!
-# OnFailure With Details
+# > OnFailure With Details
# We can access the failure details in the onFailure step
# via the context method
@@ -62,7 +62,7 @@ def details_on_failure(input: EmptyModel, ctx: Context) -> dict[str, str]:
raise Exception("unexpected failure")
-# ‼️
+# !!
def main() -> None:


@@ -5,7 +5,7 @@ from hatchet_sdk import ScheduleTriggerWorkflowOptions, TriggerWorkflowOptions
priority_workflow.run_no_wait()
-# Runtime priority
+# > Runtime priority
low_prio = priority_workflow.run_no_wait(
options=TriggerWorkflowOptions(
## 👀 Adding priority and key to metadata to show them in the dashboard
@@ -23,7 +23,7 @@ high_prio = priority_workflow.run_no_wait(
)
# !!
-# Scheduled priority
+# > Scheduled priority
schedule = priority_workflow.schedule(
run_at=datetime.now() + timedelta(minutes=1),
options=ScheduleTriggerWorkflowOptions(priority=3),
@@ -36,7 +36,7 @@ cron = priority_workflow.create_cron(
)
# !!
-# Default priority
+# > Default priority
low_prio = priority_workflow.run_no_wait(
options=TriggerWorkflowOptions(
## 👀 Adding priority and key to metadata to show them in the dashboard
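Both `run_no_wait` hunks cut off inside the `TriggerWorkflowOptions` constructor. A minimal sketch of the likely shape, assuming `priority` and `additional_metadata` fields (the field names are assumptions, inferred from `ScheduleTriggerWorkflowOptions(priority=3)` and the "priority and key to metadata" comment in this diff):

```python
from hatchet_sdk import TriggerWorkflowOptions

# Hypothetical completion: a low-priority run tagged with metadata so it
# shows up distinguishably in the dashboard.
low_prio = priority_workflow.run_no_wait(
    options=TriggerWorkflowOptions(
        priority=1,
        additional_metadata={"priority": "low", "key": "low-prio-run"},
    )
)
```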


@@ -10,7 +10,7 @@ from hatchet_sdk import (
hatchet = Hatchet(debug=True)
-# Default priority
+# > Default priority
DEFAULT_PRIORITY = 1
SLEEP_TIME = 0.25


@@ -0,0 +1,24 @@
certs/
# Environments
.env
env/
venv/
.venv/
__pycache__/
*.py[cod]
*$py.class
.Python
.pytest_cache/
.coverage
htmlcov/
# Distribution / packaging
dist/
build/
*.egg-info/
*.egg
.DS_Store
index/index.json


@@ -0,0 +1,49 @@
## Hatchet Python Quickstart
This is an example project demonstrating how to use Hatchet with Python. For detailed setup instructions, see the [Hatchet Setup Guide](https://docs.hatchet.run/home/setup).
## Prerequisites
Before running this project, make sure you have the following:
1. [Python v3.10 or higher](https://www.python.org/downloads/)
2. [Poetry](https://python-poetry.org/docs/#installation) for dependency management
## Setup
1. Clone the repository:
```bash
git clone https://github.com/hatchet-dev/hatchet-python-quickstart.git
cd hatchet-python-quickstart
```
2. Set the required environment variable `HATCHET_CLIENT_TOKEN` to the token created in the [Getting Started Guide](https://docs.hatchet.run/home/hatchet-cloud-quickstart):
```bash
export HATCHET_CLIENT_TOKEN=<token>
```
> Note: If you're self-hosting, you may need to set `HATCHET_CLIENT_TLS_STRATEGY=none` to disable TLS.
3. Install the project dependencies:
```bash
poetry install
```
### Running an example
1. Start a Hatchet worker by running the following command:
```shell
poetry run python src/worker.py
```
2. To run the example workflow, open a new terminal and run the following command:
```shell
poetry run python src/run.py
```
This will trigger the workflow on the worker running in the first terminal and print the output to the second terminal.


@@ -0,0 +1,4 @@
from hatchet_sdk import Hatchet
# Initialize Hatchet client
hatchet = Hatchet()

sdks/python/examples/quickstart/poetry.lock (generated, 1255 lines): diff suppressed because it is too large.


@@ -0,0 +1,20 @@
[tool.poetry]
name = "hatchet-python-quickstart"
version = "0.1.0"
description = "Simple Setup to Run Hatchet Workflows"
authors = ["gabriel ruttner <gabe@hatchet.run>"]
readme = "README.md"
package-mode = false
[tool.poetry.dependencies]
python = "^3.10"
hatchet-sdk = "1.0.0a1"
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
[tool.poetry.scripts]
simple = "src.run:main"
worker = "src.worker:main"


@@ -0,0 +1,16 @@
import asyncio
from .workflows.first_task import SimpleInput, first_task
async def main() -> None:
result = await first_task.aio_run(SimpleInput(message="Hello World!"))
print(
"Finished running task, and got the transformed message! The transformed message is:",
result.transformed_message,
)
if __name__ == "__main__":
asyncio.run(main())


@@ -0,0 +1,15 @@
from .hatchet_client import hatchet
from .workflows.first_task import first_task
def main() -> None:
worker = hatchet.worker(
"first-worker",
slots=10,
workflows=[first_task],
)
worker.start()
if __name__ == "__main__":
main()


@@ -0,0 +1,21 @@
from pydantic import BaseModel
from hatchet_sdk import Context
from ..hatchet_client import hatchet
class SimpleInput(BaseModel):
message: str
class SimpleOutput(BaseModel):
transformed_message: str
# Declare the task to run
@hatchet.task(name="first-task", input_validator=SimpleInput)
def first_task(input: SimpleInput, ctx: Context) -> SimpleOutput:
print("first-task task called")
return SimpleOutput(transformed_message=input.message.lower())


@@ -6,7 +6,7 @@ from hatchet_sdk.rate_limit import RateLimit, RateLimitDuration
hatchet = Hatchet(debug=True)
-# Workflow
+# > Workflow
class RateLimitInput(BaseModel):
user_id: str
@@ -18,7 +18,7 @@ rate_limit_workflow = hatchet.workflow(
# !!
-# Static
+# > Static
RATE_LIMIT_KEY = "test-limit"
@@ -29,7 +29,7 @@ def step_1(input: RateLimitInput, ctx: Context) -> None:
# !!
-# Dynamic
+# > Dynamic
@rate_limit_workflow.task(


@@ -6,16 +6,16 @@ simple_workflow = hatchet.workflow(name="SimpleRetryWorkflow")
backoff_workflow = hatchet.workflow(name="BackoffWorkflow")
-# Simple Step Retries
+# > Simple Step Retries
@simple_workflow.task(retries=3)
def always_fail(input: EmptyModel, ctx: Context) -> dict[str, str]:
raise Exception("simple task failed")
-# ‼️
+# !!
-# Retries with Count
+# > Retries with Count
@simple_workflow.task(retries=3)
def fail_twice(input: EmptyModel, ctx: Context) -> dict[str, str]:
if ctx.retry_count < 2:
@@ -24,10 +24,10 @@ def fail_twice(input: EmptyModel, ctx: Context) -> dict[str, str]:
return {"status": "success"}
-# ‼️
+# !!
-# Retries with Backoff
+# > Retries with Backoff
@backoff_workflow.task(
retries=10,
# 👀 Maximum number of seconds to wait between retries
@@ -43,7 +43,7 @@ def backoff_task(input: EmptyModel, ctx: Context) -> dict[str, str]:
return {"status": "success"}
-# ‼️
+# !!
def main() -> None:
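The backoff hunk truncates the decorator arguments after `retries=10`. A minimal sketch of the likely shape, assuming `backoff_max_seconds` and `backoff_factor` parameters (the parameter names are assumptions; only the "maximum seconds to wait between retries" comment survives in the diff):

```python
# Hypothetical completion of the truncated decorator: exponential backoff
# between attempts, capped at 60 seconds (parameter names are assumed).
@backoff_workflow.task(
    retries=10,
    backoff_max_seconds=60,
    backoff_factor=2.0,
)
def backoff_task(input: EmptyModel, ctx: Context) -> dict[str, str]:
    if ctx.retry_count < 3:
        raise Exception("backoff task failed")
    return {"status": "success"}
```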


@@ -6,7 +6,7 @@ hatchet = Hatchet()
async def create_scheduled() -> None:
-# Create
+# > Create
scheduled_run = await hatchet.scheduled.aio_create(
workflow_name="simple-workflow",
trigger_at=datetime.now() + timedelta(seconds=10),
@@ -21,15 +21,15 @@ async def create_scheduled() -> None:
scheduled_run.metadata.id # the id of the scheduled run trigger
# !!
-# Delete
+# > Delete
await hatchet.scheduled.aio_delete(scheduled_id=scheduled_run.metadata.id)
# !!
-# List
+# > List
await hatchet.scheduled.aio_list()
# !!
-# Get
+# > Get
scheduled_run = await hatchet.scheduled.aio_get(
scheduled_id=scheduled_run.metadata.id
)


@@ -4,7 +4,7 @@ from hatchet_sdk import Hatchet
hatchet = Hatchet()
-# Create
+# > Create
scheduled_run = hatchet.scheduled.create(
workflow_name="simple-workflow",
trigger_at=datetime.now() + timedelta(seconds=10),
@@ -19,14 +19,14 @@ scheduled_run = hatchet.scheduled.create(
id = scheduled_run.metadata.id # the id of the scheduled run trigger
# !!
-# Delete
+# > Delete
hatchet.scheduled.delete(scheduled_id=scheduled_run.metadata.id)
# !!
-# List
+# > List
scheduled_runs = hatchet.scheduled.list()
# !!
-# Get
+# > Get
scheduled_run = hatchet.scheduled.get(scheduled_id=scheduled_run.metadata.id)
# !!


@@ -1,4 +1,4 @@
-# Simple
+# > Simple
from hatchet_sdk import Context, EmptyModel, Hatchet
@@ -15,7 +15,7 @@ def main() -> None:
worker.start()
-# ‼️
+# !!
if __name__ == "__main__":
main()


@@ -8,7 +8,7 @@ from hatchet_sdk import (
hatchet = Hatchet(debug=True)
-# StickyWorker
+# > StickyWorker
sticky_workflow = hatchet.workflow(
@@ -28,9 +28,9 @@ def step1b(input: EmptyModel, ctx: Context) -> dict[str, str | None]:
return {"worker": ctx.worker.id()}
-# ‼️
+# !!
-# StickyChild
+# > StickyChild
sticky_child_workflow = hatchet.workflow(
name="StickyChildWorkflow", sticky=StickyStrategy.SOFT
@@ -53,7 +53,7 @@ def child(input: EmptyModel, ctx: Context) -> dict[str, str | None]:
return {"worker": ctx.worker.id()}
-# ‼️
+# !!
def main() -> None:


@@ -4,7 +4,7 @@ from hatchet_sdk import Context, EmptyModel, Hatchet
hatchet = Hatchet(debug=True)
-# Streaming
+# > Streaming
streaming_workflow = hatchet.workflow(name="StreamingWorkflow")
@@ -21,7 +21,7 @@ def main() -> None:
worker.start()
-# ‼️
+# !!
if __name__ == "__main__":
main()


@@ -5,15 +5,15 @@ from hatchet_sdk import Context, EmptyModel, Hatchet, TaskDefaults
hatchet = Hatchet(debug=True)
-# ScheduleTimeout
+# > ScheduleTimeout
timeout_wf = hatchet.workflow(
name="TimeoutWorkflow",
task_defaults=TaskDefaults(execution_timeout=timedelta(minutes=2)),
)
-# ‼️
+# !!
-# ExecutionTimeout
+# > ExecutionTimeout
# 👀 Specify an execution timeout on a task
@timeout_wf.task(
execution_timeout=timedelta(seconds=4), schedule_timeout=timedelta(minutes=10)
@@ -23,12 +23,12 @@ def timeout_task(input: EmptyModel, ctx: Context) -> dict[str, str]:
return {"status": "success"}
-# ‼️
+# !!
refresh_timeout_wf = hatchet.workflow(name="RefreshTimeoutWorkflow")
-# RefreshTimeout
+# > RefreshTimeout
@refresh_timeout_wf.task(execution_timeout=timedelta(seconds=4))
def refresh_task(input: EmptyModel, ctx: Context) -> dict[str, str]:
@@ -38,7 +38,7 @@ def refresh_task(input: EmptyModel, ctx: Context) -> dict[str, str]:
return {"status": "success"}
-# ‼️
+# !!
def main() -> None:


@@ -1,4 +1,4 @@
-# Create a workflow
+# > Create a workflow
import random
from datetime import timedelta
@@ -31,7 +31,7 @@ task_condition_workflow = hatchet.workflow(name="TaskConditionWorkflow")
# !!
-# Add base task
+# > Add base task
@task_condition_workflow.task()
def start(input: EmptyModel, ctx: Context) -> StepOutput:
return StepOutput(random_number=random.randint(1, 100))
@@ -40,7 +40,7 @@ def start(input: EmptyModel, ctx: Context) -> StepOutput:
# !!
-# Add wait for sleep
+# > Add wait for sleep
@task_condition_workflow.task(
parents=[start], wait_for=[SleepCondition(timedelta(seconds=10))]
)
@@ -51,7 +51,7 @@ def wait_for_sleep(input: EmptyModel, ctx: Context) -> StepOutput:
# !!
-# Add skip on event
+# > Add skip on event
@task_condition_workflow.task(
parents=[start],
wait_for=[SleepCondition(timedelta(seconds=30))],
@@ -64,7 +64,7 @@ def skip_on_event(input: EmptyModel, ctx: Context) -> StepOutput:
# !!
-# Add branching
+# > Add branching
@task_condition_workflow.task(
parents=[wait_for_sleep],
skip_if=[
@@ -94,7 +94,7 @@ def right_branch(input: EmptyModel, ctx: Context) -> StepOutput:
# !!
-# Add wait for event
+# > Add wait for event
@task_condition_workflow.task(
parents=[start],
wait_for=[
@@ -111,7 +111,7 @@ def wait_for_event(input: EmptyModel, ctx: Context) -> StepOutput:
# !!
-# Add sum
+# > Add sum
@task_condition_workflow.task(
parents=[
start,


@@ -1,4 +1,4 @@
-# WorkflowRegistration
+# > WorkflowRegistration
from hatchet_sdk import Hatchet
@@ -26,7 +26,7 @@ def main() -> None:
worker.start()
-# ‼️
+# !!
if __name__ == "__main__":
main()