mirror of
https://github.com/hatchet-dev/hatchet.git
synced 2026-05-12 13:18:43 -05:00
a6650ab84c
* refactor: overloads for run methods, deprecate _no_wait flavors * refactor: same thing for run_many flavors * fix: use gather_max_concurrency for gathering run results * refactor: deprecate a bunch of stuff on the context and core hatchet client * refactor: runs client deprecations * refactor: add deprecation warning to go duration string durations * refactor: durable tasks must be async * chore: changelog * fix: copilot comments * fix: couple more * chore: rm `debug=True` from all the examples * chore: more debug params * fix: more deprecations * fix: more warnings * fix: non-utc timezones * chore: deprecate more internal stuff * fix: a bunch more internal-only stuff, remove non-v2 listener logic * fix: test * chore: make a bunch more things internal * feat: priority enum * refactor: top-level `types` directory * refactor: start reworking labels * fix: some type checker issues * fix: rm transform method in favor of instance method * fix: internal worker label types * fix: more types * refactor: finish labels * fix: labels * chore: gen * fix: rm internal glue pydantic model * fix: removed `owned_loop`, register workflows on worker start instead of init * fix: deprecate ctx getter in favor of property * refactor: more label cleanup, prepare to remove worker context * fix: more deprecations * refactor: get rid of a pydantic a few places we don't need validation * refactor: plan to remove `BulkPushEventOptions` * chore: changelog * chore: changelog * refactor: trigger types * fix: pydantic model default * fix: instrumentor types * refactor: add `seen_at` to event * refactor: remove some more protobuf types * fix: rm unneeded ts_to_iso * refactor: clean up more examples * fix: more warnings * chore: gen * chore: more warnings * fix: one more * fix: warning, namespace * fix: linters * fix: double import * fix: ugh, cursor * fix: clean up a bunch of suboptimal tests * fix: overload signatures * chore: gen * chore: revert opts change * chore: one more revert * feat: 
start reworking option passing to remove pydantic models * refactor: worker opt * fix: type cleanup * refactor: keep working out signature details * fix: changelog * fix: deprecate some streaming methods * fix: linters * fix: rebase * chore: rm some unused stuff * chore: rm more unused stuff * fix: rm more uses of `options` * fix: more deprecation warnings * fix: instrumentor wrapping * fix: add test for instrumentor signature * chore: deprecate upsert labels on the worker context thingy * fix: deprecate more stuff on the worker context * feat: add `worker_labels_dict` property * fix: label types for workers * chore: update changelog * fix: version * refactor: durable_eviction -> eviction_policy * fix: lint * fix: instrumentor not passing options properly * fix: un-remove * fix: priority * chore: version * fix: improve warning log
92 lines
2.1 KiB
Python
92 lines
2.1 KiB
Python
from collections.abc import AsyncGenerator
|
|
from typing import cast
|
|
from uuid import UUID
|
|
|
|
from psycopg_pool import ConnectionPool
|
|
from pydantic import BaseModel, ConfigDict
|
|
|
|
from hatchet_sdk import Context, EmptyModel, Hatchet
|
|
|
|
# Module-level Hatchet client; used below to declare the workflow and the worker.
hatchet = Hatchet()
|
|
|
|
|
|
# > Use the lifespan in a task
class TaskOutput(BaseModel):
    """Result returned by both lifespan example tasks below."""

    # Number of rows fetched from the lookup table (LIMIT 5, so 0..5).
    num_rows: int
    # First column of each fetched row — presumably the external id; TODO confirm table schema.
    external_ids: list[UUID]
|
|
|
|
|
|
# Workflow that the two lifespan-aware tasks below are registered against.
lifespan_workflow = hatchet.workflow(name="LifespanWorkflow")
|
|
|
|
|
|
@lifespan_workflow.task()
def sync_lifespan_task(input: EmptyModel, ctx: Context) -> TaskOutput:
    """Read up to five rows from the OLAP lookup table via the pooled
    connection that the worker lifespan exposes through ``ctx.lifespan``.
    """
    # ctx.lifespan is typed loosely by the SDK; narrow it to our Lifespan model.
    state = cast(Lifespan, ctx.lifespan)

    with state.pool.connection() as conn:
        cursor = conn.execute("SELECT * FROM v1_lookup_table_olap LIMIT 5;")
        fetched = cursor.fetchall()

    for record in fetched:
        print(record)

    print("executed sync task with lifespan", ctx.lifespan)

    return TaskOutput(
        num_rows=len(fetched),
        external_ids=[cast(UUID, record[0]) for record in fetched],
    )
|
|
|
|
|
|
|
|
|
|
@lifespan_workflow.task()
async def async_lifespan_task(input: EmptyModel, ctx: Context) -> TaskOutput:
    """Async variant of the lifespan example task: same query, same output.

    NOTE(review): the psycopg ConnectionPool here is synchronous, so these
    calls block the event loop while they run — acceptable for an example,
    but an AsyncConnectionPool would be the non-blocking choice.
    """
    state = cast(Lifespan, ctx.lifespan)

    with state.pool.connection() as conn:
        cursor = conn.execute("SELECT * FROM v1_lookup_table_olap LIMIT 5;")
        fetched = cursor.fetchall()

    for record in fetched:
        print(record)

    print("executed async task with lifespan", ctx.lifespan)

    return TaskOutput(
        num_rows=len(fetched),
        external_ids=[cast(UUID, record[0]) for record in fetched],
    )
|
|
|
|
|
|
# > Define a lifespan
class Lifespan(BaseModel):
    """Shared state yielded by the worker lifespan generator and made
    available to every task through ``ctx.lifespan``.
    """

    # ConnectionPool is not a pydantic-native type, so arbitrary types must be allowed.
    model_config = ConfigDict(arbitrary_types_allowed=True)

    # Example of plain data carried alongside the pool (always "bar" here).
    foo: str
    # Synchronous psycopg connection pool shared by all tasks on this worker.
    pool: ConnectionPool
|
|
|
|
|
|
async def lifespan() -> AsyncGenerator[Lifespan, None]:
    """Worker lifespan: open a connection pool for the worker's lifetime.

    Runs once at worker startup; everything after ``yield`` runs at shutdown,
    and the ``with`` block closes the pool on the way out.
    """
    print("Running lifespan!")

    pool = ConnectionPool("postgres://hatchet:hatchet@localhost:5431/hatchet")
    with pool:
        # Hand the shared state to the worker; tasks see it as ctx.lifespan.
        yield Lifespan(
            foo="bar",
            pool=pool,
        )

    print("Cleaning up lifespan!")
|
|
|
|
|
|
# Single-slot worker running the example workflow; `lifespan` is invoked once
# per worker process to set up (and later tear down) the shared pool.
worker = hatchet.worker(
    "test-worker", slots=1, workflows=[lifespan_workflow], lifespan=lifespan
)
|
|
|
|
|
|
def main() -> None:
    """Start the worker; blocks until the worker is shut down."""
    worker.start()


if __name__ == "__main__":
    main()
|