Files
hatchet/sdks/python/conftest.py
Matt Kaye 993817b049 [Python] Single listener base class + bug fixes + refactors (#1470)
* fix: register durable steps and workflows separately

* chore: initial copy of pooled listener

* feat: initial generic impl

* feat: use pooled listener for wf run listener

* refactor: move listeners to subdir

* feat: refactor durable event listener

* fix: bug

* feat: share single pooled workflow listener and event listener everywhere

* cruft: rm hatchet fixture

* fix: rebase issue

* feat: remove asyncio api client in favor of sync one

* chore: minor version

* proposal: crazy hack idea to make the workflow run listener work

* fix: sleeps and error handling

* Revert "cruft: rm hatchet fixture"

This reverts commit b75f625e6ccec095e8c4e294d6727db166796411.

* fix: set timeout

* fix: rm pytest-timeout

* fix: rm retry

* fix: use v1 by default

* fix: try removing retry state

* fix: try using async client?

* fix: try running sequentially

* debug: loop

* debug: maybe it's this?

* fix: lint

* fix: re-remove unused fixtures

* fix: lazily create clients in admin client

* fix: default

* fix: lazily initialize dispatcher client

* fix: hint

* fix: no. way.

* feat: add back retries in ci

* fix: clients + imports

* fix: loop scope

* debug: try running skipped tests in ci again

* Revert "debug: try running skipped tests in ci again"

This reverts commit 8d9e18150e5207ee6051d8df8a6fe2a7504c722e.

* fix: rm duped code

* refactor: rename everything as `to_proto`

* refactor: removals of `namespace` being passed around

* fix: task output stupidity

* feat: add deprecation warning

* fix: remove more unused code

* feat: mix sync and async in dag example

* fix: autouse

* fix: more input types

* feat: remove ability to pass in loop

* fix: overload key gen
2025-04-10 08:18:17 -04:00

85 lines
2.2 KiB
Python

import logging
import os
import subprocess
import time
from io import BytesIO
from threading import Thread
from typing import AsyncGenerator, Callable, Generator
import psutil
import pytest
import pytest_asyncio
import requests
from hatchet_sdk import Hatchet
@pytest_asyncio.fixture(scope="session", loop_scope="session")
async def hatchet() -> AsyncGenerator[Hatchet, None]:
    """Session-scoped Hatchet client with debug logging enabled."""
    client = Hatchet(debug=True)
    yield client
def wait_for_worker_health() -> bool:
    """Block until the background worker's healthcheck endpoint is up.

    Polls ``http://localhost:8001/health`` roughly once per second and
    returns ``True`` as soon as the endpoint answers with a successful
    status code.

    Returns:
        True once the worker reports healthy.

    Raises:
        Exception: if the worker does not become healthy within the
            allotted number of attempts.
    """
    max_healthcheck_attempts = 25

    for _ in range(max_healthcheck_attempts):
        try:
            response = requests.get("http://localhost:8001/health", timeout=5)
            # A reachable-but-failing endpoint (e.g. a 500) is not healthy;
            # the previous version treated any response as success.
            response.raise_for_status()
            return True
        except Exception:
            time.sleep(1)

    raise Exception(
        f"Worker failed to start within {max_healthcheck_attempts} seconds"
    )
def log_output(pipe: BytesIO, log_func: Callable[[str], None]) -> None:
    """Forward each line from a subprocess pipe to the given log function.

    Args:
        pipe: binary stream (e.g. ``proc.stdout``) read line-by-line until EOF.
        log_func: callback invoked with each decoded, stripped line.

    The original implementation ignored ``log_func`` and used ``print``,
    so worker stderr was never routed to ``logging.error``.
    """
    for raw_line in iter(pipe.readline, b""):
        log_func(raw_line.decode().strip())
def _terminate_process_tree(proc: subprocess.Popen[bytes]) -> None:
    """Terminate *proc* and all of its children, force-killing stragglers.

    Children are terminated before the parent so the whole poetry/python
    process tree is reaped; anything still alive after 5 seconds is killed.
    """
    parent = psutil.Process(proc.pid)
    children = parent.children(recursive=True)
    for child in children:
        child.terminate()
    parent.terminate()
    _, alive = psutil.wait_procs([parent] + children, timeout=5)
    for p in alive:
        logging.warning(f"Force killing process {p.pid}")
        p.kill()


@pytest.fixture(scope="session", autouse=True)
def worker() -> Generator[subprocess.Popen[bytes], None, None]:
    """Run the example worker as a background subprocess for the session.

    Spawns ``examples/worker.py`` under poetry, mirrors its stdout/stderr
    into the test logs, waits for its healthcheck, then yields the process.
    The entire process tree is torn down when the session ends.
    """
    command = ["poetry", "run", "python", "examples/worker.py"]
    logging.info(f"Starting background worker: {' '.join(command)}")

    proc = subprocess.Popen(
        command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=os.environ.copy()
    )

    # Fail fast if the process died immediately (e.g. bad command).
    if proc.poll() is not None:
        raise Exception(f"Worker failed to start with return code {proc.returncode}")

    Thread(target=log_output, args=(proc.stdout, logging.info), daemon=True).start()
    Thread(target=log_output, args=(proc.stderr, logging.error), daemon=True).start()

    try:
        wait_for_worker_health()
    except Exception:
        # The original leaked the subprocess when the healthcheck timed out
        # (cleanup only ran after a successful yield); reap it before
        # re-raising so failed sessions don't leave orphaned workers.
        _terminate_process_tree(proc)
        raise

    yield proc

    logging.info("Cleaning up background worker")
    _terminate_process_tree(proc)