Merge branch 'main' into feat-durable-execution

This commit is contained in:
mrkaye97
2026-02-16 07:59:50 -05:00
728 changed files with 90070 additions and 2736 deletions

View File

@@ -33,6 +33,37 @@ jobs:
working-directory: frontend/docs
run: npm run lint:check
search-quality:
runs-on: ubicloud-standard-2
steps:
- name: Clone repository
uses: actions/checkout@v6
- name: Setup pnpm
uses: pnpm/action-setup@v4
with:
version: 10.16.1
run_install: false
- name: Setup Node.js
uses: actions/setup-node@v6
with:
cache: pnpm
cache-dependency-path: frontend/docs/pnpm-lock.yaml
- name: Install dependencies
working-directory: frontend/docs
run: pnpm install --frozen-lockfile
- uses: actions/setup-python@v6
with:
python-version: '3.14'
- name: Generate snippets
working-directory: frontend/snippets
run: python3 generate.py
- name: Generate search index
working-directory: frontend/docs
run: pnpm run generate-llms
- name: Run search quality tests
working-directory: frontend/docs
run: pnpm run test-search
build:
runs-on: ubicloud-standard-2
steps:

View File

@@ -11,12 +11,27 @@ jobs:
- name: Checkout repository
uses: actions/checkout@v6
- name: Install Task
uses: arduino/setup-task@v2
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Setup Go
uses: actions/setup-go@v6
with:
go-version: "1.25"
- name: Setup pnpm
uses: pnpm/action-setup@v4
with:
version: 10.16.1
run_install: false
- uses: actions/setup-python@v6
with:
python-version: '3.14'
- name: Generate snippets
working-directory: frontend/snippets
run: python3 generate.py
run: task install-dependencies pre-commit-install generate-docs -v
- name: Check for changes in examples directory
id: verify-changed-files

View File

@@ -22,7 +22,7 @@ jobs:
./
scan-pr:
if: ${{ github.event_name == 'pull_request' }}
uses: "google/osv-scanner-action/.github/workflows/osv-scanner-reusable-pr.yml@v2.3.2"
uses: "google/osv-scanner-action/.github/workflows/osv-scanner-reusable-pr.yml@v2.3.3"
with:
scan-args: |-
-r

View File

@@ -90,7 +90,6 @@ jobs:
run: |
export DATABASE_URL="postgresql://hatchet:hatchet@127.0.0.1:5431/hatchet"
go run ./cmd/hatchet-migrate
task generate-go
- name: Setup
working-directory: .

223
.github/workflows/sdk-ruby.yml vendored Normal file
View File

@@ -0,0 +1,223 @@
# CI for the Ruby SDK: lint, integration-test against a locally booted
# Hatchet stack, and publish the gem when the version changes on main.
name: ruby
on:
  workflow_dispatch:
  pull_request:
    paths:
      - ".github/**"
      - "api/**"
      - "api-contracts/**"
      - "internal/**"
      - "pkg/**"
      - "sdks/ruby/**"
  push:
    branches:
      - main
    paths:
      - "sdks/ruby/**"
# All `run` steps execute from the SDK source dir unless a step overrides it.
defaults:
  run:
    working-directory: ./sdks/ruby/src
jobs:
  lint:
    runs-on: ubicloud-standard-4
    steps:
      - name: Checkout code
        uses: actions/checkout@v6
      - name: Set up Ruby
        uses: ruby/setup-ruby@v1
        with:
          ruby-version: "3.2"
          bundler-cache: true
          working-directory: ./sdks/ruby/src
      - name: Run RuboCop
        run: bundle exec rubocop
      - name: Run RBS validate
        run: rbs -I sig validate
      - name: Test gem build
        run: gem build hatchet-sdk.gemspec
  test:
    runs-on: ubicloud-standard-4
    strategy:
      matrix:
        # PRs test only the minimum supported Ruby; pushes to main test all.
        ruby-version: ${{ github.event_name == 'pull_request' && fromJSON('["3.2"]') || fromJSON('["3.2", "3.3"]') }}
        optimistic-scheduling: ["true", "false"]
    timeout-minutes: 20
    steps:
      - uses: actions/checkout@v6
      - name: Install Protoc
        uses: arduino/setup-protoc@v3
        with:
          version: "25.1"
          repo-token: ${{ secrets.GITHUB_TOKEN }}
      - name: Install Task
        uses: arduino/setup-task@v2
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
      - name: Setup Go
        uses: actions/setup-go@v6
        with:
          go-version: "1.25"
      - name: Start Docker dependencies
        working-directory: .
        run: docker compose up -d
      - name: Generate
        working-directory: .
        run: |
          export DATABASE_URL="postgresql://hatchet:hatchet@127.0.0.1:5431/hatchet"
          go run ./cmd/hatchet-migrate
      - name: Setup
        working-directory: .
        # Boot engine and API in the background; sleep gives them time to bind.
        run: |
          export SEED_DEVELOPMENT=true
          export SERVER_PORT=8080
          export SERVER_URL=http://localhost:8080
          export SERVER_AUTH_COOKIE_DOMAIN=localhost
          export SERVER_AUTH_COOKIE_INSECURE=true
          export SERVER_DEFAULT_ENGINE_VERSION=V1
          export SERVER_MSGQUEUE_RABBITMQ_URL="amqp://user:password@localhost:5672/"
          export SERVER_OPTIMISTIC_SCHEDULING_ENABLED=${{ matrix.optimistic-scheduling }}
          go run ./cmd/hatchet-admin quickstart
          go run ./cmd/hatchet-engine --config ./generated/ > engine.log 2>&1 &
          go run ./cmd/hatchet-api --config ./generated/ > api.log 2>&1 &
          sleep 30
      - name: Set up Ruby ${{ matrix.ruby-version }}
        uses: ruby/setup-ruby@v1
        with:
          ruby-version: ${{ matrix.ruby-version }}
          bundler-cache: true
          working-directory: ./sdks/ruby/src
      - name: Display Ruby version
        run: ruby -v
      - name: Generate Env File
        working-directory: .
        run: |
          echo "HATCHET_CLIENT_TOKEN=$(go run ./cmd/hatchet-admin token create --config ./generated/ --tenant-id 707d0855-80ab-4e1f-a156-f1c4546cbf52)" >> $GITHUB_ENV
          echo "HATCHET_CLIENT_TLS_ROOT_CA_FILE=../../../certs/ca.cert" >> $GITHUB_ENV
          echo "HATCHET_CLIENT_WORKER_HEALTHCHECK_ENABLED=true" >> $GITHUB_ENV
      - name: Set HATCHET_CLIENT_NAMESPACE
        # Namespace per Ruby version + commit so matrix runs don't collide.
        run: |
          RUBY_VER=$(ruby -e "puts \"rb#{RUBY_VERSION.gsub('.','')[0..1]}\"")
          SHORT_SHA=$(git rev-parse --short HEAD)
          echo "HATCHET_CLIENT_NAMESPACE=${RUBY_VER}-${SHORT_SHA}" >> $GITHUB_ENV
      - name: Run unit tests
        run: |
          echo "Using HATCHET_CLIENT_NAMESPACE: $HATCHET_CLIENT_NAMESPACE"
          bundle exec rspec --format documentation --tag ~integration
      - name: Run integration tests
        run: bundle exec rspec spec/integration/ --format documentation --tag integration
      - name: Set up Ruby for examples
        uses: ruby/setup-ruby@v1
        with:
          ruby-version: ${{ matrix.ruby-version }}
          bundler-cache: true
          working-directory: ./sdks/ruby/examples
      - name: Start example worker
        working-directory: ./sdks/ruby/examples
        run: bundle exec ruby worker.rb > worker.log 2>&1 &
      - name: Wait for worker health
        # Run from the repo root: the default working-directory is
        # ./sdks/ruby/src, from which the worker.log path below would
        # not resolve.
        working-directory: .
        run: |
          for i in $(seq 1 30); do
            if curl -s http://localhost:8001/health > /dev/null 2>&1; then
              echo "Worker is healthy after ${i}s"
              exit 0
            fi
            sleep 1
          done
          echo "Worker failed to start within 30s"
          cat ./sdks/ruby/examples/worker.log || true
          exit 1
      - name: Run e2e tests
        working-directory: ./sdks/ruby/examples
        run: bundle exec rspec -f d --fail-fast
      - name: Upload worker logs
        if: always()
        uses: actions/upload-artifact@v6
        with:
          name: ${{ env.HATCHET_CLIENT_NAMESPACE }}-opt-${{ matrix.optimistic-scheduling }}-worker-logs
          path: ./sdks/ruby/examples/worker.log
      - name: Upload engine logs
        if: always()
        uses: actions/upload-artifact@v6
        with:
          name: ${{ env.HATCHET_CLIENT_NAMESPACE }}-opt-${{ matrix.optimistic-scheduling }}-engine-logs
          path: engine.log
      - name: Upload API logs
        if: always()
        uses: actions/upload-artifact@v6
        with:
          name: ${{ env.HATCHET_CLIENT_NAMESPACE }}-opt-${{ matrix.optimistic-scheduling }}-api-logs
          path: api.log
  publish:
    runs-on: ubicloud-standard-4
    needs: [lint, test]
    if: github.ref == 'refs/heads/main'
    permissions:
      contents: write
      id-token: write
    steps:
      - name: Checkout Repository
        uses: actions/checkout@v6
        with:
          submodules: recursive
      - name: Set up Ruby
        uses: ruby/setup-ruby@v1
        with:
          ruby-version: "3.2"
          bundler-cache: true
          working-directory: ./sdks/ruby/src
      - name: Check if version changed
        id: version_check
        # Skip publishing when the gem version already exists on RubyGems.
        run: |
          NEW_VERSION=$(ruby -e "require_relative 'lib/hatchet/version'; puts Hatchet::VERSION")
          CURRENT_VERSION=$(gem info hatchet-sdk --remote --exact 2>/dev/null | grep -oP 'hatchet-sdk \(\K[^)]+' || echo "0.0.0")
          if [ "$CURRENT_VERSION" == "$NEW_VERSION" ]; then
            echo "Version has not changed ($NEW_VERSION). Skipping publish."
            echo "should_publish=false" >> "$GITHUB_OUTPUT"
          else
            echo "Publishing version $NEW_VERSION (current: $CURRENT_VERSION)"
            echo "should_publish=true" >> "$GITHUB_OUTPUT"
          fi
      - name: Configure RubyGems credentials
        if: steps.version_check.outputs.should_publish == 'true'
        uses: rubygems/configure-rubygems-credentials@main
      - name: Publish to RubyGems
        if: steps.version_check.outputs.should_publish == 'true'
        run: |
          gem build hatchet-sdk.gemspec
          NEW_VERSION=$(ruby -e "require_relative 'lib/hatchet/version'; puts Hatchet::VERSION")
          gem push hatchet-sdk-${NEW_VERSION}.gem

View File

@@ -140,7 +140,6 @@ jobs:
run: |
export DATABASE_URL="postgresql://hatchet:hatchet@127.0.0.1:5431/hatchet"
go run ./cmd/hatchet-migrate
task generate-go
- name: Setup
working-directory: .

View File

@@ -107,7 +107,6 @@ jobs:
- name: Generate
run: |
go run ./cmd/hatchet-migrate
task generate-go
task generate-certs
task generate-local-encryption-keys
@@ -184,7 +183,6 @@ jobs:
- name: Generate
run: |
go run ./cmd/hatchet-migrate
task generate-go
task generate-certs
task generate-local-encryption-keys
@@ -276,7 +274,6 @@ jobs:
- name: Generate
run: |
go run ./cmd/hatchet-migrate
task generate-go
task generate-certs
task generate-local-encryption-keys

1
.nvmrc Normal file
View File

@@ -0,0 +1 @@
v20.15.1

View File

@@ -1,128 +1,120 @@
# Contributing
### Setup
This guide will help you understand how to contribute effectively to the Hatchet project.
1. Make sure all prerequisite dependencies are installed:
## Getting Started
- [Go 1.25+](https://go.dev/doc/install)
- [Node.js v18+](https://nodejs.org/en/download) - we recommend using [nvm](https://github.com/nvm-sh/nvm) for managing node versions.
- [pnpm](https://pnpm.io/installation) installed globally (`npm i -g pnpm`)
- [Docker Desktop](https://docs.docker.com/desktop/install/mac-install/)
- [protoc](https://grpc.io/docs/protoc-installation/)
- [pip](https://pip.pypa.io/en/stable/installation/)
- [Caddy](https://caddyserver.com/docs/install)
- [atlas](https://atlasgo.io/)
- [pre-commit](https://pre-commit.com/)
- You can install this in a virtual environment with `python3 -m venv venv && source venv/bin/activate && pip3 install pre-commit`
New to Hatchet? Start with our [Architecture](https://docs.hatchet.run/home/architecture) docs to familiarize yourself with Hatchet's core system design.
2. You can then populate a local `.env` file with the following:
Then, before contributing, check out the following sections:
```
DATABASE_URL='postgresql://hatchet:hatchet@127.0.0.1:5431/hatchet'
- [Development Environment Setup](#development-environment-setup)
- [Pull Requests](#pull-requests)
- [Testing](#testing)
- [Running Locally](#running-locally)
- [Example Workflow](#example-workflow)
SERVER_ENCRYPTION_MASTER_KEYSET_FILE=./hack/dev/encryption-keys/master.key
SERVER_ENCRYPTION_JWT_PRIVATE_KEYSET_FILE=./hack/dev/encryption-keys/private_ec256.key
SERVER_ENCRYPTION_JWT_PUBLIC_KEYSET_FILE=./hack/dev/encryption-keys/public_ec256.key
## Development Environment Setup
SERVER_PORT=8080
SERVER_URL=http://localhost:8080
Ensure all prerequisite dependencies are installed:
SERVER_AUTH_COOKIE_SECRETS="1234"
SERVER_AUTH_COOKIE_DOMAIN=app.dev.hatchet-tools.com
SERVER_AUTH_COOKIE_INSECURE=false
SERVER_AUTH_SET_EMAIL_VERIFIED=true
- [Go 1.25+](https://go.dev/doc/install)
- [Node.js v18+](https://nodejs.org/en/download)
- We recommend using [nvm](https://github.com/nvm-sh/nvm) for managing node versions to match the version defined in [`.nvmrc`](.nvmrc)
- [pnpm](https://pnpm.io/installation) installed globally (`npm i -g pnpm`)
- [Docker](https://docs.docker.com/engine/install/)
- [task](https://taskfile.dev/docs/installation)
- [protoc](https://grpc.io/docs/protoc-installation/)
- [Caddy](https://caddyserver.com/docs/install)
- [goose](https://pressly.github.io/goose/installation/)
- [pre-commit](https://pre-commit.com/)
- You can install this in a virtual environment with `task pre-commit-install`
SERVER_MSGQUEUE_KIND=rabbitmq
SERVER_MSGQUEUE_RABBITMQ_URL=amqp://user:password@127.0.0.1:5672/
We recommend installing these tools individually using your preferred package manager (e.g., Homebrew).
SERVER_GRPC_BROADCAST_ADDRESS=grpc.dev.hatchet-tools.com:443
SERVER_GRPC_INSECURE=true
## Pull Requests
Before opening a PR, check if there's a related issue in our [backlog](https://github.com/hatchet-dev/hatchet/issues).
For non-trivial changes (anything beyond typos or patch version bumps), please create an issue first so we can discuss the proposal and ensure it aligns with the project.
Next, ensure all changes are:
- Unit tested with `task test`
- Linted with `task lint`
- Formatted with `task fmt`
- Integration tested with `task test-integration` (when applicable)
If your changes require documentation updates, modify the relevant files in [`frontend/docs/pages/`](frontend/docs/pages/). You can spin up the documentation site locally by running `task docs`. By default, this will be available at [`http://localhost:3000`](http://localhost:3000).
For configuration changes, see [Updating Configuration](docs/development/updating-configuration.md).
## Testing
Hatchet uses Go build tags to categorize tests into different test suites. For example, these build tags mark a test as unit-only:
```go
//go:build !e2e && !load && !rampup && !integration
func TestMyUnitOfCode() { ... }
```
3. Start the Database and RabbitMQ services:
Most contributors should familiarize themselves with **unit testing** and **integration testing**.
**Unit tests** verify individual functions without external dependencies:
```sh
task test
```
**Integration tests** verify components working together with real dependencies (normally spun up via `docker compose`):
```sh
task test-integration
```
Note: **manual testing** is acceptable for cases where automated testing is impractical, but testing steps should be clearly outlined in your PR description.
## Running locally
1. Start the Postgres Database and RabbitMQ services:
```sh
task start-db
```
4. Install dependencies, run migrations, generate encryption keys, and seed the database:
2. Install Go & Node.js dependencies, run migrations, generate encryption keys, and seed the database:
```sh
task setup
```
**_Note: You might need to run this as `sudo` so it can install certificates._**
### Starting the dev server
Start the Hatchet engine, API server, dashboard, and Prisma studio:
3. Start the Hatchet engine, API server, and frontend:
```sh
task start-dev # or task start-dev-tmux if you want to use tmux panes
```
### Creating and testing workflows
Once started, you should be able to access the Hatchet UI at [https://app.dev.hatchet-tools.com](https://app.dev.hatchet-tools.com).
To create and test workflows, run the examples in the `./examples` directory.
You will need to add the tenant (output from the `task seed-dev` command) to the `.env` file in each example directory. An example `.env` file for the `./examples/simple` directory can be generated and appended via:
### Example Workflow
1. Generate client credentials:
```sh
cat >> ./examples/simple/.env <<EOF
HATCHET_CLIENT_TOKEN="$(go run ./cmd/hatchet-admin token create --name local --tenant-id 707d0855-80ab-4e1f-a156-f1c4546cbf52)"
EOF
task init-dev-env | tee ./examples/go/simple/.env
```
This example can then be run via `go run main.go` from the `./examples/simple` directory.
### Logging
You can set the following logging formats to configure your logging:
```
# info, debug, error, etc
SERVER_LOGGER_LEVEL=debug
# json or console
SERVER_LOGGER_FORMAT=json
DATABASE_LOGGER_LEVEL=debug
DATABASE_LOGGER_FORMAT=console
2. Run the simple workflow by loading the environment variables from `./examples/go/simple/.env`:
```sh
cd ./examples/go/simple
env $(cat .env | xargs) go run main.go
```
### OpenTelemetry
You can set the following to enable distributed tracing:
```
SERVER_OTEL_SERVICE_NAME=engine
SERVER_OTEL_COLLECTOR_URL=<collector-url>
# optional
OTEL_EXPORTER_OTLP_HEADERS=<optional-headers>
# optional
OTEL_EXPORTER_OTLP_ENDPOINT=<collector-url>
You should see the following logs if the workflow was started against your local instance successfully:
```log
{"level":"debug","service":"client","message":"connecting to 127.0.0.1:7070 without TLS"}
{"level":"info","service":"client","message":"gzip compression enabled for gRPC client"}
{"level":"debug","service":"worker","message":"worker simple-worker is listening for actions: [process-message:process-message]"}
{"level":"debug","service":"client","message":"No compute configs found, skipping cloud registration and running all actions locally."}
{"level":"debug","service":"client","message":"Registered worker with id: c47cc839-8c3b-4b0f-a904-00e37f164b7d"}
{"level":"debug","service":"client","message":"Starting to listen for actions"}
{"level":"debug","service":"client","message":"updating worker c47cc839-8c3b-4b0f-a904-00e37f164b7d heartbeat"}
```
### CloudKMS
## Questions
CloudKMS can be used to generate master encryption keys:
```
gcloud kms keyrings create "development" --location "global"
gcloud kms keys create "development" --location "global" --keyring "development" --purpose "encryption"
gcloud kms keys list --location "global" --keyring "development"
```
From the last step, copy the Key URI and set the following environment variable:
```
SERVER_ENCRYPTION_CLOUDKMS_KEY_URI=gcp-kms://projects/<PROJECT>/locations/global/keyRings/development/cryptoKeys/development
```
Generate a service account in GCP which can encrypt/decrypt on CloudKMS, then download a service account JSON file and set it via:
```
SERVER_ENCRYPTION_CLOUDKMS_CREDENTIALS_JSON='{...}'
```
If you have any further questions or queries, feel free to raise an issue on GitHub. Else, come join our [Discord](https://hatchet.run/discord)!

View File

@@ -4,15 +4,12 @@ tasks:
setup:
cmds:
- task: install-dependencies
- task: generate-certs
- task: set-env-db
- task: migrate
- task: generate-all
- task: generate-local-encryption-keys
- task: generate-docs
- task: set-env-all
- task: seed-dev
- task: copy-ca-to-sdks
- task: docs
set-env-db:
cmds:
- |
@@ -36,7 +33,7 @@ tasks:
SERVER_AUTH_COOKIE_SECRETS="$(randstring 16) $(randstring 16)"
SERVER_AUTH_COOKIE_DOMAIN=app.dev.hatchet-tools.com
SERVER_AUTH_COOKIE_INSECURE=false
SERVER_AUTH_COOKIE_INSECURE=true
SERVER_AUTH_SET_EMAIL_VERIFIED=true
SERVER_MSGQUEUE_KIND=rabbitmq
@@ -46,9 +43,9 @@ tasks:
SERVER_ADDITIONAL_LOGGERS_QUEUE_FORMAT=console
SERVER_ADDITIONAL_LOGGERS_PGXSTATS_LEVEL=error
SERVER_ADDITIONAL_LOGGERS_PGXSTATS_FORMAT=console
SERVER_LOGGER_LEVEL=error
SERVER_LOGGER_LEVEL=warn
SERVER_LOGGER_FORMAT=console
DATABASE_LOGGER_LEVEL=error
DATABASE_LOGGER_LEVEL=warn
DATABASE_LOGGER_FORMAT=console
SERVER_GRPC_BROADCAST_ADDRESS=127.0.0.1:7070
@@ -56,16 +53,27 @@ tasks:
SERVER_INTERNAL_CLIENT_BASE_STRATEGY=none
SERVER_INTERNAL_CLIENT_BASE_INHERIT_BASE=false
EOF
fmt-go:
cmd: gofmt -s -w .
fmt-app:
dir: frontend/app
cmd: pnpm run prettier:fix && pnpm run prettier:check
fmt-docs:
dir: frontend/docs
cmd: pnpm run prettier:fix && pnpm run prettier:check
pre:
aliases: [fmt]
cmds:
- cd frontend/app/ && pnpm run prettier:fix
- pre-commit run --all-files
- task: fmt-go
- task: fmt-app
- task: fmt-docs
- task: pre-commit-run
start-db:
cmds:
- docker compose up -d
stop-db:
cmds:
- docker compose down
- docker compose down {{.CLI_ARGS}}
recreate-db-from-scratch:
cmds:
- docker compose down -v
@@ -82,20 +90,20 @@ tasks:
cmds:
- task: generate-sqlc
- task: goose-migrate
atlas-migrate:
cmds:
- bash ./hack/dev/atlas-migrate.sh {{.CLI_ARGS}}
goose-migrate:
cmds:
- bash ./hack/dev/migrate.sh
seed-dev:
dotenv: [.env]
cmds:
- SEED_DEVELOPMENT=true bash ./hack/dev/run-go-with-env.sh run ./cmd/hatchet-admin seed
- SEED_DEVELOPMENT=true go run ./cmd/hatchet-admin seed
seed-cypress:
dotenv: [.env]
cmds:
- SEED_DEVELOPMENT=true bash ./hack/dev/run-go-with-env.sh run ./cmd/hatchet-admin seed-cypress
- SEED_DEVELOPMENT=true go run ./cmd/hatchet-admin seed-cypress
start-dev:
deps:
- task: goose-migrate
- task: start-db
- task: start-api
- task: start-engine
@@ -125,30 +133,38 @@ tasks:
desc: Run Cypress E2E for frontend/app against a locally started API stack (CI-friendly).
cmds:
- bash ./hack/ci/e2e-frontend.sh
start-ngrok:
cmds:
- ngrok http 8080
start-lite:
cmds:
- bash ./hack/dev/start-lite.sh
start-ngrok:
cmds:
- ngrok http 8080
generate-all:
aliases: [generate]
cmds:
- task: install-dependencies
- task: generate-api
- task: generate-go
- task: generate-proto
- task: generate-sqlc
- task: generate-docs
- task: pre-commit-run
install-dependencies:
deps: [venv]
cmds:
- go mod download
- cd frontend/app/ && pnpm install
- cd frontend/docs/ && pnpm install
- npm install -g vite
- source .venv/bin/activate && pip install pre-commit
generate-api:
cmds:
- task: generate-api-server
- task: generate-api-client
generate-docs:
deps: [venv]
dir: frontend/snippets
cmds:
- 'source {{.ROOT_DIR}}/.venv/bin/activate && python generate.py'
- task: fmt
generate-certs:
cmds:
- bash ./hack/dev/generate-x509-certs.sh ./hack/dev/certs
@@ -156,7 +172,9 @@ tasks:
cmds:
- bash ./hack/dev/generate-local-encryption-keys.sh ./hack/dev/encryption-keys
init-dev-env:
- bash ./hack/dev/init-dev-token-and-env.sh
dotenv: [.env]
cmds:
- bash ./hack/dev/init-dev-token-and-env.sh
generate-dev-api-token:
cmds:
- bash ./hack/dev/generate-dev-api-token.sh
@@ -168,9 +186,6 @@ tasks:
cmds:
- bash ./hack/oas/generate-clients.sh
silent: true
generate-go:
cmds:
- go generate ./...
generate-proto:
cmds:
- bash ./hack/proto/proto.sh
@@ -184,13 +199,16 @@ tasks:
lint:
cmds:
- task: lint-go
- task: lint-frontend
- task: lint-app
- task: lint-docs
lint-go:
cmds:
- golangci-lint run ./... --config .golangci.yml
lint-frontend:
cmds:
- cd frontend/app/ && pnpm run lint:check
cmd: golangci-lint run ./... --config .golangci.yml
lint-app:
dir: frontend/app
cmd: pnpm run lint:check
lint-docs:
dir: frontend/docs
cmd: npm run lint:check
kill-apis:
cmds:
- ps -A | grep 'cmd/hatchet-api' | grep -v grep | awk '{print $1}' | xargs kill -9 $1
@@ -202,15 +220,15 @@ tasks:
copy-ca-to-sdks:
cmds:
- mkdir -p ./python-sdk/certs/ && cp ./hack/dev/certs/ca.cert ./python-sdk/certs/
venv:
status: [test -d .venv]
cmd: python3 -m venv .venv
pre-commit-install:
cmds:
- pip install pre-commit # can use brew install pre-commit if you are on macOS
- pre-commit install
deps: [venv]
cmd: source .venv/bin/activate && pip install pre-commit && pre-commit install
pre-commit-run:
cmds:
- cd frontend/app/ && pnpm run lint:fix && pnpm run prettier:check
- cd frontend/docs/ && pnpm run lint:fix && pnpm run prettier:check
- pre-commit run --all-files || pre-commit run --all-files
deps: [venv]
cmd: 'source .venv/bin/activate && pre-commit run --all-files || pre-commit run --all-files'
docs:
cmds:
- |
@@ -231,3 +249,15 @@ tasks:
start-telemetry:
cmds:
- docker compose -f docker-compose.infra.yml up -d
test:
cmds:
- go test -count=1 $(go list ./... | grep -v "quickstart") -v -failfast
test-integration:
deps:
- start-db
- goose-migrate
- generate-local-encryption-keys
cmds:
- defer:
task: stop-db
- go test -count=1 -tags integration $(go list ./... | grep -v "quickstart") -v -failfast

View File

@@ -46,6 +46,7 @@ enum SDKS {
GO = 1;
PYTHON = 2;
TYPESCRIPT = 3;
RUBY = 4;
}
message RuntimeInfo {

View File

@@ -212,5 +212,3 @@ V1UpdateWebhookRequest:
staticPayload:
type: object
description: The static payload to use for the webhook. This is used to send a static payload with the webhook.
required:
- eventKeyExpression

View File

@@ -191,6 +191,7 @@ WorkerRuntimeSDKs:
- GOLANG
- PYTHON
- TYPESCRIPT
- RUBY
WorkerRuntimeInfo:
properties:

View File

@@ -1,7 +1,6 @@
package tenants
import (
"context"
"errors"
"github.com/jackc/pgx/v5"
@@ -80,8 +79,7 @@ func (t *TenantService) TenantCreate(ctx echo.Context, request gen.TenantCreateR
tenantId := tenant.ID
err = t.config.V1.TenantLimit().SelectOrInsertTenantLimits(context.Background(), tenantId, nil)
err = t.config.V1.TenantLimit().UpdateLimits(ctx.Request().Context(), tenantId, t.config.V1.TenantLimit().DefaultLimits())
if err != nil {
return nil, err
}

View File

@@ -16,7 +16,7 @@ func (w *V1WebhooksService) V1WebhookUpdate(ctx echo.Context, request gen.V1Webh
webhook := ctx.Get("v1-webhook").(*sqlcv1.V1IncomingWebhook)
opts := repository.UpdateWebhookOpts{
EventKeyExpression: &request.Body.EventKeyExpression,
EventKeyExpression: request.Body.EventKeyExpression,
ScopeExpression: request.Body.ScopeExpression,
}

View File

@@ -319,6 +319,7 @@ const (
const (
GOLANG WorkerRuntimeSDKs = "GOLANG"
PYTHON WorkerRuntimeSDKs = "PYTHON"
RUBY WorkerRuntimeSDKs = "RUBY"
TYPESCRIPT WorkerRuntimeSDKs = "TYPESCRIPT"
)
@@ -1896,7 +1897,7 @@ type V1UpdateFilterRequest struct {
// V1UpdateWebhookRequest defines model for V1UpdateWebhookRequest.
type V1UpdateWebhookRequest struct {
// EventKeyExpression The CEL expression to use for the event key. This is used to create the event key from the webhook payload.
EventKeyExpression string `json:"eventKeyExpression"`
EventKeyExpression *string `json:"eventKeyExpression,omitempty"`
// ScopeExpression The CEL expression to use for the scope. This is used to filter the correct workflow to trigger.
ScopeExpression *string `json:"scopeExpression,omitempty"`
@@ -16264,197 +16265,197 @@ var swaggerSpec = []string{
"Wn/yrJAvasUNQblHJ/0WfC0q9Nu1ClUbYDelFZcCilPgBSTmC9kELcTT/RYtsB6MyNyg99JPOWUCcS+w",
"J0BgfA98Xz/kzhTRtcuNbUeTaCg4uU9DQ2TRU4R3tEfXz6bQaKzrG7grtkrLD6S0rOYMp+oAa9V+5MK3",
"cMRe5A7qVQ7dr4Uj5CXPUUpNLBF6o+NUHH0bO013lgSv24liFMpaKRq/cfHVREr6um2qPlvj9ita10f8",
"58btKun6Pp/y5EltWOjK/mb6ZwCRk6oUenlwYXT7GQW390Fs1gFrxirUajiTXcip7PDcPQTC2noRmTa0",
"8+W5YjPOuOaK31Y+sJYBpUoE4VeVNZVQ7jKTRuijKWitfzNkXKEgOR98qCOCOQQejO3Of962uIli2lpc",
"KTN15ToqRVRfEUj5UNNuGoreNcVLKuPkwnGLaqpVziu61CkdhSFUh8YEUyXahET5tXagAsrSUWuyX+XD",
"U/0Z1QLnCxVv4w/9006X/ufs9e/8j9enZ51u59PF62rspRGvmjyzykT20bNpL5bi1A09i4p2uREGshNz",
"uJkFgCQx/LA2HdOhnXQ8rcBEs4CVX3JjaLjeYvaNsWEqy9AssJqgGOKbIkrBk37FRdBqaWSg4D0NPB78",
"H1bQcDxgYTP8j9vRZTV57IVzndRpLF1myoeZgob3g6vBiMmY98PJh9u3zGluNLwZMH+3/vnHTrdzObwa",
"9Jkr2+fh/zHhJ7sPbj6kudLno7mnhDT6td4SrbfEj+Ut0To0lJ8h1jRr7rdZ/mCswg1fnGueeDX2Y/Hq",
"u5YNmbXODMjZFSf/4Jt7f03fdlWrn3IaXkAiixIUXGaTwP6NXyQ0wHNQb7FQI7tp+3dhrIFHPr+wfBY2",
"QTSsYXaXzb/drx8WwMHBm0vOUusOUY6M7uRwItEtIStvbV4dyG+vVxOLsoXqkOqUVcC+1BuGqh01eMQw",
"YHxTDxpfdA4UEkXmxewolK7g5qPGXPbfiwJpWpVcaP28bMNGCxY2Mg+Kkg9a5TeJDUmdZd8k9hsZpYTx",
"gI6r2+scSnjKLHMq/00tEttdn6lcFfnC6SnmDO+dICROFIePyINe1wFODAIvXMhOT8j3nSl0ZjCAsbzG",
"qNR1tjWMN0ezt58EuNre7JqUUzhrkU2lljl/9E6tFHnxY2WpyHUxMqa4tN8Bw76xB0YQeFm9xJgPtdqV",
"fwHJPPQarVaA/on3THX789AzUO2HyeRGZqJ2Qy+l4Fgg3z56/w7w8H02c27ir5YIryYhgcqac17SvGxt",
"ncZLSwEr086ndOsyY9ek0+3cXI/Zf24nTEsynZA8pAlXxTth8X7Cqxq5IHAiGFO6OrKvL/fc7YBHgNhl",
"1pxDLZdeqDwt/AbdhEDHDQNR+9JfGnwUEY7YzVqbL4tSHUoz8QGM0SyAnpN1Ypan29vhhSPYZ/c3Sh9M",
"oY+rC3+yNoylct4c/BiwI0UuUOk4ui3zASYfIIjJFAJSZRvIbRWr48oqMABnLnvnb+VnJ2dnvdOz3umr",
"yenrNye/v/ntj6M//vjj1es/eiev35yc2Cc4AZyZqXowwARMfWZs20NIF+CbmfAX4BtaJIvNMcD29Q6z",
"vhFDF6bVS7Epiwttw6NEePW6MF6FgEf5uTQ0HIuqP1nVT1ybcQk7WS8nDNR9aABZcV4tdElACWYY3Id2",
"vDpSOtBD1w9N5xSGCxDNwxg6tJEQEyuieSzHGrP5dEHy1uUnsqnTvDLnk+FnXuo6/fOmfzs2hPDaxI1w",
"ZKUxI/zcNObaEic5l/cFIOuNebz3bZ1ufDu61AzfVFVm7bVqjiLKS6d8ZU5cmUWJdt20k01F+Wpetrpm",
"8uoUoBV4ePkXTOOlIAVylGf+Qu1qEMwS8aRlLRbGFx8xPxZ5Z6X0czlxjV5tExJp8I3EQNsAew/mYUuL",
"YxCpyun1ZZ/F7t/8c/KBPZBM/nkzGJ+PhjcTvYUn42TV9WFw+e7D9ZiH/n/qX/V5SpGqEvmpGC4bMVXa",
"1Me1pL9YuM12G5QJ5QeOLBSqLy/5dzg1CFb6RQeQFX3+GU51gnwnmoMRc7KonEZ5A7PV15paF4H2alL9",
"wCScyrIbQ+UKxAtNMzmhPAZJZFZafTXnQurKb5CJ4pGA6426YvkzSJTvrBS5xn8hkEkweE62GSRY+Cmm",
"XZ0Z7ZuedYrlWYswVi15TGJA4Kw2W7QC4WWuX3MNO1Oi84WUi/lmX53VGybk1MXVdLVYrdqi4YUuLV4K",
"4PBCi0PZ+yMKcqaAd7dX55MhE7MXt6P+20uqWl3031cKSDqIPD8bUTCbXcNe8rv+UF4rJm/H57lejX+u",
"2E9jLiHGJB9hVXgdCQnwdRSb8tgDXBq8YuTwlCztIvjkLQw4OIIuukduNonzSwQwhp7ziIDwTv5VzxVG",
"RDRwmcp+vVFakziBmvHrXiBV36P0Wn96cnJi9CXSDpP3/mnoyNNoQX+HUynGbM9xQyGAtaNd+Ym4a9MX",
"n1vc6V8GhJw7zCZdW1SvBa1/i7n0xNtlg8EnSq+yw0lDlcTosrJOLulsINUZRQH7a7Uw2ZMbnuK2Yn8o",
"jJJgjTy75VHeIejnzn01jUNGyzkppkjGmknG0h2nld2t7G5l90vJbsMcP6Bor/DnW0E0s9GGBC7MHoKG",
"+0p9Z2P1tTFLjVWdgHVNn6ks+9bGk2ptYECDTC+maC3mKhCL6pYQqYxaRz2lzKE3g6sLnjA0Sx2qyQqb",
"zyGapht92z//eP3uXe0pyaZd6d6cFyhmYpzkxUnRYyQMbhTJX4KVNhi7c+glfkXmdEPntY+jL8W8GZYC",
"pmazMa+qbfSjyaXr2CI7VtWnwrWLMBoJWAbeJnQkhzrnHeu00ELz0vwZQ2iTDVfldZZMp/0omEv7TfJo",
"82zRVYudgJkOvT5XGdc3+QcbTrYhzLocwir6EULhPKYXmXu9XNCyNOfLO2TgxroJmfu4dkYmR+7Ek+Om",
"p8X6FTbXDAp400hemAYNrDJwip/NKvdc3dKjL9PA7sQrRHM085QjRnm6yZetKjAUbbbIsrknDJsNUV89",
"WP66e5D45KYy645oZMy+Y/VIIG6Rf2J+8C4MJZH+HF9fORzocuAJG0EbESqfBV/osS+MPe5PaIEGLNSO",
"CVrA0FAsBRPkPixNDiT0m4PFs4rdS6IiLxqwLdPBHk8LL2VWOFb6jHkyIB3KHzPKNif1tFngk/Kebftu",
"0Th5qvU1UC5LEkZuoK/1nM7IapNvQ03ocy/2ZFcI5w4V2aNQocpsDJmT17m5xMQCfKtp8dRM2TfVmeCx",
"CwmVv0x+cginEMQwltkrGEbZscJ+zjZlTkjErj1h+ICgbI7orvKf5Nv5m44Iws36ikQmtHeCSbiwnOyZ",
"SXzuzKPxf+ezOP2bISt/RJhNLP9rSoid06OToxNGxzwMufOm8+ro9OhERBQzTLCoYV+UDZ3pQjzey+d5",
"2iqAGDupPYZuOpDFLjqX4vt7hgbpks9mOTs5KQ/8AQKfzBmKXvPvbhgQUf5L1BemTY//xpyvcHoA1vDx",
"II5DKoWf2VGtznkVknQdOeLovPnra7eDZU0PuuqsofQp+UvA7M6h+9D5Svsz/MUQeMt6BNJmqAqDI9lg",
"31HIFuyQ0AGuCyPikBjc3yO3FqMpBmpR+nh6DHwqUoJZDy4A8nvsIRkff2c/q789c7z4kGhuTxfsd+yA",
"NK8T7e6w7vxturQLfdpiQBswVws+AuOZGCwgYfrAXxVOPqUZHJH3uvOGR/KnQqO0lI4q1Pj7QLZj69Vo",
"/Vqip9/K2Bonrgsxvk98f+lwlHq5pFgl5D13O7/tivL6zgL4FAvQc1i+JE8GznAwXm0cDB0U78J4ijwP",
"8ttHRt+cTqrITFL8hDWhh9W3XixUDvaB9+10NYTxlV17iavJms2vW+uQOB/hxyBxRg9vQy6PN0IMHDt8",
"0wqISyOvymRSiS0SOonEeR4bz3qxv5GFaJeggz0nBjigrRiwFAOcWrYnBtQDMkI9Ej7AgJ6K8m92Gkah",
"Lih/BB/DB+iAgKXmY62Ft1Y6Y0FMRGhCW0mDDu1uIyXS4Q0yQcK6V8ddzJYn6JxB92MTNW5C1YJ06MZO",
"xM5JMs5+q6LkdMtzFOz6YeIdqzd0swZdynUmrz1sEAcFmIDAhSUiPqefpXuJWbHePm4ZIE4SpEGse0Ng",
"NVo7R7D6Xi+2/pPywvatJ4fohRF3dhEnmrLf3Bx+/J3997lqv6mUYq2OShvKrOJ8I2slEU8JbFJO2Ned",
"CqHNbbZIDlRzePOaGo9CrHFssB1rZVuOxBXMZOTNUVwh1Tj9fDVT+HGdWGPbkkq1Gpq/SAXYz073F4yE",
"W9rfL9pfwJXPcOPpvbuDW+QMa0JT6ZF4IAf5Jo5wOsYxs9PzXcLGHb9EmF6AfCfX2rTBtPUw33Bru03n",
"EjuuTNlw82UOl9zq9okQ0q1nG1HYhPL+5zY5DBAJqTQ//s45/vk4isMpNF8u5dunA3J1Bphdl9cpyEXw",
"mxk+nfomxGSUBDdsXnvblOnQSyXXjk+9CoISuTg4PTH8Hu30VLgKCcs3H8boPzwnucjKw7NV8CjNkpmT",
"AORDz+F2e4dtj/NOyPNhtq36gyNHZtgH7sPxd/YfCyu+M6YNlXIaecphX0V6I3ujfW5MI/EwEPfSOp/H",
"yT6pNqe7AeM2yEiYT/x6NxPzrFks+SDw/fCJTq97EShSrRS97PcqFYsTXZ5jAnz8HQfYiluuxqrUL/NL",
"gBuwSX4wM6OIk3vv2KSAjJZR9pBRSgSbssrVuJJRAqxhE6m4KNYmvepC55VX4hKLNH4bezH9o2s2BPAy",
"PCtZAhQYzl6/zgFxugkdKIpD+g/otWfYHrGm6RLJKhA4IIoktZePNd6mwI8ETH147IEZPk6TlxsvjZjd",
"Glk7h8wBcabQD4OZmlUgTZQNZuUr5efTC8DKj05ESe16c5lMUZ0laOFJoxnL/DuB8TLjGQ/M7pBXfcxt",
"K0LESu4U4H2pi4819W6sJvoFmKW15LU5syrkEJ1Svv6xWX9uK2G383pXwo/eQtEi8uECBqSkGzDjhaSD",
"9Okc4AethGENj7/T/9Q8L/FaDdMl55uiAKETWJraeY1606FPAd3xkZ8vxm8QCrKcvwpLKRZqm3b8QlWK",
"RqY3htWfnT9/43ef7c86UeuxU03hPkx4kqY9EREZP5dEhPnOQGxEyLEfzup0FT+cOT4KoMx8JOAoSpTL",
"cHaJAl5R5BClisjyREKHpSdzpkuDZGGfO1poUEBYCcFy0KUh5WtMRMLl0JlBQlHNsGyYGSNuedTMXJG6",
"wXBvSvPiW02dBAT5G5i671B51yPwG3EwBLE7d9hMSkXfivWzDjqRXr1WRsHwEfq/4F/pRChw/cSDpv2l",
"LXFHq+1WC3zJAnQAW+XWk8ltKGAsSsVMeezz3XR5l3bKQWkFXCmnjtUha7U9e3DkqkKogUIsoljbd/O8",
"VppKfuXYuQxn65869P97Weiw+XVVKTVmPHjSSmI/wNGDH1BkYv77eww3cu5s9aTbvkqd7fUKDjLttbdV",
"q3MyTidh1lexWQvFRO9C/9iD02RmNtIPHoGfsIpFzvng0oFpuXwHzAAKcFYBTFS49QABRxp5eA79CzbV",
"obgUbD6i5fPp+eCSIaEmgIVhElNRyCreUjGhR/5O41hU8GXaxRpRBwX1eJo1tHqN+hI3TWYlFlN4/nxw",
"aWZ5K1630Gv4A0Be9KR1eYv83Ey32cc3uh9Jv9HcaKUx/wEusXJRMk5L2zW/XjIyEBH3dRfL8zDAiF4l",
"BYmxR6bQZZk3PAfcE5aDBmFHXNu3aWyohmUK78MY1gKzKfPDO741JMxBA2JW4Sx0EZOgT4jM1be4YoFj",
"DXxZWgnDzm75mcx+Xbk0/s4CEHeO2NOjC2MCUJCF7letM83GB1cylBRKl1svLt0Sscrpkh53KHb4c6UO",
"YpGw70W3Zbp0sgy5mY84K5qV3ksMNpVyAmHtQjTFHOQ0D3DZ43WEIoBi7PziQSb4KPctHeD8682/fi2K",
"rUonCDvDFnbDCFrJQ97Sdl2s9XrwbveOan8/bS1QdRaolDcswzYaKGjH7Bi21NL42W6lqX2Ey0NR1rYe",
"xiRx0ZQRGLpbZtAxgyO0xy0wxPfH016DwFXmW0Cw3r+gSQzrHvsVmmCSmDpQ5hT70x5QGwktxE3CClPK",
"seJMruPYHFOiZe0ZxVXS1pywr+aEUpVdCwW69vZZOUXpisgu43zOo/VLTTS7K+BkiiFxXBB4iOWZkXS9",
"0dtD1YqdWww9xkYcFkKvx2V4AJE2V/Z2byiasdOLh8LaDQS7FDGtZM9rWxIvmWzn+K3StbqGt51zVmrI",
"AU4An8TARtHM2/7cjzcMBRwdNg847P0mJWWHFXbiVv1dvtkI8qhjPVF2SgG4fZLe1ZP0VfYKnWP4lD9T",
"3rTneXstjl2w+N824Y2gTlI0Tty5X2qc4FbE4rI9uRb9ZSvFxGHetixFg4zlbMXCS4oFW9bvKoRJj/6K",
"UIxUgTcbTPhsh2wxSfn5J+fiWUjaw91oMVnhjC0yWmWa4Ppj88ADnnPHZppk9yUZbhtXAL5JK18BXiD5",
"sLV8kPmGW/lweKe8hbLPfNsXWbG6CrVASEYZCOzESeCIntV5i7kHxSXChHtRyNp4hyrTypFQChpq/JMs",
"AF07OKoemk05KBVts8z6G3jcW8c8fVrTDL2QpwuFm1etY6T8v7CacsAAtKhyR9vfydZ3rPVWiS1LgcDf",
"+JirVFp5N4u/NSQb4A1RMLvjNfx2BHlf40D00HsUPj0WjwSZJ9HdotKV6GWN2FSwjZJASrTmcdOqFG1z",
"HOxPADPbm0V6UNnFWNifuFGIAmJ57i5QkBBIr+PyrxiCBy98CtKjuMEx/B6SGzr5oR/C7MCTvsFK6I4w",
"WHe6SpX6s5Oz094J/d/k5OQN+9//Ncgd0b1/z28imzggGaSp57AKakjhWwPYexQgPIfeWzZ4c3C3Lxtz",
"pLaCdGR80srHPZWP+d3ZuJTExy4rBW6OQuOlwtN8NDp5x5v83A+UDAVMVakpkMRzfIWOK5G20ygyNqkP",
"PZ4nrPZlUjZvk0S10bIlGVWQDBuXTDGMfLCsKu5Ev1dKJt7kp5ZMHAVNJFMskbZLycTBtBVMsWjdyqVW",
"LpXkUkEubFAuidSfNt63Mr16nfetyN7eut/us/stJxeHDmsXv8baX9HmqwRDCpoYp6PY2lsl0VkDKjpU",
"QFo9yYt7uKrs08DFNWXk9i0+7+OaIiaTmwLFa3u5mopYpJvY+rkKP1eBjyav3JIpX8jTVdJIE1fXfUx+",
"/nP7upYzm1vwfgO1ibm7in/Y+bvWyowD93ilk8u3R8nC9b6vGVbMwO7WDm3L/9KfteX9vXB1qWXvrkpu",
"NS6tkn6FT6tQDw18e8hurQUF+EfjUemt2vKowV215piEAT0FezEgsMduoHRzxd5bclmdP2vtsXjgHq3b",
"5bDteaf+uIq7dFFtBcMeKe4aebD6ya6/wd+EmOX3QIEbLlAwS+l1ATEGs4oTfgRdiB5bGdREBgWJ75co",
"P1g6EVj6IfAcFDggWDpitd0Ogd/IceQDVKC04pTrypDMU/AmpttNEB2HL1TMFU7/hm6VDS6Ho3vgY9gq",
"FoaaY5zpNKy2Knfb3NGFv3AvToK69418xsDaF44sQ2D7yrH/OUuxyOJo9c6xs4yPzAcfxD6CmOW5hlbg",
"bTEgwAekCSgbq5ayN07flnlqDiRSgQKRxtDZZNeB8Zbd+7/MIZlzASCq0zgX/feYnl5h4C/V39OagTqB",
"FPjLO9mgVkmZhqEPQWARz5ErIGmBsxcK7dCUuTTGeFhk9X2xWA/n3gczdtQ+CboIY+Z8oZJBercEgeeE",
"CaF/CtURU92RNpB64JFzAe9B4vNc9/+i9PAvB907SYAhO8Z1yxcz3clBO5UktLNaek1ff1uHoX2ruZHT",
"KFVFV/4+or+v+QqlarjHHsKRD5Y95ipRo++KtnRY4VoR3lcowdU68AUfjLlcHLQ+rIhWnL5h5ZAiYiUF",
"+gTqzIqAIktfpNTwls3vWhJoRVcrupqKLsknPcon1ZIrx6NMe9An+89S21VIroEYbOgdruBq77ntPfcn",
"uefu7DjL5EJ7mv1Ip1nu9NjJySau1+aQnwlvID1K8xf2iqOrdS09FahTkFLzTJ0jBRIK381dv08rWjMk",
"APm4mY+pSiHte1PR5bPAQBtg8Dw/M39P5ZeaMhJ5kgOBxxzJ0vOfhOlVUhRK+p+Ox4jifzpOZHiMzujH",
"0uUsBwO3bc5YT8MLsLK8g81juAKXtaf4Hp/ixdA3S4bulgh6BRY/FuXiqjid8AxfJGGGozzfH9Vy8VjW",
"o1uRl9XpFXX9x2Rt9frZsvSeOnidh4nv8VhaepHUaS57lJckx1VpccgXkTUs0ZNFeV0WkssD3Lml3v7q",
"kBaYtzZ6/TzVaDKxqjWA/LgSdaWKjq1QbfWkouwiaIGCWb22JNo1ll7vIZmIKQ727qOVQR6MyJxnK+EZ",
"zRx3jnwvhibXDdahofTbviDhm9NKkoOXJFX8uWnxAiMhU+Sfz8cgdufoEdZpQaKVAJN214qQMYGRcNft",
"y4EtxIccz2g9lfC2rrura2TblEli38WeW0mlfELJtibo7nMxpVxXyMdUFlI59leYX8onuv1UNlWJppSF",
"62WSzb1MlO23l0cDWV+1lUY/iTSyv2u1suhwZJHC+NuXRH44q/OU8sOZ46OgpBuVzdGX4ewSBdDWGtSK",
"oZeNZ/LhI/StXIZ4y9zMVcwg6YD2eoeg7xmzx0F68DpsNgWOikImrENTQMa8lzaUBLBAgTD2qtbPPr9d",
"8rU0nPxa7WvAA5/eQzF0RaR7BRQXSrNVIMn6b/eQUqVBWzx/3fRzqRRWzoLLcNb8GBCORhVpzZkHBBae",
"RAbH/Qn7+Vx1fNm0Yw4fnE9Ul6CXuya9jCsOh7CR841A6o9N4yt43aTElmamFf40RSLXUXTqOldrMuau",
"MeKFvZLAmyZjSgM7xAzGJ5/deMu9LMXLdEktte/2tsGJ0Qshv2jAb/wELhXRsGW2XDbT6vxLAZ8NBbNq",
"vjqcLExb8jrlCGhyuEVpfpFcMdP2nDukc07wyQqsV3HeHQOfEkYw68EFQH5vFodJVPlwSpU7eQsU5MXG",
"cNgAjhigyLp92mRAW7ynDQ4l0mn7J6EOMQ3LTRk3oeWd/GtiBbU2Osesrz7lueoY46cPqVBvbgXc2J11",
"JZQ3utqdbpe9VzgBNTTU8rX27qflts2ekscYElLnWoTZ7skujuxSnc1AIRcUzMaiz4Ek9N3RMakgZo0z",
"Ut2TlpU01zoNmjbGRxHqkfAB1iTDc/o3Q4e3q+aafoQmtFmrT+Jj5ld0M2T4wCMxS0M+kf5RrQ29qDxS",
"iuSoVZgh/XGdMi5BRu12xN7qiAwBktYVtXCbJozipC1/bThsNmOmhgxWdeBYeEvxynI5lylT2tXMaaZN",
"t7rX7gkPcGnlnEDbNU8/w8jgI1za5DXJYErdl4cX2DYfJpcVjQGULtHDixVBzGLQ1kjlYwPhKAl4HKUw",
"fL2Iqwfbz5dx9GBT74GbhwqH6uRRQSxZBiG4dB6Bn0B9HiH4DSwiH1KR/QCXp29Y09NOl/7rjP/rjIr3",
"6nxDnzabbihbBk9cmmYcqqZz1nh4+JmGVoq0a71rArPPpaK0MOSub0Jm4xp0kPYKwBDAcFFjFhaJiV/E",
"vYdTQhObL+Q9fnbv6rP/3s2sI8GfQj2F31wIPWgo5cj3pgGf119MjqeJ/2B2p3ub+KKGEcSZTMCVQoH2",
"+YkFA11+Q+GAX1I64ObioY2+2DP5wNhUFRJ4w1LCBYEL/Qq3W/adGzKUxNk5FdckNbhbCR/hZ1YoGALs",
"FQpxYYhh5IPlxsVGpBSE+p5aAkZJMOTJibdVxMO67pQQTQxpMMtR0gqpvRVSI0ap25FPzIxmaWPltjkL",
"O+tHuGyf9TJj40q3dYbs9sauu7E7wva7ST4Qp4HxnOY8iJsdzSN5xPysRzNHwL4czZsxq3HgWq3+Jz0w",
"v7P/9p4QmffkJ2bdrg0/AgTwwzOoNBBeAALeQ/IFkflEsn2t/JDsoxcfJZB3/Xb5w5/ydNNWScfAqKI9",
"5fO+bApmrHm3qyHyan5GwSMisGnAhOyldwIdsq+t7it9PxV8rOT1KbHd+nrqwiEyWtxSDASfoJLW2+cs",
"JeqBo8Qu2IHj9kUjHDi4qwQ2CML42WN7z852pPUCYvfOVeRbnVyAAZj6sBcDAntsTMoegtdW0YuFFJI/",
"9Pi/n7mI8SGBZWFzwX7HqRnJRtDwPgfrvZfn+mrYeik6Dv3kr5UtnEL2Wbbk2IwTYUauJl00v4+1EfTN",
"OOFwougPhRO2G+i/mlbwYqH+lpzL4TsYzhUh+I05t+rkW8DFlDFfoxuk7KVn8U/sa3uDlNSo4GOlG6TE",
"dnuD1N0gM1rcTJCgGO/4O//DQgl0gADCuY/DRV2QLaeGH0MVFMs2wcY/75R3f9sK766iA/4cXLtHuWqv",
"DKlpUybNbUwDedGVhGyRRqo0iVkE/Bg68F6IgO0qv3y77JRfgY49SXllKb00erDYt1Z4vbDwMsqVFYRX",
"ldYTxeECkjlMcG9BdVC3vnxR1sURXVIfvLrMlDdp109ish/iokDgN3Ic+QAVqKI4UpM7QBnLLVO+NFNS",
"DtDsy6ZuIP9OYAKt2ZC1bsyB/6C9Doj5Djuy+ZCCVbdvD8nR3moZLJxHGGMUBq1M3CeZmO5OWSJKzllV",
"JmZPfTau3nH62Fjn6z0CBF7Shm1ejX2uTruJHAy1mNxmpoWUzvYg20IRll2V1cjzWoNgAoWdWz/DghVc",
"xU0mbpm3xSX/dVWJK3r0otBH7rI+5aTs4PAONgknpSv0DevRpps81qFltUejwm60j0c7z9qKfeA+VCea",
"HNMmzhOczsPwofycyj5/4V/b51SeY1LFSZPbQwHV+8QOO6p4fBuAhMzDGP0Henzi17uZ+BMk89BjFT2A",
"74dP+mrLfIOYHshZQD3P2Me1GPEYExATIzuO6Vd+jl33EzJ32GWlyJC3WD7bMICuKUJZz0PkzFcnZxo8",
"qNzDUCaOlRxW5hB4wmvEDznB1Fg82YZDN4kRWTL8uGH4gCAdlBVF+qrSA0NpfkZJCHQHVqaDury/46tx",
"kQALAjnArRwWcvhqPFRR1UASF7HcyuK9k8VlRkgl8dV4jXTDhYF1DNZGYzAE5PmrMsvw5mg2P6l1VEVx",
"V1uG3iOGNnKeJUdXnqiiTmdvF09WonT4ob1cbd9coENMM5tBWs86tzPto8o+PKqke7PpZ2ZdVfVK1s0K",
"qDvTJWeowunNCfFA7Hjdfa3svk2JIbZoRfnQSoSdlUJVafEJ8HqodSJCPdTpT3SjV62yXS0nanMC9gmB",
"i0gkt2RtFfFhEhyHlgywlSBVLvEIM19pIUI4Efj7d0F44Ue8OkbZFUPHkHasyB3Gkiza8jBr3rLwPmYz",
"i5NAbFWNRzsKooT5Q/DHXd1yn/dCU2lzmVXIF7bhLyFQsjVV2gJ4M+EsUCdc3kMy5sO2ouXltINmWXoN",
"lgYxXHuh2OcLhdylrUgNAvBDDxNAagyGAD+walDCUlhjJZwA/DBmg9qLiOHFj2gbTBHRgEO1uG55dA/M",
"gCY22EV6JOE103sK44eqZBGZA7bRpan1ZsqCSTgqvjCkUoRUVfWkyEgDXnhHR25H+9y2b+/nCvmvnsRQ",
"DGJioZ/+nTzHPxwbOyrGq5nZa5SCUG5ty7n791CuMt5KhyWjiuqHNHpCcuFd7SWfnQ0//WGZYaKteb2R",
"DNVSe8jH6K3uXSkRzQ1BzWtRqNV/NSUplJK9bWEKpTCFghdcY9DN1Vd+uTIVOrity9krtt4cwbSX1L0s",
"X5Hfo3I4cLUpqYnA+a7+s86PJccJtSewINNDdmspsL4eNBWDB6wmiO1aNbNA6+ZijuvPvyDVx/R38zS1",
"Oj8fs8fI2sck/mTJGVoF+qiGr4ds9Ja5X565sywmN0oRSg7jOu9OeRyx7W7N2jsya39RcR/Y5A/JNqmp",
"yrA5iYPnIIJb0iPGbOxW3hyMMsE3rNUofiCNIo1dET5DlZGholI7Y3HfT9/HsUbXqGJ9FjjJXVkGsrBf",
"KwM2DuAlwMQZXrCE9XPo+EDuoClNEcBk6BnzFL060+Up2oGPbZOCnqWyfK1JZP98a1aQJfaON3ayEFu9",
"TLCWdhrNT5k4zYP3IPFJ581JNycqdpFCLZ379SqTj3kmtenSYRPoJxWfzPkcdqF2tY89m9e3NpmSMR2z",
"NhjoXMY1TAFx56XHniqN6XCCgbbl5aC8k3Bk2Lrti2iS8lPJph97IsVS8z1V+kZJMPRwLvXsWggu59tt",
"aBASEUjt61FNejRONrt4ucHHbhwG9RoJbeX8HU4zoEiMZrNa94nzOAx+ajXlYPK7phuLPDrtDJJUJT6q",
"SeNturht4a5LZ24K3lWdKqWdklF8k+loh+ZTHWaG8oqcudOlcy/y8m4sda8qRbB9+t7pcnsZfBWlYMc5",
"fHPIWENDb49djZZeOue2pK7TQ/f4O/1PT/5qV+aufBBbP3xQwjnwonfp6k1g5TC6+7J3lvXptJvY5gcu",
"1ovTo6nZW0WeIL4+d6seE9dkrkN2T9pjztrS0dkem4dg2G90WG9EPtSVl2SzpjNaC4cDrzW5X/JhW9Um",
"VQEx4QYOK1sfpQJewtHGtlenKqjFIFtVoVoOCLbchiiwU+XZcWD7oKe+Mta7KbUGs302mLFH5AbWMtZ+",
"h6ayfbTjRSCmSDO4rhTA4o2/qI8ZO4JPkyJGC5twEtkuXH1tfBZLRJBgaFVvUbZdxbo1Zn2FnckGuAcU",
"eFZQsYaNQfqIAq8emoM3phK0gA64p4CWnKefAJaxzOoSOmcnZ6e9E/q/ycnJG/a//2s0VrPufTqBnnjp",
"sdqjUHRsq5FTiKfwPozhNkF+y2bYJMwVWL5HAcLz1WGW/XeK500BvVFMb+9xoGyJ/2mfBoq6Y2vh2Iq7",
"9HbeBJiHtE3+fuAI0OhBl2d/NaG/ZSDEIVegbtXwVg3fvRre6patbvkiIVB4zYrtTAC1lUXqz/ctVE/P",
"znkKqpf49HissRqmLVexH45l59aKuM9WxO3di1ICOCjPqVaZapWpg1GmsmVkonojttkUJCsGT620Gpi3",
"GiNZkjCt1WGzWolBA9iuXnI8TfyHXuaJqI8oepv4D8KpbUOKCh3xcPwTt+SHUOapDC22YUfT+q3ZbR2R",
"yjWZE8+pJBan7VoJISXEW6t93rqk4O4qNZKCN3J+iaHs/esGxcbhOFftVGzINJ0NxIbYp/0VG3JNNWJD",
"rKMVGwaxUbvP2xQb39M/e6WckbUREHqQGwqNA4+D0ODAWM1Ii+q9DY3Q727r8FiMjTDgqZnHo4E2aqIk",
"NsKAB12h+KC4b5sHcnvXP/QYim3Lkepoitx1YEOS5cADLfZeuGwr9qIkXRrUR83IqJz38WWvLLUSUg32",
"+CmVnwOo/nZbdVnalKy0u0SlKTSfs8wtVWWsHOAE8Mmcv8U+fYuIhzqcolf1mUSqc2ZWgrYj0cixvWpY",
"mqgcbdz8ncrGZsG3aq0uM/ytZNy9ZNy7QidC0FVR+XZSZymyOOfUo5fHUjcQEtlew9UpRq0U3qUUljuw",
"gmZaodbtuWKqSuBWMW3Fr0n8CoWkTifeuMjl1fN6bpgEpCZegrWRuchl2UfwCJAPpj5k0lcRN3r7wntI",
"eHU+fM5mPHjRW5cy/sBLRuQ2a0UzJScVTj7tC6LBYTqHpNUKSeTZP8EwxsduEsewmrMxvx3whg7tVuLe",
"Wwzj95Cci8G2SHd0poZ0xiBuCxC/fAFi6CYxIksmxt0wfECwn1DZ9ddXKqoKSYfy5CbJnW2/hoxniMyT",
"6bELfH8K3AcjOZ+Hi8iHBHKavqbzO9rziE7E7VHv2dDXFJfncvgCgb86Oat5e3XFvF553jkEHjvcvnf8",
"kG9Gfh+KYv25gMwc7uQC83NYog8TEJtFwZh+XQ1xrGtzrDF4to8zBl1DhIXhzIfboTc29A9Obxx9G6a3",
"DHE/HL2h4BERWF27CbNoJqkN8w5M6bY6vukIE9Z3KOba4imuTmTlzO4jLDcmv8BWX7Q+VllNngL2Msqb",
"aG6IOdo7Bq4LI2K2vPXZd5xa2MQkJWpTN5/36WzHnsQH5xMphiSDAaiC+vjKdfTXekyl5MWxXdp7e/qK",
"IatuUVFJn35vRl+8T2dbdenp4BugL77ylr4q6YtjewX68sMZCsxkdRnOsIMCB7Cz8ahCwbhkA23JOYMe",
"wXT8ekLa3T3aD2cz6DkoaK/PL3x97nZ+Ozvb1bqjOKQ0wIy2g4AgsnR6ziPwkccmo5simqBg5kA5klnh",
"ZYStv8p3O996MKBT9WJAYI/ZwKkOzd9qdMwcJqSGm8OE2LFzmLy8sUowWbhnhbpbI1WNNs2ox9Y+tYCL",
"KYzxHEUN7nBKJ7t7HD8DP2XdRFKKrRK4ftLmFzoVRe2lbpVLnYrBepKMAMZPYVzhSpHmYqcdHNm+SqTe",
"yDG3pySdz0EwSyfaJ23JZZB5KaJacd4qTc2UpmpW55SfZ8a19akYzqgkjquu3bwFrlSpUk+pbfG9BGOf",
"OF4ir31obJl+MzclSeWbuSxhH7gPW3mkGtOR9/iNqkaSNny0eoQxFiAY3Z/oGkQ76QKFYfyo0dKHwX34",
"HpLPYtCN1iRWIM0yNJ4enRyd6HJAKp5Hf6Vdv1qUG55ULLbgbVlB7F+gE0OSxEEOeYWbDhWzSRBQ/kmn",
"+NaTQ/bCiKecKrPAE5zOw/ChJxzRjr+LHyzC3+lRJ1qXHdX47/aR7WIgsyNYOtGO/cAsQ8UlfO3B9vLG",
"iWJ4ukqmRu8v0eKrFXMcCzzbmClkU+FXX8MxQnHDtoky95ZvNuM/yaHn7pMCNRQzVRlXKFbSOiACO+l2",
"tey5R+zJrDKlLWrKoylvsj+ea7yveSutYzVzzrTiOe5kWuWzrDnjD8djubHvqFhxa48sOSWXAr7kBcXs",
"g8zU6vrKj5WEbJ92YC9oeVtR/Llzw3RWCAwkEmW7i4Oy5DU1KL/lNEPNxXWYrXCaFIN7rBKBNavB2uBe",
"tJcRMk2SaKUAtgF6L5w5QhCrQjErxsd06zQse05ooHL9DIFiKwaHtbz10rylRqGtw1g2ap89dzXTA/eC",
"wTavC+aRYRsrL3KS5rhs18qhlUQoqoetPDAqiOsxZ42aaFUuj25Svi5eyniP6UuH8aRsUB5vH/hZU6KC",
"F5jYQP3g1asH6wGbxWESsbofGQhyo4ygsE4f4bJTmwZky0JizVpc8lGpLce1h9rESvW/GgkumZrI6Nwi",
"s2o0TRa0Uo6gvZRcEw27HDnDe2bdxgmlDuh1GVf5gEBMUp5C2LmHxJ1Dz1QdKhP8e65ICTJYMfHQi6Ub",
"UuBtlGeozS7UZhfaQnahRqJZyAZs8aqVO8mtxLLwrTkgE8yPIJe3LOWkw9R6qmAr7/ZKBcxIcVUVsOj4",
"N4UghnHq+NfVugIyTzIuD5LY77zpdJ6/Pv+/AAAA//9+b1jkXTUDAA==",
"58btKun6Pp/y5EltWOjK/mb6ZwCRk6oUenlwYXT7GQW3l0FsGjIw1phWg5XsAkplh+fuIZDN1kvEtIGb",
"L0/zm3G1NdfztvJwtQwXVeIDv6qsqQRql5k0Qh9NIWn9myHjCgXJ+dBCHRHMIfBgbHe687bFTRTT1uJK",
"makr1/G1SkT1FYGUDyTtpoHmXVM0pDJOLti2qIRaZbSiS53SURhCdWhMMFWRTUiUX2sHKqAsHbUmt1U+",
"+NSfUR1vvlDxNv7QP+106X/OXv/O/3h9etbpdj5dvK7GXhrPqskiq0xkHxub9mIJTN3Qs6hXlxthIDsx",
"d5pZAEgSww9r0zEd2knH0wpMNAtYcSU3hobLK2bfGBumsgzNAqsJigG8KaIUPOlXXAStlkYGCt7TsOLB",
"/2HlCscDFhTD/7gdXVaTx164zkmdxtIhpnyYKWh4P7gajJiMeT+cfLh9y1ziRsObAfNm659/7HQ7l8Or",
"QZ85qn0e/h8TfrLb3uYDlis9Opr7QUiTXusL0fpC/Fi+EK27QvmRYU2j5X4b3Q/G5tvwPbnmAVdjHRZv",
"umtZiFnrzDycXXHyz7m519X05Va16Smn4QUksuRAwSE2Cexf8EW6AjwH9RYLNW6btn8Xxhp45OMKy1Zh",
"EyLDGmZ32fzL/PpO/xwcvLnUK7XODuW4504OJxLdErLy1ubVgfz2ejWRJluo/ahOWQXsS71QqNpRgycK",
"A8Y39VzxReceIVFkXsyOAuUKTjxqRGX/vSh/plXJhdbPizJstBxhI/OgKOigVX6T2JCyWfZNYr+RUUoY",
"D+i4ur3OoYQnxDIn6t/UIrHd9ZnKVZENnJ5izvDeCULiRHH4iDzodR3gxCDwwoXs9IR835lCZwYDGMtr",
"jEpdZ1vDeHM0e/tJgKvtza5JOYWzFtlUapmzQ+/USpEXP1aWilwXI2OKS/sdMOwbez4EgZdVQ4z5UKtd",
"+ReQzEOv0WoF6J94z1S3Pw89A9V+mExuZJ5pN/RSCo4F8u1j8+8AD85nM+cm/mqJ8GoSEqisOeclzcvW",
"1km6tBSwMu18SrcuM3ZNOt3OzfWY/ed2wrQk0wnJA5ZwVTQTFu8nvGaRCwIngjGlqyP76nHP3Q54BIhd",
"Zs0Z0nLJg8rTwm/QTQh03DAQlS39pcEDEeGI3ay12bAo1aE0zx7AGM0C6DlZJ2Z5ur0dXjiCfXZ/o/TB",
"FPq4uqwna8NYKuerwY8BO1LkApWOo9syH2DyAYKYTCEgVbaB3FaxKq2svgJw5rJ3/lZ+dnJ21js9652+",
"mpy+fnPy+5vf/jj6448/Xr3+o3fy+s3JiX36EsCZmaoHA0zA1GfGtj2EdAG+mQl/Ab6hRbLYHANsX+8w",
"6xsxdGFamxSbcrTQNjwGhNemC+NVCHiUn0tDw7Go6ZPV9MS1+ZSwk/VywkDdhwaQFefVQpcElGCGwX1o",
"x6sjpQM9dP3QdE5huADRPIyhQxsJMbEimsdyrDGbTxcCb11cIps6zRpzPhl+5oWs0z9v+rdjQ4CuTVQI",
"R1YaEcLPTWMmLXGSc3lfALLemMd739bpxrejS83wTVVl1l6r5iiivHTKV2a8lTmSaNdNO9lUFKfmRalr",
"Jq9O8FmBh5d/wTReClIgR3nmL1SmBsEsEU9a1mJhfPER82ORd1YKO5fT0ujVNiGRBt9IDLQNsPdgHra0",
"OAaRqpxeX/ZZZP7NPycf2APJ5J83g/H5aHjD8ozcvv2n3tCTMbTqATG4fPfheszj+z/1r/o8b0hVHfwv",
"SpX/gi1TJVF98Er6i4VvbLdBLVB+7shqoPoakn+HU4N8pV90AFmR6Z/hVCfPd6JAGDEnK8dpdDgwW32t",
"qZERaG8o1e9MwrcsuzhUrkA81DQTF8qbkERmpfFXczyk/voG0SjeCrj6qKuIP4NE+c7qjWvcGAKZ6YIn",
"XptBgoW7YtrVmdG+6ZGnGKC1CGMlkcckBgTOalNCKxBe5vo1V7QzXTpfLbmYVPbVWb19Qk5dXE1Xi9Wq",
"LRpe6HLfpQAOL7Q4lL0/oiBnEXh3e3U+GTJpe3E76r+9pBrWRf99pYCkg8hjtBEFs9k17CW/68/mtQLv",
"dnys67X554r9NCYMYkzyEVbF0JGQAF9HsSmPPcClwTlGDk/J0i5MT17GgIMj6KJ75GaTOL9EAGPoOY8I",
"CCflX/VcYUREA8+p7NcbpTWJE6gZv+4hUnVBSm/3pycnJ0aXIu0weSeghv48jRb0dziVYsz2HDdk+187",
"pJWfiLu2gPG5xdX+ZUDIecVs0sNFdV7QurmY60u8XTYYfKL0KvudNFRJjJ4r6ySMzgZSfVIUsL9WC5M9",
"uegp3iv2h8IoCdZIplse5R2Cfu7cV3M1ZLSck2KKZKyZZCy9clrZ3cruVna/lOw2zPEDivYKt74VRDMb",
"bUjgwuwoaLiv1Hc2llgbs/xX1VlW13SdylJsbTxz1gYGNMj0Yh7WYkICsahuCZHKqHXUU0oPejO4uuBZ",
"QbP8oJrUr/lEoWlO0bf984/X797VnpJs2pXuzXmBYibGSV6cFB1HwuBGkfwlWGmDsTuHXuJXpEc3dF77",
"OPpSTI5hKWBqNhvz0tlGd5pcTo4tsmNVESpcuwijkYCl2W1CR3Koc96xTgstNC/NnzGENqNwVfJmyXTa",
"j4K5tN8kjzZPCV212AmY6dDrc5VxfZN/sOGMGsKsyyGsoh8hFM5jepG518sFLUtzvrxDBm6sm5B5kWtn",
"ZHLkTrw8bnparF9hc82ggDeN5IVp7MAqA6f42axyz9UtPfoyDexOvEI0RzPPK2KUp5t82aoCQ9Fmiyyb",
"e8Kw2RD11YMlqbsHiU9uKlPriEbGFDtWjwTiFvkn5gfvwlD36M/x9ZXDgS7Hn7ARtIGh8lnwhR77wtjj",
"boUWaMBC7ZigBQwNFVEwQe7D0uRHQr85WDyr2L0kKvKiAdsyHezxtPBSZoVjpc+YZ/zRofwxo2xz5k6b",
"BT4p79m27xaNM6RaXwPlsiRh5Ab6Ws/pjKw2+TbUhD73Yk92hXDuUJE9ChVKycaQ+Xqdm+tILMC3mhZP",
"zZR9UzEJHsKQUPnL5CeHcApBDGOZxIJhlB0r7OdsU+aEROzaE4YPCMrmiO4q/0m+nb/piFjcrK/IZ0J7",
"J5iEC8vJnpnE5z49Gjd4PovTvxmyGkeE2cTyv6aE2Dk9Ojk6YXTMo5E7bzqvjk6PTkRgMcMECx72RW3Q",
"mS7S4718nqetAoixk9pj6KYDWdGicym+v2dokJ75bJazk5PywB8g8Mmcoeg1/+6GARE1vkQRYdr0+G/M",
"+QqnB2ANHw/iOKRS+Jkd1eqcVyFJ15Ejjs6bv752O1gW7qCrzhpKn5K/BMzuHLoPna+0P8NfDIG3rEcg",
"bYaqMDiSDfYdhWzBDgkd4LowIg6Jwf09cmsxmmKgFqWPp8fApyIlmPXgAiC/xx6S8fF39rP62zPHiw+J",
"5vZ0wX7HDkjTO9HuDuvO36ZLu9CnLQa0AXO14CMwnonBAhKmD/xV4eRTmsERya07b3hAfyo0SkvpqEKN",
"vw9kO7ZeIdavJXr6rYytceK6EOP7xPeXDkepl8uNVULec7fz264or+8sgE+xAD2HpU3yZPwMB+PVxsHQ",
"QfEujKfI8yC/fWT0zemkiswkxU9YE3pYfevFQuVgH3jfTldDGF/ZtZe4mtTY/Lq1DonzEX4MEmf08Dbk",
"8ngjxMCxwzetgLg0AKtMJpXYIqGTSJznsfGsF/sbWYh2CTrYc2KAA9qKAUsxwKlle2JAPSAj1CPhAwzo",
"qSj/ZqdhFOpi80fwMXyADghYhj7WWnhrpTMWxESEJrSVNOjQ7jZSIh3eIBMkrHt13MVseYLOGXQ/NlHj",
"JlQtSIdu7ETsnCTj7LcqSk63PEfBrh8m3rF6Qzdr0KWUZ/LawwZxUIAJCFxYIuJz+lm6l5gV6+3jlgHi",
"JEEay7o3BFajtXMEq+/1Yus/KS9s33pyiF4YcWcXcaIp+83N4cff2X+fq/abSinW6qi0ocwqzjeyVhLx",
"zMAm5YR93akQ2txmixxBNYc3L5zxKMQaxwbbsVa25UhcwUxG3hzFFVKN089XM4Uf14k1ti2pVKuh+YtU",
"gP3sdH/BSLil/f2i/QVc+Qw3nt67O7hF6rAmNJUeiQdykG/iCKdjHDM7Pd8lbNzxS4TpBch3cq1NG0xb",
"D/MNt7bbdC6x48qUDTdfpnLJrW6fCCHderYRhU0o739uk8MAkZBK8+PvnOOfj6M4nELz5VK+fTogV26A",
"2XV5uYJcIL+Z4dOpb0JMRklww+a1t02ZDr1Ucu341KsgKJGSg9MTw+/RTk+Fq5CwtPNhjP7DU5OL5Dw8",
"aQWP0iyZOQlAPvQcbrd32PY474Q8H2bbqj84cmSGfeA+HH9n/7Gw4jtj2lCpqpGnHPZVZDmyN9rnxjQS",
"DwNxL63zeZzsk2pzuhswboOMhPnEr3czMU+exXIQAt8Pn+j0uheBItVK0ct+r1KxONHlOSbAx99xgK24",
"5WqsSv0yvwS4AZvkBzMziji5945NCshoGWUPGaVEsCmrXI0rGSXAGjaRiotibdKrLnReeSUusUjjt7EX",
"0z+6ZkMAr8azkiVAgeHs9escEKeb0IGiOKT/gF57hu0Ra5oukawQgQOiSFJ7+VjjbQr8SMDUh8cemOHj",
"NIe58dKI2a2RtXPIHBBnCv0wmKlZBdJ82WBWvlJ+Pr0ArMboRNTNrjeXyUzVWYIWnjuascy/ExgvM57x",
"wOwOedXH3LYiRKzkTgHel7r4WFPvxgqfX4BZWjBemzqrQg7RKeXrH5v157YSdjuvdyX86C0ULSIfLmBA",
"SroBM15IOkifzgF+0EoY1vD4O/1PzfMSL9kwXXK+KQoQOoGlqZ0Xojcd+hTQHR/5+Yr7BqEga/arsJRi",
"obZpxy8Up2hkemNY/dn58zd+99n+rBO16DrVFO7DhCdp2hMRkfFzSUSY7wzERoQc++GsTlfxw5njowDK",
"zEcCjqJEuQxnlyjghUUOUaqILE8kdFh6Mme6NEgW9rmjhQYFhFUSLAddGjK/xkTkXQ6dGSQU1QzLhpkx",
"4pZHzcwVqRsM96Y0Pb7V1ElAkL+BqfsOlXc9Ar8RB0MQu3OHzaQU9q1YP+ugE+nVa2UUDB+h/wv+lU6E",
"AtdPPGjaX9oSd7TabrXAlyxAB7BVbj2Z3IYCxqJUzJTHPt9Nl3dppxyUVsCVcupYHbJW27MHR64qhBoo",
"xCKKtX03z2ulqeRXjp3LcLb+qUP/v5eFDptfV5WKY8aDJy0o9gMcPfgBRSbmv7/HcCPnzlZPuu2r1Nle",
"r+Ag0157W7U6J+N0EmZ9FZu1UEz0LvSPPThNZmYj/eAR+AkrXOScDy4dmFbNd8AMoABnhcBEoVsPEHCk",
"kYfn0L9gUx2KS8HmI1o+n54PLhkSagJYGCYxFYWs8C0VE3rk7zSORQVfpl2sEXVQUI+nWUOr16gvcdNk",
"VmIxhefPB5dmlrfidQu9hj8A5EVPWp63yM/NdJt9fKP7kfQbzY1WGvMf4BIrFyXjtLRd8+slIwMRcV93",
"sTwPA4zoVVKQGHtkCl2WecNzwD1hOWgQdsS1fZvGhmpYpvA+jGEtMJsyP7zjW0PCHDQgZoXOQhcxCfqE",
"yFx9iyvWOdbAl6WVMOzslp/J7NeVS+PvLABx54g9PbowJgAFWeh+1TrTbHxwJUNJoYK59eLSLRGrnC7p",
"cYdihz9X6iAWCftedFumSyfLkJv5iLPaWem9xGBTKScQ1i5EU8xBTvMAlz1eTigCKMbOLx5kgo9y39IB",
"zr/e/OvXotiqdIKwM2xhN4yglTzkLW3XxVqvB+9276j299PWAlVngUp5wzJso4GCdsyOYUstjZ/tVpra",
"R7g8FGVt62FMEhdNGYGhu2UGHTM4QnvcAkN8fzztNQhcZb4FBOv9C5rEsO6xX6EJJompA2VOsT/tAbWR",
"0ELcJKwwpRwrzuQ6js0xJVrWnlFcJW3NCftqTigV27VQoGtvn5VTlK6I7DLO5zxav9REs7sCTqYYEscF",
"gYdYnhlJ1xu9PVSt2LnF0GNsxGEh9HpchgcQaXNlb/eGohk7vXgorN1AsEsR00r2vLYl8ZLJdo7fKl2r",
"a3jbOWelhhzgBPBJDGwUzbztz/14w1DA0WHzgMPeb1JSdlhhJ27V3+WbjSCPOtYTZacUgNsn6V09SV9l",
"r9A5hk/5M+VNe5631+LYBYv/bRPeCOokRePEnfulxgluRSwu25Nr0V+2Ukwc5m3LUjTIWM5WLLykWLBl",
"/a5CmPTorwjFSBV4s8GEz3bIFpOUn39yLp6FpD3cjRaTFc7YIqNVpgmuPzYPPOA5d2ymSXZfkuG2cQXg",
"m7TyFeAFkg9byweZb7iVD4d3ylso+8y3fZEVq6tQC4RklIHATpwEjuhZnbeYe1BcIky4F4WsjXeoMq0c",
"CaWgocY/yQLQtYOj6qHZlINS0TbLrL+Bx711zNOnNc3QC3m6ULh51TpGyv8LqykHDECLKne0/Z1sfcda",
"b5XYshQI/I2PuUqllXez+FtDsgHeEAWzO17Db0eQ9zUORA+9R+HTY/FIkHkS3S0qXYle1ohNBdsoCaRE",
"ax43rUrRNsfB/gQws71ZpAeVXYyF/YkbhSgglufuAgUJgfQ6Lv+KIXjwwqcgPYobHMPvIbmhkx/6IcwO",
"POkbrITuCIN1p6tUqT87OTvtndD/TU5O3rD//V+D3BHd+/f8JrKJA5JBmnoOq6CGFL41gL1HAcJz6L1l",
"gzcHd/uyMUdqK0hHxietfNxT+ZjfnY1LSXzsslLg5ig0Xio8zUejk3e8yc/9QMlQwFSVmgJJPMdX6LgS",
"aTuNImOT+tDjecJqXyZl8zZJVBstW5JRBcmwcckUw8gHy6riTvR7pWTiTX5qycRR0EQyxRJpu5RMHExb",
"wRSL1q1cauVSSS4V5MIG5ZJI/WnjfSvTq9d534rs7a377T6733JyceiwdvFrrP0Vbb5KMKSgiXE6iq29",
"VRKdNaCiQwWk1ZO8uIeryj4NXFxTRm7f4vM+riliMrkpULy2l6upiEW6ia2fq/BzFfho8sotmfKFPF0l",
"jTRxdd3H5Oc/t69rObO5Be83UJuYu6v4h52/a63MOHCPVzq5fHuULFzv+5phxQzsbu3Qtvwv/Vlb3t8L",
"V5da9u6q5Fbj0irpV/i0CvXQwLeH7NZaUIB/NB6V3qotjxrcVWuOSRjQU7AXAwJ77AZKN1fsvSWX1fmz",
"1h6LB+7Rul0O25536o+ruEsX1VYw7JHirpEHq5/s+hv8TYhZfg8UuOECBbOUXhcQYzCrOOFH0IXosZVB",
"TWRQkPh+ifKDpROBpR8Cz0GBA4KlI1bb7RD4jRxHPkAFSitOua4MyTwFb2K63QTRcfhCxVzh9G/oVtng",
"cji6Bz6GrWJhqDnGmU7Daqtyt80dXfgL9+IkqHvfyGcMrH3hyDIEtq8c+5+zFIssjlbvHDvL+Mh88EHs",
"I4hZnmtoBd4WAwJ8QJqAsrFqKXvj9G2Zp+ZAIhUoEGkMnU12HRhv2b3/yxySORcAojqNc9F/j+npFQb+",
"Uv09rRmoE0iBv7yTDWqVlGkY+hAEFvEcuQKSFjh7odAOTZlLY4yHRVbfF4v1cO59MGNH7ZOgizBmzhcq",
"GaR3SxB4TpgQ+qdQHTHVHWkDqQceORfwHiQ+z3X/L0oP/3LQvZMEGLJjXLd8MdOdHLRTSUI7q6XX9PW3",
"dRjat5obOY1SVXTl7yP6+5qvUKqGe+whHPlg2WOuEjX6rmhLhxWuFeF9hRJcrQNf8MGYy8VB68OKaMXp",
"G1YOKSJWUqBPoM6sCCiy9EVKDW/Z/K4lgVZ0taKrqeiSfNKjfFItuXI8yrQHfbL/LLVdheQaiMGG3uEK",
"rvae295zf5J77s6Os0wutKfZj3Sa5U6PnZxs4nptDvmZ8AbSozR/Ya84ulrX0lOBOgUpNc/UOVIgofDd",
"3PX7tKI1QwKQj5v5mKoU0r43FV0+Cwy0AQbP8zPz91R+qSkjkSc5EHjMkSw9/0mYXiVFoaT/6XiMKP6n",
"40SGx+iMfixdznIwcNvmjPU0vAAryzvYPIYrcFl7iu/xKV4MfbNk6G6JoFdg8WNRLq6K0wnP8EUSZjjK",
"8/1RLRePZT26FXlZnV5R139M1lavny1L76mD13mY+B6PpaUXSZ3mskd5SXJclRaHfBFZwxI9WZTXZSG5",
"PMCdW+rtrw5pgXlro9fPU40mE6taA8iPK1FXqujYCtVWTyrKLoIWKJjVa0uiXWPp9R6SiZjiYO8+Whnk",
"wYjMebYSntHMcefI92Joct1gHRpKv+0LEr45rSQ5eElSxZ+bFi8wEjJF/vl8DGJ3jh5hnRYkWgkwaXet",
"CBkTGAl33b4c2EJ8yPGM1lMJb+u6u7pGtk2ZJPZd7LmVVMonlGxrgu4+F1PKdYV8TGUhlWN/hfmlfKLb",
"T2VTlWhKWbheJtncy0TZfnt5NJD1VVtp9JNII/u7ViuLDkcWKYy/fUnkh7M6Tyk/nDk+Ckq6UdkcfRnO",
"LlEAba1BrRh62XgmHz5C38pliLfMzVzFDJIOaK93CPqeMXscpAevw2ZT4KgoZMI6NAVkzHtpQ0kACxQI",
"Y69q/ezz2yVfS8PJr9W+Bjzw6T0UQ1dEuldAcaE0WwWSrP92DylVGrTF89dNP5dKYeUsuAxnzY8B4WhU",
"kdaceUBg4UlkcNyfsJ/PVceXTTvm8MH5RHUJerlr0su44nAIGznfCKT+2DS+gtdNSmxpZlrhT1Mkch1F",
"p65ztSZj7hojXtgrCbxpMqY0sEPMYHzy2Y233MtSvEyX1FL7bm8bnBi9EPKLBvzGT+BSEQ1bZstlM63O",
"vxTw2VAwq+arw8nCtCWvU46AJodblOYXyRUzbc+5QzrnBJ+swHoV590x8ClhBLMeXADk92ZxmESVD6dU",
"uZO3QEFebAyHDeCIAYqs26dNBrTFe9rgUCKdtn8S6hDTsNyUcRNa3sm/JlZQa6NzzPrqU56rjjF++pAK",
"9eZWwI3dWVdCeaOr3el22XuFE1BDQy1fa+9+Wm7b7Cl5jCEhda5FmO2e7OLILtXZDBRyQcFsLPocSELf",
"HR2TCmLWOCPVPWlZSXOt06BpY3wUoR4JH2BNMjynfzN0eLtqrulHaEKbtfokPmZ+RTdDhg88ErM05BPp",
"H9Xa0IvKI6VIjlqFGdIf1ynjEmTUbkfsrY7IECBpXVELt2nCKE7a8teGw2YzZmrIYFUHjoW3FK8sl3OZ",
"MqVdzZxm2nSre+2e8ACXVs4JtF3z9DOMDD7CpU1ekwym1H15eIFt82FyWdEYQOkSPbxYEcQsBm2NVD42",
"EI6SgMdRCsPXi7h6sP18GUcPNvUeuHmocKhOHhXEkmUQgkvnEfgJ1OcRgt/AIvIhFdkPcHn6hjU97XTp",
"v874v86oeK/ON/Rps+mGsmXwxKVpxqFqOmeNh4efaWilSLvWuyYw+1wqSgtD7vomZDauQQdprwAMAQwX",
"NWZhkZj4Rdx7OCU0sflC3uNn964+++/dzDoS/CnUU/jNhdCDhlKOfG8a8Hn9xeR4mvgPZne6t4kvahhB",
"nMkEXCkUaJ+fWDDQ5TcUDvglpQNuLh7a6Is9kw+MTVUhgTcsJVwQuNCvcLtl37khQ0mcnVNxTVKDu5Xw",
"EX5mhYIhwF6hEBeGGEY+WG5cbERKQajvqSVglARDnpx4W0U8rOtOCdHEkAazHCWtkNpbITVilLod+cTM",
"aJY2Vm6bs7CzfoTL9lkvMzaudFtnyG5v7LobuyNsv5vkA3EaGM9pzoO42dE8kkfMz3o0cwTsy9G8GbMa",
"B67V6n/SA/M7+2/vCZF5T35i1u3a8CNAAD88g0oD4QUg4D0kXxCZTyTb18oPyT568VECeddvlz/8KU83",
"bZV0DIwq2lM+78umYMaad7saIq/mZxQ8IgKbBkzIXnon0CH72uq+0vdTwcdKXp8S262vpy4cIqPFLcVA",
"8Akqab19zlKiHjhK7IIdOG5fNMKBg7tKYIMgjJ89tvfsbEdaLyB271xFvtXJBRiAqQ97MSCwx8ak7CF4",
"bRW9WEgh+UOP//uZixgfElgWNhfsd5yakWwEDe9zsN57ea6vhq2XouPQT/5a2cIpZJ9lS47NOBFm5GrS",
"RfP7WBtB34wTDieK/lA4YbuB/qtpBS8W6m/JuRy+g+FcEYLfmHOrTr4FXEwZ8zW6Qcpeehb/xL62N0hJ",
"jQo+VrpBSmy3N0jdDTKjxc0ECYrxjr/zPyyUQAcIIJz7OFzUBdlyavgxVEGxbBNs/PNOefe3rfDuKjrg",
"z8G1e5Sr9sqQmjZl0tzGNJAXXUnIFmmkSpOYRcCPoQPvhQjYrvLLt8tO+RXo2JOUV5bSS6MHi31rhdcL",
"Cy+jXFlBeFVpPVEcLiCZwwT3FlQHdevLF2VdHNEl9cGry0x5k3b9JCb7IS4KBH4jx5EPUIEqiiM1uQOU",
"sdwy5UszJeUAzb5s6gby7wQm0JoNWevGHPgP2uuAmO+wI5sPKVh1+/aQHO2tlsHCeYQxRmHQysR9konp",
"7pQlouScVWVi9tRn4+odp4+Ndb7eI0DgJW3Y5tXY5+q0m8jBUIvJbWZaSOlsD7ItFGHZVVmNPK81CCZQ",
"2Ln1MyxYwVXcZOKWeVtc8l9XlbiiRy8KfeQu61NOyg4O72CTcFK6Qt+wHm26yWMdWlZ7NCrsRvt4tPOs",
"rdgH7kN1oskxbeI8wek8DB/Kz6ns8xf+tX1O5TkmVZw0uT0UUL1P7LCjise3AUjIPIzRf6DHJ369m4k/",
"QTIPPVbRA/h++KSvtsw3iOmBnAXU84x9XIsRjzEBMTGy45h+5efYdT8hc4ddVooMeYvlsw0D6JoilPU8",
"RM58dXKmwYPKPQxl4ljJYWUOgSe8RvyQE0yNxZNtOHSTGJElw48bhg8I0kFZUaSvKj0wlOZnlIRAd2Bl",
"OqjL+zu+GhcJsCCQA9zKYSGHr8ZDFVUNJHERy60s3jtZXGaEVBJfjddIN1wYWMdgbTQGQ0CevyqzDG+O",
"ZvOTWkdVFHe1Zeg9Ymgj51lydOWJKup09nbxZCVKhx/ay9X2zQU6xDSzGaT1rHM70z6q7MOjSro3m35m",
"1lVVr2TdrIC6M11yhiqc3pwQD8SO193Xyu7blBhii1aUD61E2FkpVJUWnwCvh1onItRDnf5EN3rVKtvV",
"cqI2J2CfELiIRHJL1lYRHybBcWjJAFsJUuUSjzDzlRYihBOBv38XhBd+xKtjlF0xdAxpx4rcYSzJoi0P",
"s+YtC+9jNrM4CcRW1Xi0oyBKmD8Ef9zVLfd5LzSVNpdZhXxhG/4SAiVbU6UtgDcTzgJ1wuU9JGM+bCta",
"Xk47aJal12BpEMO1F4p9vlDIXdqK1CAAP/QwAaTGYAjwA6sGJSyFNVbCCcAPYzaovYgYXvyItsEUEQ04",
"VIvrlkf3wAxoYoNdpEcSXjO9pzB+qEoWkTlgG12aWm+mLJiEo+ILQypFSFVVT4qMNOCFd3TkdrTPbfv2",
"fq6Q/+pJDMUgJhb66d/Jc/zDsbGjYryamb1GKQjl1racu38P5SrjrXRYMqqofkijJyQX3tVe8tnZ8NMf",
"lhkm2prXG8lQLbWHfIze6t6VEtHcENS8FoVa/VdTkkIp2dsWplAKUyh4wTUG3Vx95ZcrU6GD27qcvWLr",
"zRFMe0ndy/IV+T0qhwNXm5KaCJzv6j/r/FhynFB7AgsyPWS3lgLr60FTMXjAaoLYrlUzC7RuLua4/vwL",
"Un1MfzdPU6vz8zF7jKx9TOJPlpyhVaCPavh6yEZvmfvlmTvLYnKjFKHkMK7z7pTHEdvu1qy9I7P2FxX3",
"gU3+kGyTmqoMm5M4eA4iuCU9YszGbuXNwSgTfMNajeIH0ijS2BXhM1QZGSoqtTMW9/30fRxrdI0q1meB",
"k9yVZSAL+7UyYOMAXgJMnOEFS1g/h44P5A6a0hQBTIaeMU/RqzNdnqId+Ng2KehZKsvXmkT2z7dmBVli",
"73hjJwux1csEa2mn0fyUidM8eA8Sn3TenHRzomIXKdTSuV+vMvmYZ1KbLh02gX5S8cmcz2EXalf72LN5",
"fWuTKRnTMWuDgc5lXMMUEHdeeuyp0pgOJxhoW14OyjsJR4at276IJik/lWz6sSdSLDXfU6VvlARDD+dS",
"z66F4HK+3YYGIRGB1L4e1aRH42Szi5cbfOzGYVCvkdBWzt/hNAOKxGg2q3WfOI/D4KdWUw4mv2u6scij",
"084gSVXio5o03qaL2xbuunTmpuBd1alS2ikZxTeZjnZoPtVhZiivyJk7XTr3Ii/vxlL3qlIE26fvnS63",
"l8FXUQp2nMM3h4w1NPT22NVo6aVzbkvqOj10j7/T//Tkr3Zl7soHsfXDByWcAy96l67eBFYOo7sve2dZ",
"n067iW1+4GK9OD2amr1V5Ani63O36jFxTeY6ZPekPeasLR2d7bF5CIb9Rof1RuRDXXlJNms6o7VwOPBa",
"k/slH7ZVbVIVEBNu4LCy9VEq4CUcbWx7daqCWgyyVRWq5YBgy22IAjtVnh0Htg966itjvZtSazDbZ4MZ",
"e0RuYC1j7XdoKttHO14EYoo0g+tKASze+Iv6mLEj+DQpYrSwCSeR7cLV18ZnsUQECYZW9RZl21WsW2PW",
"V9iZbIB7QIFnBRVr2Bikjyjw6qE5eGMqQQvogHsKaMl5+glgGcusLqFzdnJ22juh/5ucnLxh//u/RmM1",
"696nE+iJlx6rPQpFx7YaOYV4Cu/DGG4T5Ldshk3CXIHlexQgPF8dZtl/p3jeFNAbxfT2HgfKlvif9mmg",
"qDu2Fo6tuEtv502AeUjb5O8HjgCNHnR59lcT+lsGQhxyBepWDW/V8N2r4a1u2eqWLxIChdes2M4EUFtZ",
"pP5830L19Oycp6B6iU+PxxqrYdpyFfvhWHZurYj7bEXc3r0oJYCD8pxqlalWmToYZSpbRiaqN2KbTUGy",
"YvDUSquBeasxkiUJ01odNquVGDSA7eolx9PEf+hlnoj6iKK3if8gnNo2pKjQEQ/HP3FLfghlnsrQYht2",
"NK3fmt3WEalckznxnEpicdqulRBSQry12uetSwrurlIjKXgj55cYyt6/blBsHI5z1U7FhkzT2UBsiH3a",
"X7Eh11QjNsQ6WrFhEBu1+7xNsfE9/bNXyhlZGwGhB7mh0DjwOAgNDozVjLSo3tvQCP3utg6PxdgIA56a",
"eTwaaKMmSmIjDHjQFYoPivu2eSC3d/1Dj6HYthypjqbIXQc2JFkOPNBi74XLtmIvStKlQX3UjIzKeR9f",
"9spSKyHVYI+fUvk5gOpvt1WXpU3JSrtLVJpC8znL3FJVxsoBTgCfzPlb7NO3iHiowyl6VZ9JpDpnZiVo",
"OxKNHNurhqWJytHGzd+pbGwWfKvW6jLD30rG3UvGvSt0IgRdFZVvJ3WWIotzTj16eSx1AyGR7TVcnWLU",
"SuFdSmG5AytophVq3Z4rpqoEbhXTVvyaxK9QSOp04o2LXF49r+eGSUBq4iVYG5mLXJZ9BI8A+WDqQyZ9",
"FXGjty+8h4RX58PnbMaDF711KeMPvGREbrNWNFNyUuHk074gGhymc0harZBEnv0TDGN87CZxDKs5G/Pb",
"AW/o0G4l7r3FMH4PybkYbIt0R2dqSGcM4rYA8csXIIZuEiOyZGLcDcMHBPsJlV1/faWiqpB0KE9uktzZ",
"9mvIeIbIPJkeu8D3p8B9MJLzebiIfEggp+lrOr+jPY/oRNwe9Z4NfU1xeS6HLxD4q5OzmrdXV8zrleed",
"Q+Cxw+17xw/5ZuT3oSjWnwvIzOFOLjA/hyX6MAGxWRSM6dfVEMe6Nscag2f7OGPQNURYGM58uB16Y0P/",
"4PTG0bdhessQ98PRGwoeEYHVtZswi2aS2jDvwJRuq+ObjjBhfYdiri2e4upEVs7sPsJyY/ILbPVF62OV",
"1eQpYC+jvInmhpijvWPgujAiZstbn33HqYVNTFKiNnXzeZ/OduxJfHA+kWJIMhiAKqiPr1xHf63HVEpe",
"HNulvbenrxiy6hYVlfTp92b0xft0tlWXng6+AfriK2/pq5K+OLZXoC8/nKHATFaX4Qw7KHAAOxuPKhSM",
"SzbQlpwz6BFMx68npN3do/1wNoOeg4L2+vzC1+du57ezs12tO4pDSgPMaDsICCJLp+c8Ah95bDK6KaIJ",
"CmYOlCOZFV5G2PqrfLfzrQcDOlUvBgT2mA2c6tD8rUbHzGFCarg5TIgdO4fJyxurBJOFe1aouzVS1WjT",
"jHps7VMLuJjCGM9R1OAOp3Syu8fxM/BT1k0kpdgqgesnbX6hU1HUXupWudSpGKwnyQhg/BTGFa4UaS52",
"2sGR7atE6o0cc3tK0vkcBLN0on3SllwGmZciqhXnrdLUTGmqZnVO+XlmXFufiuGMSuK46trNW+BKlSr1",
"lNoW30sw9onjJfLah8aW6TdzU5JUvpnLEvaB+7CVR6oxHXmP36hqJGnDR6tHGGMBgtH9ia5BtJMuUBjG",
"jxotfRjch+8h+SwG3WhNYgXSLEPj6dHJ0YkuB6TiefRX2vWrRbnhScViC96WFcT+BToxJEkc5JBXuOlQ",
"MZsEAeWfdIpvPTlkL4x4yqkyCzzB6TwMH3rCEe34u/jBIvydHnWiddlRjf9uH9kuBjI7gqUT7dgPzDJU",
"XMLXHmwvb5wohqerZGr0/hItvloxx7HAs42ZQjYVfvU1HCMUN2ybKHNv+WYz/pMceu4+KVBDMVOVcYVi",
"Ja0DIrCTblfLnnvEnswqU9qipjya8ib747nG+5q30jpWM+dMK57jTqZVPsuaM/5wPJYb+46KFbf2yJJT",
"cingS15QzD7ITK2ur/xYScj2aQf2gpa3FcWfOzdMZ4XAQCJRtrs4KEteU4PyW04z1Fxch9kKp0kxuMcq",
"EVizGqwN7kV7GSHTJIlWCmAboPfCmSMEsSoUs2J8TLdOw7LnhAYq188QKLZicFjLWy/NW2oU2jqMZaP2",
"2XNXMz1wLxhs87pgHhm2sfIiJ2mOy3atHFpJhKJ62MoDo4K4HnPWqIlW5fLoJuXr4qWM95i+dBhPygbl",
"8faBnzUlKniBiQ3UD169erAesFkcJhGr+5GBIDfKCArr9BEuO7VpQLYsJNasxSUfldpyXHuoTaxU/6uR",
"4JKpiYzOLTKrRtNkQSvlCNpLyTXRsMuRM7xn1m2cUOqAXpdxlQ8IxCTlKYSde0jcOfRM1aEywb/nipQg",
"gxUTD71YuiEF3kZ5htrsQm12oS1kF2okmoVswBavWrmT3EosC9+aAzLB/AhyectSTjpMracKtvJur1TA",
"jBRXVQGLjn9TCGIYp45/Xa0rIPMk4/Igif3Om07n+evz/wsAAP//ral9pkI1AwA=",
}
// GetSwagger returns the content of the embedded swagger specification file

300
cmd/hatchet-cli/cli/docs.go Normal file
View File

@@ -0,0 +1,300 @@
package cli
import (
"encoding/base64"
"encoding/json"
"fmt"
"os"
"os/exec"
"path/filepath"
"runtime"
"github.com/charmbracelet/huh"
"github.com/spf13/cobra"
"github.com/hatchet-dev/hatchet/cmd/hatchet-cli/cli/internal/config/cli"
"github.com/hatchet-dev/hatchet/cmd/hatchet-cli/cli/internal/styles"
)
const defaultMCPURL = "https://docs.hatchet.run/api/mcp"
const docsBaseURL = "https://docs.hatchet.run"
var mcpURL string
// docsCmd is the parent `hatchet docs` command. With no subcommand it prints
// every way to consume the LLM-optimized documentation (MCP server, llms.txt,
// full docs) plus editor-specific install instructions; see printAllOptions.
var docsCmd = &cobra.Command{
	Use:     "docs",
	Aliases: []string{"doc"},
	Short:   "Hatchet documentation for AI editors and coding agents",
	Long: `Hatchet documentation is optimized for LLMs and available as:
• MCP server: ` + defaultMCPURL + `
• llms.txt: ` + docsBaseURL + `/llms.txt
• Full docs: ` + docsBaseURL + `/llms-full.txt
Use "hatchet docs install" to configure your AI editor.`,
	Example: ` # Interactive — pick your editor
hatchet docs install
# Configure for Cursor
hatchet docs install cursor
# Configure for Claude Code
hatchet docs install claude-code
# Use a custom MCP URL (self-hosted)
hatchet docs install cursor --url https://my-hatchet.example.com/api/mcp`,
	Run: func(cmd *cobra.Command, args []string) {
		// No subcommand: show every consumption option rather than forcing a choice.
		printAllOptions()
	},
}
// docsInstallCmd represents the docs install command.
//
// When invoked without an editor subcommand it runs an interactive picker
// (huh select) and dispatches to the matching runDocs* helper; the
// `cursor` and `claude-code` subcommands skip the picker entirely.
var docsInstallCmd = &cobra.Command{
	Use:   "install",
	Short: "Install Hatchet docs into an AI editor",
	Long: `Configure Hatchet documentation as an MCP (Model Context Protocol) server
for AI editors like Cursor and Claude Code.`,
	Example: ` # Interactive — pick your editor
hatchet docs install
# Configure for Cursor
hatchet docs install cursor
# Configure for Claude Code
hatchet docs install claude-code`,
	Run: func(cmd *cobra.Command, args []string) {
		// Interactive mode: let user pick their editor
		var editor string
		form := huh.NewForm(
			huh.NewGroup(
				huh.NewSelect[string]().
					Title("Which AI editor do you want to configure?").
					Options(
						huh.NewOption("Cursor", "cursor"),
						huh.NewOption("Claude Code", "claude-code"),
					).
					Value(&editor),
			),
		).WithTheme(styles.HatchetTheme())
		err := form.Run()
		if err != nil {
			// Fatalf exits the process; there is nothing to clean up here.
			cli.Logger.Fatalf("could not run AI editor selection form: %v", err)
		}
		// Only the two options above are offered, so no default case is needed.
		switch editor {
		case "cursor":
			runDocsCursor()
		case "claude-code":
			runDocsClaudeCode()
		}
	},
}
// ---------------------------------------------------------------------------
// Subcommands of `docs install`
// ---------------------------------------------------------------------------
// docsInstallCursorCmd is the non-interactive path for Cursor:
// `hatchet docs install cursor` goes straight to runDocsCursor.
var docsInstallCursorCmd = &cobra.Command{
	Use:   "cursor",
	Short: "Configure Hatchet docs for Cursor IDE",
	Long: `Set up Hatchet documentation as an MCP server in Cursor.
This creates a .cursor/rules/hatchet-docs.mdc file in your project that
configures the Hatchet MCP docs server, and prints the one-click deeplink.`,
	Run: func(cmd *cobra.Command, args []string) {
		runDocsCursor()
	},
}
// docsInstallClaudeCodeCmd is the non-interactive path for Claude Code:
// `hatchet docs install claude-code` goes straight to runDocsClaudeCode.
var docsInstallClaudeCodeCmd = &cobra.Command{
	Use:   "claude-code",
	Short: "Configure Hatchet docs for Claude Code",
	Long:  `Set up Hatchet documentation as an MCP server in Claude Code.`,
	Run: func(cmd *cobra.Command, args []string) {
		runDocsClaudeCode()
	},
}
// init wires the docs command tree into the root command and registers the
// shared --url override on `install` and each editor subcommand, so the flag
// works at whichever level the user invokes it. All three flags bind the
// same package-level mcpURL variable.
func init() {
	rootCmd.AddCommand(docsCmd)
	docsCmd.AddCommand(docsInstallCmd)
	docsInstallCmd.AddCommand(docsInstallCursorCmd)
	docsInstallCmd.AddCommand(docsInstallClaudeCodeCmd)
	// Add --url flag to install and its subcommands
	for _, cmd := range []*cobra.Command{docsInstallCmd, docsInstallCursorCmd, docsInstallClaudeCodeCmd} {
		cmd.Flags().StringVar(&mcpURL, "url", "", "Custom MCP server URL (default: "+defaultMCPURL+")")
	}
}
// ---------------------------------------------------------------------------
// Implementation
// ---------------------------------------------------------------------------
// runDocsCursor configures Cursor for the current project:
//
//  1. writes .cursor/rules/hatchet-docs.mdc, an always-applied rule pointing
//     at the Hatchet MCP docs server,
//  2. prints the one-click cursor:// install deeplink, and
//  3. optionally opens that deeplink in the default browser.
//
// Filesystem failures are reported as warnings rather than aborting, so the
// deeplink is still printed even when the rule file cannot be written.
func runDocsCursor() {
	url := getMCPURL()
	fmt.Println(styles.Title("Hatchet Docs → Cursor"))
	fmt.Println()
	// 1. Write .cursor/rules/hatchet-docs.mdc
	rulesDir := filepath.Join(".", ".cursor", "rules")
	rulesFile := filepath.Join(rulesDir, "hatchet-docs.mdc")
	// The rule body is Cursor's .mdc format: YAML frontmatter + prose.
	ruleContent := fmt.Sprintf(`---
description: Hatchet documentation MCP server
alwaysApply: true
---
When working with Hatchet (task queues, workflows, durable execution), use the
Hatchet MCP docs server for accurate, up-to-date API reference and examples.
MCP server URL: %s
Use the search_docs tool to find relevant documentation pages, or get_full_docs
for comprehensive context. Documentation covers Python, TypeScript, and Go SDKs.
`, url)
	if err := os.MkdirAll(rulesDir, 0o755); err == nil {
		if err := os.WriteFile(rulesFile, []byte(ruleContent), 0o644); err == nil {
			fmt.Println(styles.SuccessMessage("Created " + rulesFile))
		} else {
			fmt.Printf(" ⚠ Could not write %s: %v\n", rulesFile, err)
		}
	} else {
		fmt.Printf(" ⚠ Could not create %s: %v\n", rulesDir, err)
	}
	// 2. Print the MCP deeplink
	fmt.Println()
	deeplink := cursorMCPDeeplink(url)
	fmt.Println(styles.Section("One-click install"))
	fmt.Println(styles.InfoMessage("Open this link in your browser to install the MCP server in Cursor:"))
	fmt.Println()
	fmt.Println(" " + styles.URL(deeplink))
	fmt.Println()
	// 3. Offer to open in browser
	if promptOpenBrowser() {
		openBrowser(deeplink)
	}
}
// runDocsClaudeCode configures Claude Code. If the `claude` CLI is on PATH it
// runs `claude mcp add --transport http hatchet-docs <url>` directly, streaming
// the CLI's output to the terminal; when the CLI is missing or the command
// fails, the equivalent command is printed for the user to run manually.
func runDocsClaudeCode() {
	url := getMCPURL()
	fmt.Println(styles.Title("Hatchet Docs → Claude Code"))
	fmt.Println()
	// Rendered only on the fallback paths below.
	claudeCmd := fmt.Sprintf("claude mcp add --transport http hatchet-docs %s", url)
	// Try to run claude mcp add directly
	if _, err := exec.LookPath("claude"); err == nil {
		fmt.Println(styles.InfoMessage("Found claude CLI. Adding MCP server..."))
		fmt.Println()
		cmd := exec.Command("claude", "mcp", "add", "--transport", "http", "hatchet-docs", url)
		cmd.Stdout = os.Stdout
		cmd.Stderr = os.Stderr
		if err := cmd.Run(); err == nil {
			fmt.Println()
			fmt.Println(styles.SuccessMessage("Hatchet docs MCP server added to Claude Code"))
			return
		}
		// Command failed: fall through to print the manual invocation.
		fmt.Printf(" ⚠ Command failed. You can run it manually:\n\n")
	} else {
		fmt.Println(styles.InfoMessage("Claude CLI not found on PATH. Run this command manually:"))
		fmt.Println()
	}
	fmt.Println(styles.Code.Render(claudeCmd))
	fmt.Println()
}
// printAllOptions prints every way to consume the Hatchet docs: the MCP
// server URL, the Cursor install deeplink, the Claude Code CLI command, and
// the llms.txt / llms-full.txt endpoints. This is the default output of
// `hatchet docs` with no subcommand.
func printAllOptions() {
	url := getMCPURL()
	fmt.Println(styles.Title("Hatchet Docs for AI Editors"))
	fmt.Println()
	// MCP Server
	fmt.Println(styles.Section("MCP Server"))
	fmt.Println(styles.KeyValue("URL", url))
	fmt.Println()
	// Cursor
	fmt.Println(styles.Section("Cursor"))
	deeplink := cursorMCPDeeplink(url)
	fmt.Println(styles.KeyValue("Deeplink", deeplink))
	fmt.Println(styles.KeyValue("Or run", "hatchet docs install cursor"))
	fmt.Println()
	// Claude Code
	fmt.Println(styles.Section("Claude Code"))
	fmt.Println(styles.Code.Render(fmt.Sprintf("claude mcp add --transport http hatchet-docs %s", url)))
	fmt.Println()
	// llms.txt
	fmt.Println(styles.Section("LLM-Friendly Docs (llms.txt)"))
	fmt.Println(styles.KeyValue("Index", docsBaseURL+"/llms.txt"))
	fmt.Println(styles.KeyValue("Full docs", docsBaseURL+"/llms-full.txt"))
	fmt.Println()
}
// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------
func getMCPURL() string {
if mcpURL != "" {
return mcpURL
}
return defaultMCPURL
}
func cursorMCPDeeplink(url string) string {
config := map[string]interface{}{
"command": "npx",
"args": []string{"-y", "mcp-remote", url},
}
configJSON, _ := json.Marshal(config)
encoded := base64.StdEncoding.EncodeToString(configJSON)
return fmt.Sprintf("cursor://anysphere.cursor-deeplink/mcp/install?name=hatchet-docs&config=%s", encoded)
}
// promptOpenBrowser asks the user whether to open the deeplink in their
// browser. It returns false both on an explicit "no" and on any form error
// (e.g. a non-interactive terminal), so callers can treat false as the safe
// default.
func promptOpenBrowser() bool {
	var open bool
	form := huh.NewForm(
		huh.NewGroup(
			huh.NewConfirm().
				Title("Open in browser?").
				Value(&open),
		),
	).WithTheme(styles.HatchetTheme())
	if err := form.Run(); err != nil {
		return false
	}
	return open
}
// openBrowser opens url with the platform's default handler: `open` on
// macOS, rundll32's FileProtocolHandler on Windows, and xdg-open elsewhere.
// The command is launched asynchronously (Start, not Run), so only launch
// failures are reported — the exit status of the opened process is never
// checked (best-effort by design).
func openBrowser(url string) {
	var cmd *exec.Cmd
	switch runtime.GOOS {
	case "darwin":
		cmd = exec.Command("open", url)
	case "windows":
		cmd = exec.Command("rundll32", "url.dll,FileProtocolHandler", url)
	default:
		cmd = exec.Command("xdg-open", url)
	}
	if err := cmd.Start(); err != nil {
		fmt.Printf(" ⚠ Could not open browser: %v\n", err)
		fmt.Println(" Copy the link above and paste it in your browser.")
	}
}

View File

@@ -860,6 +860,45 @@ func (v *RunDetailsView) fetchWorkflowRun() tea.Cmd {
}
}
// calculateAvailableDAGHeight dynamically calculates how much vertical space is available for the DAG
// by measuring the actual rendered chrome elements
func (v *RunDetailsView) calculateAvailableDAGHeight() int {
if v.Height == 0 {
return 10 // Fallback minimum
}
title := "Run Details"
if v.details != nil {
if v.details.Run.DisplayName != "" {
title = fmt.Sprintf("Run Details: %s", v.details.Run.DisplayName)
} else if len(v.details.Tasks) > 0 && v.details.Tasks[0].WorkflowName != nil && *v.details.Tasks[0].WorkflowName != "" {
title = fmt.Sprintf("Run Details: %s", *v.details.Tasks[0].WorkflowName)
}
}
header := RenderHeader(title, v.Ctx.ProfileName, v.Width)
headerHeight := lipgloss.Height(header) + 2 // +2 for spacing after header
statusSection := v.renderStatusSection()
statusHeight := lipgloss.Height(statusSection) + 2 // +2 for spacing after status
tabs := v.renderTabs()
tabsHeight := lipgloss.Height(tabs) + 2 // +2 for spacing after tabs
footerHeight := 3 // Footer typically has border + padding + content
// Account for DAG border and padding (from renderDAG style)
// Border: 2 lines (top + bottom), Padding(1, 2): 2 lines (top + bottom)
dagBorderPadding := 4
usedHeight := headerHeight + statusHeight + tabsHeight + footerHeight + dagBorderPadding
availableHeight := max(v.Height-usedHeight, 10) // Ensure minimum height of 10
v.debugLogger.Log("DAG height calculation: total=%d, header=%d, status=%d, tabs=%d, footer=%d, border=%d, available=%d",
v.Height, headerHeight, statusHeight, tabsHeight, footerHeight, dagBorderPadding, availableHeight)
return availableHeight
}
// buildDAG builds and renders the DAG visualization
func (v *RunDetailsView) buildDAG() {
if v.details == nil || len(v.details.Shape) == 0 {
@@ -869,8 +908,7 @@ func (v *RunDetailsView) buildDAG() {
return
}
// Reserve space for DAG (height of border box minus padding/chrome)
dagHeight := 10 // Approximate height for DAG area
dagHeight := v.calculateAvailableDAGHeight()
dagWidth := v.Width - 10 // Account for border and padding
v.debugLogger.Log("Building DAG graph: nodes=%d, dagWidth=%d, dagHeight=%d", len(v.details.Shape), dagWidth, dagHeight)
@@ -960,16 +998,10 @@ func (v *RunDetailsView) navigateDAG(direction string) {
switch direction {
case "left":
// Move to previous node in visual order
newIndex = currentIndex - 1
if newIndex < 0 {
newIndex = 0
}
newIndex = max(currentIndex-1, 0)
case "right":
// Move to next node in visual order
newIndex = currentIndex + 1
if newIndex >= len(navigableNodes) {
newIndex = len(navigableNodes) - 1
}
newIndex = min(currentIndex+1, len(navigableNodes)-1)
default:
return
}
@@ -1085,18 +1117,18 @@ func (v *RunDetailsView) exportDAGData() (string, error) {
b.WriteString(separator)
b.WriteString("\n\n")
b.WriteString(fmt.Sprintf("Nodes: %d\n", v.dagGraph.NodeCount()))
b.WriteString(fmt.Sprintf("Edges: %d\n", v.dagGraph.EdgeCount()))
b.WriteString(fmt.Sprintf("Components: %d\n", v.dagGraph.ComponentCount()))
b.WriteString(fmt.Sprintf("Actual Width: %d\n", v.dagGraph.ActualWidth))
b.WriteString(fmt.Sprintf("Actual Height: %d\n", v.dagGraph.ActualHeight))
fmt.Fprintf(&b, "Nodes: %d\n", v.dagGraph.NodeCount())
fmt.Fprintf(&b, "Edges: %d\n", v.dagGraph.EdgeCount())
fmt.Fprintf(&b, "Components: %d\n", v.dagGraph.ComponentCount())
fmt.Fprintf(&b, "Actual Width: %d\n", v.dagGraph.ActualWidth)
fmt.Fprintf(&b, "Actual Height: %d\n", v.dagGraph.ActualHeight)
b.WriteString("\n")
stats := v.dagGraph.GetComponentStats()
b.WriteString(fmt.Sprintf("Total Components: %d\n", stats.TotalComponents))
b.WriteString(fmt.Sprintf("Largest Component: %d nodes\n", stats.LargestComponent))
b.WriteString(fmt.Sprintf("Smallest Component: %d nodes\n", stats.SmallestComponent))
b.WriteString(fmt.Sprintf("Isolated Nodes: %d\n", stats.IsolatedNodes))
fmt.Fprintf(&b, "Total Components: %d\n", stats.TotalComponents)
fmt.Fprintf(&b, "Largest Component: %d nodes\n", stats.LargestComponent)
fmt.Fprintf(&b, "Smallest Component: %d nodes\n", stats.SmallestComponent)
fmt.Fprintf(&b, "Isolated Nodes: %d\n", stats.IsolatedNodes)
b.WriteString("\n")
}

View File

@@ -0,0 +1,7 @@
-- +goose Up
-- Add RUBY to the WorkerSDKS enum so workers built with the new Ruby SDK can
-- register themselves.
-- NOTE(review): goose wraps migrations in a transaction by default, and on
-- Postgres < 12 "ALTER TYPE ... ADD VALUE" cannot run inside a transaction
-- block — confirm the minimum supported Postgres version, or mark this
-- migration as not transactional if older versions must be supported.
ALTER TYPE "WorkerSDKS" ADD VALUE IF NOT EXISTS 'RUBY';

-- +goose Down
-- NOTE: Postgres does not support removing enum values.
-- A full enum recreation would be needed to revert this.

View File

@@ -0,0 +1,22 @@
# Updating Configuration
Modifications to Hatchet's configuration should be reflected in the appropriate [`pkg/config`](../pkg/config) package and wired in [`pkg/config/loader/loader.go`](../pkg/config/loader/loader.go).
```go
type ServerConfig struct {
RequestTimeout time.Duration `mapstructure:"request_timeout"`
}
```
To ensure configuration is loadable via environment variables, add the corresponding `BindEnv` call in `BindAllEnv()`.
```go
func BindAllEnv(v *viper.Viper) {
v.BindEnv("request_timeout", "HATCHET_REQUEST_TIMEOUT")
}
```
Finally, document the new environment variable in [`frontend/docs/pages/self-hosting/configuration-options.mdx`](../frontend/docs/pages/self-hosting/configuration-options.mdx) and any other relevant documentation.
```markdown
| Variable | Description | Default Value |
| ------------------------- | ---------------------------- | ------------- |
| `HATCHET_REQUEST_TIMEOUT` | Duration of request timeouts | `5s` |
```

View File

@@ -75,7 +75,6 @@ func Child(client *hatchet.Client) *hatchet.StandaloneTask {
)
}
func main() {
client, err := hatchet.NewClient()
if err != nil {
@@ -127,7 +126,6 @@ func main() {
return err
}
_ = childResult
n := 5

View File

@@ -45,7 +45,6 @@ func ProcessImageMergent(req MergentRequest) (*MergentResponse, error) {
}, nil
}
// > After (Hatchet)
type ImageProcessInput struct {
ImageURL string `json:"image_url"`

View File

@@ -40,7 +40,6 @@ func Lower(client *hatchet.Client) *hatchet.StandaloneTask {
)
}
// > Accessing the filter payload
func accessFilterPayload(ctx hatchet.Context, input EventInput) (*LowerTaskOutput, error) {
fmt.Println(ctx.FilterPayload())
@@ -49,7 +48,6 @@ func accessFilterPayload(ctx hatchet.Context, input EventInput) (*LowerTaskOutpu
}, nil
}
// > Declare with filter
func LowerWithFilter(client *hatchet.Client) *hatchet.StandaloneTask {
return client.NewStandaloneTask(
@@ -66,7 +64,6 @@ func LowerWithFilter(client *hatchet.Client) *hatchet.StandaloneTask {
)
}
func Upper(client *hatchet.Client) *hatchet.StandaloneTask {
return client.NewStandaloneTask(
"upper", func(ctx hatchet.Context, input EventInput) (*UpperTaskOutput, error) {

View File

@@ -48,7 +48,6 @@ func StickyDag(client *hatchet.Client) *hatchet.Workflow {
return stickyDag
}
type ChildInput struct {
N int `json:"n"`
}
@@ -91,4 +90,3 @@ func Sticky(client *hatchet.Client) *hatchet.StandaloneTask {
return sticky
}

View File

@@ -34,4 +34,3 @@ func main() {
fmt.Println("\nStreaming completed!")
}

View File

@@ -54,4 +54,3 @@ func main() {
log.Println("Failed to start server:", err)
}
}

View File

@@ -46,7 +46,6 @@ func StreamTask(ctx hatchet.Context, input StreamTaskInput) (*StreamTaskOutput,
}, nil
}
func StreamingWorkflow(client *hatchet.Client) *hatchet.StandaloneTask {
return client.NewStandaloneTask("stream-example", StreamTask)
}

View File

@@ -59,8 +59,9 @@ func main() {
// Update the webhook
fmt.Println("\nUpdating webhook...")
eventKeyExpr := "body.type"
updated, err := client.Webhooks().Update(ctx, basicWebhook.Name, features.UpdateWebhookOpts{
EventKeyExpression: "body.type",
EventKeyExpression: &eventKeyExpr,
})
if err != nil {
log.Fatalf("failed to update webhook: %v", err)

View File

@@ -473,14 +473,14 @@ setuptools = "*"
[[package]]
name = "hatchet-sdk"
version = "1.23.2"
version = "1.24.0"
description = "This is the official Python SDK for Hatchet, a distributed, fault-tolerant task queue. The SDK allows you to easily integrate Hatchet's task scheduling and workflow orchestration capabilities into your Python applications."
optional = false
python-versions = "<4.0,>=3.10"
groups = ["main"]
files = [
{file = "hatchet_sdk-1.23.2-py3-none-any.whl", hash = "sha256:95aa0f330527fa0a64adb1d9c758ae2161beb159b0ad54665eb0f1018c2d880f"},
{file = "hatchet_sdk-1.23.2.tar.gz", hash = "sha256:54e9120341ad464c1bb57db76fb29c06ff77df5afd935b22e4bd8c3586ed9f93"},
{file = "hatchet_sdk-1.24.0-py3-none-any.whl", hash = "sha256:6719947bcf3ee954966f5c403f3217b05f3a8829a54eddc3a12c982863d53c4c"},
{file = "hatchet_sdk-1.24.0.tar.gz", hash = "sha256:e39bdb4e7013e98f5354dba046cfe14f9284bf835a2f0ca67613efadcac3e180"},
]
[package.dependencies]
@@ -1125,4 +1125,4 @@ propcache = ">=0.2.0"
[metadata]
lock-version = "2.1"
python-versions = "^3.10"
content-hash = "b1e5494e65f47bb499caeffe3e70a906f1b1da98fd8e15182fe863138eada31f"
content-hash = "665009b2127a5e046ab48cb29fac59dd00bf17ab45f53a8c897bf8bf62d6bc57"

View File

@@ -8,7 +8,7 @@ package-mode = false
[tool.poetry.dependencies]
python = "^3.10"
hatchet-sdk = "1.23.2"
hatchet-sdk = "1.24.0"
[build-system]

View File

@@ -1,5 +1,3 @@
from pydantic import BaseModel
from hatchet_sdk import Context, EmptyModel, Hatchet
hatchet = Hatchet()
@@ -15,3 +13,34 @@ async def return_exceptions_task(input: Input, ctx: Context) -> dict[str, str]:
raise ValueError(f"error in task with index {input.index}")
return {"message": "this is a successful task."}
exception_parsing_workflow = hatchet.workflow(name="ExceptionParsingWorkflow")


@exception_parsing_workflow.task()
async def exception_class_no_name_task(input: EmptyModel, ctx: Context) -> None:
    """Raise an exception class whose ``__name__`` has been blanked out.

    Presumably exercises how the SDK reports exceptions with no usable type
    name — confirm against the corresponding test suite.
    """
    class CustomNoNamedException(Exception): ...

    CustomNoNamedException.__name__ = ""

    raise CustomNoNamedException


@exception_parsing_workflow.task()
async def exception_class_task(input: EmptyModel, ctx: Context) -> None:
    # Raises the class itself (not an instance).
    raise ValueError


@exception_parsing_workflow.task()
async def exception_instance_no_args_task(input: EmptyModel, ctx: Context) -> None:
    # Instance with no args: str(exc) is the empty string.
    raise ValueError()


@exception_parsing_workflow.task()
async def exception_instance_falsy_arg_task(input: EmptyModel, ctx: Context) -> None:
    # Instance with a falsy (empty-string) argument.
    raise ValueError("")


@exception_parsing_workflow.task()
async def exception_instance_truthy_arg_task(input: EmptyModel, ctx: Context) -> None:
    # Instance with a normal, non-empty message.
    raise ValueError("Oh no!")

View File

@@ -39,7 +39,10 @@ from examples.lifespans.simple import lifespan, lifespan_task
from examples.logger.workflow import logging_workflow
from examples.non_retryable.worker import non_retryable_workflow
from examples.on_failure.worker import on_failure_wf, on_failure_wf_with_details
from examples.return_exceptions.worker import return_exceptions_task
from examples.return_exceptions.worker import (
exception_parsing_workflow,
return_exceptions_task,
)
from examples.run_details.worker import run_detail_test_workflow
from examples.serde.worker import serde_workflow
from examples.simple.worker import simple, simple_durable
@@ -98,6 +101,7 @@ def main() -> None:
webhook_with_scope,
webhook_with_static_payload,
return_exceptions_task,
exception_parsing_workflow,
wait_for_sleep_twice,
async_task_with_dependencies,
sync_task_with_dependencies,

View File

@@ -0,0 +1,48 @@
# frozen_string_literal: true

# Example: worker affinity via desired worker labels — the task prefers a
# worker that already has the right model loaded and requires one whose
# "memory" label is below 256.
require "hatchet-sdk"

HATCHET = Hatchet::Client.new(debug: true) unless defined?(HATCHET)

# > AffinityWorkflow
AFFINITY_WORKER_WORKFLOW = HATCHET.workflow(name: "AffinityWorkflow")

# > AffinityTask
AFFINITY_WORKER_WORKFLOW.task(
  :step,
  desired_worker_labels: {
    # Soft preference: weight biases scheduling toward matching workers.
    "model" => Hatchet::DesiredWorkerLabel.new(value: "fancy-ai-model-v2", weight: 10),
    # Hard requirement: only workers with memory < 256 qualify.
    "memory" => Hatchet::DesiredWorkerLabel.new(
      value: 256,
      required: true,
      comparator: :less_than
    )
  }
) do |input, ctx|
  if ctx.worker.labels["model"] != "fancy-ai-model-v2"
    ctx.worker.upsert_labels("model" => "unset")
    # DO WORK TO EVICT OLD MODEL / LOAD NEW MODEL
    ctx.worker.upsert_labels("model" => "fancy-ai-model-v2")
  end
  { "worker" => ctx.worker.id }
end

# > AffinityWorker
def main
  worker = HATCHET.worker(
    "affinity-worker",
    slots: 10,
    labels: {
      "model" => "fancy-ai-model-v2",
      "memory" => 512
    },
    workflows: [AFFINITY_WORKER_WORKFLOW]
  )
  worker.start
end

main if __FILE__ == $PROGRAM_NAME

View File

@@ -0,0 +1,19 @@
# frozen_string_literal: true

# Example: list registered workflows and print basic metadata for each row.
require "hatchet-sdk"

HATCHET = Hatchet::Client.new(debug: true) unless defined?(HATCHET)

def main
  workflow_list = HATCHET.workflows.list
  # rows may be nil when no workflows are registered.
  rows = workflow_list.rows || []

  rows.each do |workflow|
    puts workflow.name
    puts workflow.metadata.id
    puts workflow.metadata.created_at
    puts workflow.metadata.updated_at
  end
end

main if __FILE__ == $PROGRAM_NAME

View File

@@ -0,0 +1,49 @@
# frozen_string_literal: true

# Example: bulk fan-out — the parent task builds n child run items and
# submits them with run_many in one call instead of n individual runs.
require "hatchet-sdk"

HATCHET = Hatchet::Client.new(debug: true) unless defined?(HATCHET)

# > BulkFanoutParent
BULK_PARENT_WF = HATCHET.workflow(name: "BulkFanoutParent")
BULK_CHILD_WF = HATCHET.workflow(name: "BulkFanoutChild")

BULK_PARENT_WF.task(:spawn, execution_timeout: 300) do |input, ctx|
  n = input["n"] || 100

  # Create each workflow run to spawn
  child_workflow_runs = n.times.map do |i|
    BULK_CHILD_WF.create_bulk_run_item(
      input: { "a" => i.to_s },
      key: "child#{i}",
      options: Hatchet::TriggerWorkflowOptions.new(
        additional_metadata: { "hello" => "earth" }
      )
    )
  end

  # Run workflows in bulk to improve performance
  spawn_results = BULK_CHILD_WF.run_many(child_workflow_runs)

  { "results" => spawn_results }
end

BULK_CHILD_WF.task(:process) do |input, ctx|
  puts "child process #{input['a']}"
  { "status" => "success #{input['a']}" }
end

BULK_CHILD_WF.task(:process2) do |input, ctx|
  puts "child process2"
  { "status2" => "success" }
end

def main
  worker = HATCHET.worker(
    "fanout-worker", slots: 40, workflows: [BULK_PARENT_WF, BULK_CHILD_WF]
  )
  worker.start
end

main if __FILE__ == $PROGRAM_NAME

View File

@@ -0,0 +1,27 @@
# frozen_string_literal: true

# Example: bulk-cancel workflow runs — first by explicit run ids, then by
# filter criteria (time window, status, workflow id, metadata).
require "hatchet-sdk"

# > Setup
hatchet = Hatchet::Client.new

workflows = hatchet.workflows.list
workflow = workflows.rows.first

# > List runs
workflow_runs = hatchet.runs.list(workflow_ids: [workflow.metadata.id])

# > Cancel by run ids
workflow_run_ids = workflow_runs.rows.map { |run| run.metadata.id }
hatchet.runs.bulk_cancel(ids: workflow_run_ids)

# > Cancel by filters
hatchet.runs.bulk_cancel(
  since: Time.now - 86_400,
  until_time: Time.now,
  statuses: ["RUNNING"],
  workflow_ids: [workflow.metadata.id],
  additional_metadata: { "key" => "value" }
)

View File

@@ -0,0 +1,30 @@
# frozen_string_literal: true

# Example: three tasks that deliberately fail on their first attempt
# (retry_count == 0) so bulk replay / retry behavior can be exercised.
require "hatchet-sdk"

HATCHET = Hatchet::Client.new(debug: true) unless defined?(HATCHET)

BULK_REPLAY_TEST_1 = HATCHET.task(name: "bulk_replay_test_1") do |input, ctx|
  puts "retrying bulk replay test task #{ctx.retry_count}"
  raise "This is a test error to trigger a retry." if ctx.retry_count == 0
end

BULK_REPLAY_TEST_2 = HATCHET.task(name: "bulk_replay_test_2") do |input, ctx|
  puts "retrying bulk replay test task #{ctx.retry_count}"
  raise "This is a test error to trigger a retry." if ctx.retry_count == 0
end

BULK_REPLAY_TEST_3 = HATCHET.task(name: "bulk_replay_test_3") do |input, ctx|
  puts "retrying bulk replay test task #{ctx.retry_count}"
  raise "This is a test error to trigger a retry." if ctx.retry_count == 0
end

def main
  worker = HATCHET.worker(
    "bulk-replay-test-worker",
    workflows: [BULK_REPLAY_TEST_1, BULK_REPLAY_TEST_2, BULK_REPLAY_TEST_3]
  )
  worker.start
end

main if __FILE__ == $PROGRAM_NAME

View File

@@ -0,0 +1,44 @@
# frozen_string_literal: true

# Example: two cancellation styles — a task that cancels itself via
# ctx.cancel, and a task that polls ctx.cancelled? as a cooperative exit flag.
require "hatchet-sdk"

HATCHET = Hatchet::Client.new(debug: true) unless defined?(HATCHET)

CANCELLATION_WORKFLOW = HATCHET.workflow(name: "CancelWorkflow")

# > Self-cancelling task
CANCELLATION_WORKFLOW.task(:self_cancel) do |input, ctx|
  sleep 2

  ## Cancel the task
  ctx.cancel

  sleep 10

  { "error" => "Task should have been cancelled" }
end

# > Checking exit flag
CANCELLATION_WORKFLOW.task(:check_flag) do |input, ctx|
  3.times do
    sleep 1

    # Note: Checking the status of the exit flag is mostly useful for cancelling
    # sync tasks without needing to forcibly kill the thread they're running on.
    if ctx.cancelled?
      puts "Task has been cancelled"
      raise "Task has been cancelled"
    end
  end

  { "error" => "Task should have been cancelled" }
end

def main
  worker = HATCHET.worker("cancellation-worker", workflows: [CANCELLATION_WORKFLOW])
  worker.start
end

main if __FILE__ == $PROGRAM_NAME

View File

@@ -0,0 +1,16 @@
# frozen_string_literal: true

# Example: run the same task many times in one bulk call
# (create_bulk_run_item + run_many).
require_relative "worker"

# > Bulk run a task
greetings = ["Hello, World!", "Hello, Moon!", "Hello, Mars!"]

results = CHILD_TASK_WF.run_many(
  greetings.map do |greeting|
    CHILD_TASK_WF.create_bulk_run_item(
      input: { "message" => greeting }
    )
  end
)

puts results

View File

@@ -0,0 +1,12 @@
# frozen_string_literal: true

# Example: spawning a child workflow run from inside another task.
require "hatchet-sdk"
require_relative "worker"

hatchet = Hatchet::Client.new

# > Running a task from within a task
SPAWN_TASK = hatchet.task(name: "SpawnTask") do |input, ctx|
  result = CHILD_TASK_WF.run({ "message" => "Hello, World!" })
  { "results" => result }
end

View File

@@ -0,0 +1,19 @@
# frozen_string_literal: true

# Example: the ways to trigger a workflow — single run, "async" run
# (synchronous in Ruby), and bulk run_many.
require_relative "worker"

# > Running a task
result = CHILD_TASK_WF.run({ "message" => "Hello, World!" })

# > Running a task aio
# In Ruby, run is synchronous
result = CHILD_TASK_WF.run({ "message" => "Hello, World!" })

# > Running multiple tasks
results = CHILD_TASK_WF.run_many(
  [
    CHILD_TASK_WF.create_bulk_run_item(input: { "message" => "Hello, World!" }),
    CHILD_TASK_WF.create_bulk_run_item(input: { "message" => "Hello, Moon!" })
  ]
)

puts results

View File

@@ -0,0 +1,22 @@
# frozen_string_literal: true

# Example: minimal one-task workflow plus the worker that runs it.
# > Simple
require "hatchet-sdk"

HATCHET = Hatchet::Client.new(debug: true) unless defined?(HATCHET)

CHILD_TASK_WF = HATCHET.workflow(name: "SimpleWorkflow")

CHILD_TASK_WF.task(:step1) do |input, ctx|
  puts "executed step1: #{input['message']}"
  { "transformed_message" => input["message"].upcase }
end

def main
  worker = HATCHET.worker("test-worker", slots: 1, workflows: [CHILD_TASK_WF])
  worker.start
end

main if __FILE__ == $PROGRAM_NAME

View File

@@ -0,0 +1,22 @@
# frozen_string_literal: true

# Example: cancel_in_progress concurrency — with max_runs: 1 per
# input.group, a newly submitted run cancels the run currently in flight.
require "hatchet-sdk"

HATCHET = Hatchet::Client.new(debug: true) unless defined?(HATCHET)

CONCURRENCY_CANCEL_IN_PROGRESS_WORKFLOW = HATCHET.workflow(
  name: "ConcurrencyCancelInProgress",
  concurrency: Hatchet::ConcurrencyExpression.new(
    expression: "input.group",
    max_runs: 1,
    limit_strategy: :cancel_in_progress
  )
)

STEP1_CIP = CONCURRENCY_CANCEL_IN_PROGRESS_WORKFLOW.task(:step1) do |input, ctx|
  50.times { sleep 0.10 }
end

CONCURRENCY_CANCEL_IN_PROGRESS_WORKFLOW.task(:step2, parents: [STEP1_CIP]) do |input, ctx|
  50.times { sleep 0.10 }
end

View File

@@ -0,0 +1,22 @@
# frozen_string_literal: true
require "hatchet-sdk"
HATCHET = Hatchet::Client.new(debug: true) unless defined?(HATCHET)
CONCURRENCY_CANCEL_NEWEST_WORKFLOW = HATCHET.workflow(
name: "ConcurrencyCancelNewest",
concurrency: Hatchet::ConcurrencyExpression.new(
expression: "input.group",
max_runs: 1,
limit_strategy: :cancel_newest
)
)
STEP1_CN = CONCURRENCY_CANCEL_NEWEST_WORKFLOW.task(:step1) do |input, ctx|
50.times { sleep 0.10 }
end
CONCURRENCY_CANCEL_NEWEST_WORKFLOW.task(:step2, parents: [STEP1_CN]) do |input, ctx|
50.times { sleep 0.10 }
end

View File

@@ -0,0 +1,31 @@
# frozen_string_literal: true

# Example: cap concurrent runs at 5 per input.group_key, cancelling
# in-progress runs beyond the limit.
require "hatchet-sdk"

HATCHET = Hatchet::Client.new(debug: true) unless defined?(HATCHET)

# > Workflow
CONCURRENCY_LIMIT_WORKFLOW = HATCHET.workflow(
  name: "ConcurrencyDemoWorkflow",
  concurrency: Hatchet::ConcurrencyExpression.new(
    expression: "input.group_key",
    max_runs: 5,
    limit_strategy: :cancel_in_progress
  )
)

CONCURRENCY_LIMIT_WORKFLOW.task(:step1) do |input, ctx|
  sleep 3
  puts "executed step1"
  { "run" => input["run"] }
end

def main
  worker = HATCHET.worker(
    "concurrency-demo-worker", slots: 10, workflows: [CONCURRENCY_LIMIT_WORKFLOW]
  )
  worker.start
end

main if __FILE__ == $PROGRAM_NAME

View File

@@ -0,0 +1,33 @@
# frozen_string_literal: true

# Example: group_round_robin concurrency — at most one run per input.group
# executes at a time, with scheduling rotating across groups.
require "hatchet-sdk"

HATCHET = Hatchet::Client.new(debug: true) unless defined?(HATCHET)

# > Concurrency Strategy With Key
CONCURRENCY_LIMIT_RR_WORKFLOW = HATCHET.workflow(
  name: "ConcurrencyDemoWorkflowRR",
  concurrency: Hatchet::ConcurrencyExpression.new(
    expression: "input.group",
    max_runs: 1,
    limit_strategy: :group_round_robin
  )
)

CONCURRENCY_LIMIT_RR_WORKFLOW.task(:step1) do |input, ctx|
  puts "starting step1"
  sleep 2
  puts "finished step1"
end

def main
  worker = HATCHET.worker(
    "concurrency-demo-worker-rr",
    slots: 10,
    workflows: [CONCURRENCY_LIMIT_RR_WORKFLOW]
  )
  worker.start
end

main if __FILE__ == $PROGRAM_NAME

View File

@@ -0,0 +1,44 @@
# frozen_string_literal: true

# Example: task-level concurrency with two expressions (input.digit and
# input.name), each with its own round-robin limit; both apply at once.
require "hatchet-sdk"

HATCHET = Hatchet::Client.new(debug: true) unless defined?(HATCHET)

SLEEP_TIME_MK = 2
DIGIT_MAX_RUNS = 8
NAME_MAX_RUNS = 3

# > Concurrency Strategy With Key
CONCURRENCY_MULTIPLE_KEYS_WORKFLOW = HATCHET.workflow(
  name: "ConcurrencyWorkflowManyKeys"
)

CONCURRENCY_MULTIPLE_KEYS_WORKFLOW.task(
  :concurrency_task,
  concurrency: [
    Hatchet::ConcurrencyExpression.new(
      expression: "input.digit",
      max_runs: DIGIT_MAX_RUNS,
      limit_strategy: :group_round_robin
    ),
    Hatchet::ConcurrencyExpression.new(
      expression: "input.name",
      max_runs: NAME_MAX_RUNS,
      limit_strategy: :group_round_robin
    )
  ]
) do |input, ctx|
  sleep SLEEP_TIME_MK
end

def main
  worker = HATCHET.worker(
    "concurrency-worker-multiple-keys",
    slots: 10,
    workflows: [CONCURRENCY_MULTIPLE_KEYS_WORKFLOW]
  )
  worker.start
end

main if __FILE__ == $PROGRAM_NAME

View File

@@ -0,0 +1,46 @@
# frozen_string_literal: true

# Example: workflow-level concurrency with two expressions (input.digit and
# input.name) that apply to every task in the workflow.
require "hatchet-sdk"

HATCHET = Hatchet::Client.new(debug: true) unless defined?(HATCHET)

SLEEP_TIME_WL = 2
DIGIT_MAX_RUNS_WL = 8
NAME_MAX_RUNS_WL = 3

# > Multiple Concurrency Keys
CONCURRENCY_WORKFLOW_LEVEL_WORKFLOW = HATCHET.workflow(
  name: "ConcurrencyWorkflowLevel",
  concurrency: [
    Hatchet::ConcurrencyExpression.new(
      expression: "input.digit",
      max_runs: DIGIT_MAX_RUNS_WL,
      limit_strategy: :group_round_robin
    ),
    Hatchet::ConcurrencyExpression.new(
      expression: "input.name",
      max_runs: NAME_MAX_RUNS_WL,
      limit_strategy: :group_round_robin
    )
  ]
)

CONCURRENCY_WORKFLOW_LEVEL_WORKFLOW.task(:task_1) do |input, ctx|
  sleep SLEEP_TIME_WL
end

CONCURRENCY_WORKFLOW_LEVEL_WORKFLOW.task(:task_2) do |input, ctx|
  sleep SLEEP_TIME_WL
end

def main
  worker = HATCHET.worker(
    "concurrency-worker-workflow-level",
    slots: 10,
    workflows: [CONCURRENCY_WORKFLOW_LEVEL_WORKFLOW]
  )
  worker.start
end

main if __FILE__ == $PROGRAM_NAME

View File

@@ -0,0 +1,113 @@
# frozen_string_literal: true

# Example: task conditions — sleep waits, user-event waits, skip_if
# expressions on parent output, branching, and a final aggregation task that
# tolerates skipped parents via was_skipped?.
# > Create a workflow
require "hatchet-sdk"

HATCHET = Hatchet::Client.new(debug: true) unless defined?(HATCHET)

TASK_CONDITION_WORKFLOW = HATCHET.workflow(name: "TaskConditionWorkflow")

# > Add base task
COND_START = TASK_CONDITION_WORKFLOW.task(:start) do |input, ctx|
  { "random_number" => rand(1..100) }
end

# > Add wait for sleep
WAIT_FOR_SLEEP = TASK_CONDITION_WORKFLOW.task(
  :wait_for_sleep,
  parents: [COND_START],
  wait_for: [Hatchet::SleepCondition.new(10)]
) do |input, ctx|
  { "random_number" => rand(1..100) }
end

# > Add skip condition override
TASK_CONDITION_WORKFLOW.task(
  :skip_with_multiple_parents,
  parents: [COND_START, WAIT_FOR_SLEEP],
  # random_number is always 1..100, so this expression is always true and
  # the task is always skipped — intentional for the example.
  skip_if: [Hatchet::ParentCondition.new(parent: COND_START, expression: "output.random_number > 0")]
) do |input, ctx|
  { "random_number" => rand(1..100) }
end

# > Add skip on event
SKIP_ON_EVENT = TASK_CONDITION_WORKFLOW.task(
  :skip_on_event,
  parents: [COND_START],
  wait_for: [Hatchet::SleepCondition.new(30)],
  skip_if: [Hatchet::UserEventCondition.new(event_key: "skip_on_event:skip")]
) do |input, ctx|
  { "random_number" => rand(1..100) }
end

# > Add branching
LEFT_BRANCH = TASK_CONDITION_WORKFLOW.task(
  :left_branch,
  parents: [WAIT_FOR_SLEEP],
  skip_if: [
    Hatchet::ParentCondition.new(
      parent: WAIT_FOR_SLEEP,
      expression: "output.random_number > 50"
    )
  ]
) do |input, ctx|
  { "random_number" => rand(1..100) }
end

RIGHT_BRANCH = TASK_CONDITION_WORKFLOW.task(
  :right_branch,
  parents: [WAIT_FOR_SLEEP],
  # Complementary condition to LEFT_BRANCH: exactly one branch runs.
  skip_if: [
    Hatchet::ParentCondition.new(
      parent: WAIT_FOR_SLEEP,
      expression: "output.random_number <= 50"
    )
  ]
) do |input, ctx|
  { "random_number" => rand(1..100) }
end

# > Add wait for event
WAIT_FOR_EVENT = TASK_CONDITION_WORKFLOW.task(
  :wait_for_event,
  parents: [COND_START],
  wait_for: [
    Hatchet.or_(
      Hatchet::SleepCondition.new(60),
      Hatchet::UserEventCondition.new(event_key: "wait_for_event:start")
    )
  ]
) do |input, ctx|
  { "random_number" => rand(1..100) }
end

# > Add sum
TASK_CONDITION_WORKFLOW.task(
  :sum,
  parents: [COND_START, WAIT_FOR_SLEEP, WAIT_FOR_EVENT, SKIP_ON_EVENT, LEFT_BRANCH, RIGHT_BRANCH]
) do |input, ctx|
  one = ctx.task_output(COND_START)["random_number"]
  two = ctx.task_output(WAIT_FOR_EVENT)["random_number"]
  three = ctx.task_output(WAIT_FOR_SLEEP)["random_number"]
  # Skipped parents contribute 0 to the sum.
  four = ctx.was_skipped?(SKIP_ON_EVENT) ? 0 : ctx.task_output(SKIP_ON_EVENT)["random_number"]
  five = ctx.was_skipped?(LEFT_BRANCH) ? 0 : ctx.task_output(LEFT_BRANCH)["random_number"]
  six = ctx.was_skipped?(RIGHT_BRANCH) ? 0 : ctx.task_output(RIGHT_BRANCH)["random_number"]
  { "sum" => one + two + three + four + five + six }
end

def main
  worker = HATCHET.worker("dag-worker", workflows: [TASK_CONDITION_WORKFLOW])
  worker.start
end

main if __FILE__ == $PROGRAM_NAME

View File

@@ -0,0 +1,22 @@
# frozen_string_literal: true
# Programmatic cron-trigger management: create, list, and delete cron
# triggers for a workflow at runtime.
require "hatchet-sdk"
hatchet = Hatchet::Client.new
dynamic_cron_workflow = hatchet.workflow(name: "DynamicCronWorkflow")
# > Create
# Runs daily at 12:00 with a fixed input payload.
cron_trigger = dynamic_cron_workflow.create_cron(
  "customer-a-daily-report",
  "0 12 * * *",
  input: { "name" => "John Doe" }
)
# The trigger id can be persisted for later lookup or deletion.
id = cron_trigger.metadata.id
# > List
cron_triggers = hatchet.cron.list
# > Delete
hatchet.cron.delete(cron_trigger.metadata.id)

View File

@@ -0,0 +1,35 @@
# frozen_string_literal: true
require "hatchet-sdk"
HATCHET = Hatchet::Client.new(debug: true) unless defined?(HATCHET)
# > Cron Workflow Definition
# Declarative cron: the workflow is triggered automatically every 5 minutes.
CRON_WORKFLOW = HATCHET.workflow(
  name: "CronWorkflow",
  on_crons: ["*/5 * * * *"]
)
CRON_WORKFLOW.task(:cron_task) do |input, ctx|
  puts "Cron task executed at #{Time.now}"
  { "status" => "success" }
end
# > Programmatic Cron Creation
# Alternative to on_crons: register an additional cron trigger via the API.
# NOTE(review): defined but never called in this file — presumably invoked
# from docs or tests.
def create_cron
  HATCHET.cron.create(
    workflow_name: "CronWorkflow",
    cron_name: "my-programmatic-cron",
    expression: "*/10 * * * *",
    input: { "message" => "hello from cron" }
  )
end
# Start a worker serving the cron-triggered workflow.
def main
  worker = HATCHET.worker("cron-worker", workflows: [CRON_WORKFLOW])
  worker.start
end
main if __FILE__ == $PROGRAM_NAME

View File

@@ -0,0 +1,7 @@
# frozen_string_literal: true
require_relative "worker"
# > Trigger the DAG
result = DAG_WORKFLOW.run
puts result

View File

@@ -0,0 +1,49 @@
# frozen_string_literal: true
require "hatchet-sdk"
HATCHET = Hatchet::Client.new(debug: true) unless defined?(HATCHET)
# > Define a DAG
DAG_WORKFLOW = HATCHET.workflow(name: "DAGWorkflow")
# > First task
# step1 and step2 have no parents, so they can run in parallel.
STEP1 = DAG_WORKFLOW.task(:step1, execution_timeout: 5) do |input, ctx|
  { "random_number" => rand(1..100) }
end
STEP2 = DAG_WORKFLOW.task(:step2, execution_timeout: 5) do |input, ctx|
  { "random_number" => rand(1..100) }
end
# > Task with parents
# Fan-in: reads both parents' outputs via ctx.task_output.
DAG_WORKFLOW.task(:step3, parents: [STEP1, STEP2]) do |input, ctx|
  one = ctx.task_output(STEP1)["random_number"]
  two = ctx.task_output(STEP2)["random_number"]
  { "sum" => one + two }
end
# Parents may be referenced by task handle (STEP1) or by symbol name (:step3).
DAG_WORKFLOW.task(:step4, parents: [STEP1, :step3]) do |input, ctx|
  puts(
    "executed step4",
    Time.now.strftime("%H:%M:%S"),
    input.inspect,
    ctx.task_output(STEP1).inspect,
    ctx.task_output(:step3).inspect
  )
  { "step4" => "step4" }
end
# > Declare a worker
def main
  worker = HATCHET.worker("dag-worker", workflows: [DAG_WORKFLOW])
  worker.start
end
main if __FILE__ == $PROGRAM_NAME

View File

@@ -0,0 +1,19 @@
# frozen_string_literal: true
require "hatchet-sdk"
HATCHET = Hatchet::Client.new(debug: true) unless defined?(HATCHET)
# > Task using Struct-based input
# Ruby equivalent of Python dataclass -- use plain hashes
# Reads input["name"] and returns a greeting hash.
SAY_HELLO = HATCHET.task(name: "say_hello") do |input, ctx|
  { "message" => "Hello, #{input['name']}!" }
end
# Start a worker serving the standalone task.
def main
  worker = HATCHET.worker("test-worker", workflows: [SAY_HELLO])
  worker.start
end
main if __FILE__ == $PROGRAM_NAME

View File

@@ -0,0 +1,51 @@
# frozen_string_literal: true
require "hatchet-sdk"
HATCHET = Hatchet::Client.new(debug: true) unless defined?(HATCHET)
DEDUPE_PARENT_WF = HATCHET.workflow(name: "DedupeParent")
DEDUPE_CHILD_WF = HATCHET.workflow(name: "DedupeChild")
# Spawns two children with distinct dedupe keys; if a run with the same key
# is already in flight, the SDK raises DedupeViolationError and the child is
# skipped rather than duplicated.
DEDUPE_PARENT_WF.task(:spawn, execution_timeout: 60) do |input, ctx|
  puts "spawning child"
  results = []
  2.times do |i|
    begin
      results << DEDUPE_CHILD_WF.run(
        options: Hatchet::TriggerWorkflowOptions.new(
          additional_metadata: { "dedupe" => "test" },
          key: "child#{i}"
        )
      )
    rescue Hatchet::DedupeViolationError => e
      puts "dedupe violation #{e}"
      next
    end
  end
  puts "results #{results}"
  { "results" => results }
end
DEDUPE_CHILD_WF.task(:process) do |input, ctx|
  # Sleep keeps the run in flight long enough to exercise deduplication.
  sleep 3
  puts "child process"
  { "status" => "success" }
end
DEDUPE_CHILD_WF.task(:process2) do |input, ctx|
  puts "child process2"
  { "status2" => "success" }
end
# Serve parent and child workflows on a single worker.
def main
  worker = HATCHET.worker(
    "fanout-worker", slots: 100, workflows: [DEDUPE_PARENT_WF, DEDUPE_CHILD_WF]
  )
  worker.start
end
main if __FILE__ == $PROGRAM_NAME

View File

@@ -0,0 +1,32 @@
# frozen_string_literal: true
require "hatchet-sdk"
HATCHET = Hatchet::Client.new(debug: true) unless defined?(HATCHET)
PRINT_SCHEDULE_WF = HATCHET.workflow(name: "PrintScheduleWorkflow")
PRINT_PRINTER_WF = HATCHET.workflow(name: "PrintPrinterWorkflow")
# Schedules the printer workflow to run 15 seconds in the future, forwarding
# its own input payload.
PRINT_SCHEDULE_WF.task(:schedule) do |input, ctx|
  now = Time.now.utc
  puts "the time is \t #{now.strftime('%H:%M:%S')}"
  future_time = now + 15
  puts "scheduling for \t #{future_time.strftime('%H:%M:%S')}"
  PRINT_PRINTER_WF.schedule(future_time, input: input)
end
# Prints the time it actually ran plus the forwarded message.
PRINT_PRINTER_WF.task(:step1) do |input, ctx|
  now = Time.now.utc
  puts "printed at \t #{now.strftime('%H:%M:%S')}"
  puts "message \t #{input['message']}"
end
def main
  worker = HATCHET.worker(
    "delayed-worker", slots: 4, workflows: [PRINT_SCHEDULE_WF, PRINT_PRINTER_WF]
  )
  worker.start
end
main if __FILE__ == $PROGRAM_NAME

View File

@@ -0,0 +1,140 @@
# frozen_string_literal: true
require "hatchet-sdk"
HATCHET = Hatchet::Client.new(debug: false) unless defined?(HATCHET)
# Sentinel values returned by each dependency so callers can verify injection.
SYNC_DEPENDENCY_VALUE = "sync_dependency_value"
ASYNC_DEPENDENCY_VALUE = "async_dependency_value"
SYNC_CM_DEPENDENCY_VALUE = "sync_cm_dependency_value"
ASYNC_CM_DEPENDENCY_VALUE = "async_cm_dependency_value"
CHAINED_CM_VALUE = "chained_cm_value"
CHAINED_ASYNC_CM_VALUE = "chained_async_cm_value"
# > Declare dependencies (Ruby uses callable objects instead of Python's Depends)
sync_dep = ->(_input, _ctx) { SYNC_DEPENDENCY_VALUE }
async_dep = ->(_input, _ctx) { ASYNC_DEPENDENCY_VALUE }
# "cm" deps take a third argument with already-resolved dependencies,
# mirroring dependencies that depend on other dependencies.
sync_cm_dep = lambda { |_input, _ctx, deps|
  "#{SYNC_CM_DEPENDENCY_VALUE}_#{deps[:sync_dep]}"
}
async_cm_dep = lambda { |_input, _ctx, deps|
  "#{ASYNC_CM_DEPENDENCY_VALUE}_#{deps[:async_dep]}"
}
chained_dep = ->(_input, _ctx, deps) { "chained_#{CHAINED_CM_VALUE}" }
chained_async_dep = ->(_input, _ctx, deps) { "chained_#{CHAINED_ASYNC_CM_VALUE}" }
# > Inject dependencies
# Resolved values are exposed inside the task body via ctx.deps.
ASYNC_TASK_WITH_DEPS = HATCHET.task(
  name: "async_task_with_dependencies",
  deps: {
    sync_dep: sync_dep,
    async_dep: async_dep,
    sync_cm_dep: sync_cm_dep,
    async_cm_dep: async_cm_dep,
    chained_dep: chained_dep,
    chained_async_dep: chained_async_dep
  }
) do |input, ctx|
  {
    "sync_dep" => ctx.deps[:sync_dep],
    "async_dep" => ctx.deps[:async_dep],
    "async_cm_dep" => ctx.deps[:async_cm_dep],
    "sync_cm_dep" => ctx.deps[:sync_cm_dep],
    "chained_dep" => ctx.deps[:chained_dep],
    "chained_async_dep" => ctx.deps[:chained_async_dep]
  }
end
# Same dependency set on a sync task.
SYNC_TASK_WITH_DEPS = HATCHET.task(
  name: "sync_task_with_dependencies",
  deps: {
    sync_dep: sync_dep,
    async_dep: async_dep,
    sync_cm_dep: sync_cm_dep,
    async_cm_dep: async_cm_dep,
    chained_dep: chained_dep,
    chained_async_dep: chained_async_dep
  }
) do |input, ctx|
  {
    "sync_dep" => ctx.deps[:sync_dep],
    "async_dep" => ctx.deps[:async_dep],
    "async_cm_dep" => ctx.deps[:async_cm_dep],
    "sync_cm_dep" => ctx.deps[:sync_cm_dep],
    "chained_dep" => ctx.deps[:chained_dep],
    "chained_async_dep" => ctx.deps[:chained_async_dep]
  }
end
# Same dependency set on the durable task variants.
DURABLE_ASYNC_TASK_WITH_DEPS = HATCHET.durable_task(
  name: "durable_async_task_with_dependencies",
  deps: {
    sync_dep: sync_dep,
    async_dep: async_dep,
    sync_cm_dep: sync_cm_dep,
    async_cm_dep: async_cm_dep,
    chained_dep: chained_dep,
    chained_async_dep: chained_async_dep
  }
) do |input, ctx|
  {
    "sync_dep" => ctx.deps[:sync_dep],
    "async_dep" => ctx.deps[:async_dep],
    "async_cm_dep" => ctx.deps[:async_cm_dep],
    "sync_cm_dep" => ctx.deps[:sync_cm_dep],
    "chained_dep" => ctx.deps[:chained_dep],
    "chained_async_dep" => ctx.deps[:chained_async_dep]
  }
end
DURABLE_SYNC_TASK_WITH_DEPS = HATCHET.durable_task(
  name: "durable_sync_task_with_dependencies",
  deps: {
    sync_dep: sync_dep,
    async_dep: async_dep,
    sync_cm_dep: sync_cm_dep,
    async_cm_dep: async_cm_dep,
    chained_dep: chained_dep,
    chained_async_dep: chained_async_dep
  }
) do |input, ctx|
  {
    "sync_dep" => ctx.deps[:sync_dep],
    "async_dep" => ctx.deps[:async_dep],
    "async_cm_dep" => ctx.deps[:async_cm_dep],
    "sync_cm_dep" => ctx.deps[:sync_cm_dep],
    "chained_dep" => ctx.deps[:chained_dep],
    "chained_async_dep" => ctx.deps[:chained_async_dep]
  }
end
DI_WORKFLOW = HATCHET.workflow(name: "dependency-injection-workflow")
# Workflow tasks with dependencies follow the same pattern
DI_WORKFLOW.task(:wf_task_with_dependencies) do |input, ctx|
  {
    "sync_dep" => SYNC_DEPENDENCY_VALUE,
    "async_dep" => ASYNC_DEPENDENCY_VALUE
  }
end
# Serve every task/workflow variant on one worker.
def main
  worker = HATCHET.worker(
    "dependency-injection-worker",
    workflows: [
      ASYNC_TASK_WITH_DEPS,
      SYNC_TASK_WITH_DEPS,
      DURABLE_ASYNC_TASK_WITH_DEPS,
      DURABLE_SYNC_TASK_WITH_DEPS,
      DI_WORKFLOW
    ]
  )
  worker.start
end
main if __FILE__ == $PROGRAM_NAME

View File

@@ -0,0 +1,108 @@
# frozen_string_literal: true
require "hatchet-sdk"
# SecureRandom is used below to build unique wait keys; require it explicitly
# instead of relying on another file having loaded it transitively.
require "securerandom"
HATCHET = Hatchet::Client.new(debug: true) unless defined?(HATCHET)
# > Create a durable workflow
DURABLE_WORKFLOW = HATCHET.workflow(name: "DurableWorkflow")
EPHEMERAL_WORKFLOW = HATCHET.workflow(name: "EphemeralWorkflow")
# > Add durable task
DURABLE_EVENT_KEY = "durable-example:event"
DURABLE_SLEEP_TIME = 5
DURABLE_WORKFLOW.task(:ephemeral_task) do |input, ctx|
  puts "Running non-durable task"
end
# Durable task: sleeps via the durable API, then blocks until the user event
# arrives before completing.
DURABLE_WORKFLOW.durable_task(:durable_task, execution_timeout: 60) do |input, ctx|
  puts "Waiting for sleep"
  ctx.sleep_for(duration: DURABLE_SLEEP_TIME)
  puts "Sleep finished"
  puts "Waiting for event"
  ctx.wait_for(
    "event",
    Hatchet::UserEventCondition.new(event_key: DURABLE_EVENT_KEY, expression: "true")
  )
  puts "Event received"
  { "status" => "success" }
end
# > Add durable tasks that wait for or groups
# Resolves on whichever fires first: the sleep OR the user event. The random
# hex string gives each wait a unique registration key.
DURABLE_WORKFLOW.durable_task(:wait_for_or_group_1, execution_timeout: 60) do |input, ctx|
  start = Time.now
  wait_result = ctx.wait_for(
    SecureRandom.hex(16),
    Hatchet.or_(
      Hatchet::SleepCondition.new(DURABLE_SLEEP_TIME),
      Hatchet::UserEventCondition.new(event_key: DURABLE_EVENT_KEY)
    )
  )
  # wait_result maps the wait key to the condition(s) that satisfied it.
  key = wait_result.keys.first
  event_id = wait_result[key].keys.first
  {
    "runtime" => (Time.now - start).to_i,
    "key" => key,
    "event_id" => event_id
  }
end
# Same shape but with a 6x longer sleep, so the event normally wins the race.
DURABLE_WORKFLOW.durable_task(:wait_for_or_group_2, execution_timeout: 120) do |input, ctx|
  start = Time.now
  wait_result = ctx.wait_for(
    SecureRandom.hex(16),
    Hatchet.or_(
      Hatchet::SleepCondition.new(6 * DURABLE_SLEEP_TIME),
      Hatchet::UserEventCondition.new(event_key: DURABLE_EVENT_KEY)
    )
  )
  key = wait_result.keys.first
  event_id = wait_result[key].keys.first
  {
    "runtime" => (Time.now - start).to_i,
    "key" => key,
    "event_id" => event_id
  }
end
# Three consecutive durable sleeps; returns total elapsed seconds.
DURABLE_WORKFLOW.durable_task(:wait_for_multi_sleep, execution_timeout: 120) do |input, ctx|
  start = Time.now
  3.times do
    ctx.sleep_for(duration: DURABLE_SLEEP_TIME)
  end
  { "runtime" => (Time.now - start).to_i }
end
EPHEMERAL_WORKFLOW.task(:ephemeral_task_2) do |input, ctx|
  puts "Running non-durable task"
end
# Standalone durable task returning elapsed seconds after one durable sleep.
WAIT_FOR_SLEEP_TWICE = HATCHET.durable_task(name: "wait_for_sleep_twice", execution_timeout: 60) do |input, ctx|
  start = Time.now
  ctx.sleep_for(duration: DURABLE_SLEEP_TIME)
  { "runtime" => (Time.now - start).to_i }
end
def main
  worker = HATCHET.worker(
    "durable-worker",
    workflows: [DURABLE_WORKFLOW, EPHEMERAL_WORKFLOW, WAIT_FOR_SLEEP_TWICE]
  )
  worker.start
end
main if __FILE__ == $PROGRAM_NAME

View File

@@ -0,0 +1,42 @@
# frozen_string_literal: true
require "hatchet-sdk"
HATCHET = Hatchet::Client.new(debug: true) unless defined?(HATCHET)
# Event key shared by both tasks below; pushes to this key resume the waits.
# (Previously the constant was defined but the literal "user:update" was
# repeated inline — use the constant so the key only exists in one place.)
DURABLE_EVENT_TASK_KEY = "user:update"
# > Durable Event
# Durably blocks until a matching user event is received.
DURABLE_EVENT_TASK = HATCHET.durable_task(name: "DurableEventTask") do |input, ctx|
  res = ctx.wait_for(
    "event",
    Hatchet::UserEventCondition.new(event_key: DURABLE_EVENT_TASK_KEY)
  )
  puts "got event #{res}"
end
DURABLE_EVENT_TASK_WITH_FILTER = HATCHET.durable_task(name: "DurableEventWithFilterTask") do |input, ctx|
  # > Durable Event With Filter
  # Only events whose payload satisfies the expression resume this task.
  res = ctx.wait_for(
    "event",
    Hatchet::UserEventCondition.new(
      event_key: DURABLE_EVENT_TASK_KEY,
      expression: "input.user_id == '1234'"
    )
  )
  puts "got event #{res}"
end
def main
  worker = HATCHET.worker(
    "durable-event-worker",
    workflows: [DURABLE_EVENT_TASK, DURABLE_EVENT_TASK_WITH_FILTER]
  )
  worker.start
end
main if __FILE__ == $PROGRAM_NAME

View File

@@ -0,0 +1,20 @@
# frozen_string_literal: true
require "hatchet-sdk"
HATCHET = Hatchet::Client.new(debug: true) unless defined?(HATCHET)
# > Durable Sleep
# Sleeps 5s via the durable API.
# NOTE(review): presumably the sleep survives worker restarts (durable
# semantics) — confirm against the SDK docs.
DURABLE_SLEEP_TASK = HATCHET.durable_task(name: "DurableSleepTask") do |input, ctx|
  res = ctx.sleep_for(duration: 5)
  puts "got result #{res}"
end
def main
  worker = HATCHET.worker("durable-sleep-worker", workflows: [DURABLE_SLEEP_TASK])
  worker.start
end
main if __FILE__ == $PROGRAM_NAME

View File

@@ -0,0 +1,15 @@
# frozen_string_literal: true
require "hatchet-sdk"
HATCHET = Hatchet::Client.new unless defined?(HATCHET)
# > Event trigger
# Pushes an event; any workflow subscribed to "user:create" is triggered.
HATCHET.event.push("user:create", { "should_skip" => false })
# > Event trigger with metadata
# additional_metadata is attached to the resulting run(s) for filtering/search.
HATCHET.event.push(
  "user:create",
  { "userId" => "1234", "should_skip" => false },
  additional_metadata: { "source" => "api" }
)

View File

@@ -0,0 +1,32 @@
# frozen_string_literal: true
require "hatchet-sdk"
require_relative "worker"
HATCHET_CLIENT = Hatchet::Client.new
# > Create a filter
# Events pushed with scope "foobarbaz" only trigger EVENT_WORKFLOW when
# should_skip == false; the payload is attached to matching runs.
HATCHET_CLIENT.filters.create(
  workflow_id: EVENT_WORKFLOW.id,
  expression: "input.should_skip == false",
  scope: "foobarbaz",
  payload: {
    "main_character" => "Anna",
    "supporting_character" => "Stiva",
    "location" => "Moscow"
  }
)
# > Skip a run
# should_skip == true fails the filter expression, so no run is triggered.
HATCHET_CLIENT.event.push(
  EVENT_KEY,
  { "should_skip" => true },
  scope: "foobarbaz"
)
# > Trigger a run
# should_skip == false passes the filter, so a run is triggered.
HATCHET_CLIENT.event.push(
  EVENT_KEY,
  { "should_skip" => false },
  scope: "foobarbaz"
)

View File

@@ -0,0 +1,52 @@
# frozen_string_literal: true
require "hatchet-sdk"
HATCHET = Hatchet::Client.new unless defined?(HATCHET)
# > Event trigger
EVENT_KEY = "user:create"
SECONDARY_KEY = "foobarbaz"
# Wildcard subscription: matches any event under the "subscription:" prefix.
WILDCARD_KEY = "subscription:*"
EVENT_WORKFLOW = HATCHET.workflow(
  name: "EventWorkflow",
  on_events: [EVENT_KEY, SECONDARY_KEY, WILDCARD_KEY]
)
# > Event trigger with filter
# Fix: this workflow previously reused the name "EventWorkflow", colliding
# with EVENT_WORKFLOW above — workflow names must be unique.
EVENT_WORKFLOW_WITH_FILTER = HATCHET.workflow(
  name: "EventWorkflowWithFilter",
  on_events: [EVENT_KEY, SECONDARY_KEY, WILDCARD_KEY],
  default_filters: [
    Hatchet::DefaultFilter.new(
      expression: "true",
      scope: "example-scope",
      payload: {
        "main_character" => "Anna",
        "supporting_character" => "Stiva",
        "location" => "Moscow"
      }
    )
  ]
)
EVENT_WORKFLOW.task(:task) do |input, ctx|
  puts "event received"
  ctx.filter_payload
end
# > Accessing the filter payload
EVENT_WORKFLOW_WITH_FILTER.task(:filtered_task) do |input, ctx|
  puts ctx.filter_payload.inspect
end
# Register both workflows (the filtered workflow was previously never served,
# so :filtered_task could not run). Worker name is positional, matching every
# other example in this directory.
def main
  worker = HATCHET.worker("EventWorker", workflows: [EVENT_WORKFLOW, EVENT_WORKFLOW_WITH_FILTER])
  worker.start
end
main if __FILE__ == $PROGRAM_NAME

View File

@@ -0,0 +1,13 @@
# frozen_string_literal: true
require_relative "worker"
# > Child spawn
# Runs the child workflow directly and blocks for the result.
FANOUT_CHILD_WF.run({ "a" => "b" })
# > Error handling
# A failed child run raises; catch StandardError to handle it.
begin
  FANOUT_CHILD_WF.run({ "a" => "b" })
rescue StandardError => e
  puts "Child workflow failed: #{e.message}"
end

View File

@@ -0,0 +1,14 @@
# frozen_string_literal: true
require_relative "worker"
# > Bulk run children
def run_child_workflows(n)
FANOUT_CHILD_WF.run_many(
n.times.map do |i|
FANOUT_CHILD_WF.create_bulk_run_item(
input: { "a" => i.to_s }
)
end
)
end

View File

@@ -0,0 +1,50 @@
# frozen_string_literal: true
require "hatchet-sdk"
HATCHET = Hatchet::Client.new(debug: true) unless defined?(HATCHET)
# > FanoutParent
FANOUT_PARENT_WF = HATCHET.workflow(name: "FanoutParent")
FANOUT_CHILD_WF = HATCHET.workflow(name: "FanoutChild")
# Spawns n children (default 100) in one bulk call and blocks for all results.
FANOUT_PARENT_WF.task(:spawn, execution_timeout: 300) do |input, ctx|
  puts "spawning child"
  n = input["n"] || 100
  result = FANOUT_CHILD_WF.run_many(
    n.times.map do |i|
      FANOUT_CHILD_WF.create_bulk_run_item(
        input: { "a" => i.to_s },
        options: Hatchet::TriggerWorkflowOptions.new(
          additional_metadata: { "hello" => "earth" },
          key: "child#{i}"
        )
      )
    end
  )
  puts "results #{result}"
  { "results" => result }
end
# > FanoutChild
FANOUT_CHILD_PROCESS = FANOUT_CHILD_WF.task(:process) do |input, ctx|
  puts "child process #{input['a']}"
  { "status" => input["a"] }
end
# Second child step reads the first step's output.
FANOUT_CHILD_WF.task(:process2, parents: [FANOUT_CHILD_PROCESS]) do |input, ctx|
  process_output = ctx.task_output(FANOUT_CHILD_PROCESS)
  a = process_output["status"]
  { "status2" => "#{a}2" }
end
def main
  worker = HATCHET.worker("fanout-worker", slots: 40, workflows: [FANOUT_PARENT_WF, FANOUT_CHILD_WF])
  worker.start
end
main if __FILE__ == $PROGRAM_NAME

View File

@@ -0,0 +1,49 @@
# frozen_string_literal: true
require "hatchet-sdk"
HATCHET = Hatchet::Client.new(debug: true)
SYNC_FANOUT_PARENT = HATCHET.workflow(name: "SyncFanoutParent")
SYNC_FANOUT_CHILD = HATCHET.workflow(name: "SyncFanoutChild")
SYNC_FANOUT_PARENT.task(:spawn, execution_timeout: 300) do |input, ctx|
puts "spawning child"
n = input["n"] || 5
results = SYNC_FANOUT_CHILD.run_many(
n.times.map do |i|
SYNC_FANOUT_CHILD.create_bulk_run_item(
input: { "a" => i.to_s },
key: "child#{i}",
options: Hatchet::TriggerWorkflowOptions.new(
additional_metadata: { "hello" => "earth" }
)
)
end
)
puts "results #{results}"
{ "results" => results }
end
SYNC_PROCESS = SYNC_FANOUT_CHILD.task(:process) do |input, ctx|
{ "status" => "success #{input['a']}" }
end
SYNC_FANOUT_CHILD.task(:process2, parents: [SYNC_PROCESS]) do |input, ctx|
process_output = ctx.task_output(SYNC_PROCESS)
a = process_output["status"]
{ "status2" => "#{a}2" }
end
def main
worker = HATCHET.worker(
"sync-fanout-worker",
slots: 40,
workflows: [SYNC_FANOUT_PARENT, SYNC_FANOUT_CHILD]
)
worker.start
end
main if __FILE__ == $PROGRAM_NAME

View File

@@ -0,0 +1,30 @@
#!/usr/bin/env ruby
require 'hatchet-sdk'
# require_relative '../src/lib/hatchet-sdk'
# Initialize the Hatchet client
HATCHET = Hatchet::Client.new() unless defined?(HATCHET)
result = HATCHET.events.create(
key: "test-event",
data: {
message: "test"
}
)
puts "Event created: #{result.inspect}"
run = HATCHET.runs.create(
name: "simple",
input: {
Message: "test workflow run"
},
)
puts "TriggeredRun ID: #{run.metadata.id}"
result = HATCHET.runs.poll(run.metadata.id)
puts "Runs client initialized: #{result.inspect}"
puts "Run status: #{result.status}"

View File

@@ -0,0 +1,26 @@
# frozen_string_literal: true
# > Lifespan
require "hatchet-sdk"
HATCHET = Hatchet::Client.new(debug: true)
# Ruby lifespan uses a block with yield for setup/teardown
# The hash returned here is exposed to tasks via ctx.lifespan.
LIFESPAN_PROC = proc do
  { foo: "bar", pi: 3.14 }
end
# Returns the worker-level lifespan value as the task result.
LIFESPAN_TASK = HATCHET.task(name: "LifespanWorkflow") do |input, ctx|
  ctx.lifespan
end
def main
  worker = HATCHET.worker(
    "test-worker", slots: 1, workflows: [LIFESPAN_TASK], lifespan: LIFESPAN_PROC
  )
  worker.start
end
main if __FILE__ == $PROGRAM_NAME

View File

@@ -0,0 +1,45 @@
# frozen_string_literal: true
# > LoggingWorkflow
require "hatchet-sdk"
require "logger"
logger = Logger.new($stdout)
logger.level = Logger::INFO
HATCHET = Hatchet::Client.new(debug: true) unless defined?(HATCHET)
LOGGING_WORKFLOW = HATCHET.workflow(name: "LoggingWorkflow")
LOGGING_WORKFLOW.task(:root_logger) do |input, ctx|
12.times do |i|
logger.info("executed step1 - #{i}")
logger.info({ "step1" => "step1" }.inspect)
sleep 0.1
end
{ "status" => "success" }
end
# > ContextLogger
LOGGING_WORKFLOW.task(:context_logger) do |input, ctx|
12.times do |i|
ctx.log("executed step1 - #{i}")
ctx.log({ "step1" => "step1" }.inspect)
sleep 0.1
end
{ "status" => "success" }
end
def main
worker = HATCHET.worker("logger-worker", slots: 5, workflows: [LOGGING_WORKFLOW])
worker.start
end
main if __FILE__ == $PROGRAM_NAME

View File

@@ -0,0 +1,20 @@
# frozen_string_literal: true
require "hatchet-sdk"
HATCHET = Hatchet::Client.new unless defined?(HATCHET)
# > SlotRelease
SLOT_RELEASE_WORKFLOW = HATCHET.workflow(name: "SlotReleaseWorkflow")
SLOT_RELEASE_WORKFLOW.task(:step1) do |input, ctx|
puts "RESOURCE INTENSIVE PROCESS"
sleep 10
# Release the slot after the resource-intensive process, so that other steps can run
ctx.release_slot
puts "NON RESOURCE INTENSIVE PROCESS"
{ "status" => "success" }
end

View File

@@ -0,0 +1,28 @@
# frozen_string_literal: true
require "hatchet-sdk"
HATCHET = Hatchet::Client.new(debug: true) unless defined?(HATCHET)
NON_RETRYABLE_WORKFLOW = HATCHET.workflow(name: "NonRetryableWorkflow")
# > Non-retryable task
NON_RETRYABLE_WORKFLOW.task(:should_not_retry, retries: 1) do |input, ctx|
raise Hatchet::NonRetryableError, "This task should not retry"
end
NON_RETRYABLE_WORKFLOW.task(:should_retry_wrong_exception_type, retries: 1) do |input, ctx|
raise TypeError, "This task should retry because it's not a NonRetryableError"
end
NON_RETRYABLE_WORKFLOW.task(:should_not_retry_successful_task, retries: 1) do |input, ctx|
# no-op
end
def main
worker = HATCHET.worker("non-retry-worker", workflows: [NON_RETRYABLE_WORKFLOW])
worker.start
end
main if __FILE__ == $PROGRAM_NAME

View File

@@ -0,0 +1,73 @@
# frozen_string_literal: true
require "hatchet-sdk"
HATCHET = Hatchet::Client.new(debug: false) unless defined?(HATCHET)
ERROR_TEXT = "step1 failed"
# > OnFailure Step
# This workflow will fail because the step will throw an error
# we define an onFailure step to handle this case
ON_FAILURE_WF = HATCHET.workflow(name: "OnFailureWorkflow")
ON_FAILURE_WF.task(:step1, execution_timeout: 1) do |input, ctx|
# This step will always raise an exception
raise ERROR_TEXT
end
# After the workflow fails, this special step will run
ON_FAILURE_WF.on_failure_task do |input, ctx|
# We can do things like perform cleanup logic
# or notify a user here
# Fetch the errors from upstream step runs from the context
puts ctx.task_run_errors.inspect
{ "status" => "success" }
end
# > OnFailure With Details
# We can access the failure details in the onFailure step
# via the context method
ON_FAILURE_WF_WITH_DETAILS = HATCHET.workflow(name: "OnFailureWorkflowWithDetails")
DETAILS_STEP1 = ON_FAILURE_WF_WITH_DETAILS.task(:details_step1, execution_timeout: 1) do |input, ctx|
raise ERROR_TEXT
end
# After the workflow fails, this special step will run
ON_FAILURE_WF_WITH_DETAILS.on_failure_task do |input, ctx|
error = ctx.get_task_run_error(DETAILS_STEP1)
unless error
next { "status" => "unexpected success" }
end
# We can access the failure details here
raise "Expected Hatchet::TaskRunError" unless error.is_a?(Hatchet::TaskRunError)
if error.message.include?("step1 failed")
next {
"status" => "success",
"failed_run_external_id" => error.task_run_external_id
}
end
raise "unexpected failure"
end
def main
worker = HATCHET.worker(
"on-failure-worker",
slots: 4,
workflows: [ON_FAILURE_WF, ON_FAILURE_WF_WITH_DETAILS]
)
worker.start
end
main if __FILE__ == $PROGRAM_NAME

View File

@@ -0,0 +1,34 @@
# frozen_string_literal: true
require "hatchet-sdk"
HATCHET = Hatchet::Client.new(debug: true) unless defined?(HATCHET)
ON_SUCCESS_WORKFLOW = HATCHET.workflow(name: "OnSuccessWorkflow")
FIRST_TASK = ON_SUCCESS_WORKFLOW.task(:first_task) do |input, ctx|
puts "First task completed successfully"
end
SECOND_TASK = ON_SUCCESS_WORKFLOW.task(:second_task, parents: [FIRST_TASK]) do |input, ctx|
puts "Second task completed successfully"
end
ON_SUCCESS_WORKFLOW.task(:third_task, parents: [FIRST_TASK, SECOND_TASK]) do |input, ctx|
puts "Third task completed successfully"
end
ON_SUCCESS_WORKFLOW.task(:fourth_task) do |input, ctx|
puts "Fourth task completed successfully"
end
ON_SUCCESS_WORKFLOW.on_success_task do |input, ctx|
puts "On success task completed successfully"
end
def main
worker = HATCHET.worker("on-success-worker", workflows: [ON_SUCCESS_WORKFLOW])
worker.start
end
main if __FILE__ == $PROGRAM_NAME

View File

@@ -0,0 +1,32 @@
# frozen_string_literal: true
require_relative "worker"
# > Runtime priority
low_prio = PRIORITY_WORKFLOW.run_no_wait(
{},
options: Hatchet::TriggerWorkflowOptions.new(
priority: 1,
additional_metadata: { "priority" => "low", "key" => 1 }
)
)
high_prio = PRIORITY_WORKFLOW.run_no_wait(
{},
options: Hatchet::TriggerWorkflowOptions.new(
priority: 3,
additional_metadata: { "priority" => "high", "key" => 1 }
)
)
# > Scheduled priority
schedule = PRIORITY_WORKFLOW.schedule(
Time.now + 60,
options: Hatchet::TriggerWorkflowOptions.new(priority: 3)
)
cron = PRIORITY_WORKFLOW.create_cron(
"my-scheduled-cron",
"0 * * * *",
input: {},
)

View File

@@ -0,0 +1,31 @@
# frozen_string_literal: true
require "hatchet-sdk"
HATCHET = Hatchet::Client.new(debug: true) unless defined?(HATCHET)
# > Default priority
DEFAULT_PRIORITY = 1
SLEEP_TIME = 0.25
PRIORITY_WORKFLOW = HATCHET.workflow(
name: "PriorityWorkflow",
default_priority: DEFAULT_PRIORITY
)
PRIORITY_WORKFLOW.task(:priority_task) do |input, ctx|
puts "Priority: #{ctx.priority}"
sleep SLEEP_TIME
end
def main
worker = HATCHET.worker(
"priority-worker",
slots: 1,
workflows: [PRIORITY_WORKFLOW]
)
worker.start
end
main if __FILE__ == $PROGRAM_NAME

View File

@@ -0,0 +1,7 @@
# frozen_string_literal: true
require_relative "workflows/first_task"
# > Run a task
result = FIRST_TASK.run({ "message" => "Hello World!" })
puts "Finished running task: #{result['transformed_message']}"

View File

@@ -0,0 +1,11 @@
# frozen_string_literal: true
require "hatchet-sdk"
HATCHET = Hatchet::Client.new unless defined?(HATCHET)
# > Simple task
FIRST_TASK = HATCHET.task(name: "first-task") do |input, ctx|
puts "first-task called"
{ "transformed_message" => input["message"].downcase }
end

View File

@@ -0,0 +1,49 @@
# frozen_string_literal: true
require "hatchet-sdk"
HATCHET = Hatchet::Client.new(debug: true) unless defined?(HATCHET)
# > Workflow
RATE_LIMIT_WORKFLOW = HATCHET.workflow(name: "RateLimitWorkflow")
# > Static
RATE_LIMIT_KEY = "test-limit"
RATE_LIMIT_WORKFLOW.task(
:step_1,
rate_limits: [Hatchet::RateLimit.new(static_key: RATE_LIMIT_KEY, units: 1)]
) do |input, ctx|
puts "executed step_1"
end
# > Dynamic
RATE_LIMIT_WORKFLOW.task(
:step_2,
rate_limits: [
Hatchet::RateLimit.new(
dynamic_key: "input.user_id",
units: 1,
limit: 10,
duration: :minute
)
]
) do |input, ctx|
puts "executed step_2"
end
# > Create a rate limit
def main
HATCHET.rate_limits.put(RATE_LIMIT_KEY, 2, :second)
worker = HATCHET.worker(
"rate-limit-worker", slots: 10, workflows: [RATE_LIMIT_WORKFLOW]
)
worker.start
end
main if __FILE__ == $PROGRAM_NAME

View File

@@ -0,0 +1,45 @@
# frozen_string_literal: true
require "hatchet-sdk"
HATCHET = Hatchet::Client.new(debug: true) unless defined?(HATCHET)
SIMPLE_RETRY_WORKFLOW = HATCHET.workflow(name: "SimpleRetryWorkflow")
BACKOFF_WORKFLOW = HATCHET.workflow(name: "BackoffWorkflow")
# > Simple Step Retries
SIMPLE_RETRY_WORKFLOW.task(:always_fail, retries: 3) do |input, ctx|
raise "simple task failed"
end
# > Retries with Count
SIMPLE_RETRY_WORKFLOW.task(:fail_twice, retries: 3) do |input, ctx|
raise "simple task failed" if ctx.retry_count < 2
{ "status" => "success" }
end
# > Retries with Backoff
BACKOFF_WORKFLOW.task(
:backoff_task,
retries: 10,
# Maximum number of seconds to wait between retries
backoff_max_seconds: 10,
# Factor to increase the wait time between retries.
# This sequence will be 2s, 4s, 8s, 10s, 10s, 10s... due to the maxSeconds limit
backoff_factor: 2.0
) do |input, ctx|
raise "backoff task failed" if ctx.retry_count < 3
{ "status" => "success" }
end
def main
worker = HATCHET.worker("backoff-worker", slots: 4, workflows: [BACKOFF_WORKFLOW])
worker.start
end
main if __FILE__ == $PROGRAM_NAME

View File

@@ -0,0 +1,13 @@
# frozen_string_literal: true
require "hatchet-sdk"
HATCHET = Hatchet::Client.new unless defined?(HATCHET)
RETURN_EXCEPTIONS_TASK = HATCHET.task(name: "return_exceptions_task") do |input, ctx|
if input["index"].to_i.even?
raise "error in task with index #{input['index']}"
end
{ "message" => "this is a successful task." }
end

View File

@@ -0,0 +1,51 @@
# frozen_string_literal: true
require "hatchet-sdk"
HATCHET = Hatchet::Client.new(debug: true) unless defined?(HATCHET)
RUN_DETAIL_TEST_WORKFLOW = HATCHET.workflow(name: "RunDetailTest")
DETAIL_STEP1 = RUN_DETAIL_TEST_WORKFLOW.task(:step1) do |input, ctx|
{ "random_number" => rand(1..100) }
end
RUN_DETAIL_TEST_WORKFLOW.task(:cancel_step) do |input, ctx|
ctx.cancel
10.times { sleep 1 }
end
RUN_DETAIL_TEST_WORKFLOW.task(:fail_step) do |input, ctx|
raise "Intentional Failure"
end
DETAIL_STEP2 = RUN_DETAIL_TEST_WORKFLOW.task(:step2) do |input, ctx|
sleep 5
{ "random_number" => rand(1..100) }
end
RUN_DETAIL_TEST_WORKFLOW.task(:step3, parents: [DETAIL_STEP1, DETAIL_STEP2]) do |input, ctx|
one = ctx.task_output(DETAIL_STEP1)["random_number"]
two = ctx.task_output(DETAIL_STEP2)["random_number"]
{ "sum" => one + two }
end
RUN_DETAIL_TEST_WORKFLOW.task(:step4, parents: [DETAIL_STEP1, :step3]) do |input, ctx|
puts(
"executed step4",
Time.now.strftime("%H:%M:%S"),
input.inspect,
ctx.task_output(DETAIL_STEP1).inspect,
ctx.task_output(:step3).inspect
)
{ "step4" => "step4" }
end
def main
worker = HATCHET.worker("run-detail-worker", workflows: [RUN_DETAIL_TEST_WORKFLOW])
worker.start
end
main if __FILE__ == $PROGRAM_NAME

View File

@@ -0,0 +1,35 @@
# frozen_string_literal: true
require "hatchet-sdk"
hatchet = Hatchet::Client.new
# > Create
scheduled_run = hatchet.scheduled.create(
workflow_name: "simple-workflow",
trigger_at: Time.now + 10,
input: { "data" => "simple-workflow-data" },
additional_metadata: { "customer_id" => "customer-a" }
)
id = scheduled_run.metadata.id
# > Reschedule
hatchet.scheduled.update(
scheduled_run.metadata.id,
trigger_at: Time.now + 3600
)
# > Delete
hatchet.scheduled.delete(scheduled_run.metadata.id)
# > List
scheduled_runs = hatchet.scheduled.list
# > Bulk delete
hatchet.scheduled.bulk_delete(scheduled_ids: [id])
# > Bulk reschedule
hatchet.scheduled.bulk_update(
[[id, Time.now + 7200]]
)

View File

@@ -0,0 +1,26 @@
# frozen_string_literal: true
require "hatchet-sdk"
HATCHET = Hatchet::Client.new(debug: true) unless defined?(HATCHET)
SCHEDULED_WORKFLOW = HATCHET.workflow(name: "ScheduledWorkflow")
SCHEDULED_WORKFLOW.task(:scheduled_task) do |input, ctx|
puts "Scheduled task executed at #{Time.now}"
{ "status" => "success" }
end
# > Programmatic Schedule
def schedule_workflow
future_time = Time.now + 60 # 1 minute from now
SCHEDULED_WORKFLOW.schedule(future_time, input: { "message" => "scheduled run" })
end
def main
worker = HATCHET.worker("scheduled-worker", workflows: [SCHEDULED_WORKFLOW])
worker.start
end
main if __FILE__ == $PROGRAM_NAME

View File

@@ -0,0 +1,30 @@
# frozen_string_literal: true
# > Custom Serialization/Deserialization
require "hatchet-sdk"
require "base64"
require "zlib"
HATCHET = Hatchet::Client.new(debug: true) unless defined?(HATCHET)
SERDE_WORKFLOW = HATCHET.workflow(name: "serde-example-workflow")
GENERATE_RESULT = SERDE_WORKFLOW.task(:generate_result) do |input, ctx|
compressed = Base64.strict_encode64(Zlib::Deflate.deflate("my_result"))
{ "result" => compressed }
end
SERDE_WORKFLOW.task(:read_result, parents: [GENERATE_RESULT]) do |input, ctx|
encoded = ctx.task_output(GENERATE_RESULT)["result"]
decoded = Zlib::Inflate.inflate(Base64.strict_decode64(encoded))
{ "final_result" => decoded }
end
def main
worker = HATCHET.worker("test-worker", workflows: [SERDE_WORKFLOW])
worker.start
end
main if __FILE__ == $PROGRAM_NAME

View File

@@ -0,0 +1,9 @@
# frozen_string_literal: true
require_relative "worker"
# > Schedule a task
schedule = SIMPLE.schedule(Time.now + 86_400, input: { "message" => "Hello, World!" })
## do something with the id
puts schedule.metadata.id

View File

@@ -0,0 +1,7 @@
# frozen_string_literal: true
require_relative "worker"
# > Run a task
result = SIMPLE.run({ "message" => "Hello, World!" })
puts result

View File

@@ -0,0 +1,11 @@
# frozen_string_literal: true
require_relative "worker"
# > Trigger with metadata
SIMPLE.run(
{},
options: Hatchet::TriggerWorkflowOptions.new(
additional_metadata: { "source" => "api" }
)
)

View File

@@ -0,0 +1,24 @@
# frozen_string_literal: true
# > Simple
require "hatchet-sdk"
HATCHET = Hatchet::Client.new(debug: true) unless defined?(HATCHET)
SIMPLE = HATCHET.task(name: "simple") do |input, ctx|
{ "result" => "Hello, world!" }
end
SIMPLE_DURABLE = HATCHET.durable_task(name: "simple_durable") do |input, ctx|
result = SIMPLE.run(input)
{ "result" => result["result"] }
end
def main
worker = HATCHET.worker("test-worker", workflows: [SIMPLE, SIMPLE_DURABLE])
worker.start
end
main if __FILE__ == $PROGRAM_NAME

View File

@@ -0,0 +1,9 @@
# frozen_string_literal: true
require "hatchet-sdk"
HATCHET = Hatchet::Client.new unless defined?(HATCHET)
# > Define a workflow
EXAMPLE_WORKFLOW = HATCHET.workflow(name: "example-workflow")

View File

@@ -0,0 +1,51 @@
# frozen_string_literal: true
require "hatchet-sdk"
HATCHET = Hatchet::Client.new(debug: true) unless defined?(HATCHET)
# > StickyWorker
STICKY_WORKFLOW = HATCHET.workflow(
  name: "StickyWorkflow",
  # Specify a sticky strategy when declaring the workflow
  sticky: :soft
)
# Both initial steps report which worker executed them, so a caller can
# verify that sticky routing kept the run on one worker.
STEP1A = STICKY_WORKFLOW.task(:step1a) do |input, ctx|
  { "worker" => ctx.worker.id }
end
STEP1B = STICKY_WORKFLOW.task(:step1b) do |input, ctx|
  { "worker" => ctx.worker.id }
end
# > StickyChild
STICKY_CHILD_WORKFLOW = HATCHET.workflow(
  name: "StickyChildWorkflow",
  sticky: :soft
)
# Runs after both step1a and step1b; spawns the child workflow with
# sticky: true so it is routed to the same worker where possible.
STICKY_WORKFLOW.task(:step2, parents: [STEP1A, STEP1B]) do |input, ctx|
  ref = STICKY_CHILD_WORKFLOW.run_no_wait(
    options: Hatchet::TriggerWorkflowOptions.new(sticky: true)
  )
  # Wait for the child to finish (ref.result is synchronous in this SDK);
  # its value is intentionally discarded.
  ref.result
  { "worker" => ctx.worker.id }
end
STICKY_CHILD_WORKFLOW.task(:child) do |input, ctx|
  { "worker" => ctx.worker.id }
end
# Entry point: start a worker hosting both workflows.
def main
  worker = HATCHET.worker(
    "sticky-worker", slots: 10, workflows: [STICKY_WORKFLOW, STICKY_CHILD_WORKFLOW]
  )
  worker.start
end
main if __FILE__ == $PROGRAM_NAME

View File

@@ -0,0 +1,10 @@
# frozen_string_literal: true
require_relative "worker"
# > Consume
# Enqueue the streaming task without waiting for completion.
ref = STREAM_TASK.run_no_wait
# Print each streamed chunk as the worker emits it.
HATCHET.runs.subscribe_to_stream(ref.workflow_run_id) do |chunk|
  print chunk
end

View File

@@ -0,0 +1,32 @@
# frozen_string_literal: true
require "hatchet-sdk"
HATCHET = Hatchet::Client.new(debug: false) unless defined?(HATCHET)
# > Streaming
# Sample text that the task streams out in fixed-size chunks below.
ANNA_KARENINA = <<~TEXT
  Happy families are all alike; every unhappy family is unhappy in its own way.
  Everything was in confusion in the Oblonskys' house. The wife had discovered that the husband was carrying on an intrigue with a French girl, who had been a governess in their family, and she had announced to her husband that she could not go on living in the same house with him.
TEXT
# Up-to-10-character slices of the text; `.` does not match newlines, so
# line breaks are dropped from the stream.
STREAM_CHUNKS = ANNA_KARENINA.scan(/.{1,10}/)
# Emits each chunk to the run's stream with a short delay between chunks.
STREAM_TASK = HATCHET.task(name: "stream_task") do |input, ctx|
  # Sleeping to avoid race conditions
  sleep 2
  STREAM_CHUNKS.each do |chunk|
    ctx.put_stream(chunk)
    sleep 0.20
  end
end
# Entry point: start a worker hosting the streaming task.
def main
  worker = HATCHET.worker("test-worker", workflows: [STREAM_TASK])
  worker.start
end
main if __FILE__ == $PROGRAM_NAME

View File

@@ -0,0 +1,40 @@
# frozen_string_literal: true
require "hatchet-sdk"
HATCHET = Hatchet::Client.new(debug: true) unless defined?(HATCHET)
# > ScheduleTimeout
TIMEOUT_WF = HATCHET.workflow(
  name: "TimeoutWorkflow",
  task_defaults: { execution_timeout: 120 } # 2 minutes
)
# > ExecutionTimeout
# Specify an execution timeout on a task
# The 30s sleep exceeds the 5s execution timeout, so this task is expected
# to be timed out by the engine rather than return "success".
TIMEOUT_WF.task(:timeout_task, execution_timeout: 5, schedule_timeout: 600) do |input, ctx|
  sleep 30
  { "status" => "success" }
end
REFRESH_TIMEOUT_WF = HATCHET.workflow(name: "RefreshTimeoutWorkflow")
# > RefreshTimeout
# refresh_timeout(10) buys extra time (presumably 10s — confirm SDK
# semantics) so the 5s sleep can outlive the 4s execution timeout.
REFRESH_TIMEOUT_WF.task(:refresh_task, execution_timeout: 4) do |input, ctx|
  ctx.refresh_timeout(10)
  sleep 5
  { "status" => "success" }
end
# Entry point: start a worker hosting both workflows.
def main
  worker = HATCHET.worker(
    "timeout-worker", slots: 4, workflows: [TIMEOUT_WF, REFRESH_TIMEOUT_WF]
  )
  worker.start
end
main if __FILE__ == $PROGRAM_NAME

View File

@@ -0,0 +1,24 @@
# frozen_string_literal: true
require "hatchet-sdk"
hatchet = Hatchet::Client.new
# Snippet file: each `# >` marker begins a docs excerpt, so ref/result are
# deliberately reassigned once per section.
# > Define a task
SAY_HELLO = hatchet.task(name: "say_hello") do |input, ctx|
  { "greeting" => "Hello, #{input['name']}!" }
end
# > Sync
ref = SAY_HELLO.run_no_wait({ "name" => "World" })
# > Async
# In Ruby, run_no_wait is the equivalent of async enqueuing
ref = SAY_HELLO.run_no_wait({ "name" => "World" })
# > Result sync
result = ref.result
# > Result async
# In Ruby, result is synchronous - use poll for async-like behavior
result = ref.result

View File

@@ -0,0 +1,68 @@
# frozen_string_literal: true
require "hatchet-sdk"
HATCHET = Hatchet::Client.new unless defined?(HATCHET)
# Unit test workflow definitions
#
# Every task below returns the same shape so tests can assert uniformly on
# input passthrough and execution-context fields; the repeated hash literal
# is factored into one helper.

# Echo the interesting parts of the input and execution context.
#
# @param input [Hash] task input carrying "key" and "number"
# @param ctx [Object] execution context exposing additional_metadata and retry_count
# @return [Hash] snapshot of input values plus context metadata
def unit_test_payload(input, ctx)
  {
    "key" => input["key"],
    "number" => input["number"],
    "additional_metadata" => ctx.additional_metadata,
    "retry_count" => ctx.retry_count
  }
end

SYNC_STANDALONE = HATCHET.task(name: "sync_standalone") do |input, ctx|
  unit_test_payload(input, ctx)
end
ASYNC_STANDALONE = HATCHET.task(name: "async_standalone") do |input, ctx|
  unit_test_payload(input, ctx)
end
DURABLE_SYNC_STANDALONE = HATCHET.durable_task(name: "durable_sync_standalone") do |input, ctx|
  unit_test_payload(input, ctx)
end
DURABLE_ASYNC_STANDALONE = HATCHET.durable_task(name: "durable_async_standalone") do |input, ctx|
  unit_test_payload(input, ctx)
end

# Single-task workflow variant.
SIMPLE_UNIT_TEST_WORKFLOW = HATCHET.workflow(name: "simple-unit-test-workflow")
SIMPLE_UNIT_TEST_WORKFLOW.task(:sync_simple_workflow) do |input, ctx|
  unit_test_payload(input, ctx)
end

# Two-task DAG variant: the child task relays the parent's output verbatim.
COMPLEX_UNIT_TEST_WORKFLOW = HATCHET.workflow(name: "complex-unit-test-workflow")
UNIT_START = COMPLEX_UNIT_TEST_WORKFLOW.task(:start) do |input, ctx|
  unit_test_payload(input, ctx)
end
COMPLEX_UNIT_TEST_WORKFLOW.task(:sync_complex_workflow, parents: [UNIT_START]) do |input, ctx|
  ctx.task_output(UNIT_START)
end

View File

@@ -0,0 +1,36 @@
# frozen_string_literal: true
require "hatchet-sdk"
HATCHET = Hatchet::Client.new(debug: true) unless defined?(HATCHET)
# Event-triggered task with a default filter: scope "test-scope-value" and
# the always-true expression gate which events trigger a run — confirm exact
# filter semantics against the SDK docs.
WEBHOOK_WITH_SCOPE = HATCHET.task(
  name: "webhook_with_scope",
  on_events: ["webhook-scope:test"],
  default_filters: [
    Hatchet::DefaultFilter.new(
      expression: "true",
      scope: "test-scope-value",
      payload: {}
    )
  ]
) do |input, ctx|
  input
end
# Unfiltered event-triggered task; echoes the event payload unchanged.
WEBHOOK_WITH_STATIC_PAYLOAD = HATCHET.task(
  name: "webhook_with_static_payload",
  on_events: ["webhook-static:test"]
) do |input, ctx|
  input
end
# Entry point: start a worker hosting both webhook tasks.
def main
  worker = HATCHET.worker(
    "webhook-scope-worker",
    workflows: [WEBHOOK_WITH_SCOPE, WEBHOOK_WITH_STATIC_PAYLOAD]
  )
  worker.start
end
main if __FILE__ == $PROGRAM_NAME

View File

@@ -0,0 +1,25 @@
# frozen_string_literal: true

# > Webhooks
require "hatchet-sdk"

HATCHET = Hatchet::Client.new(debug: true) unless defined?(HATCHET)

# Task triggered by the "webhook:test" event; echoes selected payload fields.
WEBHOOK_TASK = HATCHET.task(
  name: "webhook",
  on_events: ["webhook:test"]
) do |payload, _ctx|
  { "type" => payload["type"], "message" => payload["message"] }
end

# Entry point: start a worker hosting the webhook task.
def main
  HATCHET.worker("webhook-worker", workflows: [WEBHOOK_TASK]).start
end

main if __FILE__ == $PROGRAM_NAME

110
examples/ruby/worker.rb Normal file
View File

@@ -0,0 +1,110 @@
# frozen_string_literal: true
# Main worker that registers all example workflows.
require "hatchet-sdk"
# Load all example workflows
# NOTE: each require defines the module-level constants referenced in
# ALL_WORKFLOWS below; the Tier groupings mirror the require order loosely.
require_relative "simple/worker"
require_relative "dag/worker"
require_relative "events/worker"
require_relative "cancellation/worker"
require_relative "on_failure/worker"
require_relative "on_success/worker"
require_relative "timeout/worker"
require_relative "retries/worker"
require_relative "non_retryable/worker"
require_relative "logger/worker"
require_relative "delayed/worker"
require_relative "priority/worker"
require_relative "run_details/worker"
require_relative "concurrency_limit/worker"
require_relative "concurrency_limit_rr/worker"
require_relative "concurrency_cancel_in_progress/worker"
require_relative "concurrency_cancel_newest/worker"
require_relative "concurrency_multiple_keys/worker"
require_relative "concurrency_workflow_level/worker"
require_relative "rate_limit/worker"
require_relative "child/worker"
require_relative "fanout/worker"
require_relative "bulk_fanout/worker"
require_relative "durable/worker"
require_relative "durable_event/worker"
require_relative "durable_sleep/worker"
require_relative "conditions/worker"
require_relative "dependency_injection/worker"
require_relative "streaming/worker"
require_relative "serde/worker"
require_relative "dataclasses/worker"
require_relative "dedupe/worker"
require_relative "cron/worker"
require_relative "scheduled/worker"
require_relative "bulk_operations/worker"
require_relative "return_exceptions/worker"
require_relative "manual_slot_release/worker"
require_relative "affinity_workers/worker"
require_relative "sticky_workers/worker"
require_relative "webhooks/worker"
require_relative "webhook_with_scope/worker"
require_relative "unit_testing/worker"
# Shared client; the guard lets the requires above define it first.
HATCHET = Hatchet::Client.new(debug: true) unless defined?(HATCHET)
# Registry of every workflow/task constant defined by the requires above.
ALL_WORKFLOWS = [
  # Tier 1
  SIMPLE, SIMPLE_DURABLE,
  DAG_WORKFLOW,
  EVENT_WORKFLOW,
  CANCELLATION_WORKFLOW,
  ON_FAILURE_WF, ON_FAILURE_WF_WITH_DETAILS,
  ON_SUCCESS_WORKFLOW,
  TIMEOUT_WF, REFRESH_TIMEOUT_WF,
  SIMPLE_RETRY_WORKFLOW, BACKOFF_WORKFLOW,
  NON_RETRYABLE_WORKFLOW,
  LOGGING_WORKFLOW,
  PRINT_SCHEDULE_WF, PRINT_PRINTER_WF,
  PRIORITY_WORKFLOW,
  RUN_DETAIL_TEST_WORKFLOW,
  # Tier 2
  CONCURRENCY_LIMIT_WORKFLOW,
  CONCURRENCY_LIMIT_RR_WORKFLOW,
  CONCURRENCY_CANCEL_IN_PROGRESS_WORKFLOW,
  CONCURRENCY_CANCEL_NEWEST_WORKFLOW,
  CONCURRENCY_MULTIPLE_KEYS_WORKFLOW,
  CONCURRENCY_WORKFLOW_LEVEL_WORKFLOW,
  RATE_LIMIT_WORKFLOW,
  # Tier 3
  CHILD_TASK_WF,
  FANOUT_PARENT_WF, FANOUT_CHILD_WF,
  BULK_PARENT_WF, BULK_CHILD_WF,
  DURABLE_WORKFLOW, EPHEMERAL_WORKFLOW, WAIT_FOR_SLEEP_TWICE,
  DURABLE_EVENT_TASK, DURABLE_EVENT_TASK_WITH_FILTER,
  DURABLE_SLEEP_TASK,
  TASK_CONDITION_WORKFLOW,
  ASYNC_TASK_WITH_DEPS, SYNC_TASK_WITH_DEPS,
  DURABLE_ASYNC_TASK_WITH_DEPS, DURABLE_SYNC_TASK_WITH_DEPS,
  DI_WORKFLOW,
  # Tier 4-5
  STREAM_TASK,
  SERDE_WORKFLOW,
  SAY_HELLO,
  DEDUPE_PARENT_WF, DEDUPE_CHILD_WF,
  CRON_WORKFLOW,
  SCHEDULED_WORKFLOW,
  BULK_REPLAY_TEST_1, BULK_REPLAY_TEST_2, BULK_REPLAY_TEST_3,
  RETURN_EXCEPTIONS_TASK,
  SLOT_RELEASE_WORKFLOW,
  AFFINITY_WORKER_WORKFLOW,
  STICKY_WORKFLOW, STICKY_CHILD_WORKFLOW,
  WEBHOOK_TASK,
  WEBHOOK_WITH_SCOPE, WEBHOOK_WITH_STATIC_PAYLOAD,
  SYNC_STANDALONE, ASYNC_STANDALONE,
  DURABLE_SYNC_STANDALONE, DURABLE_ASYNC_STANDALONE,
  SIMPLE_UNIT_TEST_WORKFLOW, COMPLEX_UNIT_TEST_WORKFLOW
].freeze
# Start one worker hosting everything; worker.start blocks until shutdown.
worker = HATCHET.worker("all-examples-worker", slots: 40, workflows: ALL_WORKFLOWS)
worker.start

View File

@@ -0,0 +1,97 @@
# frozen_string_literal: true
require "open3"
require "net/http"
require "logger"
require "timeout"
module HatchetWorkerFixture
  LOGGER = Logger.new($stdout)

  # Wait for the worker health check endpoint to respond
  #
  # @param port [Integer] Health check port
  # @param max_attempts [Integer] Maximum number of attempts (one per second)
  # @return [Boolean] true if healthy
  # @raise [RuntimeError] if worker fails to start
  def self.wait_for_worker_health(port:, max_attempts: 25)
    attempts = 0
    loop do
      # >= (not >) so exactly max_attempts probes are made and the error
      # message matches the actual wait time.
      if attempts >= max_attempts
        raise "Worker failed to start within #{max_attempts} seconds"
      end
      begin
        uri = URI("http://localhost:#{port}/health")
        response = Net::HTTP.get_response(uri)
        return true if response.code == "200"
      rescue StandardError
        # Worker not ready yet
      end
      sleep 1
      attempts += 1
    end
  end

  # Start a worker subprocess and wait for it to be healthy
  #
  # The child is spawned in its own process group (pgroup: true) so that the
  # group TERM sent during cleanup targets only the worker and its children.
  # Without it the child shares this process's group and the negative-pid
  # kill below would SIGTERM the test process itself.
  #
  # @param command [Array<String>] Command to run
  # @param healthcheck_port [Integer] Port for health checks
  # @yield [pid] Yields the process PID
  # @return [void]
  def self.with_worker(command, healthcheck_port: 8001)
    LOGGER.info("Starting background worker: #{command.join(' ')}")
    # NOTE(review): this mutation leaks into the test process environment;
    # acceptable for a fixture, but restore it if tests become sensitive.
    ENV["HATCHET_CLIENT_WORKER_HEALTHCHECK_PORT"] = healthcheck_port.to_s
    stdin, stdout, stderr, wait_thr = Open3.popen3(*command, pgroup: true)
    pid = wait_thr.pid
    # Log output in background threads
    Thread.new do
      stdout.each_line { |line| puts line.chomp }
    rescue IOError
      # Stream closed
    end
    Thread.new do
      stderr.each_line { |line| $stderr.puts line.chomp }
    rescue IOError
      # Stream closed
    end
    wait_for_worker_health(port: healthcheck_port)
    yield pid
  ensure
    LOGGER.info("Cleaning up background worker (PID: #{pid})")
    if pid
      begin
        # Kill process group to get children too
        Process.kill("TERM", -Process.getpgid(pid))
      rescue Errno::ESRCH, Errno::EPERM
        # Process already gone
      end
      begin
        Timeout.timeout(5) { Process.wait(pid) }
      rescue Timeout::Error
        begin
          Process.kill("KILL", pid)
          Process.wait(pid)
        rescue Errno::ESRCH, Errno::ECHILD
          # Already gone
        end
      rescue Errno::ECHILD
        # Already reaped
      end
    end
    [stdin, stdout, stderr].each do |io|
      io&.close rescue nil
    end
  end
end

Some files were not shown because too many files have changed in this diff Show More